1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2018 Intel Corporation. All rights reserved. 3 * Copyright (c) 2019, 2020 Mellanox Technologies LTD. All rights reserved. 4 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk/accel.h" 8 #include "spdk/stdinc.h" 9 #include "spdk/crc32.h" 10 #include "spdk/endian.h" 11 #include "spdk/assert.h" 12 #include "spdk/thread.h" 13 #include "spdk/nvmf_transport.h" 14 #include "spdk/string.h" 15 #include "spdk/trace.h" 16 #include "spdk/util.h" 17 #include "spdk/log.h" 18 19 #include "spdk_internal/assert.h" 20 #include "spdk_internal/nvme_tcp.h" 21 #include "spdk_internal/sock.h" 22 23 #include "nvmf_internal.h" 24 25 #include "spdk_internal/trace_defs.h" 26 27 #define NVMF_TCP_MAX_ACCEPT_SOCK_ONE_TIME 16 28 #define SPDK_NVMF_TCP_DEFAULT_MAX_SOCK_PRIORITY 16 29 #define SPDK_NVMF_TCP_DEFAULT_SOCK_PRIORITY 0 30 #define SPDK_NVMF_TCP_DEFAULT_CONTROL_MSG_NUM 32 31 #define SPDK_NVMF_TCP_DEFAULT_SUCCESS_OPTIMIZATION true 32 33 #define SPDK_NVMF_TCP_MIN_IO_QUEUE_DEPTH 2 34 #define SPDK_NVMF_TCP_MAX_IO_QUEUE_DEPTH 65535 35 #define SPDK_NVMF_TCP_MIN_ADMIN_QUEUE_DEPTH 2 36 #define SPDK_NVMF_TCP_MAX_ADMIN_QUEUE_DEPTH 4096 37 38 #define SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH 128 39 #define SPDK_NVMF_TCP_DEFAULT_MAX_ADMIN_QUEUE_DEPTH 128 40 #define SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR 128 41 #define SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE 4096 42 #define SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE 131072 43 #define SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE 131072 44 #define SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS 511 45 #define SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE 32 46 #define SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP false 47 #define SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC 1 48 49 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp; 50 51 /* spdk nvmf related structure */ 52 enum spdk_nvmf_tcp_req_state { 53 54 /* The request is not currently in use */ 55 TCP_REQUEST_STATE_FREE = 0, 56 57 /* Initial state when request first received */ 58 TCP_REQUEST_STATE_NEW = 1, 59 60 /* The request is queued until a data buffer is available. */ 61 TCP_REQUEST_STATE_NEED_BUFFER = 2, 62 63 /* The request is waiting for zcopy_start to finish */ 64 TCP_REQUEST_STATE_AWAITING_ZCOPY_START = 3, 65 66 /* The request has received a zero-copy buffer */ 67 TCP_REQUEST_STATE_ZCOPY_START_COMPLETED = 4, 68 69 /* The request is currently transferring data from the host to the controller. */ 70 TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER = 5, 71 72 /* The request is waiting for the R2T send acknowledgement. */ 73 TCP_REQUEST_STATE_AWAITING_R2T_ACK = 6, 74 75 /* The request is ready to execute at the block device */ 76 TCP_REQUEST_STATE_READY_TO_EXECUTE = 7, 77 78 /* The request is currently executing at the block device */ 79 TCP_REQUEST_STATE_EXECUTING = 8, 80 81 /* The request is waiting for zcopy buffers to be committed */ 82 TCP_REQUEST_STATE_AWAITING_ZCOPY_COMMIT = 9, 83 84 /* The request finished executing at the block device */ 85 TCP_REQUEST_STATE_EXECUTED = 10, 86 87 /* The request is ready to send a completion */ 88 TCP_REQUEST_STATE_READY_TO_COMPLETE = 11, 89 90 /* The request is currently transferring final pdus from the controller to the host. */ 91 TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST = 12, 92 93 /* The request is waiting for zcopy buffers to be released (without committing) */ 94 TCP_REQUEST_STATE_AWAITING_ZCOPY_RELEASE = 13, 95 96 /* The request completed and can be marked free. 
*/ 97 TCP_REQUEST_STATE_COMPLETED = 14, 98 99 /* Terminator */ 100 TCP_REQUEST_NUM_STATES, 101 }; 102 103 static const char *spdk_nvmf_tcp_term_req_fes_str[] = { 104 "Invalid PDU Header Field", 105 "PDU Sequence Error", 106 "Header Digest Error", 107 "Data Transfer Out of Range", 108 "R2T Limit Exceeded", 109 "Unsupported parameter", 110 }; 111 112 SPDK_TRACE_REGISTER_FN(nvmf_tcp_trace, "nvmf_tcp", TRACE_GROUP_NVMF_TCP) 113 { 114 spdk_trace_register_owner(OWNER_NVMF_TCP, 't'); 115 spdk_trace_register_object(OBJECT_NVMF_TCP_IO, 'r'); 116 spdk_trace_register_description("TCP_REQ_NEW", 117 TRACE_TCP_REQUEST_STATE_NEW, 118 OWNER_NVMF_TCP, OBJECT_NVMF_TCP_IO, 1, 119 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 120 spdk_trace_register_description("TCP_REQ_NEED_BUFFER", 121 TRACE_TCP_REQUEST_STATE_NEED_BUFFER, 122 OWNER_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0, 123 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 124 spdk_trace_register_description("TCP_REQ_WAIT_ZCPY_START", 125 TRACE_TCP_REQUEST_STATE_AWAIT_ZCOPY_START, 126 OWNER_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0, 127 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 128 spdk_trace_register_description("TCP_REQ_ZCPY_START_CPL", 129 TRACE_TCP_REQUEST_STATE_ZCOPY_START_COMPLETED, 130 OWNER_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0, 131 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 132 spdk_trace_register_description("TCP_REQ_TX_H_TO_C", 133 TRACE_TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER, 134 OWNER_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0, 135 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 136 spdk_trace_register_description("TCP_REQ_RDY_TO_EXECUTE", 137 TRACE_TCP_REQUEST_STATE_READY_TO_EXECUTE, 138 OWNER_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0, 139 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 140 spdk_trace_register_description("TCP_REQ_EXECUTING", 141 TRACE_TCP_REQUEST_STATE_EXECUTING, 142 OWNER_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0, 143 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 144 spdk_trace_register_description("TCP_REQ_WAIT_ZCPY_CMT", 145 TRACE_TCP_REQUEST_STATE_AWAIT_ZCOPY_COMMIT, 146 OWNER_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0, 147 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 148 spdk_trace_register_description("TCP_REQ_EXECUTED", 149 TRACE_TCP_REQUEST_STATE_EXECUTED, 150 OWNER_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0, 151 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 152 spdk_trace_register_description("TCP_REQ_RDY_TO_COMPLETE", 153 TRACE_TCP_REQUEST_STATE_READY_TO_COMPLETE, 154 OWNER_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0, 155 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 156 spdk_trace_register_description("TCP_REQ_TRANSFER_C2H", 157 TRACE_TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST, 158 OWNER_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0, 159 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 160 spdk_trace_register_description("TCP_REQ_AWAIT_ZCPY_RLS", 161 TRACE_TCP_REQUEST_STATE_AWAIT_ZCOPY_RELEASE, 162 OWNER_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0, 163 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 164 spdk_trace_register_description("TCP_REQ_COMPLETED", 165 TRACE_TCP_REQUEST_STATE_COMPLETED, 166 OWNER_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0, 167 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 168 spdk_trace_register_description("TCP_WRITE_START", 169 TRACE_TCP_FLUSH_WRITEBUF_START, 170 OWNER_NVMF_TCP, OBJECT_NONE, 0, 171 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 172 spdk_trace_register_description("TCP_WRITE_DONE", 173 TRACE_TCP_FLUSH_WRITEBUF_DONE, 174 OWNER_NVMF_TCP, OBJECT_NONE, 0, 175 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 176 spdk_trace_register_description("TCP_READ_DONE", 177 TRACE_TCP_READ_FROM_SOCKET_DONE, 178 OWNER_NVMF_TCP, OBJECT_NONE, 0, 179 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 180 spdk_trace_register_description("TCP_REQ_AWAIT_R2T_ACK", 181
TRACE_TCP_REQUEST_STATE_AWAIT_R2T_ACK, 182 OWNER_NVMF_TCP, OBJECT_NVMF_TCP_IO, 0, 183 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 184 185 spdk_trace_register_description("TCP_QP_CREATE", TRACE_TCP_QP_CREATE, 186 OWNER_NVMF_TCP, OBJECT_NONE, 0, 187 SPDK_TRACE_ARG_TYPE_INT, ""); 188 spdk_trace_register_description("TCP_QP_SOCK_INIT", TRACE_TCP_QP_SOCK_INIT, 189 OWNER_NVMF_TCP, OBJECT_NONE, 0, 190 SPDK_TRACE_ARG_TYPE_INT, ""); 191 spdk_trace_register_description("TCP_QP_STATE_CHANGE", TRACE_TCP_QP_STATE_CHANGE, 192 OWNER_NVMF_TCP, OBJECT_NONE, 0, 193 SPDK_TRACE_ARG_TYPE_INT, "state"); 194 spdk_trace_register_description("TCP_QP_DISCONNECT", TRACE_TCP_QP_DISCONNECT, 195 OWNER_NVMF_TCP, OBJECT_NONE, 0, 196 SPDK_TRACE_ARG_TYPE_INT, ""); 197 spdk_trace_register_description("TCP_QP_DESTROY", TRACE_TCP_QP_DESTROY, 198 OWNER_NVMF_TCP, OBJECT_NONE, 0, 199 SPDK_TRACE_ARG_TYPE_INT, ""); 200 spdk_trace_register_description("TCP_QP_ABORT_REQ", TRACE_TCP_QP_ABORT_REQ, 201 OWNER_NVMF_TCP, OBJECT_NONE, 0, 202 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 203 spdk_trace_register_description("TCP_QP_RCV_STATE_CHANGE", TRACE_TCP_QP_RCV_STATE_CHANGE, 204 OWNER_NVMF_TCP, OBJECT_NONE, 0, 205 SPDK_TRACE_ARG_TYPE_INT, "state"); 206 207 spdk_trace_tpoint_register_relation(TRACE_BDEV_IO_START, OBJECT_NVMF_TCP_IO, 1); 208 spdk_trace_tpoint_register_relation(TRACE_BDEV_IO_DONE, OBJECT_NVMF_TCP_IO, 0); 209 } 210 211 struct spdk_nvmf_tcp_req { 212 struct spdk_nvmf_request req; 213 struct spdk_nvme_cpl rsp; 214 struct spdk_nvme_cmd cmd; 215 216 /* A PDU that can be used for sending responses. This is 217 * not the incoming PDU! */ 218 struct nvme_tcp_pdu *pdu; 219 220 /* In-capsule data buffer */ 221 uint8_t *buf; 222 223 struct spdk_nvmf_tcp_req *fused_pair; 224 225 /* 226 * The PDU for a request may be used multiple times in serial over 227 * the request's lifetime. For example, first to send an R2T, then 228 * to send a completion. To catch mistakes where the PDU is used 229 * twice at the same time, add a debug flag here for init/fini. 230 */ 231 bool pdu_in_use; 232 bool has_in_capsule_data; 233 bool fused_failed; 234 235 /* transfer_tag */ 236 uint16_t ttag; 237 238 enum spdk_nvmf_tcp_req_state state; 239 240 /* 241 * h2c_offset is used when we receive the h2c_data PDU. 242 */ 243 uint32_t h2c_offset; 244 245 STAILQ_ENTRY(spdk_nvmf_tcp_req) link; 246 TAILQ_ENTRY(spdk_nvmf_tcp_req) state_link; 247 }; 248 249 struct spdk_nvmf_tcp_qpair { 250 struct spdk_nvmf_qpair qpair; 251 struct spdk_nvmf_tcp_poll_group *group; 252 struct spdk_sock *sock; 253 254 enum nvme_tcp_pdu_recv_state recv_state; 255 enum nvme_tcp_qpair_state state; 256 257 /* PDU being actively received */ 258 struct nvme_tcp_pdu *pdu_in_progress; 259 260 struct spdk_nvmf_tcp_req *fused_first; 261 262 /* Queues to track the requests in all states */ 263 TAILQ_HEAD(, spdk_nvmf_tcp_req) tcp_req_working_queue; 264 TAILQ_HEAD(, spdk_nvmf_tcp_req) tcp_req_free_queue; 265 SLIST_HEAD(, nvme_tcp_pdu) tcp_pdu_free_queue; 266 267 /* Number of requests in each state */ 268 uint32_t state_cntr[TCP_REQUEST_NUM_STATES]; 269 270 uint8_t cpda; 271 272 bool host_hdgst_enable; 273 bool host_ddgst_enable; 274 275 /* This is a spare PDU used for sending special management 276 * operations. Primarily, this is used for the initial 277 * connection response and c2h termination request. */ 278 struct nvme_tcp_pdu *mgmt_pdu; 279 280 /* Arrays of in-capsule buffers, requests, and pdus. 
281 * Each array is 'resource_count' number of elements */ 282 void *bufs; 283 struct spdk_nvmf_tcp_req *reqs; 284 struct nvme_tcp_pdu *pdus; 285 uint32_t resource_count; 286 uint32_t recv_buf_size; 287 288 struct spdk_nvmf_tcp_port *port; 289 290 /* IP address */ 291 char initiator_addr[SPDK_NVMF_TRADDR_MAX_LEN]; 292 char target_addr[SPDK_NVMF_TRADDR_MAX_LEN]; 293 294 /* IP port */ 295 uint16_t initiator_port; 296 uint16_t target_port; 297 298 /* Timer used to destroy qpair after detecting transport error issue if initiator does 299 * not close the connection. 300 */ 301 struct spdk_poller *timeout_poller; 302 303 spdk_nvmf_transport_qpair_fini_cb fini_cb_fn; 304 void *fini_cb_arg; 305 306 TAILQ_ENTRY(spdk_nvmf_tcp_qpair) link; 307 }; 308 309 struct spdk_nvmf_tcp_control_msg { 310 STAILQ_ENTRY(spdk_nvmf_tcp_control_msg) link; 311 }; 312 313 struct spdk_nvmf_tcp_control_msg_list { 314 void *msg_buf; 315 STAILQ_HEAD(, spdk_nvmf_tcp_control_msg) free_msgs; 316 }; 317 318 struct spdk_nvmf_tcp_poll_group { 319 struct spdk_nvmf_transport_poll_group group; 320 struct spdk_sock_group *sock_group; 321 322 TAILQ_HEAD(, spdk_nvmf_tcp_qpair) qpairs; 323 TAILQ_HEAD(, spdk_nvmf_tcp_qpair) await_req; 324 325 struct spdk_io_channel *accel_channel; 326 struct spdk_nvmf_tcp_control_msg_list *control_msg_list; 327 328 TAILQ_ENTRY(spdk_nvmf_tcp_poll_group) link; 329 }; 330 331 struct spdk_nvmf_tcp_port { 332 const struct spdk_nvme_transport_id *trid; 333 struct spdk_sock *listen_sock; 334 TAILQ_ENTRY(spdk_nvmf_tcp_port) link; 335 }; 336 337 struct tcp_transport_opts { 338 bool c2h_success; 339 uint16_t control_msg_num; 340 uint32_t sock_priority; 341 }; 342 343 struct spdk_nvmf_tcp_transport { 344 struct spdk_nvmf_transport transport; 345 struct tcp_transport_opts tcp_opts; 346 347 struct spdk_nvmf_tcp_poll_group *next_pg; 348 349 struct spdk_poller *accept_poller; 350 351 TAILQ_HEAD(, spdk_nvmf_tcp_port) ports; 352 TAILQ_HEAD(, spdk_nvmf_tcp_poll_group) poll_groups; 353 }; 354 355 static const struct spdk_json_object_decoder tcp_transport_opts_decoder[] = { 356 { 357 "c2h_success", offsetof(struct tcp_transport_opts, c2h_success), 358 spdk_json_decode_bool, true 359 }, 360 { 361 "control_msg_num", offsetof(struct tcp_transport_opts, control_msg_num), 362 spdk_json_decode_uint16, true 363 }, 364 { 365 "sock_priority", offsetof(struct tcp_transport_opts, sock_priority), 366 spdk_json_decode_uint32, true 367 }, 368 }; 369 370 static bool nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport, 371 struct spdk_nvmf_tcp_req *tcp_req); 372 static void nvmf_tcp_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group); 373 374 static void _nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair, 375 struct spdk_nvmf_tcp_req *tcp_req); 376 377 static inline void 378 nvmf_tcp_req_set_state(struct spdk_nvmf_tcp_req *tcp_req, 379 enum spdk_nvmf_tcp_req_state state) 380 { 381 struct spdk_nvmf_qpair *qpair; 382 struct spdk_nvmf_tcp_qpair *tqpair; 383 384 qpair = tcp_req->req.qpair; 385 tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair); 386 387 assert(tqpair->state_cntr[tcp_req->state] > 0); 388 tqpair->state_cntr[tcp_req->state]--; 389 tqpair->state_cntr[state]++; 390 391 tcp_req->state = state; 392 } 393 394 static inline struct nvme_tcp_pdu * 395 nvmf_tcp_req_pdu_init(struct spdk_nvmf_tcp_req *tcp_req) 396 { 397 assert(tcp_req->pdu_in_use == false); 398 399 memset(tcp_req->pdu, 0, sizeof(*tcp_req->pdu)); 400 tcp_req->pdu->qpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct 
spdk_nvmf_tcp_qpair, qpair); 401 402 return tcp_req->pdu; 403 } 404 405 static struct spdk_nvmf_tcp_req * 406 nvmf_tcp_req_get(struct spdk_nvmf_tcp_qpair *tqpair) 407 { 408 struct spdk_nvmf_tcp_req *tcp_req; 409 410 tcp_req = TAILQ_FIRST(&tqpair->tcp_req_free_queue); 411 if (spdk_unlikely(!tcp_req)) { 412 return NULL; 413 } 414 415 memset(&tcp_req->rsp, 0, sizeof(tcp_req->rsp)); 416 tcp_req->h2c_offset = 0; 417 tcp_req->has_in_capsule_data = false; 418 tcp_req->req.dif_enabled = false; 419 tcp_req->req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 420 421 TAILQ_REMOVE(&tqpair->tcp_req_free_queue, tcp_req, state_link); 422 TAILQ_INSERT_TAIL(&tqpair->tcp_req_working_queue, tcp_req, state_link); 423 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_NEW); 424 return tcp_req; 425 } 426 427 static inline void 428 nvmf_tcp_req_put(struct spdk_nvmf_tcp_qpair *tqpair, struct spdk_nvmf_tcp_req *tcp_req) 429 { 430 assert(!tcp_req->pdu_in_use); 431 432 TAILQ_REMOVE(&tqpair->tcp_req_working_queue, tcp_req, state_link); 433 TAILQ_INSERT_TAIL(&tqpair->tcp_req_free_queue, tcp_req, state_link); 434 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_FREE); 435 } 436 437 static void 438 nvmf_tcp_request_free(void *cb_arg) 439 { 440 struct spdk_nvmf_tcp_transport *ttransport; 441 struct spdk_nvmf_tcp_req *tcp_req = cb_arg; 442 443 assert(tcp_req != NULL); 444 445 SPDK_DEBUGLOG(nvmf_tcp, "tcp_req=%p will be freed\n", tcp_req); 446 ttransport = SPDK_CONTAINEROF(tcp_req->req.qpair->transport, 447 struct spdk_nvmf_tcp_transport, transport); 448 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_COMPLETED); 449 nvmf_tcp_req_process(ttransport, tcp_req); 450 } 451 452 static int 453 nvmf_tcp_req_free(struct spdk_nvmf_request *req) 454 { 455 struct spdk_nvmf_tcp_req *tcp_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_tcp_req, req); 456 457 nvmf_tcp_request_free(tcp_req); 458 459 return 0; 460 } 461 462 static void 463 nvmf_tcp_drain_state_queue(struct spdk_nvmf_tcp_qpair *tqpair, 464 enum spdk_nvmf_tcp_req_state state) 465 { 466 struct spdk_nvmf_tcp_req *tcp_req, *req_tmp; 467 468 assert(state != TCP_REQUEST_STATE_FREE); 469 TAILQ_FOREACH_SAFE(tcp_req, &tqpair->tcp_req_working_queue, state_link, req_tmp) { 470 if (state == tcp_req->state) { 471 nvmf_tcp_request_free(tcp_req); 472 } 473 } 474 } 475 476 static void 477 nvmf_tcp_cleanup_all_states(struct spdk_nvmf_tcp_qpair *tqpair) 478 { 479 struct spdk_nvmf_tcp_req *tcp_req, *req_tmp; 480 481 nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST); 482 nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_NEW); 483 484 /* Wipe the requests waiting for buffer from the global list */ 485 TAILQ_FOREACH_SAFE(tcp_req, &tqpair->tcp_req_working_queue, state_link, req_tmp) { 486 if (tcp_req->state == TCP_REQUEST_STATE_NEED_BUFFER) { 487 STAILQ_REMOVE(&tqpair->group->group.pending_buf_queue, &tcp_req->req, 488 spdk_nvmf_request, buf_link); 489 } 490 } 491 492 nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_NEED_BUFFER); 493 nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_EXECUTING); 494 nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER); 495 nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_AWAITING_R2T_ACK); 496 } 497 498 static void 499 nvmf_tcp_dump_qpair_req_contents(struct spdk_nvmf_tcp_qpair *tqpair) 500 { 501 int i; 502 struct spdk_nvmf_tcp_req *tcp_req; 503 504 SPDK_ERRLOG("Dumping contents of queue pair (QID %d)\n", tqpair->qpair.qid); 505 for (i = 1; i < TCP_REQUEST_NUM_STATES; i++) { 506 
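/* Start at state 1: TCP_REQUEST_STATE_FREE (state 0) holds idle requests and is not interesting when dumping in-flight requests. */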
SPDK_ERRLOG("\tNum of requests in state[%d] = %u\n", i, tqpair->state_cntr[i]); 507 TAILQ_FOREACH(tcp_req, &tqpair->tcp_req_working_queue, state_link) { 508 if ((int)tcp_req->state == i) { 509 SPDK_ERRLOG("\t\tRequest Data From Pool: %d\n", tcp_req->req.data_from_pool); 510 SPDK_ERRLOG("\t\tRequest opcode: %d\n", tcp_req->req.cmd->nvmf_cmd.opcode); 511 } 512 } 513 } 514 } 515 516 static void 517 _nvmf_tcp_qpair_destroy(void *_tqpair) 518 { 519 struct spdk_nvmf_tcp_qpair *tqpair = _tqpair; 520 spdk_nvmf_transport_qpair_fini_cb cb_fn = tqpair->fini_cb_fn; 521 void *cb_arg = tqpair->fini_cb_arg; 522 int err = 0; 523 524 spdk_trace_record(TRACE_TCP_QP_DESTROY, 0, 0, (uintptr_t)tqpair); 525 526 SPDK_DEBUGLOG(nvmf_tcp, "enter\n"); 527 528 err = spdk_sock_close(&tqpair->sock); 529 assert(err == 0); 530 nvmf_tcp_cleanup_all_states(tqpair); 531 532 if (tqpair->state_cntr[TCP_REQUEST_STATE_FREE] != tqpair->resource_count) { 533 SPDK_ERRLOG("tqpair(%p) free tcp request num is %u but should be %u\n", tqpair, 534 tqpair->state_cntr[TCP_REQUEST_STATE_FREE], 535 tqpair->resource_count); 536 err++; 537 } 538 539 if (err > 0) { 540 nvmf_tcp_dump_qpair_req_contents(tqpair); 541 } 542 543 /* The timeout poller might still be registered here if we close the qpair before host 544 * terminates the connection. 545 */ 546 spdk_poller_unregister(&tqpair->timeout_poller); 547 spdk_dma_free(tqpair->pdus); 548 free(tqpair->reqs); 549 spdk_free(tqpair->bufs); 550 free(tqpair); 551 552 if (cb_fn != NULL) { 553 cb_fn(cb_arg); 554 } 555 556 SPDK_DEBUGLOG(nvmf_tcp, "Leave\n"); 557 } 558 559 static void 560 nvmf_tcp_qpair_destroy(struct spdk_nvmf_tcp_qpair *tqpair) 561 { 562 /* Delay the destruction to make sure it isn't performed from the context of a sock 563 * callback. Otherwise, spdk_sock_close() might not abort pending requests, causing their 564 * completions to be executed after the qpair is freed. (Note: this fixed issue #2471.) 
565 */ 566 spdk_thread_send_msg(spdk_get_thread(), _nvmf_tcp_qpair_destroy, tqpair); 567 } 568 569 static void 570 nvmf_tcp_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_write_ctx *w) 571 { 572 struct spdk_nvmf_tcp_transport *ttransport; 573 assert(w != NULL); 574 575 ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport); 576 spdk_json_write_named_bool(w, "c2h_success", ttransport->tcp_opts.c2h_success); 577 spdk_json_write_named_uint32(w, "sock_priority", ttransport->tcp_opts.sock_priority); 578 } 579 580 static int 581 nvmf_tcp_destroy(struct spdk_nvmf_transport *transport, 582 spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg) 583 { 584 struct spdk_nvmf_tcp_transport *ttransport; 585 586 assert(transport != NULL); 587 ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport); 588 589 spdk_poller_unregister(&ttransport->accept_poller); 590 free(ttransport); 591 592 if (cb_fn) { 593 cb_fn(cb_arg); 594 } 595 return 0; 596 } 597 598 static int nvmf_tcp_accept(void *ctx); 599 600 static struct spdk_nvmf_transport * 601 nvmf_tcp_create(struct spdk_nvmf_transport_opts *opts) 602 { 603 struct spdk_nvmf_tcp_transport *ttransport; 604 uint32_t sge_count; 605 uint32_t min_shared_buffers; 606 607 ttransport = calloc(1, sizeof(*ttransport)); 608 if (!ttransport) { 609 return NULL; 610 } 611 612 TAILQ_INIT(&ttransport->ports); 613 TAILQ_INIT(&ttransport->poll_groups); 614 615 ttransport->transport.ops = &spdk_nvmf_transport_tcp; 616 617 ttransport->tcp_opts.c2h_success = SPDK_NVMF_TCP_DEFAULT_SUCCESS_OPTIMIZATION; 618 ttransport->tcp_opts.sock_priority = SPDK_NVMF_TCP_DEFAULT_SOCK_PRIORITY; 619 ttransport->tcp_opts.control_msg_num = SPDK_NVMF_TCP_DEFAULT_CONTROL_MSG_NUM; 620 if (opts->transport_specific != NULL && 621 spdk_json_decode_object_relaxed(opts->transport_specific, tcp_transport_opts_decoder, 622 SPDK_COUNTOF(tcp_transport_opts_decoder), 623 &ttransport->tcp_opts)) { 624 SPDK_ERRLOG("spdk_json_decode_object_relaxed failed\n"); 625 free(ttransport); 626 return NULL; 627 } 628 629 SPDK_NOTICELOG("*** TCP Transport Init ***\n"); 630 631 SPDK_INFOLOG(nvmf_tcp, "*** TCP Transport Init ***\n" 632 " Transport opts: max_ioq_depth=%d, max_io_size=%d,\n" 633 " max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n" 634 " in_capsule_data_size=%d, max_aq_depth=%d\n" 635 " num_shared_buffers=%d, c2h_success=%d,\n" 636 " dif_insert_or_strip=%d, sock_priority=%d\n" 637 " abort_timeout_sec=%d, control_msg_num=%hu\n", 638 opts->max_queue_depth, 639 opts->max_io_size, 640 opts->max_qpairs_per_ctrlr - 1, 641 opts->io_unit_size, 642 opts->in_capsule_data_size, 643 opts->max_aq_depth, 644 opts->num_shared_buffers, 645 ttransport->tcp_opts.c2h_success, 646 opts->dif_insert_or_strip, 647 ttransport->tcp_opts.sock_priority, 648 opts->abort_timeout_sec, 649 ttransport->tcp_opts.control_msg_num); 650 651 if (ttransport->tcp_opts.sock_priority > SPDK_NVMF_TCP_DEFAULT_MAX_SOCK_PRIORITY) { 652 SPDK_ERRLOG("Unsupported socket_priority=%d, the current range is: 0 to %d\n" 653 "you can use man 7 socket to view the range of priority under SO_PRIORITY item\n", 654 ttransport->tcp_opts.sock_priority, SPDK_NVMF_TCP_DEFAULT_MAX_SOCK_PRIORITY); 655 free(ttransport); 656 return NULL; 657 } 658 659 if (ttransport->tcp_opts.control_msg_num == 0 && 660 opts->in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) { 661 SPDK_WARNLOG("TCP param control_msg_num can't be 0 if ICD is less than %u bytes. 
Using default value %u\n", 662 SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE, SPDK_NVMF_TCP_DEFAULT_CONTROL_MSG_NUM); 663 ttransport->tcp_opts.control_msg_num = SPDK_NVMF_TCP_DEFAULT_CONTROL_MSG_NUM; 664 } 665 666 /* I/O unit size cannot be larger than max I/O size */ 667 if (opts->io_unit_size > opts->max_io_size) { 668 SPDK_WARNLOG("TCP param io_unit_size %u can't be larger than max_io_size %u. Using max_io_size as io_unit_size\n", 669 opts->io_unit_size, opts->max_io_size); 670 opts->io_unit_size = opts->max_io_size; 671 } 672 673 /* In capsule data size cannot be larger than max I/O size */ 674 if (opts->in_capsule_data_size > opts->max_io_size) { 675 SPDK_WARNLOG("TCP param ICD size %u can't be larger than max_io_size %u. Using max_io_size as ICD size\n", 676 opts->in_capsule_data_size, opts->max_io_size); 677 opts->in_capsule_data_size = opts->max_io_size; 678 } 679 680 /* max IO queue depth cannot be smaller than 2 or larger than 65535. 681 * We will not check SPDK_NVMF_TCP_MAX_IO_QUEUE_DEPTH, because max_queue_depth is a 16-bit value and can never exceed 64k. */ 682 if (opts->max_queue_depth < SPDK_NVMF_TCP_MIN_IO_QUEUE_DEPTH) { 683 SPDK_WARNLOG("TCP param max_queue_depth %u can't be smaller than %u or larger than %u. Using default value %u\n", 684 opts->max_queue_depth, SPDK_NVMF_TCP_MIN_IO_QUEUE_DEPTH, 685 SPDK_NVMF_TCP_MAX_IO_QUEUE_DEPTH, SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH); 686 opts->max_queue_depth = SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH; 687 } 688 689 /* max admin queue depth cannot be smaller than 2 or larger than 4096 */ 690 if (opts->max_aq_depth < SPDK_NVMF_TCP_MIN_ADMIN_QUEUE_DEPTH || 691 opts->max_aq_depth > SPDK_NVMF_TCP_MAX_ADMIN_QUEUE_DEPTH) { 692 SPDK_WARNLOG("TCP param max_aq_depth %u can't be smaller than %u or larger than %u. Using default value %u\n", 693 opts->max_aq_depth, SPDK_NVMF_TCP_MIN_ADMIN_QUEUE_DEPTH, 694 SPDK_NVMF_TCP_MAX_ADMIN_QUEUE_DEPTH, SPDK_NVMF_TCP_DEFAULT_MAX_ADMIN_QUEUE_DEPTH); 695 opts->max_aq_depth = SPDK_NVMF_TCP_DEFAULT_MAX_ADMIN_QUEUE_DEPTH; 696 } 697 698 sge_count = opts->max_io_size / opts->io_unit_size; 699 if (sge_count > SPDK_NVMF_MAX_SGL_ENTRIES) { 700 SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size); 701 free(ttransport); 702 return NULL; 703 } 704 705 min_shared_buffers = spdk_env_get_core_count() * opts->buf_cache_size; 706 if (min_shared_buffers > opts->num_shared_buffers) { 707 SPDK_ERRLOG("There are not enough buffers to satisfy " 708 "per-poll group caches for each thread. (%" PRIu32 ") " 709 "supplied. (%" PRIu32 ") required\n", opts->num_shared_buffers, min_shared_buffers); 710 SPDK_ERRLOG("Please specify a larger number of shared buffers\n"); 711 free(ttransport); 712 return NULL; 713 } 714 715 ttransport->accept_poller = SPDK_POLLER_REGISTER(nvmf_tcp_accept, &ttransport->transport, 716 opts->acceptor_poll_rate); 717 if (!ttransport->accept_poller) { 718 free(ttransport); 719 return NULL; 720 } 721 722 return &ttransport->transport; 723 } 724 725 static int 726 nvmf_tcp_trsvcid_to_int(const char *trsvcid) 727 { 728 unsigned long long ull; 729 char *end = NULL; 730 731 ull = strtoull(trsvcid, &end, 10); 732 if (end == NULL || end == trsvcid || *end != '\0') { 733 return -1; 734 } 735 736 /* Valid TCP/IP port numbers are in [0, 65535] */ 737 if (ull > 65535) { 738 return -1; 739 } 740 741 return (int)ull; 742 } 743 744 /** 745 * Canonicalize a listen address trid.
746 */ 747 static int 748 nvmf_tcp_canon_listen_trid(struct spdk_nvme_transport_id *canon_trid, 749 const struct spdk_nvme_transport_id *trid) 750 { 751 int trsvcid_int; 752 753 trsvcid_int = nvmf_tcp_trsvcid_to_int(trid->trsvcid); 754 if (trsvcid_int < 0) { 755 return -EINVAL; 756 } 757 758 memset(canon_trid, 0, sizeof(*canon_trid)); 759 spdk_nvme_trid_populate_transport(canon_trid, SPDK_NVME_TRANSPORT_TCP); 760 canon_trid->adrfam = trid->adrfam; 761 snprintf(canon_trid->traddr, sizeof(canon_trid->traddr), "%s", trid->traddr); 762 snprintf(canon_trid->trsvcid, sizeof(canon_trid->trsvcid), "%d", trsvcid_int); 763 764 return 0; 765 } 766 767 /** 768 * Find an existing listening port. 769 */ 770 static struct spdk_nvmf_tcp_port * 771 nvmf_tcp_find_port(struct spdk_nvmf_tcp_transport *ttransport, 772 const struct spdk_nvme_transport_id *trid) 773 { 774 struct spdk_nvme_transport_id canon_trid; 775 struct spdk_nvmf_tcp_port *port; 776 777 if (nvmf_tcp_canon_listen_trid(&canon_trid, trid) != 0) { 778 return NULL; 779 } 780 781 TAILQ_FOREACH(port, &ttransport->ports, link) { 782 if (spdk_nvme_transport_id_compare(&canon_trid, port->trid) == 0) { 783 return port; 784 } 785 } 786 787 return NULL; 788 } 789 790 static int 791 nvmf_tcp_listen(struct spdk_nvmf_transport *transport, const struct spdk_nvme_transport_id *trid, 792 struct spdk_nvmf_listen_opts *listen_opts) 793 { 794 struct spdk_nvmf_tcp_transport *ttransport; 795 struct spdk_nvmf_tcp_port *port; 796 int trsvcid_int; 797 uint8_t adrfam; 798 struct spdk_sock_opts opts; 799 800 if (!strlen(trid->trsvcid)) { 801 SPDK_ERRLOG("Service id is required\n"); 802 return -EINVAL; 803 } 804 805 ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport); 806 807 trsvcid_int = nvmf_tcp_trsvcid_to_int(trid->trsvcid); 808 if (trsvcid_int < 0) { 809 SPDK_ERRLOG("Invalid trsvcid '%s'\n", trid->trsvcid); 810 return -EINVAL; 811 } 812 813 port = calloc(1, sizeof(*port)); 814 if (!port) { 815 SPDK_ERRLOG("Port allocation failed\n"); 816 return -ENOMEM; 817 } 818 819 port->trid = trid; 820 opts.opts_size = sizeof(opts); 821 spdk_sock_get_default_opts(&opts); 822 opts.priority = ttransport->tcp_opts.sock_priority; 823 /* TODO: also add impl_opts like on the initiator side */ 824 port->listen_sock = spdk_sock_listen_ext(trid->traddr, trsvcid_int, 825 NULL, &opts); 826 if (port->listen_sock == NULL) { 827 SPDK_ERRLOG("spdk_sock_listen(%s, %d) failed: %s (%d)\n", 828 trid->traddr, trsvcid_int, 829 spdk_strerror(errno), errno); 830 free(port); 831 return -errno; 832 } 833 834 if (spdk_sock_is_ipv4(port->listen_sock)) { 835 adrfam = SPDK_NVMF_ADRFAM_IPV4; 836 } else if (spdk_sock_is_ipv6(port->listen_sock)) { 837 adrfam = SPDK_NVMF_ADRFAM_IPV6; 838 } else { 839 SPDK_ERRLOG("Unhandled socket type\n"); 840 adrfam = 0; 841 } 842 843 if (adrfam != trid->adrfam) { 844 SPDK_ERRLOG("Socket address family mismatch\n"); 845 spdk_sock_close(&port->listen_sock); 846 free(port); 847 return -EINVAL; 848 } 849 850 SPDK_NOTICELOG("*** NVMe/TCP Target Listening on %s port %s ***\n", 851 trid->traddr, trid->trsvcid); 852 853 TAILQ_INSERT_TAIL(&ttransport->ports, port, link); 854 return 0; 855 } 856 857 static void 858 nvmf_tcp_stop_listen(struct spdk_nvmf_transport *transport, 859 const struct spdk_nvme_transport_id *trid) 860 { 861 struct spdk_nvmf_tcp_transport *ttransport; 862 struct spdk_nvmf_tcp_port *port; 863 864 ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport); 865 866 SPDK_DEBUGLOG(nvmf_tcp, "Removing listen 
address %s port %s\n", 867 trid->traddr, trid->trsvcid); 868 869 port = nvmf_tcp_find_port(ttransport, trid); 870 if (port) { 871 TAILQ_REMOVE(&ttransport->ports, port, link); 872 spdk_sock_close(&port->listen_sock); 873 free(port); 874 } 875 } 876 877 static void nvmf_tcp_qpair_set_recv_state(struct spdk_nvmf_tcp_qpair *tqpair, 878 enum nvme_tcp_pdu_recv_state state); 879 880 static void 881 nvmf_tcp_qpair_set_state(struct spdk_nvmf_tcp_qpair *tqpair, enum nvme_tcp_qpair_state state) 882 { 883 tqpair->state = state; 884 spdk_trace_record(TRACE_TCP_QP_STATE_CHANGE, tqpair->qpair.qid, 0, (uintptr_t)tqpair, 885 tqpair->state); 886 } 887 888 static void 889 nvmf_tcp_qpair_disconnect(struct spdk_nvmf_tcp_qpair *tqpair) 890 { 891 SPDK_DEBUGLOG(nvmf_tcp, "Disconnecting qpair %p\n", tqpair); 892 893 spdk_trace_record(TRACE_TCP_QP_DISCONNECT, 0, 0, (uintptr_t)tqpair); 894 895 if (tqpair->state <= NVME_TCP_QPAIR_STATE_RUNNING) { 896 nvmf_tcp_qpair_set_state(tqpair, NVME_TCP_QPAIR_STATE_EXITING); 897 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR); 898 spdk_poller_unregister(&tqpair->timeout_poller); 899 900 /* This will end up calling nvmf_tcp_close_qpair */ 901 spdk_nvmf_qpair_disconnect(&tqpair->qpair, NULL, NULL); 902 } 903 } 904 905 static void 906 _mgmt_pdu_write_done(void *_tqpair, int err) 907 { 908 struct spdk_nvmf_tcp_qpair *tqpair = _tqpair; 909 struct nvme_tcp_pdu *pdu = tqpair->mgmt_pdu; 910 911 if (spdk_unlikely(err != 0)) { 912 nvmf_tcp_qpair_disconnect(tqpair); 913 return; 914 } 915 916 assert(pdu->cb_fn != NULL); 917 pdu->cb_fn(pdu->cb_arg); 918 } 919 920 static void 921 _req_pdu_write_done(void *req, int err) 922 { 923 struct spdk_nvmf_tcp_req *tcp_req = req; 924 struct nvme_tcp_pdu *pdu = tcp_req->pdu; 925 struct spdk_nvmf_tcp_qpair *tqpair = pdu->qpair; 926 927 assert(tcp_req->pdu_in_use); 928 tcp_req->pdu_in_use = false; 929 930 /* If the request is in a completed state, we're waiting for write completion to free it */ 931 if (spdk_unlikely(tcp_req->state == TCP_REQUEST_STATE_COMPLETED)) { 932 nvmf_tcp_request_free(tcp_req); 933 return; 934 } 935 936 if (spdk_unlikely(err != 0)) { 937 nvmf_tcp_qpair_disconnect(tqpair); 938 return; 939 } 940 941 assert(pdu->cb_fn != NULL); 942 pdu->cb_fn(pdu->cb_arg); 943 } 944 945 static void 946 _pdu_write_done(struct nvme_tcp_pdu *pdu, int err) 947 { 948 pdu->sock_req.cb_fn(pdu->sock_req.cb_arg, err); 949 } 950 951 static void 952 _tcp_write_pdu(struct nvme_tcp_pdu *pdu) 953 { 954 int rc; 955 uint32_t mapped_length; 956 struct spdk_nvmf_tcp_qpair *tqpair = pdu->qpair; 957 958 pdu->sock_req.iovcnt = nvme_tcp_build_iovs(pdu->iov, SPDK_COUNTOF(pdu->iov), pdu, 959 tqpair->host_hdgst_enable, tqpair->host_ddgst_enable, &mapped_length); 960 spdk_sock_writev_async(tqpair->sock, &pdu->sock_req); 961 962 if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP || 963 pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ) { 964 /* Try to force the send immediately. */ 965 rc = spdk_sock_flush(tqpair->sock); 966 if (rc > 0 && (uint32_t)rc == mapped_length) { 967 _pdu_write_done(pdu, 0); 968 } else { 969 SPDK_ERRLOG("Could not write %s to socket: rc=%d, errno=%d\n", 970 pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP ? 971 "IC_RESP" : "TERM_REQ", rc, errno); 972 _pdu_write_done(pdu, rc >= 0 ? 
-EAGAIN : -errno); 973 } 974 } 975 } 976 977 static void 978 data_crc32_accel_done(void *cb_arg, int status) 979 { 980 struct nvme_tcp_pdu *pdu = cb_arg; 981 982 if (spdk_unlikely(status)) { 983 SPDK_ERRLOG("Failed to compute the data digest for pdu =%p\n", pdu); 984 _pdu_write_done(pdu, status); 985 return; 986 } 987 988 pdu->data_digest_crc32 ^= SPDK_CRC32C_XOR; 989 MAKE_DIGEST_WORD(pdu->data_digest, pdu->data_digest_crc32); 990 991 _tcp_write_pdu(pdu); 992 } 993 994 static void 995 pdu_data_crc32_compute(struct nvme_tcp_pdu *pdu) 996 { 997 struct spdk_nvmf_tcp_qpair *tqpair = pdu->qpair; 998 int rc = 0; 999 1000 /* Data Digest */ 1001 if (pdu->data_len > 0 && g_nvme_tcp_ddgst[pdu->hdr.common.pdu_type] && tqpair->host_ddgst_enable) { 1002 /* Only support this limited case for the first step */ 1003 if (spdk_likely(!pdu->dif_ctx && (pdu->data_len % SPDK_NVME_TCP_DIGEST_ALIGNMENT == 0) 1004 && tqpair->group)) { 1005 rc = spdk_accel_submit_crc32cv(tqpair->group->accel_channel, &pdu->data_digest_crc32, pdu->data_iov, 1006 pdu->data_iovcnt, 0, data_crc32_accel_done, pdu); 1007 if (spdk_likely(rc == 0)) { 1008 return; 1009 } 1010 } else { 1011 pdu->data_digest_crc32 = nvme_tcp_pdu_calc_data_digest(pdu); 1012 } 1013 data_crc32_accel_done(pdu, rc); 1014 } else { 1015 _tcp_write_pdu(pdu); 1016 } 1017 } 1018 1019 static void 1020 nvmf_tcp_qpair_write_pdu(struct spdk_nvmf_tcp_qpair *tqpair, 1021 struct nvme_tcp_pdu *pdu, 1022 nvme_tcp_qpair_xfer_complete_cb cb_fn, 1023 void *cb_arg) 1024 { 1025 int hlen; 1026 uint32_t crc32c; 1027 1028 assert(tqpair->pdu_in_progress != pdu); 1029 1030 hlen = pdu->hdr.common.hlen; 1031 pdu->cb_fn = cb_fn; 1032 pdu->cb_arg = cb_arg; 1033 1034 pdu->iov[0].iov_base = &pdu->hdr.raw; 1035 pdu->iov[0].iov_len = hlen; 1036 1037 /* Header Digest */ 1038 if (g_nvme_tcp_hdgst[pdu->hdr.common.pdu_type] && tqpair->host_hdgst_enable) { 1039 crc32c = nvme_tcp_pdu_calc_header_digest(pdu); 1040 MAKE_DIGEST_WORD((uint8_t *)pdu->hdr.raw + hlen, crc32c); 1041 } 1042 1043 /* Data Digest */ 1044 pdu_data_crc32_compute(pdu); 1045 } 1046 1047 static void 1048 nvmf_tcp_qpair_write_mgmt_pdu(struct spdk_nvmf_tcp_qpair *tqpair, 1049 nvme_tcp_qpair_xfer_complete_cb cb_fn, 1050 void *cb_arg) 1051 { 1052 struct nvme_tcp_pdu *pdu = tqpair->mgmt_pdu; 1053 1054 pdu->sock_req.cb_fn = _mgmt_pdu_write_done; 1055 pdu->sock_req.cb_arg = tqpair; 1056 1057 nvmf_tcp_qpair_write_pdu(tqpair, pdu, cb_fn, cb_arg); 1058 } 1059 1060 static void 1061 nvmf_tcp_qpair_write_req_pdu(struct spdk_nvmf_tcp_qpair *tqpair, 1062 struct spdk_nvmf_tcp_req *tcp_req, 1063 nvme_tcp_qpair_xfer_complete_cb cb_fn, 1064 void *cb_arg) 1065 { 1066 struct nvme_tcp_pdu *pdu = tcp_req->pdu; 1067 1068 pdu->sock_req.cb_fn = _req_pdu_write_done; 1069 pdu->sock_req.cb_arg = tcp_req; 1070 1071 assert(!tcp_req->pdu_in_use); 1072 tcp_req->pdu_in_use = true; 1073 1074 nvmf_tcp_qpair_write_pdu(tqpair, pdu, cb_fn, cb_arg); 1075 } 1076 1077 static int 1078 nvmf_tcp_qpair_init_mem_resource(struct spdk_nvmf_tcp_qpair *tqpair) 1079 { 1080 uint32_t i; 1081 struct spdk_nvmf_transport_opts *opts; 1082 uint32_t in_capsule_data_size; 1083 1084 opts = &tqpair->qpair.transport->opts; 1085 1086 in_capsule_data_size = opts->in_capsule_data_size; 1087 if (opts->dif_insert_or_strip) { 1088 in_capsule_data_size = SPDK_BDEV_BUF_SIZE_WITH_MD(in_capsule_data_size); 1089 } 1090 1091 tqpair->resource_count = opts->max_queue_depth; 1092 1093 tqpair->reqs = calloc(tqpair->resource_count, sizeof(*tqpair->reqs)); 1094 if (!tqpair->reqs) { 1095 SPDK_ERRLOG("Unable to
allocate reqs on tqpair=%p\n", tqpair); 1096 return -1; 1097 } 1098 1099 if (in_capsule_data_size) { 1100 tqpair->bufs = spdk_zmalloc(tqpair->resource_count * in_capsule_data_size, 0x1000, 1101 NULL, SPDK_ENV_LCORE_ID_ANY, 1102 SPDK_MALLOC_DMA); 1103 if (!tqpair->bufs) { 1104 SPDK_ERRLOG("Unable to allocate bufs on tqpair=%p.\n", tqpair); 1105 return -1; 1106 } 1107 } 1108 /* prepare memory space for receiving pdus and tcp_req */ 1109 /* Add additional 1 member, which will be used for mgmt_pdu owned by the tqpair */ 1110 tqpair->pdus = spdk_dma_zmalloc((2 * tqpair->resource_count + 1) * sizeof(*tqpair->pdus), 0x1000, 1111 NULL); 1112 if (!tqpair->pdus) { 1113 SPDK_ERRLOG("Unable to allocate pdu pool on tqpair =%p.\n", tqpair); 1114 return -1; 1115 } 1116 1117 for (i = 0; i < tqpair->resource_count; i++) { 1118 struct spdk_nvmf_tcp_req *tcp_req = &tqpair->reqs[i]; 1119 1120 tcp_req->ttag = i + 1; 1121 tcp_req->req.qpair = &tqpair->qpair; 1122 1123 tcp_req->pdu = &tqpair->pdus[i]; 1124 tcp_req->pdu->qpair = tqpair; 1125 1126 /* Set up memory to receive commands */ 1127 if (tqpair->bufs) { 1128 tcp_req->buf = (void *)((uintptr_t)tqpair->bufs + (i * in_capsule_data_size)); 1129 } 1130 1131 /* Set the cmd and rsp */ 1132 tcp_req->req.rsp = (union nvmf_c2h_msg *)&tcp_req->rsp; 1133 tcp_req->req.cmd = (union nvmf_h2c_msg *)&tcp_req->cmd; 1134 1135 tcp_req->req.stripped_data = NULL; 1136 1137 /* Initialize request state to FREE */ 1138 tcp_req->state = TCP_REQUEST_STATE_FREE; 1139 TAILQ_INSERT_TAIL(&tqpair->tcp_req_free_queue, tcp_req, state_link); 1140 tqpair->state_cntr[TCP_REQUEST_STATE_FREE]++; 1141 } 1142 1143 for (; i < 2 * tqpair->resource_count; i++) { 1144 struct nvme_tcp_pdu *pdu = &tqpair->pdus[i]; 1145 1146 pdu->qpair = tqpair; 1147 SLIST_INSERT_HEAD(&tqpair->tcp_pdu_free_queue, pdu, slist); 1148 } 1149 1150 tqpair->mgmt_pdu = &tqpair->pdus[i]; 1151 tqpair->mgmt_pdu->qpair = tqpair; 1152 tqpair->pdu_in_progress = SLIST_FIRST(&tqpair->tcp_pdu_free_queue); 1153 SLIST_REMOVE_HEAD(&tqpair->tcp_pdu_free_queue, slist); 1154 1155 tqpair->recv_buf_size = (in_capsule_data_size + sizeof(struct spdk_nvme_tcp_cmd) + 2 * 1156 SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR; 1157 1158 return 0; 1159 } 1160 1161 static int 1162 nvmf_tcp_qpair_init(struct spdk_nvmf_qpair *qpair) 1163 { 1164 struct spdk_nvmf_tcp_qpair *tqpair; 1165 1166 tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair); 1167 1168 SPDK_DEBUGLOG(nvmf_tcp, "New TCP Connection: %p\n", qpair); 1169 1170 spdk_trace_record(TRACE_TCP_QP_CREATE, 0, 0, (uintptr_t)tqpair); 1171 1172 /* Initialise request state queues of the qpair */ 1173 TAILQ_INIT(&tqpair->tcp_req_free_queue); 1174 TAILQ_INIT(&tqpair->tcp_req_working_queue); 1175 SLIST_INIT(&tqpair->tcp_pdu_free_queue); 1176 1177 tqpair->host_hdgst_enable = true; 1178 tqpair->host_ddgst_enable = true; 1179 1180 return 0; 1181 } 1182 1183 static int 1184 nvmf_tcp_qpair_sock_init(struct spdk_nvmf_tcp_qpair *tqpair) 1185 { 1186 int rc; 1187 1188 spdk_trace_record(TRACE_TCP_QP_SOCK_INIT, 0, 0, (uintptr_t)tqpair); 1189 1190 /* set low water mark */ 1191 rc = spdk_sock_set_recvlowat(tqpair->sock, 1); 1192 if (rc != 0) { 1193 SPDK_ERRLOG("spdk_sock_set_recvlowat() failed\n"); 1194 return rc; 1195 } 1196 1197 return 0; 1198 } 1199 1200 static void 1201 nvmf_tcp_handle_connect(struct spdk_nvmf_transport *transport, 1202 struct spdk_nvmf_tcp_port *port, 1203 struct spdk_sock *sock) 1204 { 1205 struct spdk_nvmf_tcp_qpair *tqpair; 1206 int rc; 1207 1208
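/* Wrap the accepted socket in a new tqpair. If allocation fails, the socket is closed here because ownership has not yet been handed to a qpair. */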
SPDK_DEBUGLOG(nvmf_tcp, "New connection accepted on %s port %s\n", 1209 port->trid->traddr, port->trid->trsvcid); 1210 1211 tqpair = calloc(1, sizeof(struct spdk_nvmf_tcp_qpair)); 1212 if (tqpair == NULL) { 1213 SPDK_ERRLOG("Could not allocate new connection.\n"); 1214 spdk_sock_close(&sock); 1215 return; 1216 } 1217 1218 tqpair->sock = sock; 1219 tqpair->state_cntr[TCP_REQUEST_STATE_FREE] = 0; 1220 tqpair->port = port; 1221 tqpair->qpair.transport = transport; 1222 1223 rc = spdk_sock_getaddr(tqpair->sock, tqpair->target_addr, 1224 sizeof(tqpair->target_addr), &tqpair->target_port, 1225 tqpair->initiator_addr, sizeof(tqpair->initiator_addr), 1226 &tqpair->initiator_port); 1227 if (rc < 0) { 1228 SPDK_ERRLOG("spdk_sock_getaddr() failed of tqpair=%p\n", tqpair); 1229 nvmf_tcp_qpair_destroy(tqpair); 1230 return; 1231 } 1232 1233 spdk_nvmf_tgt_new_qpair(transport->tgt, &tqpair->qpair); 1234 } 1235 1236 static uint32_t 1237 nvmf_tcp_port_accept(struct spdk_nvmf_transport *transport, struct spdk_nvmf_tcp_port *port) 1238 { 1239 struct spdk_sock *sock; 1240 uint32_t count = 0; 1241 int i; 1242 1243 for (i = 0; i < NVMF_TCP_MAX_ACCEPT_SOCK_ONE_TIME; i++) { 1244 sock = spdk_sock_accept(port->listen_sock); 1245 if (sock == NULL) { 1246 break; 1247 } 1248 count++; 1249 nvmf_tcp_handle_connect(transport, port, sock); 1250 } 1251 1252 return count; 1253 } 1254 1255 static int 1256 nvmf_tcp_accept(void *ctx) 1257 { 1258 struct spdk_nvmf_transport *transport = ctx; 1259 struct spdk_nvmf_tcp_transport *ttransport; 1260 struct spdk_nvmf_tcp_port *port; 1261 uint32_t count = 0; 1262 1263 ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport); 1264 1265 TAILQ_FOREACH(port, &ttransport->ports, link) { 1266 count += nvmf_tcp_port_accept(transport, port); 1267 } 1268 1269 return count > 0 ? 
SPDK_POLLER_BUSY : SPDK_POLLER_IDLE; 1270 } 1271 1272 static void 1273 nvmf_tcp_discover(struct spdk_nvmf_transport *transport, 1274 struct spdk_nvme_transport_id *trid, 1275 struct spdk_nvmf_discovery_log_page_entry *entry) 1276 { 1277 entry->trtype = SPDK_NVMF_TRTYPE_TCP; 1278 entry->adrfam = trid->adrfam; 1279 entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_REQUIRED; 1280 1281 spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' '); 1282 spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' '); 1283 1284 entry->tsas.tcp.sectype = SPDK_NVME_TCP_SECURITY_NONE; 1285 } 1286 1287 static struct spdk_nvmf_tcp_control_msg_list * 1288 nvmf_tcp_control_msg_list_create(uint16_t num_messages) 1289 { 1290 struct spdk_nvmf_tcp_control_msg_list *list; 1291 struct spdk_nvmf_tcp_control_msg *msg; 1292 uint16_t i; 1293 1294 list = calloc(1, sizeof(*list)); 1295 if (!list) { 1296 SPDK_ERRLOG("Failed to allocate memory for list structure\n"); 1297 return NULL; 1298 } 1299 1300 list->msg_buf = spdk_zmalloc(num_messages * SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE, 1301 NVMF_DATA_BUFFER_ALIGNMENT, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 1302 if (!list->msg_buf) { 1303 SPDK_ERRLOG("Failed to allocate memory for control message buffers\n"); 1304 free(list); 1305 return NULL; 1306 } 1307 1308 STAILQ_INIT(&list->free_msgs); 1309 1310 for (i = 0; i < num_messages; i++) { 1311 msg = (struct spdk_nvmf_tcp_control_msg *)((char *)list->msg_buf + i * 1312 SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE); 1313 STAILQ_INSERT_TAIL(&list->free_msgs, msg, link); 1314 } 1315 1316 return list; 1317 } 1318 1319 static void 1320 nvmf_tcp_control_msg_list_free(struct spdk_nvmf_tcp_control_msg_list *list) 1321 { 1322 if (!list) { 1323 return; 1324 } 1325 1326 spdk_free(list->msg_buf); 1327 free(list); 1328 } 1329 1330 static struct spdk_nvmf_transport_poll_group * 1331 nvmf_tcp_poll_group_create(struct spdk_nvmf_transport *transport, 1332 struct spdk_nvmf_poll_group *group) 1333 { 1334 struct spdk_nvmf_tcp_transport *ttransport; 1335 struct spdk_nvmf_tcp_poll_group *tgroup; 1336 1337 tgroup = calloc(1, sizeof(*tgroup)); 1338 if (!tgroup) { 1339 return NULL; 1340 } 1341 1342 tgroup->sock_group = spdk_sock_group_create(&tgroup->group); 1343 if (!tgroup->sock_group) { 1344 goto cleanup; 1345 } 1346 1347 TAILQ_INIT(&tgroup->qpairs); 1348 TAILQ_INIT(&tgroup->await_req); 1349 1350 ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport); 1351 1352 if (transport->opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) { 1353 SPDK_DEBUGLOG(nvmf_tcp, "ICD %u is less than min required for admin/fabric commands (%u). 
" 1354 "Creating control messages list\n", transport->opts.in_capsule_data_size, 1355 SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE); 1356 tgroup->control_msg_list = nvmf_tcp_control_msg_list_create(ttransport->tcp_opts.control_msg_num); 1357 if (!tgroup->control_msg_list) { 1358 goto cleanup; 1359 } 1360 } 1361 1362 tgroup->accel_channel = spdk_accel_get_io_channel(); 1363 if (spdk_unlikely(!tgroup->accel_channel)) { 1364 SPDK_ERRLOG("Cannot create accel_channel for tgroup=%p\n", tgroup); 1365 goto cleanup; 1366 } 1367 1368 TAILQ_INSERT_TAIL(&ttransport->poll_groups, tgroup, link); 1369 if (ttransport->next_pg == NULL) { 1370 ttransport->next_pg = tgroup; 1371 } 1372 1373 return &tgroup->group; 1374 1375 cleanup: 1376 nvmf_tcp_poll_group_destroy(&tgroup->group); 1377 return NULL; 1378 } 1379 1380 static struct spdk_nvmf_transport_poll_group * 1381 nvmf_tcp_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair) 1382 { 1383 struct spdk_nvmf_tcp_transport *ttransport; 1384 struct spdk_nvmf_tcp_poll_group **pg; 1385 struct spdk_nvmf_tcp_qpair *tqpair; 1386 struct spdk_sock_group *group = NULL, *hint = NULL; 1387 int rc; 1388 1389 ttransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_tcp_transport, transport); 1390 1391 if (TAILQ_EMPTY(&ttransport->poll_groups)) { 1392 return NULL; 1393 } 1394 1395 pg = &ttransport->next_pg; 1396 assert(*pg != NULL); 1397 hint = (*pg)->sock_group; 1398 1399 tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair); 1400 rc = spdk_sock_get_optimal_sock_group(tqpair->sock, &group, hint); 1401 if (rc != 0) { 1402 return NULL; 1403 } else if (group != NULL) { 1404 /* Optimal poll group was found */ 1405 return spdk_sock_group_get_ctx(group); 1406 } 1407 1408 /* The hint was used for optimal poll group, advance next_pg. 
*/ 1409 *pg = TAILQ_NEXT(*pg, link); 1410 if (*pg == NULL) { 1411 *pg = TAILQ_FIRST(&ttransport->poll_groups); 1412 } 1413 1414 return spdk_sock_group_get_ctx(hint); 1415 } 1416 1417 static void 1418 nvmf_tcp_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group) 1419 { 1420 struct spdk_nvmf_tcp_poll_group *tgroup, *next_tgroup; 1421 struct spdk_nvmf_tcp_transport *ttransport; 1422 1423 tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group); 1424 spdk_sock_group_close(&tgroup->sock_group); 1425 if (tgroup->control_msg_list) { 1426 nvmf_tcp_control_msg_list_free(tgroup->control_msg_list); 1427 } 1428 1429 if (tgroup->accel_channel) { 1430 spdk_put_io_channel(tgroup->accel_channel); 1431 } 1432 1433 ttransport = SPDK_CONTAINEROF(tgroup->group.transport, struct spdk_nvmf_tcp_transport, transport); 1434 1435 next_tgroup = TAILQ_NEXT(tgroup, link); 1436 TAILQ_REMOVE(&ttransport->poll_groups, tgroup, link); 1437 if (next_tgroup == NULL) { 1438 next_tgroup = TAILQ_FIRST(&ttransport->poll_groups); 1439 } 1440 if (ttransport->next_pg == tgroup) { 1441 ttransport->next_pg = next_tgroup; 1442 } 1443 1444 free(tgroup); 1445 } 1446 1447 static void 1448 nvmf_tcp_qpair_set_recv_state(struct spdk_nvmf_tcp_qpair *tqpair, 1449 enum nvme_tcp_pdu_recv_state state) 1450 { 1451 if (tqpair->recv_state == state) { 1452 SPDK_ERRLOG("The recv state of tqpair=%p is same with the state(%d) to be set\n", 1453 tqpair, state); 1454 return; 1455 } 1456 1457 if (tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_REQ) { 1458 /* When leaving the await req state, move the qpair to the main list */ 1459 TAILQ_REMOVE(&tqpair->group->await_req, tqpair, link); 1460 TAILQ_INSERT_TAIL(&tqpair->group->qpairs, tqpair, link); 1461 } else if (state == NVME_TCP_PDU_RECV_STATE_AWAIT_REQ) { 1462 TAILQ_REMOVE(&tqpair->group->qpairs, tqpair, link); 1463 TAILQ_INSERT_TAIL(&tqpair->group->await_req, tqpair, link); 1464 } 1465 1466 SPDK_DEBUGLOG(nvmf_tcp, "tqpair(%p) recv state=%d\n", tqpair, state); 1467 tqpair->recv_state = state; 1468 1469 spdk_trace_record(TRACE_TCP_QP_RCV_STATE_CHANGE, tqpair->qpair.qid, 0, (uintptr_t)tqpair, 1470 tqpair->recv_state); 1471 } 1472 1473 static int 1474 nvmf_tcp_qpair_handle_timeout(void *ctx) 1475 { 1476 struct spdk_nvmf_tcp_qpair *tqpair = ctx; 1477 1478 assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_ERROR); 1479 1480 SPDK_ERRLOG("No pdu coming for tqpair=%p within %d seconds\n", tqpair, 1481 SPDK_NVME_TCP_QPAIR_EXIT_TIMEOUT); 1482 1483 nvmf_tcp_qpair_disconnect(tqpair); 1484 return SPDK_POLLER_BUSY; 1485 } 1486 1487 static void 1488 nvmf_tcp_send_c2h_term_req_complete(void *cb_arg) 1489 { 1490 struct spdk_nvmf_tcp_qpair *tqpair = (struct spdk_nvmf_tcp_qpair *)cb_arg; 1491 1492 if (!tqpair->timeout_poller) { 1493 tqpair->timeout_poller = SPDK_POLLER_REGISTER(nvmf_tcp_qpair_handle_timeout, tqpair, 1494 SPDK_NVME_TCP_QPAIR_EXIT_TIMEOUT * 1000000); 1495 } 1496 } 1497 1498 static void 1499 nvmf_tcp_send_c2h_term_req(struct spdk_nvmf_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu, 1500 enum spdk_nvme_tcp_term_req_fes fes, uint32_t error_offset) 1501 { 1502 struct nvme_tcp_pdu *rsp_pdu; 1503 struct spdk_nvme_tcp_term_req_hdr *c2h_term_req; 1504 uint32_t c2h_term_req_hdr_len = sizeof(*c2h_term_req); 1505 uint32_t copy_len; 1506 1507 rsp_pdu = tqpair->mgmt_pdu; 1508 1509 c2h_term_req = &rsp_pdu->hdr.term_req; 1510 c2h_term_req->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ; 1511 c2h_term_req->common.hlen = c2h_term_req_hdr_len; 1512 c2h_term_req->fes = fes; 1513 1514 if 
((fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD) || 1515 (fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER)) { 1516 DSET32(&c2h_term_req->fei, error_offset); 1517 } 1518 1519 copy_len = spdk_min(pdu->hdr.common.hlen, SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE); 1520 1521 /* Copy the error info into the buffer */ 1522 memcpy((uint8_t *)rsp_pdu->hdr.raw + c2h_term_req_hdr_len, pdu->hdr.raw, copy_len); 1523 nvme_tcp_pdu_set_data(rsp_pdu, (uint8_t *)rsp_pdu->hdr.raw + c2h_term_req_hdr_len, copy_len); 1524 1525 /* Contain the header of the wrong received pdu */ 1526 c2h_term_req->common.plen = c2h_term_req->common.hlen + copy_len; 1527 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR); 1528 nvmf_tcp_qpair_write_mgmt_pdu(tqpair, nvmf_tcp_send_c2h_term_req_complete, tqpair); 1529 } 1530 1531 static void 1532 nvmf_tcp_capsule_cmd_hdr_handle(struct spdk_nvmf_tcp_transport *ttransport, 1533 struct spdk_nvmf_tcp_qpair *tqpair, 1534 struct nvme_tcp_pdu *pdu) 1535 { 1536 struct spdk_nvmf_tcp_req *tcp_req; 1537 1538 assert(pdu->psh_valid_bytes == pdu->psh_len); 1539 assert(pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD); 1540 1541 tcp_req = nvmf_tcp_req_get(tqpair); 1542 if (!tcp_req) { 1543 /* Directly return and make the allocation retry again. This can happen if we're 1544 * using asynchronous writes to send the response to the host or when releasing 1545 * zero-copy buffers after a response has been sent. In both cases, the host might 1546 * receive the response before we've finished processing the request and is free to 1547 * send another one. 1548 */ 1549 if (tqpair->state_cntr[TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST] > 0 || 1550 tqpair->state_cntr[TCP_REQUEST_STATE_AWAITING_ZCOPY_RELEASE] > 0) { 1551 return; 1552 } 1553 1554 /* The host sent more commands than the maximum queue depth. 
*/ 1555 SPDK_ERRLOG("Cannot allocate tcp_req on tqpair=%p\n", tqpair); 1556 nvmf_tcp_qpair_disconnect(tqpair); 1557 return; 1558 } 1559 1560 pdu->req = tcp_req; 1561 assert(tcp_req->state == TCP_REQUEST_STATE_NEW); 1562 nvmf_tcp_req_process(ttransport, tcp_req); 1563 } 1564 1565 static void 1566 nvmf_tcp_capsule_cmd_payload_handle(struct spdk_nvmf_tcp_transport *ttransport, 1567 struct spdk_nvmf_tcp_qpair *tqpair, 1568 struct nvme_tcp_pdu *pdu) 1569 { 1570 struct spdk_nvmf_tcp_req *tcp_req; 1571 struct spdk_nvme_tcp_cmd *capsule_cmd; 1572 uint32_t error_offset = 0; 1573 enum spdk_nvme_tcp_term_req_fes fes; 1574 struct spdk_nvme_cpl *rsp; 1575 1576 capsule_cmd = &pdu->hdr.capsule_cmd; 1577 tcp_req = pdu->req; 1578 assert(tcp_req != NULL); 1579 1580 /* Zero-copy requests don't support ICD */ 1581 assert(!spdk_nvmf_request_using_zcopy(&tcp_req->req)); 1582 1583 if (capsule_cmd->common.pdo > SPDK_NVME_TCP_PDU_PDO_MAX_OFFSET) { 1584 SPDK_ERRLOG("Expected ICReq capsule_cmd pdu offset <= %d, got %c\n", 1585 SPDK_NVME_TCP_PDU_PDO_MAX_OFFSET, capsule_cmd->common.pdo); 1586 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 1587 error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdo); 1588 goto err; 1589 } 1590 1591 rsp = &tcp_req->req.rsp->nvme_cpl; 1592 if (spdk_unlikely(rsp->status.sc == SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR)) { 1593 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE); 1594 } else { 1595 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE); 1596 } 1597 1598 nvmf_tcp_req_process(ttransport, tcp_req); 1599 1600 return; 1601 err: 1602 nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset); 1603 } 1604 1605 static void 1606 nvmf_tcp_h2c_data_hdr_handle(struct spdk_nvmf_tcp_transport *ttransport, 1607 struct spdk_nvmf_tcp_qpair *tqpair, 1608 struct nvme_tcp_pdu *pdu) 1609 { 1610 struct spdk_nvmf_tcp_req *tcp_req; 1611 uint32_t error_offset = 0; 1612 enum spdk_nvme_tcp_term_req_fes fes = 0; 1613 struct spdk_nvme_tcp_h2c_data_hdr *h2c_data; 1614 1615 h2c_data = &pdu->hdr.h2c_data; 1616 1617 SPDK_DEBUGLOG(nvmf_tcp, "tqpair=%p, r2t_info: datao=%u, datal=%u, cccid=%u, ttag=%u\n", 1618 tqpair, h2c_data->datao, h2c_data->datal, h2c_data->cccid, h2c_data->ttag); 1619 1620 if (h2c_data->ttag > tqpair->resource_count) { 1621 SPDK_DEBUGLOG(nvmf_tcp, "ttag %u is larger than allowed %u.\n", h2c_data->ttag, 1622 tqpair->resource_count); 1623 fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR; 1624 error_offset = offsetof(struct spdk_nvme_tcp_h2c_data_hdr, ttag); 1625 goto err; 1626 } 1627 1628 tcp_req = &tqpair->reqs[h2c_data->ttag - 1]; 1629 1630 if (spdk_unlikely(tcp_req->state != TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER && 1631 tcp_req->state != TCP_REQUEST_STATE_AWAITING_R2T_ACK)) { 1632 SPDK_DEBUGLOG(nvmf_tcp, "tcp_req(%p), tqpair=%p, has error state in %d\n", tcp_req, tqpair, 1633 tcp_req->state); 1634 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 1635 error_offset = offsetof(struct spdk_nvme_tcp_h2c_data_hdr, ttag); 1636 goto err; 1637 } 1638 1639 if (spdk_unlikely(tcp_req->req.cmd->nvme_cmd.cid != h2c_data->cccid)) { 1640 SPDK_DEBUGLOG(nvmf_tcp, "tcp_req(%p), tqpair=%p, expected %u but %u for cccid.\n", tcp_req, tqpair, 1641 tcp_req->req.cmd->nvme_cmd.cid, h2c_data->cccid); 1642 fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR; 1643 error_offset = offsetof(struct spdk_nvme_tcp_h2c_data_hdr, cccid); 1644 goto err; 1645 } 1646 1647 if (tcp_req->h2c_offset != h2c_data->datao) { 1648 SPDK_DEBUGLOG(nvmf_tcp, 1649 
"tcp_req(%p), tqpair=%p, expected data offset %u, but data offset is %u\n", 1650 tcp_req, tqpair, tcp_req->h2c_offset, h2c_data->datao); 1651 fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE; 1652 goto err; 1653 } 1654 1655 if ((h2c_data->datao + h2c_data->datal) > tcp_req->req.length) { 1656 SPDK_DEBUGLOG(nvmf_tcp, 1657 "tcp_req(%p), tqpair=%p, (datao=%u + datal=%u) exceeds requested length=%u\n", 1658 tcp_req, tqpair, h2c_data->datao, h2c_data->datal, tcp_req->req.length); 1659 fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE; 1660 goto err; 1661 } 1662 1663 pdu->req = tcp_req; 1664 1665 if (spdk_unlikely(tcp_req->req.dif_enabled)) { 1666 pdu->dif_ctx = &tcp_req->req.dif.dif_ctx; 1667 } 1668 1669 nvme_tcp_pdu_set_data_buf(pdu, tcp_req->req.iov, tcp_req->req.iovcnt, 1670 h2c_data->datao, h2c_data->datal); 1671 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD); 1672 return; 1673 1674 err: 1675 nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset); 1676 } 1677 1678 static void 1679 nvmf_tcp_send_capsule_resp_pdu(struct spdk_nvmf_tcp_req *tcp_req, 1680 struct spdk_nvmf_tcp_qpair *tqpair) 1681 { 1682 struct nvme_tcp_pdu *rsp_pdu; 1683 struct spdk_nvme_tcp_rsp *capsule_resp; 1684 1685 SPDK_DEBUGLOG(nvmf_tcp, "enter, tqpair=%p\n", tqpair); 1686 1687 rsp_pdu = nvmf_tcp_req_pdu_init(tcp_req); 1688 assert(rsp_pdu != NULL); 1689 1690 capsule_resp = &rsp_pdu->hdr.capsule_resp; 1691 capsule_resp->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP; 1692 capsule_resp->common.plen = capsule_resp->common.hlen = sizeof(*capsule_resp); 1693 capsule_resp->rccqe = tcp_req->req.rsp->nvme_cpl; 1694 if (tqpair->host_hdgst_enable) { 1695 capsule_resp->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF; 1696 capsule_resp->common.plen += SPDK_NVME_TCP_DIGEST_LEN; 1697 } 1698 1699 nvmf_tcp_qpair_write_req_pdu(tqpair, tcp_req, nvmf_tcp_request_free, tcp_req); 1700 } 1701 1702 static void 1703 nvmf_tcp_pdu_c2h_data_complete(void *cb_arg) 1704 { 1705 struct spdk_nvmf_tcp_req *tcp_req = cb_arg; 1706 struct spdk_nvmf_tcp_qpair *tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair, 1707 struct spdk_nvmf_tcp_qpair, qpair); 1708 1709 assert(tqpair != NULL); 1710 1711 if (spdk_unlikely(tcp_req->pdu->rw_offset < tcp_req->req.length)) { 1712 SPDK_DEBUGLOG(nvmf_tcp, "sending another C2H part, offset %u length %u\n", tcp_req->pdu->rw_offset, 1713 tcp_req->req.length); 1714 _nvmf_tcp_send_c2h_data(tqpair, tcp_req); 1715 return; 1716 } 1717 1718 if (tcp_req->pdu->hdr.c2h_data.common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) { 1719 nvmf_tcp_request_free(tcp_req); 1720 } else { 1721 nvmf_tcp_send_capsule_resp_pdu(tcp_req, tqpair); 1722 } 1723 } 1724 1725 static void 1726 nvmf_tcp_r2t_complete(void *cb_arg) 1727 { 1728 struct spdk_nvmf_tcp_req *tcp_req = cb_arg; 1729 struct spdk_nvmf_tcp_transport *ttransport; 1730 1731 ttransport = SPDK_CONTAINEROF(tcp_req->req.qpair->transport, 1732 struct spdk_nvmf_tcp_transport, transport); 1733 1734 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER); 1735 1736 if (tcp_req->h2c_offset == tcp_req->req.length) { 1737 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE); 1738 nvmf_tcp_req_process(ttransport, tcp_req); 1739 } 1740 } 1741 1742 static void 1743 nvmf_tcp_send_r2t_pdu(struct spdk_nvmf_tcp_qpair *tqpair, 1744 struct spdk_nvmf_tcp_req *tcp_req) 1745 { 1746 struct nvme_tcp_pdu *rsp_pdu; 1747 struct spdk_nvme_tcp_r2t_hdr *r2t; 1748 1749 rsp_pdu = nvmf_tcp_req_pdu_init(tcp_req); 1750 
assert(rsp_pdu != NULL); 1751 1752 r2t = &rsp_pdu->hdr.r2t; 1753 r2t->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_R2T; 1754 r2t->common.plen = r2t->common.hlen = sizeof(*r2t); 1755 1756 if (tqpair->host_hdgst_enable) { 1757 r2t->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF; 1758 r2t->common.plen += SPDK_NVME_TCP_DIGEST_LEN; 1759 } 1760 1761 r2t->cccid = tcp_req->req.cmd->nvme_cmd.cid; 1762 r2t->ttag = tcp_req->ttag; 1763 r2t->r2to = tcp_req->h2c_offset; 1764 r2t->r2tl = tcp_req->req.length; 1765 1766 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_AWAITING_R2T_ACK); 1767 1768 SPDK_DEBUGLOG(nvmf_tcp, 1769 "tcp_req(%p) on tqpair(%p), r2t_info: cccid=%u, ttag=%u, r2to=%u, r2tl=%u\n", 1770 tcp_req, tqpair, r2t->cccid, r2t->ttag, r2t->r2to, r2t->r2tl); 1771 nvmf_tcp_qpair_write_req_pdu(tqpair, tcp_req, nvmf_tcp_r2t_complete, tcp_req); 1772 } 1773 1774 static void 1775 nvmf_tcp_h2c_data_payload_handle(struct spdk_nvmf_tcp_transport *ttransport, 1776 struct spdk_nvmf_tcp_qpair *tqpair, 1777 struct nvme_tcp_pdu *pdu) 1778 { 1779 struct spdk_nvmf_tcp_req *tcp_req; 1780 struct spdk_nvme_cpl *rsp; 1781 1782 tcp_req = pdu->req; 1783 assert(tcp_req != NULL); 1784 1785 SPDK_DEBUGLOG(nvmf_tcp, "enter\n"); 1786 1787 tcp_req->h2c_offset += pdu->data_len; 1788 1789 /* Wait for all of the data to arrive AND for the initial R2T PDU send to be 1790 * acknowledged before moving on. */ 1791 if (tcp_req->h2c_offset == tcp_req->req.length && 1792 tcp_req->state == TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER) { 1793 /* After receiving all the h2c data, we need to check whether there is 1794 * transient transport error */ 1795 rsp = &tcp_req->req.rsp->nvme_cpl; 1796 if (spdk_unlikely(rsp->status.sc == SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR)) { 1797 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE); 1798 } else { 1799 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE); 1800 } 1801 nvmf_tcp_req_process(ttransport, tcp_req); 1802 } 1803 } 1804 1805 static void 1806 nvmf_tcp_h2c_term_req_dump(struct spdk_nvme_tcp_term_req_hdr *h2c_term_req) 1807 { 1808 SPDK_ERRLOG("Error info of pdu(%p): %s\n", h2c_term_req, 1809 spdk_nvmf_tcp_term_req_fes_str[h2c_term_req->fes]); 1810 if ((h2c_term_req->fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD) || 1811 (h2c_term_req->fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER)) { 1812 SPDK_DEBUGLOG(nvmf_tcp, "The offset from the start of the PDU header is %u\n", 1813 DGET32(h2c_term_req->fei)); 1814 } 1815 } 1816 1817 static void 1818 nvmf_tcp_h2c_term_req_hdr_handle(struct spdk_nvmf_tcp_qpair *tqpair, 1819 struct nvme_tcp_pdu *pdu) 1820 { 1821 struct spdk_nvme_tcp_term_req_hdr *h2c_term_req = &pdu->hdr.term_req; 1822 uint32_t error_offset = 0; 1823 enum spdk_nvme_tcp_term_req_fes fes; 1824 1825 if (h2c_term_req->fes > SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER) { 1826 SPDK_ERRLOG("Fatal Error Status(FES) is unknown for h2c_term_req pdu=%p\n", pdu); 1827 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 1828 error_offset = offsetof(struct spdk_nvme_tcp_term_req_hdr, fes); 1829 goto end; 1830 } 1831 1832 /* set the data buffer */ 1833 nvme_tcp_pdu_set_data(pdu, (uint8_t *)pdu->hdr.raw + h2c_term_req->common.hlen, 1834 h2c_term_req->common.plen - h2c_term_req->common.hlen); 1835 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD); 1836 return; 1837 end: 1838 nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset); 1839 } 1840 1841 static void 1842 
nvmf_tcp_h2c_term_req_payload_handle(struct spdk_nvmf_tcp_qpair *tqpair, 1843 struct nvme_tcp_pdu *pdu) 1844 { 1845 struct spdk_nvme_tcp_term_req_hdr *h2c_term_req = &pdu->hdr.term_req; 1846 1847 nvmf_tcp_h2c_term_req_dump(h2c_term_req); 1848 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR); 1849 } 1850 1851 static void 1852 _nvmf_tcp_pdu_payload_handle(struct spdk_nvmf_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu) 1853 { 1854 struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF(tqpair->qpair.transport, 1855 struct spdk_nvmf_tcp_transport, transport); 1856 1857 switch (pdu->hdr.common.pdu_type) { 1858 case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD: 1859 nvmf_tcp_capsule_cmd_payload_handle(ttransport, tqpair, pdu); 1860 break; 1861 case SPDK_NVME_TCP_PDU_TYPE_H2C_DATA: 1862 nvmf_tcp_h2c_data_payload_handle(ttransport, tqpair, pdu); 1863 break; 1864 1865 case SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ: 1866 nvmf_tcp_h2c_term_req_payload_handle(tqpair, pdu); 1867 break; 1868 1869 default: 1870 /* The code should not go to here */ 1871 SPDK_ERRLOG("ERROR pdu type %d\n", pdu->hdr.common.pdu_type); 1872 break; 1873 } 1874 SLIST_INSERT_HEAD(&tqpair->tcp_pdu_free_queue, pdu, slist); 1875 } 1876 1877 static void 1878 data_crc32_calc_done(void *cb_arg, int status) 1879 { 1880 struct nvme_tcp_pdu *pdu = cb_arg; 1881 struct spdk_nvmf_tcp_qpair *tqpair = pdu->qpair; 1882 struct spdk_nvmf_tcp_req *tcp_req; 1883 struct spdk_nvme_cpl *rsp; 1884 1885 /* async crc32 calculation is failed and use direct calculation to check */ 1886 if (spdk_unlikely(status)) { 1887 SPDK_ERRLOG("Data digest on tqpair=(%p) with pdu=%p failed to be calculated asynchronously\n", 1888 tqpair, pdu); 1889 pdu->data_digest_crc32 = nvme_tcp_pdu_calc_data_digest(pdu); 1890 } 1891 pdu->data_digest_crc32 ^= SPDK_CRC32C_XOR; 1892 if (!MATCH_DIGEST_WORD(pdu->data_digest, pdu->data_digest_crc32)) { 1893 SPDK_ERRLOG("Data digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu); 1894 tcp_req = pdu->req; 1895 assert(tcp_req != NULL); 1896 rsp = &tcp_req->req.rsp->nvme_cpl; 1897 rsp->status.sc = SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR; 1898 } 1899 _nvmf_tcp_pdu_payload_handle(tqpair, pdu); 1900 } 1901 1902 static void 1903 nvmf_tcp_pdu_payload_handle(struct spdk_nvmf_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu) 1904 { 1905 int rc = 0; 1906 assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD); 1907 tqpair->pdu_in_progress = NULL; 1908 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY); 1909 SPDK_DEBUGLOG(nvmf_tcp, "enter\n"); 1910 /* check data digest if need */ 1911 if (pdu->ddgst_enable) { 1912 if (tqpair->qpair.qid != 0 && !pdu->dif_ctx && tqpair->group && 1913 (pdu->data_len % SPDK_NVME_TCP_DIGEST_ALIGNMENT == 0)) { 1914 rc = spdk_accel_submit_crc32cv(tqpair->group->accel_channel, &pdu->data_digest_crc32, pdu->data_iov, 1915 pdu->data_iovcnt, 0, data_crc32_calc_done, pdu); 1916 if (spdk_likely(rc == 0)) { 1917 return; 1918 } 1919 } else { 1920 pdu->data_digest_crc32 = nvme_tcp_pdu_calc_data_digest(pdu); 1921 } 1922 data_crc32_calc_done(pdu, rc); 1923 } else { 1924 _nvmf_tcp_pdu_payload_handle(tqpair, pdu); 1925 } 1926 } 1927 1928 static void 1929 nvmf_tcp_send_icresp_complete(void *cb_arg) 1930 { 1931 struct spdk_nvmf_tcp_qpair *tqpair = cb_arg; 1932 1933 nvmf_tcp_qpair_set_state(tqpair, NVME_TCP_QPAIR_STATE_RUNNING); 1934 } 1935 1936 static void 1937 nvmf_tcp_icreq_handle(struct spdk_nvmf_tcp_transport *ttransport, 1938 struct spdk_nvmf_tcp_qpair *tqpair, 1939 struct 
nvme_tcp_pdu *pdu) 1940 { 1941 struct spdk_nvme_tcp_ic_req *ic_req = &pdu->hdr.ic_req; 1942 struct nvme_tcp_pdu *rsp_pdu; 1943 struct spdk_nvme_tcp_ic_resp *ic_resp; 1944 uint32_t error_offset = 0; 1945 enum spdk_nvme_tcp_term_req_fes fes; 1946 1947 /* Only PFV 0 is defined currently */ 1948 if (ic_req->pfv != 0) { 1949 SPDK_ERRLOG("Expected ICReq PFV %u, got %u\n", 0u, ic_req->pfv); 1950 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 1951 error_offset = offsetof(struct spdk_nvme_tcp_ic_req, pfv); 1952 goto end; 1953 } 1954 1955 /* HPDA is a 0's based value in units of dwords and must not be larger than SPDK_NVME_TCP_HPDA_MAX */ 1956 if (ic_req->hpda > SPDK_NVME_TCP_HPDA_MAX) { 1957 SPDK_ERRLOG("ICReq HPDA out of range 0 to 31, got %u\n", ic_req->hpda); 1958 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 1959 error_offset = offsetof(struct spdk_nvme_tcp_ic_req, hpda); 1960 goto end; 1961 } 1962 1963 /* MAXR2T is 0's based */ 1964 SPDK_DEBUGLOG(nvmf_tcp, "maxr2t =%u\n", (ic_req->maxr2t + 1u)); 1965 1966 tqpair->host_hdgst_enable = ic_req->dgst.bits.hdgst_enable ? true : false; 1967 if (!tqpair->host_hdgst_enable) { 1968 tqpair->recv_buf_size -= SPDK_NVME_TCP_DIGEST_LEN * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR; 1969 } 1970 1971 tqpair->host_ddgst_enable = ic_req->dgst.bits.ddgst_enable ? true : false; 1972 if (!tqpair->host_ddgst_enable) { 1973 tqpair->recv_buf_size -= SPDK_NVME_TCP_DIGEST_LEN * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR; 1974 } 1975 1976 tqpair->recv_buf_size = spdk_max(tqpair->recv_buf_size, MIN_SOCK_PIPE_SIZE); 1977 /* Now that we know whether digests are enabled, properly size the receive buffer */ 1978 if (spdk_sock_set_recvbuf(tqpair->sock, tqpair->recv_buf_size) < 0) { 1979 SPDK_WARNLOG("Unable to allocate enough memory for receive buffer on tqpair=%p with size=%d\n", 1980 tqpair, 1981 tqpair->recv_buf_size); 1982 /* Not fatal. */ 1983 } 1984 1985 tqpair->cpda = spdk_min(ic_req->hpda, SPDK_NVME_TCP_CPDA_MAX); 1986 SPDK_DEBUGLOG(nvmf_tcp, "cpda of tqpair=(%p) is : %u\n", tqpair, tqpair->cpda); 1987 1988 rsp_pdu = tqpair->mgmt_pdu; 1989 1990 ic_resp = &rsp_pdu->hdr.ic_resp; 1991 ic_resp->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP; 1992 ic_resp->common.hlen = ic_resp->common.plen = sizeof(*ic_resp); 1993 ic_resp->pfv = 0; 1994 ic_resp->cpda = tqpair->cpda; 1995 ic_resp->maxh2cdata = ttransport->transport.opts.max_io_size; 1996 ic_resp->dgst.bits.hdgst_enable = tqpair->host_hdgst_enable ? 1 : 0; 1997 ic_resp->dgst.bits.ddgst_enable = tqpair->host_ddgst_enable ?
1 : 0; 1998 1999 SPDK_DEBUGLOG(nvmf_tcp, "host_hdgst_enable: %u\n", tqpair->host_hdgst_enable); 2000 SPDK_DEBUGLOG(nvmf_tcp, "host_ddgst_enable: %u\n", tqpair->host_ddgst_enable); 2001 2002 nvmf_tcp_qpair_set_state(tqpair, NVME_TCP_QPAIR_STATE_INITIALIZING); 2003 nvmf_tcp_qpair_write_mgmt_pdu(tqpair, nvmf_tcp_send_icresp_complete, tqpair); 2004 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY); 2005 return; 2006 end: 2007 nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset); 2008 } 2009 2010 static void 2011 nvmf_tcp_pdu_psh_handle(struct spdk_nvmf_tcp_qpair *tqpair, 2012 struct spdk_nvmf_tcp_transport *ttransport) 2013 { 2014 struct nvme_tcp_pdu *pdu; 2015 int rc; 2016 uint32_t crc32c, error_offset = 0; 2017 enum spdk_nvme_tcp_term_req_fes fes; 2018 2019 assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH); 2020 pdu = tqpair->pdu_in_progress; 2021 2022 SPDK_DEBUGLOG(nvmf_tcp, "pdu type of tqpair(%p) is %d\n", tqpair, 2023 pdu->hdr.common.pdu_type); 2024 /* check header digest if needed */ 2025 if (pdu->has_hdgst) { 2026 SPDK_DEBUGLOG(nvmf_tcp, "Compare the header of pdu=%p on tqpair=%p\n", pdu, tqpair); 2027 crc32c = nvme_tcp_pdu_calc_header_digest(pdu); 2028 rc = MATCH_DIGEST_WORD((uint8_t *)pdu->hdr.raw + pdu->hdr.common.hlen, crc32c); 2029 if (rc == 0) { 2030 SPDK_ERRLOG("Header digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu); 2031 fes = SPDK_NVME_TCP_TERM_REQ_FES_HDGST_ERROR; 2032 nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset); 2033 return; 2034 2035 } 2036 } 2037 2038 switch (pdu->hdr.common.pdu_type) { 2039 case SPDK_NVME_TCP_PDU_TYPE_IC_REQ: 2040 nvmf_tcp_icreq_handle(ttransport, tqpair, pdu); 2041 break; 2042 case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD: 2043 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_REQ); 2044 break; 2045 case SPDK_NVME_TCP_PDU_TYPE_H2C_DATA: 2046 nvmf_tcp_h2c_data_hdr_handle(ttransport, tqpair, pdu); 2047 break; 2048 2049 case SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ: 2050 nvmf_tcp_h2c_term_req_hdr_handle(tqpair, pdu); 2051 break; 2052 2053 default: 2054 SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->pdu_in_progress->hdr.common.pdu_type); 2055 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 2056 error_offset = 1; 2057 nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset); 2058 break; 2059 } 2060 } 2061 2062 static void 2063 nvmf_tcp_pdu_ch_handle(struct spdk_nvmf_tcp_qpair *tqpair) 2064 { 2065 struct nvme_tcp_pdu *pdu; 2066 uint32_t error_offset = 0; 2067 enum spdk_nvme_tcp_term_req_fes fes; 2068 uint8_t expected_hlen, pdo; 2069 bool plen_error = false, pdo_error = false; 2070 2071 assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH); 2072 pdu = tqpair->pdu_in_progress; 2073 assert(pdu); 2074 if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ) { 2075 if (tqpair->state != NVME_TCP_QPAIR_STATE_INVALID) { 2076 SPDK_ERRLOG("Already received ICreq PDU, and reject this pdu=%p\n", pdu); 2077 fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR; 2078 goto err; 2079 } 2080 expected_hlen = sizeof(struct spdk_nvme_tcp_ic_req); 2081 if (pdu->hdr.common.plen != expected_hlen) { 2082 plen_error = true; 2083 } 2084 } else { 2085 if (tqpair->state != NVME_TCP_QPAIR_STATE_RUNNING) { 2086 SPDK_ERRLOG("The TCP/IP connection is not negotiated\n"); 2087 fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR; 2088 goto err; 2089 } 2090 2091 switch (pdu->hdr.common.pdu_type) { 2092 case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD: 2093 expected_hlen = sizeof(struct 
spdk_nvme_tcp_cmd); 2094 pdo = pdu->hdr.common.pdo; 2095 if ((tqpair->cpda != 0) && (pdo % ((tqpair->cpda + 1) << 2) != 0)) { 2096 pdo_error = true; 2097 break; 2098 } 2099 2100 if (pdu->hdr.common.plen < expected_hlen) { 2101 plen_error = true; 2102 } 2103 break; 2104 case SPDK_NVME_TCP_PDU_TYPE_H2C_DATA: 2105 expected_hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr); 2106 pdo = pdu->hdr.common.pdo; 2107 if ((tqpair->cpda != 0) && (pdo % ((tqpair->cpda + 1) << 2) != 0)) { 2108 pdo_error = true; 2109 break; 2110 } 2111 if (pdu->hdr.common.plen < expected_hlen) { 2112 plen_error = true; 2113 } 2114 break; 2115 2116 case SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ: 2117 expected_hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr); 2118 if ((pdu->hdr.common.plen <= expected_hlen) || 2119 (pdu->hdr.common.plen > SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE)) { 2120 plen_error = true; 2121 } 2122 break; 2123 2124 default: 2125 SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", pdu->hdr.common.pdu_type); 2126 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 2127 error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdu_type); 2128 goto err; 2129 } 2130 } 2131 2132 if (pdu->hdr.common.hlen != expected_hlen) { 2133 SPDK_ERRLOG("PDU type=0x%02x, expected header length %u, got %u on tqpair=%p\n", 2134 pdu->hdr.common.pdu_type, 2135 expected_hlen, pdu->hdr.common.hlen, tqpair); 2136 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 2137 error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, hlen); 2138 goto err; 2139 } else if (pdo_error) { 2140 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 2141 error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdo); 2142 } else if (plen_error) { 2143 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 2144 error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, plen); 2145 goto err; 2146 } else { 2147 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH); 2148 nvme_tcp_pdu_calc_psh_len(tqpair->pdu_in_progress, tqpair->host_hdgst_enable); 2149 return; 2150 } 2151 err: 2152 nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset); 2153 } 2154 2155 static int 2156 nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair) 2157 { 2158 int rc = 0; 2159 struct nvme_tcp_pdu *pdu; 2160 enum nvme_tcp_pdu_recv_state prev_state; 2161 uint32_t data_len; 2162 struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF(tqpair->qpair.transport, 2163 struct spdk_nvmf_tcp_transport, transport); 2164 2165 /* The loop here is to allow for several back-to-back state changes.
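 * Each pass reads whatever part of the current PDU section (common header, PDU-specific header
 * or payload) the socket can provide and advances recv_state once that section is complete;
 * the loop exits when an iteration leaves the state unchanged.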
*/ 2166 do { 2167 prev_state = tqpair->recv_state; 2168 SPDK_DEBUGLOG(nvmf_tcp, "tqpair(%p) recv pdu entering state %d\n", tqpair, prev_state); 2169 2170 pdu = tqpair->pdu_in_progress; 2171 assert(pdu || tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY); 2172 switch (tqpair->recv_state) { 2173 /* Wait for the common header */ 2174 case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY: 2175 if (!pdu) { 2176 pdu = SLIST_FIRST(&tqpair->tcp_pdu_free_queue); 2177 if (spdk_unlikely(!pdu)) { 2178 return NVME_TCP_PDU_IN_PROGRESS; 2179 } 2180 SLIST_REMOVE_HEAD(&tqpair->tcp_pdu_free_queue, slist); 2181 tqpair->pdu_in_progress = pdu; 2182 } 2183 memset(pdu, 0, offsetof(struct nvme_tcp_pdu, qpair)); 2184 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH); 2185 /* FALLTHROUGH */ 2186 case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH: 2187 if (spdk_unlikely(tqpair->state == NVME_TCP_QPAIR_STATE_INITIALIZING)) { 2188 return rc; 2189 } 2190 2191 rc = nvme_tcp_read_data(tqpair->sock, 2192 sizeof(struct spdk_nvme_tcp_common_pdu_hdr) - pdu->ch_valid_bytes, 2193 (void *)&pdu->hdr.common + pdu->ch_valid_bytes); 2194 if (rc < 0) { 2195 SPDK_DEBUGLOG(nvmf_tcp, "will disconnect tqpair=%p\n", tqpair); 2196 return NVME_TCP_PDU_FATAL; 2197 } else if (rc > 0) { 2198 pdu->ch_valid_bytes += rc; 2199 spdk_trace_record(TRACE_TCP_READ_FROM_SOCKET_DONE, tqpair->qpair.qid, rc, 0, tqpair); 2200 } 2201 2202 if (pdu->ch_valid_bytes < sizeof(struct spdk_nvme_tcp_common_pdu_hdr)) { 2203 return NVME_TCP_PDU_IN_PROGRESS; 2204 } 2205 2206 /* The common header of this PDU has now been read from the socket. */ 2207 nvmf_tcp_pdu_ch_handle(tqpair); 2208 break; 2209 /* Wait for the pdu specific header */ 2210 case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH: 2211 rc = nvme_tcp_read_data(tqpair->sock, 2212 pdu->psh_len - pdu->psh_valid_bytes, 2213 (void *)&pdu->hdr.raw + sizeof(struct spdk_nvme_tcp_common_pdu_hdr) + pdu->psh_valid_bytes); 2214 if (rc < 0) { 2215 return NVME_TCP_PDU_FATAL; 2216 } else if (rc > 0) { 2217 spdk_trace_record(TRACE_TCP_READ_FROM_SOCKET_DONE, tqpair->qpair.qid, rc, 0, tqpair); 2218 pdu->psh_valid_bytes += rc; 2219 } 2220 2221 if (pdu->psh_valid_bytes < pdu->psh_len) { 2222 return NVME_TCP_PDU_IN_PROGRESS; 2223 } 2224 2225 /* All headers (CH, PSH and header digest) of this PDU have now been read from the socket.
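 * nvmf_tcp_pdu_psh_handle() verifies the header digest, when one is present, and then
 * dispatches on the PDU type.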
*/ 2226 nvmf_tcp_pdu_psh_handle(tqpair, ttransport); 2227 break; 2228 /* Wait for the req slot */ 2229 case NVME_TCP_PDU_RECV_STATE_AWAIT_REQ: 2230 nvmf_tcp_capsule_cmd_hdr_handle(ttransport, tqpair, pdu); 2231 break; 2232 case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD: 2233 /* check whether the data is valid, if not we just return */ 2234 if (!pdu->data_len) { 2235 return NVME_TCP_PDU_IN_PROGRESS; 2236 } 2237 2238 data_len = pdu->data_len; 2239 /* data digest */ 2240 if (spdk_unlikely((pdu->hdr.common.pdu_type != SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ) && 2241 tqpair->host_ddgst_enable)) { 2242 data_len += SPDK_NVME_TCP_DIGEST_LEN; 2243 pdu->ddgst_enable = true; 2244 } 2245 2246 rc = nvme_tcp_read_payload_data(tqpair->sock, pdu); 2247 if (rc < 0) { 2248 return NVME_TCP_PDU_FATAL; 2249 } 2250 pdu->rw_offset += rc; 2251 2252 if (pdu->rw_offset < data_len) { 2253 return NVME_TCP_PDU_IN_PROGRESS; 2254 } 2255 2256 /* Generate and insert DIF to whole data block received if DIF is enabled */ 2257 if (spdk_unlikely(pdu->dif_ctx != NULL) && 2258 spdk_dif_generate_stream(pdu->data_iov, pdu->data_iovcnt, 0, data_len, 2259 pdu->dif_ctx) != 0) { 2260 SPDK_ERRLOG("DIF generate failed\n"); 2261 return NVME_TCP_PDU_FATAL; 2262 } 2263 2264 /* All of this PDU has now been read from the socket. */ 2265 nvmf_tcp_pdu_payload_handle(tqpair, pdu); 2266 break; 2267 case NVME_TCP_PDU_RECV_STATE_ERROR: 2268 if (!spdk_sock_is_connected(tqpair->sock)) { 2269 return NVME_TCP_PDU_FATAL; 2270 } 2271 break; 2272 default: 2273 SPDK_ERRLOG("The state(%d) is invalid\n", tqpair->recv_state); 2274 abort(); 2275 break; 2276 } 2277 } while (tqpair->recv_state != prev_state); 2278 2279 return rc; 2280 } 2281 2282 static inline void * 2283 nvmf_tcp_control_msg_get(struct spdk_nvmf_tcp_control_msg_list *list) 2284 { 2285 struct spdk_nvmf_tcp_control_msg *msg; 2286 2287 assert(list); 2288 2289 msg = STAILQ_FIRST(&list->free_msgs); 2290 if (!msg) { 2291 SPDK_DEBUGLOG(nvmf_tcp, "Out of control messages\n"); 2292 return NULL; 2293 } 2294 STAILQ_REMOVE_HEAD(&list->free_msgs, link); 2295 return msg; 2296 } 2297 2298 static inline void 2299 nvmf_tcp_control_msg_put(struct spdk_nvmf_tcp_control_msg_list *list, void *_msg) 2300 { 2301 struct spdk_nvmf_tcp_control_msg *msg = _msg; 2302 2303 assert(list); 2304 STAILQ_INSERT_HEAD(&list->free_msgs, msg, link); 2305 } 2306 2307 static int 2308 nvmf_tcp_req_parse_sgl(struct spdk_nvmf_tcp_req *tcp_req, 2309 struct spdk_nvmf_transport *transport, 2310 struct spdk_nvmf_transport_poll_group *group) 2311 { 2312 struct spdk_nvmf_request *req = &tcp_req->req; 2313 struct spdk_nvme_cmd *cmd; 2314 struct spdk_nvme_sgl_descriptor *sgl; 2315 struct spdk_nvmf_tcp_poll_group *tgroup; 2316 enum spdk_nvme_tcp_term_req_fes fes; 2317 struct nvme_tcp_pdu *pdu; 2318 struct spdk_nvmf_tcp_qpair *tqpair; 2319 uint32_t length, error_offset = 0; 2320 2321 cmd = &req->cmd->nvme_cmd; 2322 sgl = &cmd->dptr.sgl1; 2323 2324 if (sgl->generic.type == SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK && 2325 sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_TRANSPORT) { 2326 /* get request length from sgl */ 2327 length = sgl->unkeyed.length; 2328 if (spdk_unlikely(length > transport->opts.max_io_size)) { 2329 SPDK_ERRLOG("SGL length 0x%x exceeds max io size 0x%x\n", 2330 length, transport->opts.max_io_size); 2331 fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_LIMIT_EXCEEDED; 2332 goto fatal_err; 2333 } 2334 2335 /* fill request length and populate iovs */ 2336 req->length = length; 2337 2338 SPDK_DEBUGLOG(nvmf_tcp, "Data requested length= 
0x%x\n", length); 2339 2340 if (spdk_unlikely(req->dif_enabled)) { 2341 req->dif.orig_length = length; 2342 length = spdk_dif_get_length_with_md(length, &req->dif.dif_ctx); 2343 req->dif.elba_length = length; 2344 } 2345 2346 if (nvmf_ctrlr_use_zcopy(req)) { 2347 SPDK_DEBUGLOG(nvmf_tcp, "Using zero-copy to execute request %p\n", tcp_req); 2348 req->data_from_pool = false; 2349 return 0; 2350 } 2351 2352 if (spdk_nvmf_request_get_buffers(req, group, transport, length)) { 2353 /* No available buffers. Queue this request up. */ 2354 SPDK_DEBUGLOG(nvmf_tcp, "No available large data buffers. Queueing request %p\n", 2355 tcp_req); 2356 return 0; 2357 } 2358 2359 /* backward compatible */ 2360 req->data = req->iov[0].iov_base; 2361 2362 SPDK_DEBUGLOG(nvmf_tcp, "Request %p took %d buffer/s from central pool, and data=%p\n", 2363 tcp_req, req->iovcnt, req->iov[0].iov_base); 2364 2365 return 0; 2366 } else if (sgl->generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK && 2367 sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET) { 2368 uint64_t offset = sgl->address; 2369 uint32_t max_len = transport->opts.in_capsule_data_size; 2370 2371 assert(tcp_req->has_in_capsule_data); 2372 /* Capsule Cmd with In-capsule Data should get data length from pdu header */ 2373 tqpair = tcp_req->pdu->qpair; 2374 /* receiving pdu is not same with the pdu in tcp_req */ 2375 pdu = tqpair->pdu_in_progress; 2376 length = pdu->hdr.common.plen - pdu->psh_len - sizeof(struct spdk_nvme_tcp_common_pdu_hdr); 2377 if (tqpair->host_ddgst_enable) { 2378 length -= SPDK_NVME_TCP_DIGEST_LEN; 2379 } 2380 /* This error is not defined in NVMe/TCP spec, take this error as fatal error */ 2381 if (spdk_unlikely(length != sgl->unkeyed.length)) { 2382 SPDK_ERRLOG("In-Capsule Data length 0x%x is not equal to SGL data length 0x%x\n", 2383 length, sgl->unkeyed.length); 2384 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 2385 error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, plen); 2386 goto fatal_err; 2387 } 2388 2389 SPDK_DEBUGLOG(nvmf_tcp, "In-capsule data: offset 0x%" PRIx64 ", length 0x%x\n", 2390 offset, length); 2391 2392 /* The NVMe/TCP transport does not use ICDOFF to control the in-capsule data offset. ICDOFF should be '0' */ 2393 if (spdk_unlikely(offset != 0)) { 2394 /* Not defined fatal error in NVMe/TCP spec, handle this error as a fatal error */ 2395 SPDK_ERRLOG("In-capsule offset 0x%" PRIx64 " should be ZERO in NVMe/TCP\n", offset); 2396 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER; 2397 error_offset = offsetof(struct spdk_nvme_tcp_cmd, ccsqe.dptr.sgl1.address); 2398 goto fatal_err; 2399 } 2400 2401 if (spdk_unlikely(length > max_len)) { 2402 /* According to the SPEC we should support ICD up to 8192 bytes for admin and fabric commands */ 2403 if (length <= SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE && 2404 (cmd->opc == SPDK_NVME_OPC_FABRIC || req->qpair->qid == 0)) { 2405 2406 /* Get a buffer from dedicated list */ 2407 SPDK_DEBUGLOG(nvmf_tcp, "Getting a buffer from control msg list\n"); 2408 tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group); 2409 assert(tgroup->control_msg_list); 2410 req->iov[0].iov_base = nvmf_tcp_control_msg_get(tgroup->control_msg_list); 2411 if (!req->iov[0].iov_base) { 2412 /* No available buffers. Queue this request up. */ 2413 SPDK_DEBUGLOG(nvmf_tcp, "No available ICD buffers. 
Queueing request %p\n", tcp_req); 2414 return 0; 2415 } 2416 } else { 2417 SPDK_ERRLOG("In-capsule data length 0x%x exceeds capsule length 0x%x\n", 2418 length, max_len); 2419 fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_LIMIT_EXCEEDED; 2420 goto fatal_err; 2421 } 2422 } else { 2423 req->iov[0].iov_base = tcp_req->buf; 2424 } 2425 2426 req->length = length; 2427 req->data_from_pool = false; 2428 req->data = req->iov[0].iov_base; 2429 2430 if (spdk_unlikely(req->dif_enabled)) { 2431 length = spdk_dif_get_length_with_md(length, &req->dif.dif_ctx); 2432 req->dif.elba_length = length; 2433 } 2434 2435 req->iov[0].iov_len = length; 2436 req->iovcnt = 1; 2437 2438 return 0; 2439 } 2440 /* If we want to handle the problem here, then we can't skip the following data segment. 2441 * Because this function runs before reading data part, now handle all errors as fatal errors. */ 2442 SPDK_ERRLOG("Invalid NVMf I/O Command SGL: Type 0x%x, Subtype 0x%x\n", 2443 sgl->generic.type, sgl->generic.subtype); 2444 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER; 2445 error_offset = offsetof(struct spdk_nvme_tcp_cmd, ccsqe.dptr.sgl1.generic); 2446 fatal_err: 2447 nvmf_tcp_send_c2h_term_req(tcp_req->pdu->qpair, tcp_req->pdu, fes, error_offset); 2448 return -1; 2449 } 2450 2451 static inline enum spdk_nvme_media_error_status_code 2452 nvmf_tcp_dif_error_to_compl_status(uint8_t err_type) { 2453 enum spdk_nvme_media_error_status_code result; 2454 2455 switch (err_type) 2456 { 2457 case SPDK_DIF_REFTAG_ERROR: 2458 result = SPDK_NVME_SC_REFERENCE_TAG_CHECK_ERROR; 2459 break; 2460 case SPDK_DIF_APPTAG_ERROR: 2461 result = SPDK_NVME_SC_APPLICATION_TAG_CHECK_ERROR; 2462 break; 2463 case SPDK_DIF_GUARD_ERROR: 2464 result = SPDK_NVME_SC_GUARD_CHECK_ERROR; 2465 break; 2466 default: 2467 SPDK_UNREACHABLE(); 2468 break; 2469 } 2470 2471 return result; 2472 } 2473 2474 static void 2475 _nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair, 2476 struct spdk_nvmf_tcp_req *tcp_req) 2477 { 2478 struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF( 2479 tqpair->qpair.transport, struct spdk_nvmf_tcp_transport, transport); 2480 struct nvme_tcp_pdu *rsp_pdu; 2481 struct spdk_nvme_tcp_c2h_data_hdr *c2h_data; 2482 uint32_t plen, pdo, alignment; 2483 int rc; 2484 2485 SPDK_DEBUGLOG(nvmf_tcp, "enter\n"); 2486 2487 rsp_pdu = tcp_req->pdu; 2488 assert(rsp_pdu != NULL); 2489 2490 c2h_data = &rsp_pdu->hdr.c2h_data; 2491 c2h_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_DATA; 2492 plen = c2h_data->common.hlen = sizeof(*c2h_data); 2493 2494 if (tqpair->host_hdgst_enable) { 2495 plen += SPDK_NVME_TCP_DIGEST_LEN; 2496 c2h_data->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF; 2497 } 2498 2499 /* set the psh */ 2500 c2h_data->cccid = tcp_req->req.cmd->nvme_cmd.cid; 2501 c2h_data->datal = tcp_req->req.length - tcp_req->pdu->rw_offset; 2502 c2h_data->datao = tcp_req->pdu->rw_offset; 2503 2504 /* set the padding */ 2505 rsp_pdu->padding_len = 0; 2506 pdo = plen; 2507 if (tqpair->cpda) { 2508 alignment = (tqpair->cpda + 1) << 2; 2509 if (plen % alignment != 0) { 2510 pdo = (plen + alignment) / alignment * alignment; 2511 rsp_pdu->padding_len = pdo - plen; 2512 plen = pdo; 2513 } 2514 } 2515 2516 c2h_data->common.pdo = pdo; 2517 plen += c2h_data->datal; 2518 if (tqpair->host_ddgst_enable) { 2519 c2h_data->common.flags |= SPDK_NVME_TCP_CH_FLAGS_DDGSTF; 2520 plen += SPDK_NVME_TCP_DIGEST_LEN; 2521 } 2522 2523 c2h_data->common.plen = plen; 2524 2525 if (spdk_unlikely(tcp_req->req.dif_enabled)) { 2526 rsp_pdu->dif_ctx 
= &tcp_req->req.dif.dif_ctx; 2527 } 2528 2529 nvme_tcp_pdu_set_data_buf(rsp_pdu, tcp_req->req.iov, tcp_req->req.iovcnt, 2530 c2h_data->datao, c2h_data->datal); 2531 2532 2533 c2h_data->common.flags |= SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU; 2534 /* Need to send the capsule response if response is not all 0 */ 2535 if (ttransport->tcp_opts.c2h_success && 2536 tcp_req->rsp.cdw0 == 0 && tcp_req->rsp.cdw1 == 0) { 2537 c2h_data->common.flags |= SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS; 2538 } 2539 2540 if (spdk_unlikely(tcp_req->req.dif_enabled)) { 2541 struct spdk_nvme_cpl *rsp = &tcp_req->req.rsp->nvme_cpl; 2542 struct spdk_dif_error err_blk = {}; 2543 uint32_t mapped_length = 0; 2544 uint32_t available_iovs = SPDK_COUNTOF(rsp_pdu->iov); 2545 uint32_t ddgst_len = 0; 2546 2547 if (tqpair->host_ddgst_enable) { 2548 /* Data digest consumes additional iov entry */ 2549 available_iovs--; 2550 /* plen needs to be updated since nvme_tcp_build_iovs compares expected and actual plen */ 2551 ddgst_len = SPDK_NVME_TCP_DIGEST_LEN; 2552 c2h_data->common.plen -= ddgst_len; 2553 } 2554 /* Temp call to estimate if data can be described by limited number of iovs. 2555 * iov vector will be rebuilt in nvmf_tcp_qpair_write_pdu */ 2556 nvme_tcp_build_iovs(rsp_pdu->iov, available_iovs, rsp_pdu, tqpair->host_hdgst_enable, 2557 false, &mapped_length); 2558 2559 if (mapped_length != c2h_data->common.plen) { 2560 c2h_data->datal = mapped_length - (c2h_data->common.plen - c2h_data->datal); 2561 SPDK_DEBUGLOG(nvmf_tcp, 2562 "Part C2H, data_len %u (of %u), PDU len %u, updated PDU len %u, offset %u\n", 2563 c2h_data->datal, tcp_req->req.length, c2h_data->common.plen, mapped_length, rsp_pdu->rw_offset); 2564 c2h_data->common.plen = mapped_length; 2565 2566 /* Rebuild pdu->data_iov since data length is changed */ 2567 nvme_tcp_pdu_set_data_buf(rsp_pdu, tcp_req->req.iov, tcp_req->req.iovcnt, c2h_data->datao, 2568 c2h_data->datal); 2569 2570 c2h_data->common.flags &= ~(SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU | 2571 SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS); 2572 } 2573 2574 c2h_data->common.plen += ddgst_len; 2575 2576 assert(rsp_pdu->rw_offset <= tcp_req->req.length); 2577 2578 rc = spdk_dif_verify_stream(rsp_pdu->data_iov, rsp_pdu->data_iovcnt, 2579 0, rsp_pdu->data_len, rsp_pdu->dif_ctx, &err_blk); 2580 if (rc != 0) { 2581 SPDK_ERRLOG("DIF error detected. 
type=%d, offset=%" PRIu32 "\n", 2582 err_blk.err_type, err_blk.err_offset); 2583 rsp->status.sct = SPDK_NVME_SCT_MEDIA_ERROR; 2584 rsp->status.sc = nvmf_tcp_dif_error_to_compl_status(err_blk.err_type); 2585 nvmf_tcp_send_capsule_resp_pdu(tcp_req, tqpair); 2586 return; 2587 } 2588 } 2589 2590 rsp_pdu->rw_offset += c2h_data->datal; 2591 nvmf_tcp_qpair_write_req_pdu(tqpair, tcp_req, nvmf_tcp_pdu_c2h_data_complete, tcp_req); 2592 } 2593 2594 static void 2595 nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair, 2596 struct spdk_nvmf_tcp_req *tcp_req) 2597 { 2598 nvmf_tcp_req_pdu_init(tcp_req); 2599 _nvmf_tcp_send_c2h_data(tqpair, tcp_req); 2600 } 2601 2602 static int 2603 request_transfer_out(struct spdk_nvmf_request *req) 2604 { 2605 struct spdk_nvmf_tcp_req *tcp_req; 2606 struct spdk_nvmf_qpair *qpair; 2607 struct spdk_nvmf_tcp_qpair *tqpair; 2608 struct spdk_nvme_cpl *rsp; 2609 2610 SPDK_DEBUGLOG(nvmf_tcp, "enter\n"); 2611 2612 qpair = req->qpair; 2613 rsp = &req->rsp->nvme_cpl; 2614 tcp_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_tcp_req, req); 2615 2616 /* Advance our sq_head pointer */ 2617 if (qpair->sq_head == qpair->sq_head_max) { 2618 qpair->sq_head = 0; 2619 } else { 2620 qpair->sq_head++; 2621 } 2622 rsp->sqhd = qpair->sq_head; 2623 2624 tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct spdk_nvmf_tcp_qpair, qpair); 2625 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST); 2626 if (rsp->status.sc == SPDK_NVME_SC_SUCCESS && req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) { 2627 nvmf_tcp_send_c2h_data(tqpair, tcp_req); 2628 } else { 2629 nvmf_tcp_send_capsule_resp_pdu(tcp_req, tqpair); 2630 } 2631 2632 return 0; 2633 } 2634 2635 static void 2636 nvmf_tcp_check_fused_ordering(struct spdk_nvmf_tcp_transport *ttransport, 2637 struct spdk_nvmf_tcp_qpair *tqpair, 2638 struct spdk_nvmf_tcp_req *tcp_req) 2639 { 2640 enum spdk_nvme_cmd_fuse last, next; 2641 2642 last = tqpair->fused_first ? tqpair->fused_first->cmd.fuse : SPDK_NVME_CMD_FUSE_NONE; 2643 next = tcp_req->cmd.fuse; 2644 2645 assert(last != SPDK_NVME_CMD_FUSE_SECOND); 2646 2647 if (spdk_likely(last == SPDK_NVME_CMD_FUSE_NONE && next == SPDK_NVME_CMD_FUSE_NONE)) { 2648 return; 2649 } 2650 2651 if (last == SPDK_NVME_CMD_FUSE_FIRST) { 2652 if (next == SPDK_NVME_CMD_FUSE_SECOND) { 2653 /* This is a valid pair of fused commands. Point them at each other 2654 * so they can be submitted consecutively once ready to be executed. 2655 */ 2656 tqpair->fused_first->fused_pair = tcp_req; 2657 tcp_req->fused_pair = tqpair->fused_first; 2658 tqpair->fused_first = NULL; 2659 return; 2660 } else { 2661 /* Mark the last req as failed since it wasn't followed by a SECOND. */ 2662 tqpair->fused_first->fused_failed = true; 2663 2664 /* 2665 * If the last req is in READY_TO_EXECUTE state, then call 2666 * nvmf_tcp_req_process(), otherwise nothing else will kick it. 2667 */ 2668 if (tqpair->fused_first->state == TCP_REQUEST_STATE_READY_TO_EXECUTE) { 2669 nvmf_tcp_req_process(ttransport, tqpair->fused_first); 2670 } 2671 2672 tqpair->fused_first = NULL; 2673 } 2674 } 2675 2676 if (next == SPDK_NVME_CMD_FUSE_FIRST) { 2677 /* Set tqpair->fused_first here so that we know to check that the next request 2678 * is a SECOND (and to fail this one if it isn't). 2679 */ 2680 tqpair->fused_first = tcp_req; 2681 } else if (next == SPDK_NVME_CMD_FUSE_SECOND) { 2682 /* Mark this req failed since it is a SECOND and the last one was not a FIRST. 
*/ 2683 tcp_req->fused_failed = true; 2684 } 2685 } 2686 2687 static bool 2688 nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport, 2689 struct spdk_nvmf_tcp_req *tcp_req) 2690 { 2691 struct spdk_nvmf_tcp_qpair *tqpair; 2692 uint32_t plen; 2693 struct nvme_tcp_pdu *pdu; 2694 enum spdk_nvmf_tcp_req_state prev_state; 2695 bool progress = false; 2696 struct spdk_nvmf_transport *transport = &ttransport->transport; 2697 struct spdk_nvmf_transport_poll_group *group; 2698 struct spdk_nvmf_tcp_poll_group *tgroup; 2699 2700 tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct spdk_nvmf_tcp_qpair, qpair); 2701 group = &tqpair->group->group; 2702 assert(tcp_req->state != TCP_REQUEST_STATE_FREE); 2703 2704 /* If the qpair is not active, we need to abort the outstanding requests. */ 2705 if (tqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) { 2706 if (tcp_req->state == TCP_REQUEST_STATE_NEED_BUFFER) { 2707 STAILQ_REMOVE(&group->pending_buf_queue, &tcp_req->req, spdk_nvmf_request, buf_link); 2708 } 2709 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_COMPLETED); 2710 } 2711 2712 /* The loop here is to allow for several back-to-back state changes. */ 2713 do { 2714 prev_state = tcp_req->state; 2715 2716 SPDK_DEBUGLOG(nvmf_tcp, "Request %p entering state %d on tqpair=%p\n", tcp_req, prev_state, 2717 tqpair); 2718 2719 switch (tcp_req->state) { 2720 case TCP_REQUEST_STATE_FREE: 2721 /* Some external code must kick a request into TCP_REQUEST_STATE_NEW 2722 * to escape this state. */ 2723 break; 2724 case TCP_REQUEST_STATE_NEW: 2725 spdk_trace_record(TRACE_TCP_REQUEST_STATE_NEW, tqpair->qpair.qid, 0, (uintptr_t)tcp_req, tqpair); 2726 2727 /* copy the cmd from the receive pdu */ 2728 tcp_req->cmd = tqpair->pdu_in_progress->hdr.capsule_cmd.ccsqe; 2729 2730 if (spdk_unlikely(spdk_nvmf_request_get_dif_ctx(&tcp_req->req, &tcp_req->req.dif.dif_ctx))) { 2731 tcp_req->req.dif_enabled = true; 2732 tqpair->pdu_in_progress->dif_ctx = &tcp_req->req.dif.dif_ctx; 2733 } 2734 2735 nvmf_tcp_check_fused_ordering(ttransport, tqpair, tcp_req); 2736 2737 /* The next state transition depends on the data transfer needs of this request. */ 2738 tcp_req->req.xfer = spdk_nvmf_req_get_xfer(&tcp_req->req); 2739 2740 if (spdk_unlikely(tcp_req->req.xfer == SPDK_NVME_DATA_BIDIRECTIONAL)) { 2741 tcp_req->req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 2742 tcp_req->req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE; 2743 tcp_req->req.rsp->nvme_cpl.cid = tcp_req->req.cmd->nvme_cmd.cid; 2744 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY); 2745 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE); 2746 SPDK_DEBUGLOG(nvmf_tcp, "Request %p: invalid xfer type (BIDIRECTIONAL)\n", tcp_req); 2747 break; 2748 } 2749 2750 /* If no data to transfer, ready to execute. 
*/ 2751 if (tcp_req->req.xfer == SPDK_NVME_DATA_NONE) { 2752 /* Reset the tqpair receiving pdu state */ 2753 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY); 2754 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE); 2755 break; 2756 } 2757 2758 pdu = tqpair->pdu_in_progress; 2759 plen = pdu->hdr.common.hlen; 2760 if (tqpair->host_hdgst_enable) { 2761 plen += SPDK_NVME_TCP_DIGEST_LEN; 2762 } 2763 if (pdu->hdr.common.plen != plen) { 2764 tcp_req->has_in_capsule_data = true; 2765 } else { 2766 /* Data is transmitted by C2H PDUs */ 2767 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY); 2768 } 2769 2770 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_NEED_BUFFER); 2771 STAILQ_INSERT_TAIL(&group->pending_buf_queue, &tcp_req->req, buf_link); 2772 break; 2773 case TCP_REQUEST_STATE_NEED_BUFFER: 2774 spdk_trace_record(TRACE_TCP_REQUEST_STATE_NEED_BUFFER, tqpair->qpair.qid, 0, (uintptr_t)tcp_req, 2775 tqpair); 2776 2777 assert(tcp_req->req.xfer != SPDK_NVME_DATA_NONE); 2778 2779 if (!tcp_req->has_in_capsule_data && (&tcp_req->req != STAILQ_FIRST(&group->pending_buf_queue))) { 2780 SPDK_DEBUGLOG(nvmf_tcp, 2781 "Not the first element to wait for the buf for tcp_req(%p) on tqpair=%p\n", 2782 tcp_req, tqpair); 2783 /* This request needs to wait in line to obtain a buffer */ 2784 break; 2785 } 2786 2787 /* Try to get a data buffer */ 2788 if (nvmf_tcp_req_parse_sgl(tcp_req, transport, group) < 0) { 2789 break; 2790 } 2791 2792 /* Get a zcopy buffer if the request can be serviced through zcopy */ 2793 if (spdk_nvmf_request_using_zcopy(&tcp_req->req)) { 2794 if (spdk_unlikely(tcp_req->req.dif_enabled)) { 2795 assert(tcp_req->req.dif.elba_length >= tcp_req->req.length); 2796 tcp_req->req.length = tcp_req->req.dif.elba_length; 2797 } 2798 2799 STAILQ_REMOVE(&group->pending_buf_queue, &tcp_req->req, spdk_nvmf_request, buf_link); 2800 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_AWAITING_ZCOPY_START); 2801 spdk_nvmf_request_zcopy_start(&tcp_req->req); 2802 break; 2803 } 2804 2805 if (tcp_req->req.iovcnt < 1) { 2806 SPDK_DEBUGLOG(nvmf_tcp, "No buffer allocated for tcp_req(%p) on tqpair(%p\n)", 2807 tcp_req, tqpair); 2808 /* No buffers available. */ 2809 break; 2810 } 2811 2812 STAILQ_REMOVE(&group->pending_buf_queue, &tcp_req->req, spdk_nvmf_request, buf_link); 2813 2814 /* If data is transferring from host to controller, we need to do a transfer from the host. 
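 * Requests backed by pool buffers go through an R2T exchange; requests carrying in-capsule
 * data read the payload directly from the PDU that delivered the command.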
*/ 2815 if (tcp_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) { 2816 if (tcp_req->req.data_from_pool) { 2817 SPDK_DEBUGLOG(nvmf_tcp, "Sending R2T for tcp_req(%p) on tqpair=%p\n", tcp_req, tqpair); 2818 nvmf_tcp_send_r2t_pdu(tqpair, tcp_req); 2819 } else { 2820 struct nvme_tcp_pdu *pdu; 2821 2822 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER); 2823 2824 pdu = tqpair->pdu_in_progress; 2825 SPDK_DEBUGLOG(nvmf_tcp, "Not need to send r2t for tcp_req(%p) on tqpair=%p\n", tcp_req, 2826 tqpair); 2827 /* No need to send r2t, contained in the capsuled data */ 2828 nvme_tcp_pdu_set_data_buf(pdu, tcp_req->req.iov, tcp_req->req.iovcnt, 2829 0, tcp_req->req.length); 2830 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD); 2831 } 2832 break; 2833 } 2834 2835 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE); 2836 break; 2837 case TCP_REQUEST_STATE_AWAITING_ZCOPY_START: 2838 spdk_trace_record(TRACE_TCP_REQUEST_STATE_AWAIT_ZCOPY_START, tqpair->qpair.qid, 0, 2839 (uintptr_t)tcp_req, tqpair); 2840 /* Some external code must kick a request into TCP_REQUEST_STATE_ZCOPY_START_COMPLETED 2841 * to escape this state. */ 2842 break; 2843 case TCP_REQUEST_STATE_ZCOPY_START_COMPLETED: 2844 spdk_trace_record(TRACE_TCP_REQUEST_STATE_ZCOPY_START_COMPLETED, tqpair->qpair.qid, 0, 2845 (uintptr_t)tcp_req, tqpair); 2846 if (spdk_unlikely(spdk_nvme_cpl_is_error(&tcp_req->req.rsp->nvme_cpl))) { 2847 SPDK_DEBUGLOG(nvmf_tcp, "Zero-copy start failed for tcp_req(%p) on tqpair=%p\n", 2848 tcp_req, tqpair); 2849 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE); 2850 break; 2851 } 2852 if (tcp_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) { 2853 SPDK_DEBUGLOG(nvmf_tcp, "Sending R2T for tcp_req(%p) on tqpair=%p\n", tcp_req, tqpair); 2854 nvmf_tcp_send_r2t_pdu(tqpair, tcp_req); 2855 } else { 2856 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_EXECUTED); 2857 } 2858 break; 2859 case TCP_REQUEST_STATE_AWAITING_R2T_ACK: 2860 spdk_trace_record(TRACE_TCP_REQUEST_STATE_AWAIT_R2T_ACK, tqpair->qpair.qid, 0, (uintptr_t)tcp_req, 2861 tqpair); 2862 /* The R2T completion or the h2c data incoming will kick it out of this state. */ 2863 break; 2864 case TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER: 2865 2866 spdk_trace_record(TRACE_TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER, tqpair->qpair.qid, 0, 2867 (uintptr_t)tcp_req, tqpair); 2868 /* Some external code must kick a request into TCP_REQUEST_STATE_READY_TO_EXECUTE 2869 * to escape this state. */ 2870 break; 2871 case TCP_REQUEST_STATE_READY_TO_EXECUTE: 2872 spdk_trace_record(TRACE_TCP_REQUEST_STATE_READY_TO_EXECUTE, tqpair->qpair.qid, 0, 2873 (uintptr_t)tcp_req, tqpair); 2874 2875 if (spdk_unlikely(tcp_req->req.dif_enabled)) { 2876 assert(tcp_req->req.dif.elba_length >= tcp_req->req.length); 2877 tcp_req->req.length = tcp_req->req.dif.elba_length; 2878 } 2879 2880 if (tcp_req->cmd.fuse != SPDK_NVME_CMD_FUSE_NONE) { 2881 if (tcp_req->fused_failed) { 2882 /* This request failed FUSED semantics. Fail it immediately, without 2883 * even sending it to the target layer. 
2884 */ 2885 tcp_req->req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 2886 tcp_req->req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED; 2887 tcp_req->req.rsp->nvme_cpl.cid = tcp_req->req.cmd->nvme_cmd.cid; 2888 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE); 2889 break; 2890 } 2891 2892 if (tcp_req->fused_pair == NULL || 2893 tcp_req->fused_pair->state != TCP_REQUEST_STATE_READY_TO_EXECUTE) { 2894 /* This request is ready to execute, but either we don't know yet if it's 2895 * valid - i.e. this is a FIRST but we haven't received the next request yet), 2896 * or the other request of this fused pair isn't ready to execute. So 2897 * break here and this request will get processed later either when the 2898 * other request is ready or we find that this request isn't valid. 2899 */ 2900 break; 2901 } 2902 } 2903 2904 if (!spdk_nvmf_request_using_zcopy(&tcp_req->req)) { 2905 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_EXECUTING); 2906 /* If we get to this point, and this request is a fused command, we know that 2907 * it is part of a valid sequence (FIRST followed by a SECOND) and that both 2908 * requests are READY_TO_EXECUTE. So call spdk_nvmf_request_exec() both on this 2909 * request, and the other request of the fused pair, in the correct order. 2910 * Also clear the ->fused_pair pointers on both requests, since after this point 2911 * we no longer need to maintain the relationship between these two requests. 2912 */ 2913 if (tcp_req->cmd.fuse == SPDK_NVME_CMD_FUSE_SECOND) { 2914 assert(tcp_req->fused_pair != NULL); 2915 assert(tcp_req->fused_pair->fused_pair == tcp_req); 2916 nvmf_tcp_req_set_state(tcp_req->fused_pair, TCP_REQUEST_STATE_EXECUTING); 2917 spdk_nvmf_request_exec(&tcp_req->fused_pair->req); 2918 tcp_req->fused_pair->fused_pair = NULL; 2919 tcp_req->fused_pair = NULL; 2920 } 2921 spdk_nvmf_request_exec(&tcp_req->req); 2922 if (tcp_req->cmd.fuse == SPDK_NVME_CMD_FUSE_FIRST) { 2923 assert(tcp_req->fused_pair != NULL); 2924 assert(tcp_req->fused_pair->fused_pair == tcp_req); 2925 nvmf_tcp_req_set_state(tcp_req->fused_pair, TCP_REQUEST_STATE_EXECUTING); 2926 spdk_nvmf_request_exec(&tcp_req->fused_pair->req); 2927 tcp_req->fused_pair->fused_pair = NULL; 2928 tcp_req->fused_pair = NULL; 2929 } 2930 } else { 2931 /* For zero-copy, only requests with data coming from host to the 2932 * controller can end up here. */ 2933 assert(tcp_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER); 2934 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_AWAITING_ZCOPY_COMMIT); 2935 spdk_nvmf_request_zcopy_end(&tcp_req->req, true); 2936 } 2937 2938 break; 2939 case TCP_REQUEST_STATE_EXECUTING: 2940 spdk_trace_record(TRACE_TCP_REQUEST_STATE_EXECUTING, tqpair->qpair.qid, 0, (uintptr_t)tcp_req, 2941 tqpair); 2942 /* Some external code must kick a request into TCP_REQUEST_STATE_EXECUTED 2943 * to escape this state. */ 2944 break; 2945 case TCP_REQUEST_STATE_AWAITING_ZCOPY_COMMIT: 2946 spdk_trace_record(TRACE_TCP_REQUEST_STATE_AWAIT_ZCOPY_COMMIT, tqpair->qpair.qid, 0, 2947 (uintptr_t)tcp_req, tqpair); 2948 /* Some external code must kick a request into TCP_REQUEST_STATE_EXECUTED 2949 * to escape this state. 
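 * nvmf_tcp_req_complete() performs that transition once the zcopy commit started by
 * spdk_nvmf_request_zcopy_end() finishes.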
*/ 2950 break; 2951 case TCP_REQUEST_STATE_EXECUTED: 2952 spdk_trace_record(TRACE_TCP_REQUEST_STATE_EXECUTED, tqpair->qpair.qid, 0, (uintptr_t)tcp_req, 2953 tqpair); 2954 2955 if (spdk_unlikely(tcp_req->req.dif_enabled)) { 2956 tcp_req->req.length = tcp_req->req.dif.orig_length; 2957 } 2958 2959 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE); 2960 break; 2961 case TCP_REQUEST_STATE_READY_TO_COMPLETE: 2962 spdk_trace_record(TRACE_TCP_REQUEST_STATE_READY_TO_COMPLETE, tqpair->qpair.qid, 0, 2963 (uintptr_t)tcp_req, tqpair); 2964 if (request_transfer_out(&tcp_req->req) != 0) { 2965 assert(0); /* No good way to handle this currently */ 2966 } 2967 break; 2968 case TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST: 2969 spdk_trace_record(TRACE_TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST, tqpair->qpair.qid, 0, 2970 (uintptr_t)tcp_req, tqpair); 2971 /* Some external code must kick a request into TCP_REQUEST_STATE_COMPLETED 2972 * to escape this state. */ 2973 break; 2974 case TCP_REQUEST_STATE_AWAITING_ZCOPY_RELEASE: 2975 spdk_trace_record(TRACE_TCP_REQUEST_STATE_AWAIT_ZCOPY_RELEASE, tqpair->qpair.qid, 0, 2976 (uintptr_t)tcp_req, tqpair); 2977 /* Some external code must kick a request into TCP_REQUEST_STATE_COMPLETED 2978 * to escape this state. */ 2979 break; 2980 case TCP_REQUEST_STATE_COMPLETED: 2981 spdk_trace_record(TRACE_TCP_REQUEST_STATE_COMPLETED, tqpair->qpair.qid, 0, (uintptr_t)tcp_req, 2982 tqpair); 2983 /* If there's an outstanding PDU sent to the host, the request is completed 2984 * due to the qpair being disconnected. We must delay the completion until 2985 * that write is done to avoid freeing the request twice. */ 2986 if (spdk_unlikely(tcp_req->pdu_in_use)) { 2987 SPDK_DEBUGLOG(nvmf_tcp, "Delaying completion due to outstanding " 2988 "write on req=%p\n", tcp_req); 2989 /* This can only happen for zcopy requests */ 2990 assert(spdk_nvmf_request_using_zcopy(&tcp_req->req)); 2991 assert(tqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE); 2992 break; 2993 } 2994 2995 if (tcp_req->req.data_from_pool) { 2996 spdk_nvmf_request_free_buffers(&tcp_req->req, group, transport); 2997 } else if (spdk_unlikely(tcp_req->has_in_capsule_data && 2998 (tcp_req->cmd.opc == SPDK_NVME_OPC_FABRIC || 2999 tqpair->qpair.qid == 0) && tcp_req->req.length > transport->opts.in_capsule_data_size)) { 3000 tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group); 3001 assert(tgroup->control_msg_list); 3002 SPDK_DEBUGLOG(nvmf_tcp, "Put buf to control msg list\n"); 3003 nvmf_tcp_control_msg_put(tgroup->control_msg_list, 3004 tcp_req->req.iov[0].iov_base); 3005 } else if (tcp_req->req.zcopy_bdev_io != NULL) { 3006 /* If the request has an unreleased zcopy bdev_io, it's either a 3007 * read, a failed write, or the qpair is being disconnected */ 3008 assert(spdk_nvmf_request_using_zcopy(&tcp_req->req)); 3009 assert(tcp_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST || 3010 spdk_nvme_cpl_is_error(&tcp_req->req.rsp->nvme_cpl) || 3011 tqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE); 3012 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_AWAITING_ZCOPY_RELEASE); 3013 spdk_nvmf_request_zcopy_end(&tcp_req->req, false); 3014 break; 3015 } 3016 tcp_req->req.length = 0; 3017 tcp_req->req.iovcnt = 0; 3018 tcp_req->req.data = NULL; 3019 tcp_req->fused_failed = false; 3020 if (tcp_req->fused_pair) { 3021 /* This req was part of a valid fused pair, but failed before it got to 3022 * READ_TO_EXECUTE state. 
This means we need to fail the other request 3023 * in the pair, because it is no longer part of a valid pair. If the pair 3024 * already reached READY_TO_EXECUTE state, we need to kick it. 3025 */ 3026 tcp_req->fused_pair->fused_failed = true; 3027 if (tcp_req->fused_pair->state == TCP_REQUEST_STATE_READY_TO_EXECUTE) { 3028 nvmf_tcp_req_process(ttransport, tcp_req->fused_pair); 3029 } 3030 tcp_req->fused_pair = NULL; 3031 } 3032 3033 nvmf_tcp_req_put(tqpair, tcp_req); 3034 break; 3035 case TCP_REQUEST_NUM_STATES: 3036 default: 3037 assert(0); 3038 break; 3039 } 3040 3041 if (tcp_req->state != prev_state) { 3042 progress = true; 3043 } 3044 } while (tcp_req->state != prev_state); 3045 3046 return progress; 3047 } 3048 3049 static void 3050 nvmf_tcp_sock_cb(void *arg, struct spdk_sock_group *group, struct spdk_sock *sock) 3051 { 3052 struct spdk_nvmf_tcp_qpair *tqpair = arg; 3053 int rc; 3054 3055 assert(tqpair != NULL); 3056 rc = nvmf_tcp_sock_process(tqpair); 3057 3058 /* If there was a new socket error, disconnect */ 3059 if (rc < 0) { 3060 nvmf_tcp_qpair_disconnect(tqpair); 3061 } 3062 } 3063 3064 static int 3065 nvmf_tcp_poll_group_add(struct spdk_nvmf_transport_poll_group *group, 3066 struct spdk_nvmf_qpair *qpair) 3067 { 3068 struct spdk_nvmf_tcp_poll_group *tgroup; 3069 struct spdk_nvmf_tcp_qpair *tqpair; 3070 int rc; 3071 3072 tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group); 3073 tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair); 3074 3075 rc = nvmf_tcp_qpair_sock_init(tqpair); 3076 if (rc != 0) { 3077 SPDK_ERRLOG("Cannot set sock opt for tqpair=%p\n", tqpair); 3078 return -1; 3079 } 3080 3081 rc = nvmf_tcp_qpair_init(&tqpair->qpair); 3082 if (rc < 0) { 3083 SPDK_ERRLOG("Cannot init tqpair=%p\n", tqpair); 3084 return -1; 3085 } 3086 3087 rc = nvmf_tcp_qpair_init_mem_resource(tqpair); 3088 if (rc < 0) { 3089 SPDK_ERRLOG("Cannot init memory resource info for tqpair=%p\n", tqpair); 3090 return -1; 3091 } 3092 3093 rc = spdk_sock_group_add_sock(tgroup->sock_group, tqpair->sock, 3094 nvmf_tcp_sock_cb, tqpair); 3095 if (rc != 0) { 3096 SPDK_ERRLOG("Could not add sock to sock_group: %s (%d)\n", 3097 spdk_strerror(errno), errno); 3098 return -1; 3099 } 3100 3101 tqpair->group = tgroup; 3102 nvmf_tcp_qpair_set_state(tqpair, NVME_TCP_QPAIR_STATE_INVALID); 3103 TAILQ_INSERT_TAIL(&tgroup->qpairs, tqpair, link); 3104 3105 return 0; 3106 } 3107 3108 static int 3109 nvmf_tcp_poll_group_remove(struct spdk_nvmf_transport_poll_group *group, 3110 struct spdk_nvmf_qpair *qpair) 3111 { 3112 struct spdk_nvmf_tcp_poll_group *tgroup; 3113 struct spdk_nvmf_tcp_qpair *tqpair; 3114 int rc; 3115 3116 tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group); 3117 tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair); 3118 3119 assert(tqpair->group == tgroup); 3120 3121 SPDK_DEBUGLOG(nvmf_tcp, "remove tqpair=%p from the tgroup=%p\n", tqpair, tgroup); 3122 if (tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_REQ) { 3123 TAILQ_REMOVE(&tgroup->await_req, tqpair, link); 3124 } else { 3125 TAILQ_REMOVE(&tgroup->qpairs, tqpair, link); 3126 } 3127 3128 rc = spdk_sock_group_remove_sock(tgroup->sock_group, tqpair->sock); 3129 if (rc != 0) { 3130 SPDK_ERRLOG("Could not remove sock from sock_group: %s (%d)\n", 3131 spdk_strerror(errno), errno); 3132 } 3133 3134 return rc; 3135 } 3136 3137 static int 3138 nvmf_tcp_req_complete(struct spdk_nvmf_request *req) 3139 { 3140 struct spdk_nvmf_tcp_transport *ttransport; 3141 struct spdk_nvmf_tcp_req 
*tcp_req; 3142 3143 ttransport = SPDK_CONTAINEROF(req->qpair->transport, struct spdk_nvmf_tcp_transport, transport); 3144 tcp_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_tcp_req, req); 3145 3146 switch (tcp_req->state) { 3147 case TCP_REQUEST_STATE_EXECUTING: 3148 case TCP_REQUEST_STATE_AWAITING_ZCOPY_COMMIT: 3149 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_EXECUTED); 3150 break; 3151 case TCP_REQUEST_STATE_AWAITING_ZCOPY_START: 3152 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_ZCOPY_START_COMPLETED); 3153 break; 3154 case TCP_REQUEST_STATE_AWAITING_ZCOPY_RELEASE: 3155 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_COMPLETED); 3156 break; 3157 default: 3158 assert(0 && "Unexpected request state"); 3159 break; 3160 } 3161 3162 nvmf_tcp_req_process(ttransport, tcp_req); 3163 3164 return 0; 3165 } 3166 3167 static void 3168 nvmf_tcp_close_qpair(struct spdk_nvmf_qpair *qpair, 3169 spdk_nvmf_transport_qpair_fini_cb cb_fn, void *cb_arg) 3170 { 3171 struct spdk_nvmf_tcp_qpair *tqpair; 3172 3173 SPDK_DEBUGLOG(nvmf_tcp, "Qpair: %p\n", qpair); 3174 3175 tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair); 3176 3177 assert(tqpair->fini_cb_fn == NULL); 3178 tqpair->fini_cb_fn = cb_fn; 3179 tqpair->fini_cb_arg = cb_arg; 3180 3181 nvmf_tcp_qpair_set_state(tqpair, NVME_TCP_QPAIR_STATE_EXITED); 3182 nvmf_tcp_qpair_destroy(tqpair); 3183 } 3184 3185 static int 3186 nvmf_tcp_poll_group_poll(struct spdk_nvmf_transport_poll_group *group) 3187 { 3188 struct spdk_nvmf_tcp_poll_group *tgroup; 3189 int rc; 3190 struct spdk_nvmf_request *req, *req_tmp; 3191 struct spdk_nvmf_tcp_req *tcp_req; 3192 struct spdk_nvmf_tcp_qpair *tqpair, *tqpair_tmp; 3193 struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF(group->transport, 3194 struct spdk_nvmf_tcp_transport, transport); 3195 3196 tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group); 3197 3198 if (spdk_unlikely(TAILQ_EMPTY(&tgroup->qpairs) && TAILQ_EMPTY(&tgroup->await_req))) { 3199 return 0; 3200 } 3201 3202 STAILQ_FOREACH_SAFE(req, &group->pending_buf_queue, buf_link, req_tmp) { 3203 tcp_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_tcp_req, req); 3204 if (nvmf_tcp_req_process(ttransport, tcp_req) == false) { 3205 break; 3206 } 3207 } 3208 3209 rc = spdk_sock_group_poll(tgroup->sock_group); 3210 if (rc < 0) { 3211 SPDK_ERRLOG("Failed to poll sock_group=%p\n", tgroup->sock_group); 3212 } 3213 3214 TAILQ_FOREACH_SAFE(tqpair, &tgroup->await_req, link, tqpair_tmp) { 3215 nvmf_tcp_sock_process(tqpair); 3216 } 3217 3218 return rc; 3219 } 3220 3221 static int 3222 nvmf_tcp_qpair_get_trid(struct spdk_nvmf_qpair *qpair, 3223 struct spdk_nvme_transport_id *trid, bool peer) 3224 { 3225 struct spdk_nvmf_tcp_qpair *tqpair; 3226 uint16_t port; 3227 3228 tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair); 3229 spdk_nvme_trid_populate_transport(trid, SPDK_NVME_TRANSPORT_TCP); 3230 3231 if (peer) { 3232 snprintf(trid->traddr, sizeof(trid->traddr), "%s", tqpair->initiator_addr); 3233 port = tqpair->initiator_port; 3234 } else { 3235 snprintf(trid->traddr, sizeof(trid->traddr), "%s", tqpair->target_addr); 3236 port = tqpair->target_port; 3237 } 3238 3239 if (spdk_sock_is_ipv4(tqpair->sock)) { 3240 trid->adrfam = SPDK_NVMF_ADRFAM_IPV4; 3241 } else if (spdk_sock_is_ipv6(tqpair->sock)) { 3242 trid->adrfam = SPDK_NVMF_ADRFAM_IPV6; 3243 } else { 3244 return -1; 3245 } 3246 3247 snprintf(trid->trsvcid, sizeof(trid->trsvcid), "%d", port); 3248 return 0; 3249 } 3250 3251 static int 3252 
nvmf_tcp_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair, 3253 struct spdk_nvme_transport_id *trid) 3254 { 3255 return nvmf_tcp_qpair_get_trid(qpair, trid, 0); 3256 } 3257 3258 static int 3259 nvmf_tcp_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair, 3260 struct spdk_nvme_transport_id *trid) 3261 { 3262 return nvmf_tcp_qpair_get_trid(qpair, trid, 1); 3263 } 3264 3265 static int 3266 nvmf_tcp_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair, 3267 struct spdk_nvme_transport_id *trid) 3268 { 3269 return nvmf_tcp_qpair_get_trid(qpair, trid, 0); 3270 } 3271 3272 static void 3273 nvmf_tcp_req_set_abort_status(struct spdk_nvmf_request *req, 3274 struct spdk_nvmf_tcp_req *tcp_req_to_abort) 3275 { 3276 tcp_req_to_abort->req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3277 tcp_req_to_abort->req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 3278 tcp_req_to_abort->req.rsp->nvme_cpl.cid = tcp_req_to_abort->req.cmd->nvme_cmd.cid; 3279 3280 nvmf_tcp_req_set_state(tcp_req_to_abort, TCP_REQUEST_STATE_READY_TO_COMPLETE); 3281 3282 req->rsp->nvme_cpl.cdw0 &= ~1U; /* Command was successfully aborted. */ 3283 } 3284 3285 static int 3286 _nvmf_tcp_qpair_abort_request(void *ctx) 3287 { 3288 struct spdk_nvmf_request *req = ctx; 3289 struct spdk_nvmf_tcp_req *tcp_req_to_abort = SPDK_CONTAINEROF(req->req_to_abort, 3290 struct spdk_nvmf_tcp_req, req); 3291 struct spdk_nvmf_tcp_qpair *tqpair = SPDK_CONTAINEROF(req->req_to_abort->qpair, 3292 struct spdk_nvmf_tcp_qpair, qpair); 3293 struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF(tqpair->qpair.transport, 3294 struct spdk_nvmf_tcp_transport, transport); 3295 int rc; 3296 3297 spdk_poller_unregister(&req->poller); 3298 3299 switch (tcp_req_to_abort->state) { 3300 case TCP_REQUEST_STATE_EXECUTING: 3301 case TCP_REQUEST_STATE_AWAITING_ZCOPY_START: 3302 case TCP_REQUEST_STATE_AWAITING_ZCOPY_COMMIT: 3303 rc = nvmf_ctrlr_abort_request(req); 3304 if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS) { 3305 return SPDK_POLLER_BUSY; 3306 } 3307 break; 3308 3309 case TCP_REQUEST_STATE_NEED_BUFFER: 3310 STAILQ_REMOVE(&tqpair->group->group.pending_buf_queue, 3311 &tcp_req_to_abort->req, spdk_nvmf_request, buf_link); 3312 3313 nvmf_tcp_req_set_abort_status(req, tcp_req_to_abort); 3314 nvmf_tcp_req_process(ttransport, tcp_req_to_abort); 3315 break; 3316 3317 case TCP_REQUEST_STATE_AWAITING_R2T_ACK: 3318 case TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER: 3319 if (spdk_get_ticks() < req->timeout_tsc) { 3320 req->poller = SPDK_POLLER_REGISTER(_nvmf_tcp_qpair_abort_request, req, 0); 3321 return SPDK_POLLER_BUSY; 3322 } 3323 break; 3324 3325 default: 3326 /* Requests in other states are either un-abortable (e.g. 3327 * TRANSFERRING_CONTROLLER_TO_HOST) or should never end up here, as they're 3328 * immediately transitioned to other states in nvmf_tcp_req_process() (e.g. 3329 * READY_TO_EXECUTE). But it is fine to end up here, as we'll simply complete the 3330 * abort request with the bit0 of dword0 set (command not aborted). 
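 * spdk_nvmf_request_complete() below returns that completion to the host.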
3331 */ 3332 break; 3333 } 3334 3335 spdk_nvmf_request_complete(req); 3336 return SPDK_POLLER_BUSY; 3337 } 3338 3339 static void 3340 nvmf_tcp_qpair_abort_request(struct spdk_nvmf_qpair *qpair, 3341 struct spdk_nvmf_request *req) 3342 { 3343 struct spdk_nvmf_tcp_qpair *tqpair; 3344 struct spdk_nvmf_tcp_transport *ttransport; 3345 struct spdk_nvmf_transport *transport; 3346 uint16_t cid; 3347 uint32_t i; 3348 struct spdk_nvmf_tcp_req *tcp_req_to_abort = NULL; 3349 3350 tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair); 3351 ttransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_tcp_transport, transport); 3352 transport = &ttransport->transport; 3353 3354 cid = req->cmd->nvme_cmd.cdw10_bits.abort.cid; 3355 3356 for (i = 0; i < tqpair->resource_count; i++) { 3357 if (tqpair->reqs[i].state != TCP_REQUEST_STATE_FREE && 3358 tqpair->reqs[i].req.cmd->nvme_cmd.cid == cid) { 3359 tcp_req_to_abort = &tqpair->reqs[i]; 3360 break; 3361 } 3362 } 3363 3364 spdk_trace_record(TRACE_TCP_QP_ABORT_REQ, qpair->qid, 0, (uintptr_t)req, tqpair); 3365 3366 if (tcp_req_to_abort == NULL) { 3367 spdk_nvmf_request_complete(req); 3368 return; 3369 } 3370 3371 req->req_to_abort = &tcp_req_to_abort->req; 3372 req->timeout_tsc = spdk_get_ticks() + 3373 transport->opts.abort_timeout_sec * spdk_get_ticks_hz(); 3374 req->poller = NULL; 3375 3376 _nvmf_tcp_qpair_abort_request(req); 3377 } 3378 3379 static void 3380 nvmf_tcp_opts_init(struct spdk_nvmf_transport_opts *opts) 3381 { 3382 opts->max_queue_depth = SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH; 3383 opts->max_qpairs_per_ctrlr = SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR; 3384 opts->in_capsule_data_size = SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE; 3385 opts->max_io_size = SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE; 3386 opts->io_unit_size = SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE; 3387 opts->max_aq_depth = SPDK_NVMF_TCP_DEFAULT_MAX_ADMIN_QUEUE_DEPTH; 3388 opts->num_shared_buffers = SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS; 3389 opts->buf_cache_size = SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE; 3390 opts->dif_insert_or_strip = SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP; 3391 opts->abort_timeout_sec = SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC; 3392 opts->transport_specific = NULL; 3393 } 3394 3395 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp = { 3396 .name = "TCP", 3397 .type = SPDK_NVME_TRANSPORT_TCP, 3398 .opts_init = nvmf_tcp_opts_init, 3399 .create = nvmf_tcp_create, 3400 .dump_opts = nvmf_tcp_dump_opts, 3401 .destroy = nvmf_tcp_destroy, 3402 3403 .listen = nvmf_tcp_listen, 3404 .stop_listen = nvmf_tcp_stop_listen, 3405 3406 .listener_discover = nvmf_tcp_discover, 3407 3408 .poll_group_create = nvmf_tcp_poll_group_create, 3409 .get_optimal_poll_group = nvmf_tcp_get_optimal_poll_group, 3410 .poll_group_destroy = nvmf_tcp_poll_group_destroy, 3411 .poll_group_add = nvmf_tcp_poll_group_add, 3412 .poll_group_remove = nvmf_tcp_poll_group_remove, 3413 .poll_group_poll = nvmf_tcp_poll_group_poll, 3414 3415 .req_free = nvmf_tcp_req_free, 3416 .req_complete = nvmf_tcp_req_complete, 3417 3418 .qpair_fini = nvmf_tcp_close_qpair, 3419 .qpair_get_local_trid = nvmf_tcp_qpair_get_local_trid, 3420 .qpair_get_peer_trid = nvmf_tcp_qpair_get_peer_trid, 3421 .qpair_get_listen_trid = nvmf_tcp_qpair_get_listen_trid, 3422 .qpair_abort_request = nvmf_tcp_qpair_abort_request, 3423 }; 3424 3425 SPDK_NVMF_TRANSPORT_REGISTER(tcp, &spdk_nvmf_transport_tcp); 3426 SPDK_LOG_REGISTER_COMPONENT(nvmf_tcp) 3427