1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. All rights reserved. 5 * Copyright (c) 2019, 2020 Mellanox Technologies LTD. All rights reserved. 6 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * * Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * * Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * * Neither the name of Intel Corporation nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #include "spdk/accel_engine.h" 36 #include "spdk/stdinc.h" 37 #include "spdk/crc32.h" 38 #include "spdk/endian.h" 39 #include "spdk/assert.h" 40 #include "spdk/thread.h" 41 #include "spdk/nvmf_transport.h" 42 #include "spdk/string.h" 43 #include "spdk/trace.h" 44 #include "spdk/util.h" 45 #include "spdk/log.h" 46 47 #include "spdk_internal/assert.h" 48 #include "spdk_internal/nvme_tcp.h" 49 #include "spdk_internal/sock.h" 50 51 #include "nvmf_internal.h" 52 53 #include "spdk_internal/trace_defs.h" 54 55 #define NVMF_TCP_MAX_ACCEPT_SOCK_ONE_TIME 16 56 #define SPDK_NVMF_TCP_DEFAULT_MAX_SOCK_PRIORITY 16 57 #define SPDK_NVMF_TCP_DEFAULT_SOCK_PRIORITY 0 58 #define SPDK_NVMF_TCP_DEFAULT_CONTROL_MSG_NUM 32 59 #define SPDK_NVMF_TCP_DEFAULT_SUCCESS_OPTIMIZATION true 60 61 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp; 62 63 /* spdk nvmf related structure */ 64 enum spdk_nvmf_tcp_req_state { 65 66 /* The request is not currently in use */ 67 TCP_REQUEST_STATE_FREE = 0, 68 69 /* Initial state when request first received */ 70 TCP_REQUEST_STATE_NEW = 1, 71 72 /* The request is queued until a data buffer is available. */ 73 TCP_REQUEST_STATE_NEED_BUFFER = 2, 74 75 /* The request is waiting for zcopy_start to finish */ 76 TCP_REQUEST_STATE_AWAITING_ZCOPY_START = 3, 77 78 /* The request has received a zero-copy buffer */ 79 TCP_REQUEST_STATE_ZCOPY_START_COMPLETED = 4, 80 81 /* The request is currently transferring data from the host to the controller. */ 82 TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER = 5, 83 84 /* The request is waiting for the R2T send acknowledgement. */ 85 TCP_REQUEST_STATE_AWAITING_R2T_ACK = 6, 86 87 /* The request is ready to execute at the block device */ 88 TCP_REQUEST_STATE_READY_TO_EXECUTE = 7, 89 90 /* The request is currently executing at the block device */ 91 TCP_REQUEST_STATE_EXECUTING = 8, 92 93 /* The request is waiting for zcopy buffers to be commited */ 94 TCP_REQUEST_STATE_AWAITING_ZCOPY_COMMIT = 9, 95 96 /* The request finished executing at the block device */ 97 TCP_REQUEST_STATE_EXECUTED = 10, 98 99 /* The request is ready to send a completion */ 100 TCP_REQUEST_STATE_READY_TO_COMPLETE = 11, 101 102 /* The request is currently transferring final pdus from the controller to the host. */ 103 TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST = 12, 104 105 /* The request is waiting for zcopy buffers to be released (without committing) */ 106 TCP_REQUEST_STATE_AWAITING_ZCOPY_RELEASE = 13, 107 108 /* The request completed and can be marked free. */ 109 TCP_REQUEST_STATE_COMPLETED = 14, 110 111 /* Terminator */ 112 TCP_REQUEST_NUM_STATES, 113 }; 114 115 static const char *spdk_nvmf_tcp_term_req_fes_str[] = { 116 "Invalid PDU Header Field", 117 "PDU Sequence Error", 118 "Header Digiest Error", 119 "Data Transfer Out of Range", 120 "R2T Limit Exceeded", 121 "Unsupported parameter", 122 }; 123 124 SPDK_TRACE_REGISTER_FN(nvmf_tcp_trace, "nvmf_tcp", TRACE_GROUP_NVMF_TCP) 125 { 126 spdk_trace_register_object(OBJECT_NVMF_TCP_IO, 'r'); 127 spdk_trace_register_description("TCP_REQ_NEW", 128 TRACE_TCP_REQUEST_STATE_NEW, 129 OWNER_NONE, OBJECT_NVMF_TCP_IO, 1, 130 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 131 spdk_trace_register_description("TCP_REQ_NEED_BUFFER", 132 TRACE_TCP_REQUEST_STATE_NEED_BUFFER, 133 OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 134 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 135 spdk_trace_register_description("TCP_REQ_WAIT_ZCPY_START", 136 TRACE_TCP_REQUEST_STATE_AWAIT_ZCOPY_START, 137 OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 138 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 139 spdk_trace_register_description("TCP_REQ_ZCPY_START_CPL", 140 TRACE_TCP_REQUEST_STATE_ZCOPY_START_COMPLETED, 141 OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 142 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 143 spdk_trace_register_description("TCP_REQ_TX_H_TO_C", 144 TRACE_TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER, 145 OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 146 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 147 spdk_trace_register_description("TCP_REQ_RDY_TO_EXECUTE", 148 TRACE_TCP_REQUEST_STATE_READY_TO_EXECUTE, 149 OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 150 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 151 spdk_trace_register_description("TCP_REQ_EXECUTING", 152 TRACE_TCP_REQUEST_STATE_EXECUTING, 153 OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 154 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 155 spdk_trace_register_description("TCP_REQ_WAIT_ZCPY_CMT", 156 TRACE_TCP_REQUEST_STATE_AWAIT_ZCOPY_COMMIT, 157 OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 158 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 159 spdk_trace_register_description("TCP_REQ_EXECUTED", 160 TRACE_TCP_REQUEST_STATE_EXECUTED, 161 OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 162 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 163 spdk_trace_register_description("TCP_REQ_RDY_TO_COMPLETE", 164 TRACE_TCP_REQUEST_STATE_READY_TO_COMPLETE, 165 OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 166 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 167 spdk_trace_register_description("TCP_REQ_TRANSFER_C2H", 168 TRACE_TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST, 169 OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 170 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 171 spdk_trace_register_description("TCP_REQ_AWAIT_ZCPY_RLS", 172 TRACE_TCP_REQUEST_STATE_AWAIT_ZCOPY_RELEASE, 173 OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 174 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 175 spdk_trace_register_description("TCP_REQ_COMPLETED", 176 TRACE_TCP_REQUEST_STATE_COMPLETED, 177 OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 178 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 179 spdk_trace_register_description("TCP_WRITE_START", 180 TRACE_TCP_FLUSH_WRITEBUF_START, 181 OWNER_NONE, OBJECT_NONE, 0, 182 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 183 spdk_trace_register_description("TCP_WRITE_DONE", 184 TRACE_TCP_FLUSH_WRITEBUF_DONE, 185 OWNER_NONE, OBJECT_NONE, 0, 186 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 187 spdk_trace_register_description("TCP_READ_DONE", 188 TRACE_TCP_READ_FROM_SOCKET_DONE, 189 OWNER_NONE, OBJECT_NONE, 0, 190 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 191 spdk_trace_register_description("TCP_REQ_AWAIT_R2T_ACK", 192 TRACE_TCP_REQUEST_STATE_AWAIT_R2T_ACK, 193 OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 194 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 195 196 spdk_trace_register_description("TCP_QP_CREATE", TRACE_TCP_QP_CREATE, 197 OWNER_NONE, OBJECT_NONE, 0, 198 SPDK_TRACE_ARG_TYPE_INT, ""); 199 spdk_trace_register_description("TCP_QP_SOCK_INIT", TRACE_TCP_QP_SOCK_INIT, 200 OWNER_NONE, OBJECT_NONE, 0, 201 SPDK_TRACE_ARG_TYPE_INT, ""); 202 spdk_trace_register_description("TCP_QP_STATE_CHANGE", TRACE_TCP_QP_STATE_CHANGE, 203 OWNER_NONE, OBJECT_NONE, 0, 204 SPDK_TRACE_ARG_TYPE_INT, "state"); 205 spdk_trace_register_description("TCP_QP_DISCONNECT", TRACE_TCP_QP_DISCONNECT, 206 OWNER_NONE, OBJECT_NONE, 0, 207 SPDK_TRACE_ARG_TYPE_INT, ""); 208 spdk_trace_register_description("TCP_QP_DESTROY", TRACE_TCP_QP_DESTROY, 209 OWNER_NONE, OBJECT_NONE, 0, 210 SPDK_TRACE_ARG_TYPE_INT, ""); 211 spdk_trace_register_description("TCP_QP_ABORT_REQ", TRACE_TCP_QP_ABORT_REQ, 212 OWNER_NONE, OBJECT_NONE, 0, 213 SPDK_TRACE_ARG_TYPE_PTR, "qpair"); 214 spdk_trace_register_description("TCP_QP_RCV_STATE_CHANGE", TRACE_TCP_QP_RCV_STATE_CHANGE, 215 OWNER_NONE, OBJECT_NONE, 0, 216 SPDK_TRACE_ARG_TYPE_INT, "state"); 217 218 spdk_trace_tpoint_register_relation(TRACE_BDEV_IO_START, OBJECT_NVMF_TCP_IO, 1); 219 spdk_trace_tpoint_register_relation(TRACE_BDEV_IO_DONE, OBJECT_NVMF_TCP_IO, 0); 220 } 221 222 struct spdk_nvmf_tcp_req { 223 struct spdk_nvmf_request req; 224 struct spdk_nvme_cpl rsp; 225 struct spdk_nvme_cmd cmd; 226 227 /* A PDU that can be used for sending responses. This is 228 * not the incoming PDU! */ 229 struct nvme_tcp_pdu *pdu; 230 231 /* In-capsule data buffer */ 232 uint8_t *buf; 233 /* 234 * The PDU for a request may be used multiple times in serial over 235 * the request's lifetime. For example, first to send an R2T, then 236 * to send a completion. To catch mistakes where the PDU is used 237 * twice at the same time, add a debug flag here for init/fini. 238 */ 239 bool pdu_in_use; 240 bool has_in_capsule_data; 241 242 /* transfer_tag */ 243 uint16_t ttag; 244 245 enum spdk_nvmf_tcp_req_state state; 246 247 /* 248 * h2c_offset is used when we receive the h2c_data PDU. 249 */ 250 uint32_t h2c_offset; 251 252 STAILQ_ENTRY(spdk_nvmf_tcp_req) link; 253 TAILQ_ENTRY(spdk_nvmf_tcp_req) state_link; 254 }; 255 256 struct spdk_nvmf_tcp_qpair { 257 struct spdk_nvmf_qpair qpair; 258 struct spdk_nvmf_tcp_poll_group *group; 259 struct spdk_sock *sock; 260 261 enum nvme_tcp_pdu_recv_state recv_state; 262 enum nvme_tcp_qpair_state state; 263 264 /* PDU being actively received */ 265 struct nvme_tcp_pdu *pdu_in_progress; 266 267 /* Queues to track the requests in all states */ 268 TAILQ_HEAD(, spdk_nvmf_tcp_req) tcp_req_working_queue; 269 TAILQ_HEAD(, spdk_nvmf_tcp_req) tcp_req_free_queue; 270 271 /* Number of requests in each state */ 272 uint32_t state_cntr[TCP_REQUEST_NUM_STATES]; 273 274 uint8_t cpda; 275 276 bool host_hdgst_enable; 277 bool host_ddgst_enable; 278 279 /* This is a spare PDU used for sending special management 280 * operations. Primarily, this is used for the initial 281 * connection response and c2h termination request. */ 282 struct nvme_tcp_pdu *mgmt_pdu; 283 284 /* Arrays of in-capsule buffers, requests, and pdus. 285 * Each array is 'resource_count' number of elements */ 286 void *bufs; 287 struct spdk_nvmf_tcp_req *reqs; 288 struct nvme_tcp_pdu *pdus; 289 uint32_t resource_count; 290 uint32_t recv_buf_size; 291 292 struct spdk_nvmf_tcp_port *port; 293 294 /* IP address */ 295 char initiator_addr[SPDK_NVMF_TRADDR_MAX_LEN]; 296 char target_addr[SPDK_NVMF_TRADDR_MAX_LEN]; 297 298 /* IP port */ 299 uint16_t initiator_port; 300 uint16_t target_port; 301 302 /* Timer used to destroy qpair after detecting transport error issue if initiator does 303 * not close the connection. 304 */ 305 struct spdk_poller *timeout_poller; 306 307 308 TAILQ_ENTRY(spdk_nvmf_tcp_qpair) link; 309 }; 310 311 struct spdk_nvmf_tcp_control_msg { 312 STAILQ_ENTRY(spdk_nvmf_tcp_control_msg) link; 313 }; 314 315 struct spdk_nvmf_tcp_control_msg_list { 316 void *msg_buf; 317 STAILQ_HEAD(, spdk_nvmf_tcp_control_msg) free_msgs; 318 }; 319 320 struct spdk_nvmf_tcp_poll_group { 321 struct spdk_nvmf_transport_poll_group group; 322 struct spdk_sock_group *sock_group; 323 324 TAILQ_HEAD(, spdk_nvmf_tcp_qpair) qpairs; 325 TAILQ_HEAD(, spdk_nvmf_tcp_qpair) await_req; 326 327 struct spdk_io_channel *accel_channel; 328 struct spdk_nvmf_tcp_control_msg_list *control_msg_list; 329 330 TAILQ_ENTRY(spdk_nvmf_tcp_poll_group) link; 331 }; 332 333 struct spdk_nvmf_tcp_port { 334 const struct spdk_nvme_transport_id *trid; 335 struct spdk_sock *listen_sock; 336 TAILQ_ENTRY(spdk_nvmf_tcp_port) link; 337 }; 338 339 struct tcp_transport_opts { 340 bool c2h_success; 341 uint16_t control_msg_num; 342 uint32_t sock_priority; 343 }; 344 345 struct spdk_nvmf_tcp_transport { 346 struct spdk_nvmf_transport transport; 347 struct tcp_transport_opts tcp_opts; 348 349 struct spdk_nvmf_tcp_poll_group *next_pg; 350 351 struct spdk_poller *accept_poller; 352 pthread_mutex_t lock; 353 354 TAILQ_HEAD(, spdk_nvmf_tcp_port) ports; 355 TAILQ_HEAD(, spdk_nvmf_tcp_poll_group) poll_groups; 356 }; 357 358 static const struct spdk_json_object_decoder tcp_transport_opts_decoder[] = { 359 { 360 "c2h_success", offsetof(struct tcp_transport_opts, c2h_success), 361 spdk_json_decode_bool, true 362 }, 363 { 364 "control_msg_num", offsetof(struct tcp_transport_opts, control_msg_num), 365 spdk_json_decode_uint16, true 366 }, 367 { 368 "sock_priority", offsetof(struct tcp_transport_opts, sock_priority), 369 spdk_json_decode_uint32, true 370 }, 371 }; 372 373 static bool nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport, 374 struct spdk_nvmf_tcp_req *tcp_req); 375 static void nvmf_tcp_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group); 376 377 static void _nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair, 378 struct spdk_nvmf_tcp_req *tcp_req); 379 380 static void 381 nvmf_tcp_req_set_state(struct spdk_nvmf_tcp_req *tcp_req, 382 enum spdk_nvmf_tcp_req_state state) 383 { 384 struct spdk_nvmf_qpair *qpair; 385 struct spdk_nvmf_tcp_qpair *tqpair; 386 387 qpair = tcp_req->req.qpair; 388 tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair); 389 390 assert(tqpair->state_cntr[tcp_req->state] > 0); 391 tqpair->state_cntr[tcp_req->state]--; 392 tqpair->state_cntr[state]++; 393 394 tcp_req->state = state; 395 } 396 397 static inline struct nvme_tcp_pdu * 398 nvmf_tcp_req_pdu_init(struct spdk_nvmf_tcp_req *tcp_req) 399 { 400 assert(tcp_req->pdu_in_use == false); 401 tcp_req->pdu_in_use = true; 402 403 memset(tcp_req->pdu, 0, sizeof(*tcp_req->pdu)); 404 tcp_req->pdu->qpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct spdk_nvmf_tcp_qpair, qpair); 405 406 return tcp_req->pdu; 407 } 408 409 static inline void 410 nvmf_tcp_req_pdu_fini(struct spdk_nvmf_tcp_req *tcp_req) 411 { 412 tcp_req->pdu_in_use = false; 413 } 414 415 static struct spdk_nvmf_tcp_req * 416 nvmf_tcp_req_get(struct spdk_nvmf_tcp_qpair *tqpair) 417 { 418 struct spdk_nvmf_tcp_req *tcp_req; 419 420 tcp_req = TAILQ_FIRST(&tqpair->tcp_req_free_queue); 421 if (!tcp_req) { 422 return NULL; 423 } 424 425 memset(&tcp_req->rsp, 0, sizeof(tcp_req->rsp)); 426 tcp_req->h2c_offset = 0; 427 tcp_req->has_in_capsule_data = false; 428 tcp_req->req.dif_enabled = false; 429 tcp_req->req.zcopy_phase = NVMF_ZCOPY_PHASE_NONE; 430 431 TAILQ_REMOVE(&tqpair->tcp_req_free_queue, tcp_req, state_link); 432 TAILQ_INSERT_TAIL(&tqpair->tcp_req_working_queue, tcp_req, state_link); 433 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_NEW); 434 return tcp_req; 435 } 436 437 static inline void 438 nvmf_tcp_req_put(struct spdk_nvmf_tcp_qpair *tqpair, struct spdk_nvmf_tcp_req *tcp_req) 439 { 440 TAILQ_REMOVE(&tqpair->tcp_req_working_queue, tcp_req, state_link); 441 TAILQ_INSERT_TAIL(&tqpair->tcp_req_free_queue, tcp_req, state_link); 442 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_FREE); 443 } 444 445 static void 446 nvmf_tcp_request_free(void *cb_arg) 447 { 448 struct spdk_nvmf_tcp_transport *ttransport; 449 struct spdk_nvmf_tcp_req *tcp_req = cb_arg; 450 451 assert(tcp_req != NULL); 452 453 SPDK_DEBUGLOG(nvmf_tcp, "tcp_req=%p will be freed\n", tcp_req); 454 ttransport = SPDK_CONTAINEROF(tcp_req->req.qpair->transport, 455 struct spdk_nvmf_tcp_transport, transport); 456 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_COMPLETED); 457 nvmf_tcp_req_process(ttransport, tcp_req); 458 } 459 460 static int 461 nvmf_tcp_req_free(struct spdk_nvmf_request *req) 462 { 463 struct spdk_nvmf_tcp_req *tcp_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_tcp_req, req); 464 465 nvmf_tcp_request_free(tcp_req); 466 467 return 0; 468 } 469 470 static void 471 nvmf_tcp_drain_state_queue(struct spdk_nvmf_tcp_qpair *tqpair, 472 enum spdk_nvmf_tcp_req_state state) 473 { 474 struct spdk_nvmf_tcp_req *tcp_req, *req_tmp; 475 476 assert(state != TCP_REQUEST_STATE_FREE); 477 TAILQ_FOREACH_SAFE(tcp_req, &tqpair->tcp_req_working_queue, state_link, req_tmp) { 478 if (state == tcp_req->state) { 479 nvmf_tcp_request_free(tcp_req); 480 } 481 } 482 } 483 484 static void 485 nvmf_tcp_cleanup_all_states(struct spdk_nvmf_tcp_qpair *tqpair) 486 { 487 struct spdk_nvmf_tcp_req *tcp_req, *req_tmp; 488 489 nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST); 490 nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_NEW); 491 492 /* Wipe the requests waiting for buffer from the global list */ 493 TAILQ_FOREACH_SAFE(tcp_req, &tqpair->tcp_req_working_queue, state_link, req_tmp) { 494 if (tcp_req->state == TCP_REQUEST_STATE_NEED_BUFFER) { 495 STAILQ_REMOVE(&tqpair->group->group.pending_buf_queue, &tcp_req->req, 496 spdk_nvmf_request, buf_link); 497 } 498 } 499 500 nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_NEED_BUFFER); 501 nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_EXECUTING); 502 nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER); 503 nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_AWAITING_R2T_ACK); 504 } 505 506 static void 507 nvmf_tcp_dump_qpair_req_contents(struct spdk_nvmf_tcp_qpair *tqpair) 508 { 509 int i; 510 struct spdk_nvmf_tcp_req *tcp_req; 511 512 SPDK_ERRLOG("Dumping contents of queue pair (QID %d)\n", tqpair->qpair.qid); 513 for (i = 1; i < TCP_REQUEST_NUM_STATES; i++) { 514 SPDK_ERRLOG("\tNum of requests in state[%d] = %u\n", i, tqpair->state_cntr[i]); 515 TAILQ_FOREACH(tcp_req, &tqpair->tcp_req_working_queue, state_link) { 516 if ((int)tcp_req->state == i) { 517 SPDK_ERRLOG("\t\tRequest Data From Pool: %d\n", tcp_req->req.data_from_pool); 518 SPDK_ERRLOG("\t\tRequest opcode: %d\n", tcp_req->req.cmd->nvmf_cmd.opcode); 519 } 520 } 521 } 522 } 523 524 static void 525 nvmf_tcp_qpair_destroy(struct spdk_nvmf_tcp_qpair *tqpair) 526 { 527 int err = 0; 528 529 spdk_trace_record(TRACE_TCP_QP_DESTROY, 0, 0, (uintptr_t)tqpair); 530 531 SPDK_DEBUGLOG(nvmf_tcp, "enter\n"); 532 533 err = spdk_sock_close(&tqpair->sock); 534 assert(err == 0); 535 nvmf_tcp_cleanup_all_states(tqpair); 536 537 if (tqpair->state_cntr[TCP_REQUEST_STATE_FREE] != tqpair->resource_count) { 538 SPDK_ERRLOG("tqpair(%p) free tcp request num is %u but should be %u\n", tqpair, 539 tqpair->state_cntr[TCP_REQUEST_STATE_FREE], 540 tqpair->resource_count); 541 err++; 542 } 543 544 if (err > 0) { 545 nvmf_tcp_dump_qpair_req_contents(tqpair); 546 } 547 548 spdk_dma_free(tqpair->pdus); 549 free(tqpair->reqs); 550 spdk_free(tqpair->bufs); 551 free(tqpair); 552 SPDK_DEBUGLOG(nvmf_tcp, "Leave\n"); 553 } 554 555 static void 556 nvmf_tcp_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_write_ctx *w) 557 { 558 struct spdk_nvmf_tcp_transport *ttransport; 559 assert(w != NULL); 560 561 ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport); 562 spdk_json_write_named_bool(w, "c2h_success", ttransport->tcp_opts.c2h_success); 563 spdk_json_write_named_uint32(w, "sock_priority", ttransport->tcp_opts.sock_priority); 564 } 565 566 static int 567 nvmf_tcp_destroy(struct spdk_nvmf_transport *transport, 568 spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg) 569 { 570 struct spdk_nvmf_tcp_transport *ttransport; 571 572 assert(transport != NULL); 573 ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport); 574 575 spdk_poller_unregister(&ttransport->accept_poller); 576 pthread_mutex_destroy(&ttransport->lock); 577 free(ttransport); 578 579 if (cb_fn) { 580 cb_fn(cb_arg); 581 } 582 return 0; 583 } 584 585 static int 586 nvmf_tcp_accept(void *ctx); 587 588 static struct spdk_nvmf_transport * 589 nvmf_tcp_create(struct spdk_nvmf_transport_opts *opts) 590 { 591 struct spdk_nvmf_tcp_transport *ttransport; 592 uint32_t sge_count; 593 uint32_t min_shared_buffers; 594 595 ttransport = calloc(1, sizeof(*ttransport)); 596 if (!ttransport) { 597 return NULL; 598 } 599 600 TAILQ_INIT(&ttransport->ports); 601 TAILQ_INIT(&ttransport->poll_groups); 602 603 ttransport->transport.ops = &spdk_nvmf_transport_tcp; 604 605 ttransport->tcp_opts.c2h_success = SPDK_NVMF_TCP_DEFAULT_SUCCESS_OPTIMIZATION; 606 ttransport->tcp_opts.sock_priority = SPDK_NVMF_TCP_DEFAULT_SOCK_PRIORITY; 607 ttransport->tcp_opts.control_msg_num = SPDK_NVMF_TCP_DEFAULT_CONTROL_MSG_NUM; 608 if (opts->transport_specific != NULL && 609 spdk_json_decode_object_relaxed(opts->transport_specific, tcp_transport_opts_decoder, 610 SPDK_COUNTOF(tcp_transport_opts_decoder), 611 &ttransport->tcp_opts)) { 612 SPDK_ERRLOG("spdk_json_decode_object_relaxed failed\n"); 613 free(ttransport); 614 return NULL; 615 } 616 617 SPDK_NOTICELOG("*** TCP Transport Init ***\n"); 618 619 SPDK_INFOLOG(nvmf_tcp, "*** TCP Transport Init ***\n" 620 " Transport opts: max_ioq_depth=%d, max_io_size=%d,\n" 621 " max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n" 622 " in_capsule_data_size=%d, max_aq_depth=%d\n" 623 " num_shared_buffers=%d, c2h_success=%d,\n" 624 " dif_insert_or_strip=%d, sock_priority=%d\n" 625 " abort_timeout_sec=%d, control_msg_num=%hu\n", 626 opts->max_queue_depth, 627 opts->max_io_size, 628 opts->max_qpairs_per_ctrlr - 1, 629 opts->io_unit_size, 630 opts->in_capsule_data_size, 631 opts->max_aq_depth, 632 opts->num_shared_buffers, 633 ttransport->tcp_opts.c2h_success, 634 opts->dif_insert_or_strip, 635 ttransport->tcp_opts.sock_priority, 636 opts->abort_timeout_sec, 637 ttransport->tcp_opts.control_msg_num); 638 639 if (ttransport->tcp_opts.sock_priority > SPDK_NVMF_TCP_DEFAULT_MAX_SOCK_PRIORITY) { 640 SPDK_ERRLOG("Unsupported socket_priority=%d, the current range is: 0 to %d\n" 641 "you can use man 7 socket to view the range of priority under SO_PRIORITY item\n", 642 ttransport->tcp_opts.sock_priority, SPDK_NVMF_TCP_DEFAULT_MAX_SOCK_PRIORITY); 643 free(ttransport); 644 return NULL; 645 } 646 647 if (ttransport->tcp_opts.control_msg_num == 0 && 648 opts->in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) { 649 SPDK_WARNLOG("TCP param control_msg_num can't be 0 if ICD is less than %u bytes. Using default value %u\n", 650 SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE, SPDK_NVMF_TCP_DEFAULT_CONTROL_MSG_NUM); 651 ttransport->tcp_opts.control_msg_num = SPDK_NVMF_TCP_DEFAULT_CONTROL_MSG_NUM; 652 } 653 654 /* I/O unit size cannot be larger than max I/O size */ 655 if (opts->io_unit_size > opts->max_io_size) { 656 opts->io_unit_size = opts->max_io_size; 657 } 658 659 sge_count = opts->max_io_size / opts->io_unit_size; 660 if (sge_count > SPDK_NVMF_MAX_SGL_ENTRIES) { 661 SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size); 662 free(ttransport); 663 return NULL; 664 } 665 666 min_shared_buffers = spdk_env_get_core_count() * opts->buf_cache_size; 667 if (min_shared_buffers > opts->num_shared_buffers) { 668 SPDK_ERRLOG("There are not enough buffers to satisfy " 669 "per-poll group caches for each thread. (%" PRIu32 ") " 670 "supplied. (%" PRIu32 ") required\n", opts->num_shared_buffers, min_shared_buffers); 671 SPDK_ERRLOG("Please specify a larger number of shared buffers\n"); 672 free(ttransport); 673 return NULL; 674 } 675 676 pthread_mutex_init(&ttransport->lock, NULL); 677 678 ttransport->accept_poller = SPDK_POLLER_REGISTER(nvmf_tcp_accept, &ttransport->transport, 679 opts->acceptor_poll_rate); 680 if (!ttransport->accept_poller) { 681 pthread_mutex_destroy(&ttransport->lock); 682 free(ttransport); 683 return NULL; 684 } 685 686 return &ttransport->transport; 687 } 688 689 static int 690 nvmf_tcp_trsvcid_to_int(const char *trsvcid) 691 { 692 unsigned long long ull; 693 char *end = NULL; 694 695 ull = strtoull(trsvcid, &end, 10); 696 if (end == NULL || end == trsvcid || *end != '\0') { 697 return -1; 698 } 699 700 /* Valid TCP/IP port numbers are in [0, 65535] */ 701 if (ull > 65535) { 702 return -1; 703 } 704 705 return (int)ull; 706 } 707 708 /** 709 * Canonicalize a listen address trid. 710 */ 711 static int 712 nvmf_tcp_canon_listen_trid(struct spdk_nvme_transport_id *canon_trid, 713 const struct spdk_nvme_transport_id *trid) 714 { 715 int trsvcid_int; 716 717 trsvcid_int = nvmf_tcp_trsvcid_to_int(trid->trsvcid); 718 if (trsvcid_int < 0) { 719 return -EINVAL; 720 } 721 722 memset(canon_trid, 0, sizeof(*canon_trid)); 723 spdk_nvme_trid_populate_transport(canon_trid, SPDK_NVME_TRANSPORT_TCP); 724 canon_trid->adrfam = trid->adrfam; 725 snprintf(canon_trid->traddr, sizeof(canon_trid->traddr), "%s", trid->traddr); 726 snprintf(canon_trid->trsvcid, sizeof(canon_trid->trsvcid), "%d", trsvcid_int); 727 728 return 0; 729 } 730 731 /** 732 * Find an existing listening port. 733 * 734 * Caller must hold ttransport->lock. 735 */ 736 static struct spdk_nvmf_tcp_port * 737 nvmf_tcp_find_port(struct spdk_nvmf_tcp_transport *ttransport, 738 const struct spdk_nvme_transport_id *trid) 739 { 740 struct spdk_nvme_transport_id canon_trid; 741 struct spdk_nvmf_tcp_port *port; 742 743 if (nvmf_tcp_canon_listen_trid(&canon_trid, trid) != 0) { 744 return NULL; 745 } 746 747 TAILQ_FOREACH(port, &ttransport->ports, link) { 748 if (spdk_nvme_transport_id_compare(&canon_trid, port->trid) == 0) { 749 return port; 750 } 751 } 752 753 return NULL; 754 } 755 756 static int 757 nvmf_tcp_listen(struct spdk_nvmf_transport *transport, const struct spdk_nvme_transport_id *trid, 758 struct spdk_nvmf_listen_opts *listen_opts) 759 { 760 struct spdk_nvmf_tcp_transport *ttransport; 761 struct spdk_nvmf_tcp_port *port; 762 int trsvcid_int; 763 uint8_t adrfam; 764 struct spdk_sock_opts opts; 765 766 if (!strlen(trid->trsvcid)) { 767 SPDK_ERRLOG("Service id is required\n"); 768 return -EINVAL; 769 } 770 771 ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport); 772 773 trsvcid_int = nvmf_tcp_trsvcid_to_int(trid->trsvcid); 774 if (trsvcid_int < 0) { 775 SPDK_ERRLOG("Invalid trsvcid '%s'\n", trid->trsvcid); 776 return -EINVAL; 777 } 778 779 pthread_mutex_lock(&ttransport->lock); 780 port = calloc(1, sizeof(*port)); 781 if (!port) { 782 SPDK_ERRLOG("Port allocation failed\n"); 783 pthread_mutex_unlock(&ttransport->lock); 784 return -ENOMEM; 785 } 786 787 port->trid = trid; 788 opts.opts_size = sizeof(opts); 789 spdk_sock_get_default_opts(&opts); 790 opts.priority = ttransport->tcp_opts.sock_priority; 791 port->listen_sock = spdk_sock_listen_ext(trid->traddr, trsvcid_int, 792 NULL, &opts); 793 if (port->listen_sock == NULL) { 794 SPDK_ERRLOG("spdk_sock_listen(%s, %d) failed: %s (%d)\n", 795 trid->traddr, trsvcid_int, 796 spdk_strerror(errno), errno); 797 free(port); 798 pthread_mutex_unlock(&ttransport->lock); 799 return -errno; 800 } 801 802 if (spdk_sock_is_ipv4(port->listen_sock)) { 803 adrfam = SPDK_NVMF_ADRFAM_IPV4; 804 } else if (spdk_sock_is_ipv6(port->listen_sock)) { 805 adrfam = SPDK_NVMF_ADRFAM_IPV6; 806 } else { 807 SPDK_ERRLOG("Unhandled socket type\n"); 808 adrfam = 0; 809 } 810 811 if (adrfam != trid->adrfam) { 812 SPDK_ERRLOG("Socket address family mismatch\n"); 813 spdk_sock_close(&port->listen_sock); 814 free(port); 815 pthread_mutex_unlock(&ttransport->lock); 816 return -EINVAL; 817 } 818 819 SPDK_NOTICELOG("*** NVMe/TCP Target Listening on %s port %s ***\n", 820 trid->traddr, trid->trsvcid); 821 822 TAILQ_INSERT_TAIL(&ttransport->ports, port, link); 823 pthread_mutex_unlock(&ttransport->lock); 824 return 0; 825 } 826 827 static void 828 nvmf_tcp_stop_listen(struct spdk_nvmf_transport *transport, 829 const struct spdk_nvme_transport_id *trid) 830 { 831 struct spdk_nvmf_tcp_transport *ttransport; 832 struct spdk_nvmf_tcp_port *port; 833 834 ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport); 835 836 SPDK_DEBUGLOG(nvmf_tcp, "Removing listen address %s port %s\n", 837 trid->traddr, trid->trsvcid); 838 839 pthread_mutex_lock(&ttransport->lock); 840 port = nvmf_tcp_find_port(ttransport, trid); 841 if (port) { 842 TAILQ_REMOVE(&ttransport->ports, port, link); 843 spdk_sock_close(&port->listen_sock); 844 free(port); 845 } 846 847 pthread_mutex_unlock(&ttransport->lock); 848 } 849 850 static void nvmf_tcp_qpair_set_recv_state(struct spdk_nvmf_tcp_qpair *tqpair, 851 enum nvme_tcp_pdu_recv_state state); 852 853 static void 854 nvmf_tcp_qpair_set_state(struct spdk_nvmf_tcp_qpair *tqpair, enum nvme_tcp_qpair_state state) 855 { 856 tqpair->state = state; 857 spdk_trace_record(TRACE_TCP_QP_STATE_CHANGE, 0, 0, (uintptr_t)tqpair, tqpair->state); 858 } 859 860 static void 861 nvmf_tcp_qpair_disconnect(struct spdk_nvmf_tcp_qpair *tqpair) 862 { 863 SPDK_DEBUGLOG(nvmf_tcp, "Disconnecting qpair %p\n", tqpair); 864 865 spdk_trace_record(TRACE_TCP_QP_DISCONNECT, 0, 0, (uintptr_t)tqpair); 866 867 if (tqpair->state <= NVME_TCP_QPAIR_STATE_RUNNING) { 868 nvmf_tcp_qpair_set_state(tqpair, NVME_TCP_QPAIR_STATE_EXITING); 869 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR); 870 spdk_poller_unregister(&tqpair->timeout_poller); 871 872 /* This will end up calling nvmf_tcp_close_qpair */ 873 spdk_nvmf_qpair_disconnect(&tqpair->qpair, NULL, NULL); 874 } 875 } 876 877 static void 878 _pdu_write_done(void *_pdu, int err) 879 { 880 struct nvme_tcp_pdu *pdu = _pdu; 881 struct spdk_nvmf_tcp_qpair *tqpair = pdu->qpair; 882 883 if (err != 0) { 884 nvmf_tcp_qpair_disconnect(tqpair); 885 return; 886 } 887 888 assert(pdu->cb_fn != NULL); 889 pdu->cb_fn(pdu->cb_arg); 890 } 891 892 static void 893 _tcp_write_pdu(struct nvme_tcp_pdu *pdu) 894 { 895 uint32_t mapped_length = 0; 896 ssize_t rc; 897 struct spdk_nvmf_tcp_qpair *tqpair = pdu->qpair; 898 899 pdu->sock_req.iovcnt = nvme_tcp_build_iovs(pdu->iov, SPDK_COUNTOF(pdu->iov), pdu, 900 tqpair->host_hdgst_enable, tqpair->host_ddgst_enable, 901 &mapped_length); 902 pdu->sock_req.cb_fn = _pdu_write_done; 903 pdu->sock_req.cb_arg = pdu; 904 if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP || 905 pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ) { 906 rc = spdk_sock_writev(tqpair->sock, pdu->iov, pdu->sock_req.iovcnt); 907 if (rc == mapped_length) { 908 _pdu_write_done(pdu, 0); 909 } else { 910 SPDK_ERRLOG("IC_RESP or TERM_REQ could not write to socket.\n"); 911 _pdu_write_done(pdu, -1); 912 } 913 } else { 914 spdk_sock_writev_async(tqpair->sock, &pdu->sock_req); 915 } 916 } 917 918 static void 919 data_crc32_accel_done(void *cb_arg, int status) 920 { 921 struct nvme_tcp_pdu *pdu = cb_arg; 922 923 if (spdk_unlikely(status)) { 924 SPDK_ERRLOG("Failed to compute the data digest for pdu =%p\n", pdu); 925 _pdu_write_done(pdu, status); 926 return; 927 } 928 929 pdu->data_digest_crc32 ^= SPDK_CRC32C_XOR; 930 MAKE_DIGEST_WORD(pdu->data_digest, pdu->data_digest_crc32); 931 932 _tcp_write_pdu(pdu); 933 } 934 935 static void 936 pdu_data_crc32_compute(struct nvme_tcp_pdu *pdu) 937 { 938 struct spdk_nvmf_tcp_qpair *tqpair = pdu->qpair; 939 uint32_t crc32c; 940 941 /* Data Digest */ 942 if (pdu->data_len > 0 && g_nvme_tcp_ddgst[pdu->hdr.common.pdu_type] && tqpair->host_ddgst_enable) { 943 /* Only suport this limitated case for the first step */ 944 if (spdk_likely(!pdu->dif_ctx && (pdu->data_len % SPDK_NVME_TCP_DIGEST_ALIGNMENT == 0) 945 && tqpair->group)) { 946 spdk_accel_submit_crc32cv(tqpair->group->accel_channel, &pdu->data_digest_crc32, 947 pdu->data_iov, pdu->data_iovcnt, 0, data_crc32_accel_done, pdu); 948 return; 949 } 950 951 crc32c = nvme_tcp_pdu_calc_data_digest(pdu); 952 MAKE_DIGEST_WORD(pdu->data_digest, crc32c); 953 } 954 955 _tcp_write_pdu(pdu); 956 } 957 958 static void 959 nvmf_tcp_qpair_write_pdu(struct spdk_nvmf_tcp_qpair *tqpair, 960 struct nvme_tcp_pdu *pdu, 961 nvme_tcp_qpair_xfer_complete_cb cb_fn, 962 void *cb_arg) 963 { 964 int hlen; 965 uint32_t crc32c; 966 967 assert(tqpair->pdu_in_progress != pdu); 968 969 hlen = pdu->hdr.common.hlen; 970 pdu->cb_fn = cb_fn; 971 pdu->cb_arg = cb_arg; 972 973 pdu->iov[0].iov_base = &pdu->hdr.raw; 974 pdu->iov[0].iov_len = hlen; 975 976 /* Header Digest */ 977 if (g_nvme_tcp_hdgst[pdu->hdr.common.pdu_type] && tqpair->host_hdgst_enable) { 978 crc32c = nvme_tcp_pdu_calc_header_digest(pdu); 979 MAKE_DIGEST_WORD((uint8_t *)pdu->hdr.raw + hlen, crc32c); 980 } 981 982 /* Data Digest */ 983 pdu_data_crc32_compute(pdu); 984 } 985 986 static int 987 nvmf_tcp_qpair_init_mem_resource(struct spdk_nvmf_tcp_qpair *tqpair) 988 { 989 uint32_t i; 990 struct spdk_nvmf_transport_opts *opts; 991 uint32_t in_capsule_data_size; 992 993 opts = &tqpair->qpair.transport->opts; 994 995 in_capsule_data_size = opts->in_capsule_data_size; 996 if (opts->dif_insert_or_strip) { 997 in_capsule_data_size = SPDK_BDEV_BUF_SIZE_WITH_MD(in_capsule_data_size); 998 } 999 1000 tqpair->resource_count = opts->max_queue_depth; 1001 1002 tqpair->reqs = calloc(tqpair->resource_count, sizeof(*tqpair->reqs)); 1003 if (!tqpair->reqs) { 1004 SPDK_ERRLOG("Unable to allocate reqs on tqpair=%p\n", tqpair); 1005 return -1; 1006 } 1007 1008 if (in_capsule_data_size) { 1009 tqpair->bufs = spdk_zmalloc(tqpair->resource_count * in_capsule_data_size, 0x1000, 1010 NULL, SPDK_ENV_LCORE_ID_ANY, 1011 SPDK_MALLOC_DMA); 1012 if (!tqpair->bufs) { 1013 SPDK_ERRLOG("Unable to allocate bufs on tqpair=%p.\n", tqpair); 1014 return -1; 1015 } 1016 } 1017 1018 /* Add additional 2 members, which will be used for mgmt_pdu and pdu_in_progress owned by the tqpair */ 1019 tqpair->pdus = spdk_dma_zmalloc((tqpair->resource_count + 2) * sizeof(*tqpair->pdus), 0x1000, NULL); 1020 if (!tqpair->pdus) { 1021 SPDK_ERRLOG("Unable to allocate pdu pool on tqpair =%p.\n", tqpair); 1022 return -1; 1023 } 1024 1025 for (i = 0; i < tqpair->resource_count; i++) { 1026 struct spdk_nvmf_tcp_req *tcp_req = &tqpair->reqs[i]; 1027 1028 tcp_req->ttag = i + 1; 1029 tcp_req->req.qpair = &tqpair->qpair; 1030 1031 tcp_req->pdu = &tqpair->pdus[i]; 1032 tcp_req->pdu->qpair = tqpair; 1033 1034 /* Set up memory to receive commands */ 1035 if (tqpair->bufs) { 1036 tcp_req->buf = (void *)((uintptr_t)tqpair->bufs + (i * in_capsule_data_size)); 1037 } 1038 1039 /* Set the cmdn and rsp */ 1040 tcp_req->req.rsp = (union nvmf_c2h_msg *)&tcp_req->rsp; 1041 tcp_req->req.cmd = (union nvmf_h2c_msg *)&tcp_req->cmd; 1042 1043 /* Initialize request state to FREE */ 1044 tcp_req->state = TCP_REQUEST_STATE_FREE; 1045 TAILQ_INSERT_TAIL(&tqpair->tcp_req_free_queue, tcp_req, state_link); 1046 tqpair->state_cntr[TCP_REQUEST_STATE_FREE]++; 1047 } 1048 1049 tqpair->mgmt_pdu = &tqpair->pdus[i]; 1050 tqpair->mgmt_pdu->qpair = tqpair; 1051 tqpair->pdu_in_progress = &tqpair->pdus[i + 1]; 1052 1053 tqpair->recv_buf_size = (in_capsule_data_size + sizeof(struct spdk_nvme_tcp_cmd) + 2 * 1054 SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR; 1055 1056 return 0; 1057 } 1058 1059 static int 1060 nvmf_tcp_qpair_init(struct spdk_nvmf_qpair *qpair) 1061 { 1062 struct spdk_nvmf_tcp_qpair *tqpair; 1063 1064 tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair); 1065 1066 SPDK_DEBUGLOG(nvmf_tcp, "New TCP Connection: %p\n", qpair); 1067 1068 spdk_trace_record(TRACE_TCP_QP_CREATE, 0, 0, (uintptr_t)tqpair); 1069 1070 /* Initialise request state queues of the qpair */ 1071 TAILQ_INIT(&tqpair->tcp_req_free_queue); 1072 TAILQ_INIT(&tqpair->tcp_req_working_queue); 1073 1074 tqpair->host_hdgst_enable = true; 1075 tqpair->host_ddgst_enable = true; 1076 1077 return 0; 1078 } 1079 1080 static int 1081 nvmf_tcp_qpair_sock_init(struct spdk_nvmf_tcp_qpair *tqpair) 1082 { 1083 int rc; 1084 1085 spdk_trace_record(TRACE_TCP_QP_SOCK_INIT, 0, 0, (uintptr_t)tqpair); 1086 1087 /* set low water mark */ 1088 rc = spdk_sock_set_recvlowat(tqpair->sock, sizeof(struct spdk_nvme_tcp_common_pdu_hdr)); 1089 if (rc != 0) { 1090 SPDK_ERRLOG("spdk_sock_set_recvlowat() failed\n"); 1091 return rc; 1092 } 1093 1094 return 0; 1095 } 1096 1097 static void 1098 nvmf_tcp_handle_connect(struct spdk_nvmf_transport *transport, 1099 struct spdk_nvmf_tcp_port *port, 1100 struct spdk_sock *sock) 1101 { 1102 struct spdk_nvmf_tcp_qpair *tqpair; 1103 int rc; 1104 1105 SPDK_DEBUGLOG(nvmf_tcp, "New connection accepted on %s port %s\n", 1106 port->trid->traddr, port->trid->trsvcid); 1107 1108 tqpair = calloc(1, sizeof(struct spdk_nvmf_tcp_qpair)); 1109 if (tqpair == NULL) { 1110 SPDK_ERRLOG("Could not allocate new connection.\n"); 1111 spdk_sock_close(&sock); 1112 return; 1113 } 1114 1115 tqpair->sock = sock; 1116 tqpair->state_cntr[TCP_REQUEST_STATE_FREE] = 0; 1117 tqpair->port = port; 1118 tqpair->qpair.transport = transport; 1119 1120 rc = spdk_sock_getaddr(tqpair->sock, tqpair->target_addr, 1121 sizeof(tqpair->target_addr), &tqpair->target_port, 1122 tqpair->initiator_addr, sizeof(tqpair->initiator_addr), 1123 &tqpair->initiator_port); 1124 if (rc < 0) { 1125 SPDK_ERRLOG("spdk_sock_getaddr() failed of tqpair=%p\n", tqpair); 1126 nvmf_tcp_qpair_destroy(tqpair); 1127 return; 1128 } 1129 1130 spdk_nvmf_tgt_new_qpair(transport->tgt, &tqpair->qpair); 1131 } 1132 1133 static uint32_t 1134 nvmf_tcp_port_accept(struct spdk_nvmf_transport *transport, struct spdk_nvmf_tcp_port *port) 1135 { 1136 struct spdk_sock *sock; 1137 uint32_t count = 0; 1138 int i; 1139 1140 for (i = 0; i < NVMF_TCP_MAX_ACCEPT_SOCK_ONE_TIME; i++) { 1141 sock = spdk_sock_accept(port->listen_sock); 1142 if (sock == NULL) { 1143 break; 1144 } 1145 count++; 1146 nvmf_tcp_handle_connect(transport, port, sock); 1147 } 1148 1149 return count; 1150 } 1151 1152 static int 1153 nvmf_tcp_accept(void *ctx) 1154 { 1155 struct spdk_nvmf_transport *transport = ctx; 1156 struct spdk_nvmf_tcp_transport *ttransport; 1157 struct spdk_nvmf_tcp_port *port; 1158 uint32_t count = 0; 1159 1160 ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport); 1161 1162 TAILQ_FOREACH(port, &ttransport->ports, link) { 1163 count += nvmf_tcp_port_accept(transport, port); 1164 } 1165 1166 return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE; 1167 } 1168 1169 static void 1170 nvmf_tcp_discover(struct spdk_nvmf_transport *transport, 1171 struct spdk_nvme_transport_id *trid, 1172 struct spdk_nvmf_discovery_log_page_entry *entry) 1173 { 1174 entry->trtype = SPDK_NVMF_TRTYPE_TCP; 1175 entry->adrfam = trid->adrfam; 1176 entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_REQUIRED; 1177 1178 spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' '); 1179 spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' '); 1180 1181 entry->tsas.tcp.sectype = SPDK_NVME_TCP_SECURITY_NONE; 1182 } 1183 1184 static struct spdk_nvmf_tcp_control_msg_list * 1185 nvmf_tcp_control_msg_list_create(uint16_t num_messages) 1186 { 1187 struct spdk_nvmf_tcp_control_msg_list *list; 1188 struct spdk_nvmf_tcp_control_msg *msg; 1189 uint16_t i; 1190 1191 list = calloc(1, sizeof(*list)); 1192 if (!list) { 1193 SPDK_ERRLOG("Failed to allocate memory for list structure\n"); 1194 return NULL; 1195 } 1196 1197 list->msg_buf = spdk_zmalloc(num_messages * SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE, 1198 NVMF_DATA_BUFFER_ALIGNMENT, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 1199 if (!list->msg_buf) { 1200 SPDK_ERRLOG("Failed to allocate memory for control message buffers\n"); 1201 free(list); 1202 return NULL; 1203 } 1204 1205 STAILQ_INIT(&list->free_msgs); 1206 1207 for (i = 0; i < num_messages; i++) { 1208 msg = (struct spdk_nvmf_tcp_control_msg *)((char *)list->msg_buf + i * 1209 SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE); 1210 STAILQ_INSERT_TAIL(&list->free_msgs, msg, link); 1211 } 1212 1213 return list; 1214 } 1215 1216 static void 1217 nvmf_tcp_control_msg_list_free(struct spdk_nvmf_tcp_control_msg_list *list) 1218 { 1219 if (!list) { 1220 return; 1221 } 1222 1223 spdk_free(list->msg_buf); 1224 free(list); 1225 } 1226 1227 static struct spdk_nvmf_transport_poll_group * 1228 nvmf_tcp_poll_group_create(struct spdk_nvmf_transport *transport) 1229 { 1230 struct spdk_nvmf_tcp_transport *ttransport; 1231 struct spdk_nvmf_tcp_poll_group *tgroup; 1232 1233 tgroup = calloc(1, sizeof(*tgroup)); 1234 if (!tgroup) { 1235 return NULL; 1236 } 1237 1238 tgroup->sock_group = spdk_sock_group_create(&tgroup->group); 1239 if (!tgroup->sock_group) { 1240 goto cleanup; 1241 } 1242 1243 TAILQ_INIT(&tgroup->qpairs); 1244 TAILQ_INIT(&tgroup->await_req); 1245 1246 ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport); 1247 1248 if (transport->opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) { 1249 SPDK_DEBUGLOG(nvmf_tcp, "ICD %u is less than min required for admin/fabric commands (%u). " 1250 "Creating control messages list\n", transport->opts.in_capsule_data_size, 1251 SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE); 1252 tgroup->control_msg_list = nvmf_tcp_control_msg_list_create(ttransport->tcp_opts.control_msg_num); 1253 if (!tgroup->control_msg_list) { 1254 goto cleanup; 1255 } 1256 } 1257 1258 tgroup->accel_channel = spdk_accel_engine_get_io_channel(); 1259 if (spdk_unlikely(!tgroup->accel_channel)) { 1260 SPDK_ERRLOG("Cannot create accel_channel for tgroup=%p\n", tgroup); 1261 goto cleanup; 1262 } 1263 1264 pthread_mutex_lock(&ttransport->lock); 1265 TAILQ_INSERT_TAIL(&ttransport->poll_groups, tgroup, link); 1266 if (ttransport->next_pg == NULL) { 1267 ttransport->next_pg = tgroup; 1268 } 1269 pthread_mutex_unlock(&ttransport->lock); 1270 1271 return &tgroup->group; 1272 1273 cleanup: 1274 nvmf_tcp_poll_group_destroy(&tgroup->group); 1275 return NULL; 1276 } 1277 1278 static struct spdk_nvmf_transport_poll_group * 1279 nvmf_tcp_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair) 1280 { 1281 struct spdk_nvmf_tcp_transport *ttransport; 1282 struct spdk_nvmf_transport_poll_group *result; 1283 struct spdk_nvmf_tcp_poll_group **pg; 1284 struct spdk_nvmf_tcp_qpair *tqpair; 1285 struct spdk_sock_group *group = NULL; 1286 int rc; 1287 1288 tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair); 1289 rc = spdk_sock_get_optimal_sock_group(tqpair->sock, &group); 1290 if (!rc && group != NULL) { 1291 return spdk_sock_group_get_ctx(group); 1292 } 1293 1294 ttransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_tcp_transport, transport); 1295 1296 pthread_mutex_lock(&ttransport->lock); 1297 1298 if (TAILQ_EMPTY(&ttransport->poll_groups)) { 1299 pthread_mutex_unlock(&ttransport->lock); 1300 return NULL; 1301 } 1302 1303 pg = &ttransport->next_pg; 1304 assert(*pg != NULL); 1305 1306 result = &(*pg)->group; 1307 1308 *pg = TAILQ_NEXT(*pg, link); 1309 if (*pg == NULL) { 1310 *pg = TAILQ_FIRST(&ttransport->poll_groups); 1311 } 1312 1313 pthread_mutex_unlock(&ttransport->lock); 1314 return result; 1315 } 1316 1317 static void 1318 nvmf_tcp_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group) 1319 { 1320 struct spdk_nvmf_tcp_poll_group *tgroup, *next_tgroup; 1321 struct spdk_nvmf_tcp_transport *ttransport; 1322 1323 tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group); 1324 spdk_sock_group_close(&tgroup->sock_group); 1325 if (tgroup->control_msg_list) { 1326 nvmf_tcp_control_msg_list_free(tgroup->control_msg_list); 1327 } 1328 1329 if (tgroup->accel_channel) { 1330 spdk_put_io_channel(tgroup->accel_channel); 1331 } 1332 1333 ttransport = SPDK_CONTAINEROF(tgroup->group.transport, struct spdk_nvmf_tcp_transport, transport); 1334 1335 pthread_mutex_lock(&ttransport->lock); 1336 next_tgroup = TAILQ_NEXT(tgroup, link); 1337 TAILQ_REMOVE(&ttransport->poll_groups, tgroup, link); 1338 if (next_tgroup == NULL) { 1339 next_tgroup = TAILQ_FIRST(&ttransport->poll_groups); 1340 } 1341 if (ttransport->next_pg == tgroup) { 1342 ttransport->next_pg = next_tgroup; 1343 } 1344 pthread_mutex_unlock(&ttransport->lock); 1345 1346 free(tgroup); 1347 } 1348 1349 static void 1350 nvmf_tcp_qpair_set_recv_state(struct spdk_nvmf_tcp_qpair *tqpair, 1351 enum nvme_tcp_pdu_recv_state state) 1352 { 1353 if (tqpair->recv_state == state) { 1354 SPDK_ERRLOG("The recv state of tqpair=%p is same with the state(%d) to be set\n", 1355 tqpair, state); 1356 return; 1357 } 1358 1359 if (tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_REQ) { 1360 /* When leaving the await req state, move the qpair to the main list */ 1361 TAILQ_REMOVE(&tqpair->group->await_req, tqpair, link); 1362 TAILQ_INSERT_TAIL(&tqpair->group->qpairs, tqpair, link); 1363 } 1364 1365 SPDK_DEBUGLOG(nvmf_tcp, "tqpair(%p) recv state=%d\n", tqpair, state); 1366 tqpair->recv_state = state; 1367 1368 spdk_trace_record(TRACE_TCP_QP_RCV_STATE_CHANGE, 0, 0, (uintptr_t)tqpair, tqpair->recv_state); 1369 1370 switch (state) { 1371 case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH: 1372 case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH: 1373 case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD: 1374 break; 1375 case NVME_TCP_PDU_RECV_STATE_AWAIT_REQ: 1376 TAILQ_REMOVE(&tqpair->group->qpairs, tqpair, link); 1377 TAILQ_INSERT_TAIL(&tqpair->group->await_req, tqpair, link); 1378 break; 1379 case NVME_TCP_PDU_RECV_STATE_ERROR: 1380 case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY: 1381 memset(tqpair->pdu_in_progress, 0, sizeof(*(tqpair->pdu_in_progress))); 1382 break; 1383 default: 1384 SPDK_ERRLOG("The state(%d) is invalid\n", state); 1385 abort(); 1386 break; 1387 } 1388 } 1389 1390 static int 1391 nvmf_tcp_qpair_handle_timeout(void *ctx) 1392 { 1393 struct spdk_nvmf_tcp_qpair *tqpair = ctx; 1394 1395 assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_ERROR); 1396 1397 SPDK_ERRLOG("No pdu coming for tqpair=%p within %d seconds\n", tqpair, 1398 SPDK_NVME_TCP_QPAIR_EXIT_TIMEOUT); 1399 1400 nvmf_tcp_qpair_disconnect(tqpair); 1401 return SPDK_POLLER_BUSY; 1402 } 1403 1404 static void 1405 nvmf_tcp_send_c2h_term_req_complete(void *cb_arg) 1406 { 1407 struct spdk_nvmf_tcp_qpair *tqpair = (struct spdk_nvmf_tcp_qpair *)cb_arg; 1408 1409 if (!tqpair->timeout_poller) { 1410 tqpair->timeout_poller = SPDK_POLLER_REGISTER(nvmf_tcp_qpair_handle_timeout, tqpair, 1411 SPDK_NVME_TCP_QPAIR_EXIT_TIMEOUT * 1000000); 1412 } 1413 } 1414 1415 static void 1416 nvmf_tcp_send_c2h_term_req(struct spdk_nvmf_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu, 1417 enum spdk_nvme_tcp_term_req_fes fes, uint32_t error_offset) 1418 { 1419 struct nvme_tcp_pdu *rsp_pdu; 1420 struct spdk_nvme_tcp_term_req_hdr *c2h_term_req; 1421 uint32_t c2h_term_req_hdr_len = sizeof(*c2h_term_req); 1422 uint32_t copy_len; 1423 1424 rsp_pdu = tqpair->mgmt_pdu; 1425 1426 c2h_term_req = &rsp_pdu->hdr.term_req; 1427 c2h_term_req->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ; 1428 c2h_term_req->common.hlen = c2h_term_req_hdr_len; 1429 1430 if ((fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD) || 1431 (fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER)) { 1432 DSET32(&c2h_term_req->fei, error_offset); 1433 } 1434 1435 copy_len = spdk_min(pdu->hdr.common.hlen, SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE); 1436 1437 /* Copy the error info into the buffer */ 1438 memcpy((uint8_t *)rsp_pdu->hdr.raw + c2h_term_req_hdr_len, pdu->hdr.raw, copy_len); 1439 nvme_tcp_pdu_set_data(rsp_pdu, (uint8_t *)rsp_pdu->hdr.raw + c2h_term_req_hdr_len, copy_len); 1440 1441 /* Contain the header of the wrong received pdu */ 1442 c2h_term_req->common.plen = c2h_term_req->common.hlen + copy_len; 1443 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR); 1444 nvmf_tcp_qpair_write_pdu(tqpair, rsp_pdu, nvmf_tcp_send_c2h_term_req_complete, tqpair); 1445 } 1446 1447 static void 1448 nvmf_tcp_capsule_cmd_hdr_handle(struct spdk_nvmf_tcp_transport *ttransport, 1449 struct spdk_nvmf_tcp_qpair *tqpair, 1450 struct nvme_tcp_pdu *pdu) 1451 { 1452 struct spdk_nvmf_tcp_req *tcp_req; 1453 1454 assert(pdu->psh_valid_bytes == pdu->psh_len); 1455 assert(pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD); 1456 1457 tcp_req = nvmf_tcp_req_get(tqpair); 1458 if (!tcp_req) { 1459 /* Directly return and make the allocation retry again. This can happen if we're 1460 * using asynchronous writes to send the response to the host or when releasing 1461 * zero-copy buffers after a response has been sent. In both cases, the host might 1462 * receive the response before we've finished processing the request and is free to 1463 * send another one. 1464 */ 1465 if (tqpair->state_cntr[TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST] > 0 || 1466 tqpair->state_cntr[TCP_REQUEST_STATE_AWAITING_ZCOPY_RELEASE] > 0) { 1467 return; 1468 } 1469 1470 /* The host sent more commands than the maximum queue depth. */ 1471 SPDK_ERRLOG("Cannot allocate tcp_req on tqpair=%p\n", tqpair); 1472 nvmf_tcp_qpair_disconnect(tqpair); 1473 return; 1474 } 1475 1476 pdu->req = tcp_req; 1477 assert(tcp_req->state == TCP_REQUEST_STATE_NEW); 1478 nvmf_tcp_req_process(ttransport, tcp_req); 1479 } 1480 1481 static void 1482 nvmf_tcp_capsule_cmd_payload_handle(struct spdk_nvmf_tcp_transport *ttransport, 1483 struct spdk_nvmf_tcp_qpair *tqpair, 1484 struct nvme_tcp_pdu *pdu) 1485 { 1486 struct spdk_nvmf_tcp_req *tcp_req; 1487 struct spdk_nvme_tcp_cmd *capsule_cmd; 1488 uint32_t error_offset = 0; 1489 enum spdk_nvme_tcp_term_req_fes fes; 1490 struct spdk_nvme_cpl *rsp; 1491 1492 capsule_cmd = &pdu->hdr.capsule_cmd; 1493 tcp_req = pdu->req; 1494 assert(tcp_req != NULL); 1495 1496 /* Zero-copy requests don't support ICD */ 1497 assert(!spdk_nvmf_request_using_zcopy(&tcp_req->req)); 1498 1499 if (capsule_cmd->common.pdo > SPDK_NVME_TCP_PDU_PDO_MAX_OFFSET) { 1500 SPDK_ERRLOG("Expected ICReq capsule_cmd pdu offset <= %d, got %c\n", 1501 SPDK_NVME_TCP_PDU_PDO_MAX_OFFSET, capsule_cmd->common.pdo); 1502 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 1503 error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdo); 1504 goto err; 1505 } 1506 1507 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY); 1508 1509 rsp = &tcp_req->req.rsp->nvme_cpl; 1510 if (spdk_unlikely(rsp->status.sc == SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR)) { 1511 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE); 1512 } else { 1513 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE); 1514 } 1515 1516 nvmf_tcp_req_process(ttransport, tcp_req); 1517 1518 return; 1519 err: 1520 nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset); 1521 } 1522 1523 static int 1524 nvmf_tcp_find_req_in_state(struct spdk_nvmf_tcp_qpair *tqpair, 1525 enum spdk_nvmf_tcp_req_state state, 1526 uint16_t cid, uint16_t tag, 1527 struct spdk_nvmf_tcp_req **req) 1528 { 1529 struct spdk_nvmf_tcp_req *tcp_req = NULL; 1530 1531 TAILQ_FOREACH(tcp_req, &tqpair->tcp_req_working_queue, state_link) { 1532 if (tcp_req->state != state) { 1533 continue; 1534 } 1535 1536 if (tcp_req->req.cmd->nvme_cmd.cid != cid) { 1537 continue; 1538 } 1539 1540 if (tcp_req->ttag == tag) { 1541 *req = tcp_req; 1542 return 0; 1543 } 1544 1545 *req = NULL; 1546 return -1; 1547 } 1548 1549 /* Didn't find it, but not an error */ 1550 *req = NULL; 1551 return 0; 1552 } 1553 1554 static void 1555 nvmf_tcp_h2c_data_hdr_handle(struct spdk_nvmf_tcp_transport *ttransport, 1556 struct spdk_nvmf_tcp_qpair *tqpair, 1557 struct nvme_tcp_pdu *pdu) 1558 { 1559 struct spdk_nvmf_tcp_req *tcp_req; 1560 uint32_t error_offset = 0; 1561 enum spdk_nvme_tcp_term_req_fes fes = 0; 1562 struct spdk_nvme_tcp_h2c_data_hdr *h2c_data; 1563 int rc; 1564 1565 h2c_data = &pdu->hdr.h2c_data; 1566 1567 SPDK_DEBUGLOG(nvmf_tcp, "tqpair=%p, r2t_info: datao=%u, datal=%u, cccid=%u, ttag=%u\n", 1568 tqpair, h2c_data->datao, h2c_data->datal, h2c_data->cccid, h2c_data->ttag); 1569 1570 rc = nvmf_tcp_find_req_in_state(tqpair, TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER, 1571 h2c_data->cccid, h2c_data->ttag, &tcp_req); 1572 if (rc == 0 && tcp_req == NULL) { 1573 rc = nvmf_tcp_find_req_in_state(tqpair, TCP_REQUEST_STATE_AWAITING_R2T_ACK, h2c_data->cccid, 1574 h2c_data->ttag, &tcp_req); 1575 } 1576 1577 if (!tcp_req) { 1578 SPDK_DEBUGLOG(nvmf_tcp, "tcp_req is not found for tqpair=%p\n", tqpair); 1579 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER; 1580 if (rc == 0) { 1581 error_offset = offsetof(struct spdk_nvme_tcp_h2c_data_hdr, cccid); 1582 } else { 1583 error_offset = offsetof(struct spdk_nvme_tcp_h2c_data_hdr, ttag); 1584 } 1585 goto err; 1586 } 1587 1588 if (tcp_req->h2c_offset != h2c_data->datao) { 1589 SPDK_DEBUGLOG(nvmf_tcp, 1590 "tcp_req(%p), tqpair=%p, expected data offset %u, but data offset is %u\n", 1591 tcp_req, tqpair, tcp_req->h2c_offset, h2c_data->datao); 1592 fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE; 1593 goto err; 1594 } 1595 1596 if ((h2c_data->datao + h2c_data->datal) > tcp_req->req.length) { 1597 SPDK_DEBUGLOG(nvmf_tcp, 1598 "tcp_req(%p), tqpair=%p, (datao=%u + datal=%u) exceeds requested length=%u\n", 1599 tcp_req, tqpair, h2c_data->datao, h2c_data->datal, tcp_req->req.length); 1600 fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE; 1601 goto err; 1602 } 1603 1604 pdu->req = tcp_req; 1605 1606 if (spdk_unlikely(tcp_req->req.dif_enabled)) { 1607 pdu->dif_ctx = &tcp_req->req.dif.dif_ctx; 1608 } 1609 1610 nvme_tcp_pdu_set_data_buf(pdu, tcp_req->req.iov, tcp_req->req.iovcnt, 1611 h2c_data->datao, h2c_data->datal); 1612 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD); 1613 return; 1614 1615 err: 1616 nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset); 1617 } 1618 1619 static void 1620 nvmf_tcp_send_capsule_resp_pdu(struct spdk_nvmf_tcp_req *tcp_req, 1621 struct spdk_nvmf_tcp_qpair *tqpair) 1622 { 1623 struct nvme_tcp_pdu *rsp_pdu; 1624 struct spdk_nvme_tcp_rsp *capsule_resp; 1625 1626 SPDK_DEBUGLOG(nvmf_tcp, "enter, tqpair=%p\n", tqpair); 1627 1628 rsp_pdu = nvmf_tcp_req_pdu_init(tcp_req); 1629 assert(rsp_pdu != NULL); 1630 1631 capsule_resp = &rsp_pdu->hdr.capsule_resp; 1632 capsule_resp->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP; 1633 capsule_resp->common.plen = capsule_resp->common.hlen = sizeof(*capsule_resp); 1634 capsule_resp->rccqe = tcp_req->req.rsp->nvme_cpl; 1635 if (tqpair->host_hdgst_enable) { 1636 capsule_resp->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF; 1637 capsule_resp->common.plen += SPDK_NVME_TCP_DIGEST_LEN; 1638 } 1639 1640 nvmf_tcp_qpair_write_pdu(tqpair, rsp_pdu, nvmf_tcp_request_free, tcp_req); 1641 } 1642 1643 static void 1644 nvmf_tcp_pdu_c2h_data_complete(void *cb_arg) 1645 { 1646 struct spdk_nvmf_tcp_req *tcp_req = cb_arg; 1647 struct spdk_nvmf_tcp_qpair *tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair, 1648 struct spdk_nvmf_tcp_qpair, qpair); 1649 1650 assert(tqpair != NULL); 1651 1652 if (spdk_unlikely(tcp_req->pdu->rw_offset < tcp_req->req.length)) { 1653 SPDK_DEBUGLOG(nvmf_tcp, "sending another C2H part, offset %u length %u\n", tcp_req->pdu->rw_offset, 1654 tcp_req->req.length); 1655 _nvmf_tcp_send_c2h_data(tqpair, tcp_req); 1656 return; 1657 } 1658 1659 if (tcp_req->pdu->hdr.c2h_data.common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) { 1660 nvmf_tcp_request_free(tcp_req); 1661 } else { 1662 nvmf_tcp_req_pdu_fini(tcp_req); 1663 nvmf_tcp_send_capsule_resp_pdu(tcp_req, tqpair); 1664 } 1665 } 1666 1667 static void 1668 nvmf_tcp_r2t_complete(void *cb_arg) 1669 { 1670 struct spdk_nvmf_tcp_req *tcp_req = cb_arg; 1671 struct spdk_nvmf_tcp_transport *ttransport; 1672 1673 nvmf_tcp_req_pdu_fini(tcp_req); 1674 1675 ttransport = SPDK_CONTAINEROF(tcp_req->req.qpair->transport, 1676 struct spdk_nvmf_tcp_transport, transport); 1677 1678 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER); 1679 1680 if (tcp_req->h2c_offset == tcp_req->req.length) { 1681 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE); 1682 nvmf_tcp_req_process(ttransport, tcp_req); 1683 } 1684 } 1685 1686 static void 1687 nvmf_tcp_send_r2t_pdu(struct spdk_nvmf_tcp_qpair *tqpair, 1688 struct spdk_nvmf_tcp_req *tcp_req) 1689 { 1690 struct nvme_tcp_pdu *rsp_pdu; 1691 struct spdk_nvme_tcp_r2t_hdr *r2t; 1692 1693 rsp_pdu = nvmf_tcp_req_pdu_init(tcp_req); 1694 assert(rsp_pdu != NULL); 1695 1696 r2t = &rsp_pdu->hdr.r2t; 1697 r2t->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_R2T; 1698 r2t->common.plen = r2t->common.hlen = sizeof(*r2t); 1699 1700 if (tqpair->host_hdgst_enable) { 1701 r2t->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF; 1702 r2t->common.plen += SPDK_NVME_TCP_DIGEST_LEN; 1703 } 1704 1705 r2t->cccid = tcp_req->req.cmd->nvme_cmd.cid; 1706 r2t->ttag = tcp_req->ttag; 1707 r2t->r2to = tcp_req->h2c_offset; 1708 r2t->r2tl = tcp_req->req.length; 1709 1710 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_AWAITING_R2T_ACK); 1711 1712 SPDK_DEBUGLOG(nvmf_tcp, 1713 "tcp_req(%p) on tqpair(%p), r2t_info: cccid=%u, ttag=%u, r2to=%u, r2tl=%u\n", 1714 tcp_req, tqpair, r2t->cccid, r2t->ttag, r2t->r2to, r2t->r2tl); 1715 nvmf_tcp_qpair_write_pdu(tqpair, rsp_pdu, nvmf_tcp_r2t_complete, tcp_req); 1716 } 1717 1718 static void 1719 nvmf_tcp_h2c_data_payload_handle(struct spdk_nvmf_tcp_transport *ttransport, 1720 struct spdk_nvmf_tcp_qpair *tqpair, 1721 struct nvme_tcp_pdu *pdu) 1722 { 1723 struct spdk_nvmf_tcp_req *tcp_req; 1724 struct spdk_nvme_cpl *rsp; 1725 1726 tcp_req = pdu->req; 1727 assert(tcp_req != NULL); 1728 1729 SPDK_DEBUGLOG(nvmf_tcp, "enter\n"); 1730 1731 tcp_req->h2c_offset += pdu->data_len; 1732 1733 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY); 1734 1735 /* Wait for all of the data to arrive AND for the initial R2T PDU send to be 1736 * acknowledged before moving on. */ 1737 if (tcp_req->h2c_offset == tcp_req->req.length && 1738 tcp_req->state == TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER) { 1739 /* After receiving all the h2c data, we need to check whether there is 1740 * transient transport error */ 1741 rsp = &tcp_req->req.rsp->nvme_cpl; 1742 if (spdk_unlikely(rsp->status.sc == SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR)) { 1743 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE); 1744 } else { 1745 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE); 1746 } 1747 nvmf_tcp_req_process(ttransport, tcp_req); 1748 } 1749 } 1750 1751 static void 1752 nvmf_tcp_h2c_term_req_dump(struct spdk_nvme_tcp_term_req_hdr *h2c_term_req) 1753 { 1754 SPDK_ERRLOG("Error info of pdu(%p): %s\n", h2c_term_req, 1755 spdk_nvmf_tcp_term_req_fes_str[h2c_term_req->fes]); 1756 if ((h2c_term_req->fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD) || 1757 (h2c_term_req->fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER)) { 1758 SPDK_DEBUGLOG(nvmf_tcp, "The offset from the start of the PDU header is %u\n", 1759 DGET32(h2c_term_req->fei)); 1760 } 1761 } 1762 1763 static void 1764 nvmf_tcp_h2c_term_req_hdr_handle(struct spdk_nvmf_tcp_qpair *tqpair, 1765 struct nvme_tcp_pdu *pdu) 1766 { 1767 struct spdk_nvme_tcp_term_req_hdr *h2c_term_req = &pdu->hdr.term_req; 1768 uint32_t error_offset = 0; 1769 enum spdk_nvme_tcp_term_req_fes fes; 1770 1771 if (h2c_term_req->fes > SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER) { 1772 SPDK_ERRLOG("Fatal Error Status(FES) is unknown for h2c_term_req pdu=%p\n", pdu); 1773 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 1774 error_offset = offsetof(struct spdk_nvme_tcp_term_req_hdr, fes); 1775 goto end; 1776 } 1777 1778 /* set the data buffer */ 1779 nvme_tcp_pdu_set_data(pdu, (uint8_t *)pdu->hdr.raw + h2c_term_req->common.hlen, 1780 h2c_term_req->common.plen - h2c_term_req->common.hlen); 1781 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD); 1782 return; 1783 end: 1784 nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset); 1785 } 1786 1787 static void 1788 nvmf_tcp_h2c_term_req_payload_handle(struct spdk_nvmf_tcp_qpair *tqpair, 1789 struct nvme_tcp_pdu *pdu) 1790 { 1791 struct spdk_nvme_tcp_term_req_hdr *h2c_term_req = &pdu->hdr.term_req; 1792 1793 nvmf_tcp_h2c_term_req_dump(h2c_term_req); 1794 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR); 1795 } 1796 1797 static void 1798 _nvmf_tcp_pdu_payload_handle(struct spdk_nvmf_tcp_qpair *tqpair, 1799 struct spdk_nvmf_tcp_transport *ttransport) 1800 { 1801 struct nvme_tcp_pdu *pdu = tqpair->pdu_in_progress; 1802 1803 switch (pdu->hdr.common.pdu_type) { 1804 case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD: 1805 nvmf_tcp_capsule_cmd_payload_handle(ttransport, tqpair, pdu); 1806 break; 1807 case SPDK_NVME_TCP_PDU_TYPE_H2C_DATA: 1808 nvmf_tcp_h2c_data_payload_handle(ttransport, tqpair, pdu); 1809 break; 1810 1811 case SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ: 1812 nvmf_tcp_h2c_term_req_payload_handle(tqpair, pdu); 1813 break; 1814 1815 default: 1816 /* The code should not go to here */ 1817 SPDK_ERRLOG("The code should not go to here\n"); 1818 break; 1819 } 1820 } 1821 1822 static void 1823 nvmf_tcp_pdu_payload_handle(struct spdk_nvmf_tcp_qpair *tqpair, 1824 struct spdk_nvmf_tcp_transport *ttransport) 1825 { 1826 int rc = 0; 1827 struct nvme_tcp_pdu *pdu; 1828 uint32_t crc32c; 1829 struct spdk_nvmf_tcp_req *tcp_req; 1830 struct spdk_nvme_cpl *rsp; 1831 1832 assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD); 1833 pdu = tqpair->pdu_in_progress; 1834 1835 SPDK_DEBUGLOG(nvmf_tcp, "enter\n"); 1836 /* check data digest if need */ 1837 if (pdu->ddgst_enable) { 1838 crc32c = nvme_tcp_pdu_calc_data_digest(pdu); 1839 rc = MATCH_DIGEST_WORD(pdu->data_digest, crc32c); 1840 if (rc == 0) { 1841 SPDK_ERRLOG("Data digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu); 1842 tcp_req = pdu->req; 1843 assert(tcp_req != NULL); 1844 rsp = &tcp_req->req.rsp->nvme_cpl; 1845 rsp->status.sc = SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR; 1846 } 1847 } 1848 1849 _nvmf_tcp_pdu_payload_handle(tqpair, ttransport); 1850 } 1851 1852 static void 1853 nvmf_tcp_send_icresp_complete(void *cb_arg) 1854 { 1855 struct spdk_nvmf_tcp_qpair *tqpair = cb_arg; 1856 1857 nvmf_tcp_qpair_set_state(tqpair, NVME_TCP_QPAIR_STATE_RUNNING); 1858 } 1859 1860 static void 1861 nvmf_tcp_icreq_handle(struct spdk_nvmf_tcp_transport *ttransport, 1862 struct spdk_nvmf_tcp_qpair *tqpair, 1863 struct nvme_tcp_pdu *pdu) 1864 { 1865 struct spdk_nvme_tcp_ic_req *ic_req = &pdu->hdr.ic_req; 1866 struct nvme_tcp_pdu *rsp_pdu; 1867 struct spdk_nvme_tcp_ic_resp *ic_resp; 1868 uint32_t error_offset = 0; 1869 enum spdk_nvme_tcp_term_req_fes fes; 1870 1871 /* Only PFV 0 is defined currently */ 1872 if (ic_req->pfv != 0) { 1873 SPDK_ERRLOG("Expected ICReq PFV %u, got %u\n", 0u, ic_req->pfv); 1874 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 1875 error_offset = offsetof(struct spdk_nvme_tcp_ic_req, pfv); 1876 goto end; 1877 } 1878 1879 /* MAXR2T is 0's based */ 1880 SPDK_DEBUGLOG(nvmf_tcp, "maxr2t =%u\n", (ic_req->maxr2t + 1u)); 1881 1882 tqpair->host_hdgst_enable = ic_req->dgst.bits.hdgst_enable ? true : false; 1883 if (!tqpair->host_hdgst_enable) { 1884 tqpair->recv_buf_size -= SPDK_NVME_TCP_DIGEST_LEN * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR; 1885 } 1886 1887 tqpair->host_ddgst_enable = ic_req->dgst.bits.ddgst_enable ? true : false; 1888 if (!tqpair->host_ddgst_enable) { 1889 tqpair->recv_buf_size -= SPDK_NVME_TCP_DIGEST_LEN * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR; 1890 } 1891 1892 tqpair->recv_buf_size = spdk_max(tqpair->recv_buf_size, MIN_SOCK_PIPE_SIZE); 1893 /* Now that we know whether digests are enabled, properly size the receive buffer */ 1894 if (spdk_sock_set_recvbuf(tqpair->sock, tqpair->recv_buf_size) < 0) { 1895 SPDK_WARNLOG("Unable to allocate enough memory for receive buffer on tqpair=%p with size=%d\n", 1896 tqpair, 1897 tqpair->recv_buf_size); 1898 /* Not fatal. */ 1899 } 1900 1901 tqpair->cpda = spdk_min(ic_req->hpda, SPDK_NVME_TCP_CPDA_MAX); 1902 SPDK_DEBUGLOG(nvmf_tcp, "cpda of tqpair=(%p) is : %u\n", tqpair, tqpair->cpda); 1903 1904 rsp_pdu = tqpair->mgmt_pdu; 1905 1906 ic_resp = &rsp_pdu->hdr.ic_resp; 1907 ic_resp->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP; 1908 ic_resp->common.hlen = ic_resp->common.plen = sizeof(*ic_resp); 1909 ic_resp->pfv = 0; 1910 ic_resp->cpda = tqpair->cpda; 1911 ic_resp->maxh2cdata = ttransport->transport.opts.max_io_size; 1912 ic_resp->dgst.bits.hdgst_enable = tqpair->host_hdgst_enable ? 1 : 0; 1913 ic_resp->dgst.bits.ddgst_enable = tqpair->host_ddgst_enable ? 1 : 0; 1914 1915 SPDK_DEBUGLOG(nvmf_tcp, "host_hdgst_enable: %u\n", tqpair->host_hdgst_enable); 1916 SPDK_DEBUGLOG(nvmf_tcp, "host_ddgst_enable: %u\n", tqpair->host_ddgst_enable); 1917 1918 nvmf_tcp_qpair_set_state(tqpair, NVME_TCP_QPAIR_STATE_INITIALIZING); 1919 nvmf_tcp_qpair_write_pdu(tqpair, rsp_pdu, nvmf_tcp_send_icresp_complete, tqpair); 1920 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY); 1921 return; 1922 end: 1923 nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset); 1924 } 1925 1926 static void 1927 nvmf_tcp_pdu_psh_handle(struct spdk_nvmf_tcp_qpair *tqpair, 1928 struct spdk_nvmf_tcp_transport *ttransport) 1929 { 1930 struct nvme_tcp_pdu *pdu; 1931 int rc; 1932 uint32_t crc32c, error_offset = 0; 1933 enum spdk_nvme_tcp_term_req_fes fes; 1934 1935 assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH); 1936 pdu = tqpair->pdu_in_progress; 1937 1938 SPDK_DEBUGLOG(nvmf_tcp, "pdu type of tqpair(%p) is %d\n", tqpair, 1939 pdu->hdr.common.pdu_type); 1940 /* check header digest if needed */ 1941 if (pdu->has_hdgst) { 1942 SPDK_DEBUGLOG(nvmf_tcp, "Compare the header of pdu=%p on tqpair=%p\n", pdu, tqpair); 1943 crc32c = nvme_tcp_pdu_calc_header_digest(pdu); 1944 rc = MATCH_DIGEST_WORD((uint8_t *)pdu->hdr.raw + pdu->hdr.common.hlen, crc32c); 1945 if (rc == 0) { 1946 SPDK_ERRLOG("Header digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu); 1947 fes = SPDK_NVME_TCP_TERM_REQ_FES_HDGST_ERROR; 1948 nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset); 1949 return; 1950 1951 } 1952 } 1953 1954 switch (pdu->hdr.common.pdu_type) { 1955 case SPDK_NVME_TCP_PDU_TYPE_IC_REQ: 1956 nvmf_tcp_icreq_handle(ttransport, tqpair, pdu); 1957 break; 1958 case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD: 1959 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_REQ); 1960 break; 1961 case SPDK_NVME_TCP_PDU_TYPE_H2C_DATA: 1962 nvmf_tcp_h2c_data_hdr_handle(ttransport, tqpair, pdu); 1963 break; 1964 1965 case SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ: 1966 nvmf_tcp_h2c_term_req_hdr_handle(tqpair, pdu); 1967 break; 1968 1969 default: 1970 SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->pdu_in_progress->hdr.common.pdu_type); 1971 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 1972 error_offset = 1; 1973 nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset); 1974 break; 1975 } 1976 } 1977 1978 static void 1979 nvmf_tcp_pdu_ch_handle(struct spdk_nvmf_tcp_qpair *tqpair) 1980 { 1981 struct nvme_tcp_pdu *pdu; 1982 uint32_t error_offset = 0; 1983 enum spdk_nvme_tcp_term_req_fes fes; 1984 uint8_t expected_hlen, pdo; 1985 bool plen_error = false, pdo_error = false; 1986 1987 assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH); 1988 pdu = tqpair->pdu_in_progress; 1989 1990 if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ) { 1991 if (tqpair->state != NVME_TCP_QPAIR_STATE_INVALID) { 1992 SPDK_ERRLOG("Already received ICreq PDU, and reject this pdu=%p\n", pdu); 1993 fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR; 1994 goto err; 1995 } 1996 expected_hlen = sizeof(struct spdk_nvme_tcp_ic_req); 1997 if (pdu->hdr.common.plen != expected_hlen) { 1998 plen_error = true; 1999 } 2000 } else { 2001 if (tqpair->state != NVME_TCP_QPAIR_STATE_RUNNING) { 2002 SPDK_ERRLOG("The TCP/IP connection is not negotiated\n"); 2003 fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR; 2004 goto err; 2005 } 2006 2007 switch (pdu->hdr.common.pdu_type) { 2008 case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD: 2009 expected_hlen = sizeof(struct spdk_nvme_tcp_cmd); 2010 pdo = pdu->hdr.common.pdo; 2011 if ((tqpair->cpda != 0) && (pdo % ((tqpair->cpda + 1) << 2) != 0)) { 2012 pdo_error = true; 2013 break; 2014 } 2015 2016 if (pdu->hdr.common.plen < expected_hlen) { 2017 plen_error = true; 2018 } 2019 break; 2020 case SPDK_NVME_TCP_PDU_TYPE_H2C_DATA: 2021 expected_hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr); 2022 pdo = pdu->hdr.common.pdo; 2023 if ((tqpair->cpda != 0) && (pdo % ((tqpair->cpda + 1) << 2) != 0)) { 2024 pdo_error = true; 2025 break; 2026 } 2027 if (pdu->hdr.common.plen < expected_hlen) { 2028 plen_error = true; 2029 } 2030 break; 2031 2032 case SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ: 2033 expected_hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr); 2034 if ((pdu->hdr.common.plen <= expected_hlen) || 2035 (pdu->hdr.common.plen > SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE)) { 2036 plen_error = true; 2037 } 2038 break; 2039 2040 default: 2041 SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", pdu->hdr.common.pdu_type); 2042 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 2043 error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdu_type); 2044 goto err; 2045 } 2046 } 2047 2048 if (pdu->hdr.common.hlen != expected_hlen) { 2049 SPDK_ERRLOG("PDU type=0x%02x, Expected ICReq header length %u, got %u on tqpair=%p\n", 2050 pdu->hdr.common.pdu_type, 2051 expected_hlen, pdu->hdr.common.hlen, tqpair); 2052 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 2053 error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, hlen); 2054 goto err; 2055 } else if (pdo_error) { 2056 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 2057 error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdo); 2058 } else if (plen_error) { 2059 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 2060 error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, plen); 2061 goto err; 2062 } else { 2063 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH); 2064 nvme_tcp_pdu_calc_psh_len(tqpair->pdu_in_progress, tqpair->host_hdgst_enable); 2065 return; 2066 } 2067 err: 2068 nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset); 2069 } 2070 2071 static int 2072 nvmf_tcp_pdu_payload_insert_dif(struct nvme_tcp_pdu *pdu, uint32_t read_offset, 2073 int read_len) 2074 { 2075 int rc; 2076 2077 rc = spdk_dif_generate_stream(pdu->data_iov, pdu->data_iovcnt, 2078 read_offset, read_len, pdu->dif_ctx); 2079 if (rc != 0) { 2080 SPDK_ERRLOG("DIF generate failed\n"); 2081 } 2082 2083 return rc; 2084 } 2085 2086 static int 2087 nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair) 2088 { 2089 int rc = 0; 2090 struct nvme_tcp_pdu *pdu; 2091 enum nvme_tcp_pdu_recv_state prev_state; 2092 uint32_t data_len; 2093 struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF(tqpair->qpair.transport, 2094 struct spdk_nvmf_tcp_transport, transport); 2095 2096 /* The loop here is to allow for several back-to-back state changes. */ 2097 do { 2098 prev_state = tqpair->recv_state; 2099 SPDK_DEBUGLOG(nvmf_tcp, "tqpair(%p) recv pdu entering state %d\n", tqpair, prev_state); 2100 2101 pdu = tqpair->pdu_in_progress; 2102 switch (tqpair->recv_state) { 2103 /* Wait for the common header */ 2104 case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY: 2105 case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH: 2106 if (spdk_unlikely(tqpair->state == NVME_TCP_QPAIR_STATE_INITIALIZING)) { 2107 return rc; 2108 } 2109 2110 rc = nvme_tcp_read_data(tqpair->sock, 2111 sizeof(struct spdk_nvme_tcp_common_pdu_hdr) - pdu->ch_valid_bytes, 2112 (void *)&pdu->hdr.common + pdu->ch_valid_bytes); 2113 if (rc < 0) { 2114 SPDK_DEBUGLOG(nvmf_tcp, "will disconnect tqpair=%p\n", tqpair); 2115 return NVME_TCP_PDU_FATAL; 2116 } else if (rc > 0) { 2117 pdu->ch_valid_bytes += rc; 2118 spdk_trace_record(TRACE_TCP_READ_FROM_SOCKET_DONE, 0, rc, 0, tqpair); 2119 if (spdk_likely(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY)) { 2120 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH); 2121 } 2122 } 2123 2124 if (pdu->ch_valid_bytes < sizeof(struct spdk_nvme_tcp_common_pdu_hdr)) { 2125 return NVME_TCP_PDU_IN_PROGRESS; 2126 } 2127 2128 /* The command header of this PDU has now been read from the socket. */ 2129 nvmf_tcp_pdu_ch_handle(tqpair); 2130 break; 2131 /* Wait for the pdu specific header */ 2132 case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH: 2133 rc = nvme_tcp_read_data(tqpair->sock, 2134 pdu->psh_len - pdu->psh_valid_bytes, 2135 (void *)&pdu->hdr.raw + sizeof(struct spdk_nvme_tcp_common_pdu_hdr) + pdu->psh_valid_bytes); 2136 if (rc < 0) { 2137 return NVME_TCP_PDU_FATAL; 2138 } else if (rc > 0) { 2139 spdk_trace_record(TRACE_TCP_READ_FROM_SOCKET_DONE, 0, rc, 0, tqpair); 2140 pdu->psh_valid_bytes += rc; 2141 } 2142 2143 if (pdu->psh_valid_bytes < pdu->psh_len) { 2144 return NVME_TCP_PDU_IN_PROGRESS; 2145 } 2146 2147 /* All header(ch, psh, head digist) of this PDU has now been read from the socket. */ 2148 nvmf_tcp_pdu_psh_handle(tqpair, ttransport); 2149 break; 2150 /* Wait for the req slot */ 2151 case NVME_TCP_PDU_RECV_STATE_AWAIT_REQ: 2152 nvmf_tcp_capsule_cmd_hdr_handle(ttransport, tqpair, pdu); 2153 break; 2154 case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD: 2155 /* check whether the data is valid, if not we just return */ 2156 if (!pdu->data_len) { 2157 return NVME_TCP_PDU_IN_PROGRESS; 2158 } 2159 2160 data_len = pdu->data_len; 2161 /* data digest */ 2162 if (spdk_unlikely((pdu->hdr.common.pdu_type != SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ) && 2163 tqpair->host_ddgst_enable)) { 2164 data_len += SPDK_NVME_TCP_DIGEST_LEN; 2165 pdu->ddgst_enable = true; 2166 } 2167 2168 rc = nvme_tcp_read_payload_data(tqpair->sock, pdu); 2169 if (rc < 0) { 2170 return NVME_TCP_PDU_FATAL; 2171 } 2172 pdu->rw_offset += rc; 2173 2174 if (spdk_unlikely(pdu->dif_ctx != NULL)) { 2175 rc = nvmf_tcp_pdu_payload_insert_dif(pdu, pdu->rw_offset - rc, rc); 2176 if (rc != 0) { 2177 return NVME_TCP_PDU_FATAL; 2178 } 2179 } 2180 2181 if (pdu->rw_offset < data_len) { 2182 return NVME_TCP_PDU_IN_PROGRESS; 2183 } 2184 2185 /* All of this PDU has now been read from the socket. */ 2186 nvmf_tcp_pdu_payload_handle(tqpair, ttransport); 2187 break; 2188 case NVME_TCP_PDU_RECV_STATE_ERROR: 2189 if (!spdk_sock_is_connected(tqpair->sock)) { 2190 return NVME_TCP_PDU_FATAL; 2191 } 2192 break; 2193 default: 2194 assert(0); 2195 SPDK_ERRLOG("code should not come to here"); 2196 break; 2197 } 2198 } while (tqpair->recv_state != prev_state); 2199 2200 return rc; 2201 } 2202 2203 static inline void * 2204 nvmf_tcp_control_msg_get(struct spdk_nvmf_tcp_control_msg_list *list) 2205 { 2206 struct spdk_nvmf_tcp_control_msg *msg; 2207 2208 assert(list); 2209 2210 msg = STAILQ_FIRST(&list->free_msgs); 2211 if (!msg) { 2212 SPDK_DEBUGLOG(nvmf_tcp, "Out of control messages\n"); 2213 return NULL; 2214 } 2215 STAILQ_REMOVE_HEAD(&list->free_msgs, link); 2216 return msg; 2217 } 2218 2219 static inline void 2220 nvmf_tcp_control_msg_put(struct spdk_nvmf_tcp_control_msg_list *list, void *_msg) 2221 { 2222 struct spdk_nvmf_tcp_control_msg *msg = _msg; 2223 2224 assert(list); 2225 STAILQ_INSERT_HEAD(&list->free_msgs, msg, link); 2226 } 2227 2228 static int 2229 nvmf_tcp_req_parse_sgl(struct spdk_nvmf_tcp_req *tcp_req, 2230 struct spdk_nvmf_transport *transport, 2231 struct spdk_nvmf_transport_poll_group *group) 2232 { 2233 struct spdk_nvmf_request *req = &tcp_req->req; 2234 struct spdk_nvme_cmd *cmd; 2235 struct spdk_nvme_cpl *rsp; 2236 struct spdk_nvme_sgl_descriptor *sgl; 2237 struct spdk_nvmf_tcp_poll_group *tgroup; 2238 uint32_t length; 2239 2240 cmd = &req->cmd->nvme_cmd; 2241 rsp = &req->rsp->nvme_cpl; 2242 sgl = &cmd->dptr.sgl1; 2243 2244 length = sgl->unkeyed.length; 2245 2246 if (sgl->generic.type == SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK && 2247 sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_TRANSPORT) { 2248 if (length > transport->opts.max_io_size) { 2249 SPDK_ERRLOG("SGL length 0x%x exceeds max io size 0x%x\n", 2250 length, transport->opts.max_io_size); 2251 rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID; 2252 return -1; 2253 } 2254 2255 /* fill request length and populate iovs */ 2256 req->length = length; 2257 2258 SPDK_DEBUGLOG(nvmf_tcp, "Data requested length= 0x%x\n", length); 2259 2260 if (spdk_unlikely(req->dif_enabled)) { 2261 req->dif.orig_length = length; 2262 length = spdk_dif_get_length_with_md(length, &req->dif.dif_ctx); 2263 req->dif.elba_length = length; 2264 } 2265 2266 if (nvmf_ctrlr_use_zcopy(req)) { 2267 SPDK_DEBUGLOG(nvmf_tcp, "Using zero-copy to execute request %p\n", tcp_req); 2268 req->data_from_pool = false; 2269 return 0; 2270 } 2271 2272 if (spdk_nvmf_request_get_buffers(req, group, transport, length)) { 2273 /* No available buffers. Queue this request up. */ 2274 SPDK_DEBUGLOG(nvmf_tcp, "No available large data buffers. Queueing request %p\n", 2275 tcp_req); 2276 return 0; 2277 } 2278 2279 /* backward compatible */ 2280 req->data = req->iov[0].iov_base; 2281 2282 SPDK_DEBUGLOG(nvmf_tcp, "Request %p took %d buffer/s from central pool, and data=%p\n", 2283 tcp_req, req->iovcnt, req->data); 2284 2285 return 0; 2286 } else if (sgl->generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK && 2287 sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET) { 2288 uint64_t offset = sgl->address; 2289 uint32_t max_len = transport->opts.in_capsule_data_size; 2290 assert(tcp_req->has_in_capsule_data); 2291 2292 SPDK_DEBUGLOG(nvmf_tcp, "In-capsule data: offset 0x%" PRIx64 ", length 0x%x\n", 2293 offset, length); 2294 2295 if (offset > max_len) { 2296 SPDK_ERRLOG("In-capsule offset 0x%" PRIx64 " exceeds capsule length 0x%x\n", 2297 offset, max_len); 2298 rsp->status.sc = SPDK_NVME_SC_INVALID_SGL_OFFSET; 2299 return -1; 2300 } 2301 max_len -= (uint32_t)offset; 2302 2303 if (spdk_unlikely(length > max_len)) { 2304 /* According to the SPEC we should support ICD up to 8192 bytes for admin and fabric commands */ 2305 if (length <= SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE && 2306 (cmd->opc == SPDK_NVME_OPC_FABRIC || req->qpair->qid == 0)) { 2307 2308 /* Get a buffer from dedicated list */ 2309 SPDK_DEBUGLOG(nvmf_tcp, "Getting a buffer from control msg list\n"); 2310 tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group); 2311 assert(tgroup->control_msg_list); 2312 req->data = nvmf_tcp_control_msg_get(tgroup->control_msg_list); 2313 if (!req->data) { 2314 /* No available buffers. Queue this request up. */ 2315 SPDK_DEBUGLOG(nvmf_tcp, "No available ICD buffers. Queueing request %p\n", tcp_req); 2316 return 0; 2317 } 2318 } else { 2319 SPDK_ERRLOG("In-capsule data length 0x%x exceeds capsule length 0x%x\n", 2320 length, max_len); 2321 rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID; 2322 return -1; 2323 } 2324 } else { 2325 req->data = tcp_req->buf; 2326 } 2327 2328 req->length = length; 2329 req->data_from_pool = false; 2330 2331 if (spdk_unlikely(req->dif_enabled)) { 2332 length = spdk_dif_get_length_with_md(length, &req->dif.dif_ctx); 2333 req->dif.elba_length = length; 2334 } 2335 2336 req->iov[0].iov_base = req->data; 2337 req->iov[0].iov_len = length; 2338 req->iovcnt = 1; 2339 2340 return 0; 2341 } 2342 2343 SPDK_ERRLOG("Invalid NVMf I/O Command SGL: Type 0x%x, Subtype 0x%x\n", 2344 sgl->generic.type, sgl->generic.subtype); 2345 rsp->status.sc = SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID; 2346 return -1; 2347 } 2348 2349 static inline enum spdk_nvme_media_error_status_code 2350 nvmf_tcp_dif_error_to_compl_status(uint8_t err_type) { 2351 enum spdk_nvme_media_error_status_code result; 2352 2353 switch (err_type) 2354 { 2355 case SPDK_DIF_REFTAG_ERROR: 2356 result = SPDK_NVME_SC_REFERENCE_TAG_CHECK_ERROR; 2357 break; 2358 case SPDK_DIF_APPTAG_ERROR: 2359 result = SPDK_NVME_SC_APPLICATION_TAG_CHECK_ERROR; 2360 break; 2361 case SPDK_DIF_GUARD_ERROR: 2362 result = SPDK_NVME_SC_GUARD_CHECK_ERROR; 2363 break; 2364 default: 2365 SPDK_UNREACHABLE(); 2366 break; 2367 } 2368 2369 return result; 2370 } 2371 2372 static void 2373 _nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair, 2374 struct spdk_nvmf_tcp_req *tcp_req) 2375 { 2376 struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF( 2377 tqpair->qpair.transport, struct spdk_nvmf_tcp_transport, transport); 2378 struct nvme_tcp_pdu *rsp_pdu; 2379 struct spdk_nvme_tcp_c2h_data_hdr *c2h_data; 2380 uint32_t plen, pdo, alignment; 2381 int rc; 2382 2383 SPDK_DEBUGLOG(nvmf_tcp, "enter\n"); 2384 2385 rsp_pdu = tcp_req->pdu; 2386 assert(rsp_pdu != NULL); 2387 assert(tcp_req->pdu_in_use); 2388 2389 c2h_data = &rsp_pdu->hdr.c2h_data; 2390 c2h_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_DATA; 2391 plen = c2h_data->common.hlen = sizeof(*c2h_data); 2392 2393 if (tqpair->host_hdgst_enable) { 2394 plen += SPDK_NVME_TCP_DIGEST_LEN; 2395 c2h_data->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF; 2396 } 2397 2398 /* set the psh */ 2399 c2h_data->cccid = tcp_req->req.cmd->nvme_cmd.cid; 2400 c2h_data->datal = tcp_req->req.length - tcp_req->pdu->rw_offset; 2401 c2h_data->datao = tcp_req->pdu->rw_offset; 2402 2403 /* set the padding */ 2404 rsp_pdu->padding_len = 0; 2405 pdo = plen; 2406 if (tqpair->cpda) { 2407 alignment = (tqpair->cpda + 1) << 2; 2408 if (plen % alignment != 0) { 2409 pdo = (plen + alignment) / alignment * alignment; 2410 rsp_pdu->padding_len = pdo - plen; 2411 plen = pdo; 2412 } 2413 } 2414 2415 c2h_data->common.pdo = pdo; 2416 plen += c2h_data->datal; 2417 if (tqpair->host_ddgst_enable) { 2418 c2h_data->common.flags |= SPDK_NVME_TCP_CH_FLAGS_DDGSTF; 2419 plen += SPDK_NVME_TCP_DIGEST_LEN; 2420 } 2421 2422 c2h_data->common.plen = plen; 2423 2424 if (spdk_unlikely(tcp_req->req.dif_enabled)) { 2425 rsp_pdu->dif_ctx = &tcp_req->req.dif.dif_ctx; 2426 } 2427 2428 nvme_tcp_pdu_set_data_buf(rsp_pdu, tcp_req->req.iov, tcp_req->req.iovcnt, 2429 c2h_data->datao, c2h_data->datal); 2430 2431 2432 c2h_data->common.flags |= SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU; 2433 /* Need to send the capsule response if response is not all 0 */ 2434 if (ttransport->tcp_opts.c2h_success && 2435 tcp_req->rsp.cdw0 == 0 && tcp_req->rsp.cdw1 == 0) { 2436 c2h_data->common.flags |= SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS; 2437 } 2438 2439 if (spdk_unlikely(tcp_req->req.dif_enabled)) { 2440 struct spdk_nvme_cpl *rsp = &tcp_req->req.rsp->nvme_cpl; 2441 struct spdk_dif_error err_blk = {}; 2442 uint32_t mapped_length = 0; 2443 uint32_t available_iovs = SPDK_COUNTOF(rsp_pdu->iov); 2444 uint32_t ddgst_len = 0; 2445 2446 if (tqpair->host_ddgst_enable) { 2447 /* Data digest consumes additional iov entry */ 2448 available_iovs--; 2449 /* plen needs to be updated since nvme_tcp_build_iovs compares expected and actual plen */ 2450 ddgst_len = SPDK_NVME_TCP_DIGEST_LEN; 2451 c2h_data->common.plen -= ddgst_len; 2452 } 2453 /* Temp call to estimate if data can be described by limited number of iovs. 2454 * iov vector will be rebuilt in nvmf_tcp_qpair_write_pdu */ 2455 nvme_tcp_build_iovs(rsp_pdu->iov, available_iovs, rsp_pdu, tqpair->host_hdgst_enable, 2456 false, &mapped_length); 2457 2458 if (mapped_length != c2h_data->common.plen) { 2459 c2h_data->datal = mapped_length - (c2h_data->common.plen - c2h_data->datal); 2460 SPDK_DEBUGLOG(nvmf_tcp, 2461 "Part C2H, data_len %u (of %u), PDU len %u, updated PDU len %u, offset %u\n", 2462 c2h_data->datal, tcp_req->req.length, c2h_data->common.plen, mapped_length, rsp_pdu->rw_offset); 2463 c2h_data->common.plen = mapped_length; 2464 2465 /* Rebuild pdu->data_iov since data length is changed */ 2466 nvme_tcp_pdu_set_data_buf(rsp_pdu, tcp_req->req.iov, tcp_req->req.iovcnt, c2h_data->datao, 2467 c2h_data->datal); 2468 2469 c2h_data->common.flags &= ~(SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU | 2470 SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS); 2471 } 2472 2473 c2h_data->common.plen += ddgst_len; 2474 2475 assert(rsp_pdu->rw_offset <= tcp_req->req.length); 2476 2477 rc = spdk_dif_verify_stream(rsp_pdu->data_iov, rsp_pdu->data_iovcnt, 2478 0, rsp_pdu->data_len, rsp_pdu->dif_ctx, &err_blk); 2479 if (rc != 0) { 2480 SPDK_ERRLOG("DIF error detected. type=%d, offset=%" PRIu32 "\n", 2481 err_blk.err_type, err_blk.err_offset); 2482 rsp->status.sct = SPDK_NVME_SCT_MEDIA_ERROR; 2483 rsp->status.sc = nvmf_tcp_dif_error_to_compl_status(err_blk.err_type); 2484 nvmf_tcp_req_pdu_fini(tcp_req); 2485 nvmf_tcp_send_capsule_resp_pdu(tcp_req, tqpair); 2486 return; 2487 } 2488 } 2489 2490 rsp_pdu->rw_offset += c2h_data->datal; 2491 nvmf_tcp_qpair_write_pdu(tqpair, rsp_pdu, nvmf_tcp_pdu_c2h_data_complete, tcp_req); 2492 } 2493 2494 static void 2495 nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair, 2496 struct spdk_nvmf_tcp_req *tcp_req) 2497 { 2498 nvmf_tcp_req_pdu_init(tcp_req); 2499 _nvmf_tcp_send_c2h_data(tqpair, tcp_req); 2500 } 2501 2502 static int 2503 request_transfer_out(struct spdk_nvmf_request *req) 2504 { 2505 struct spdk_nvmf_tcp_req *tcp_req; 2506 struct spdk_nvmf_qpair *qpair; 2507 struct spdk_nvmf_tcp_qpair *tqpair; 2508 struct spdk_nvme_cpl *rsp; 2509 2510 SPDK_DEBUGLOG(nvmf_tcp, "enter\n"); 2511 2512 qpair = req->qpair; 2513 rsp = &req->rsp->nvme_cpl; 2514 tcp_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_tcp_req, req); 2515 2516 /* Advance our sq_head pointer */ 2517 if (qpair->sq_head == qpair->sq_head_max) { 2518 qpair->sq_head = 0; 2519 } else { 2520 qpair->sq_head++; 2521 } 2522 rsp->sqhd = qpair->sq_head; 2523 2524 tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct spdk_nvmf_tcp_qpair, qpair); 2525 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST); 2526 if (rsp->status.sc == SPDK_NVME_SC_SUCCESS && req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) { 2527 nvmf_tcp_send_c2h_data(tqpair, tcp_req); 2528 } else { 2529 nvmf_tcp_send_capsule_resp_pdu(tcp_req, tqpair); 2530 } 2531 2532 return 0; 2533 } 2534 2535 static void 2536 nvmf_tcp_set_in_capsule_data(struct spdk_nvmf_tcp_qpair *tqpair, 2537 struct spdk_nvmf_tcp_req *tcp_req) 2538 { 2539 struct nvme_tcp_pdu *pdu; 2540 uint32_t plen = 0; 2541 2542 pdu = tqpair->pdu_in_progress; 2543 plen = pdu->hdr.common.hlen; 2544 2545 if (tqpair->host_hdgst_enable) { 2546 plen += SPDK_NVME_TCP_DIGEST_LEN; 2547 } 2548 2549 if (pdu->hdr.common.plen != plen) { 2550 tcp_req->has_in_capsule_data = true; 2551 } 2552 } 2553 2554 static bool 2555 nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport, 2556 struct spdk_nvmf_tcp_req *tcp_req) 2557 { 2558 struct spdk_nvmf_tcp_qpair *tqpair; 2559 int rc; 2560 enum spdk_nvmf_tcp_req_state prev_state; 2561 bool progress = false; 2562 struct spdk_nvmf_transport *transport = &ttransport->transport; 2563 struct spdk_nvmf_transport_poll_group *group; 2564 struct spdk_nvmf_tcp_poll_group *tgroup; 2565 2566 tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct spdk_nvmf_tcp_qpair, qpair); 2567 group = &tqpair->group->group; 2568 assert(tcp_req->state != TCP_REQUEST_STATE_FREE); 2569 2570 /* If the qpair is not active, we need to abort the outstanding requests. */ 2571 if (tqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) { 2572 if (tcp_req->state == TCP_REQUEST_STATE_NEED_BUFFER) { 2573 STAILQ_REMOVE(&group->pending_buf_queue, &tcp_req->req, spdk_nvmf_request, buf_link); 2574 } 2575 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_COMPLETED); 2576 } 2577 2578 /* The loop here is to allow for several back-to-back state changes. */ 2579 do { 2580 prev_state = tcp_req->state; 2581 2582 SPDK_DEBUGLOG(nvmf_tcp, "Request %p entering state %d on tqpair=%p\n", tcp_req, prev_state, 2583 tqpair); 2584 2585 switch (tcp_req->state) { 2586 case TCP_REQUEST_STATE_FREE: 2587 /* Some external code must kick a request into TCP_REQUEST_STATE_NEW 2588 * to escape this state. */ 2589 break; 2590 case TCP_REQUEST_STATE_NEW: 2591 spdk_trace_record(TRACE_TCP_REQUEST_STATE_NEW, 0, 0, (uintptr_t)tcp_req, tqpair); 2592 2593 /* copy the cmd from the receive pdu */ 2594 tcp_req->cmd = tqpair->pdu_in_progress->hdr.capsule_cmd.ccsqe; 2595 2596 if (spdk_unlikely(spdk_nvmf_request_get_dif_ctx(&tcp_req->req, &tcp_req->req.dif.dif_ctx))) { 2597 tcp_req->req.dif_enabled = true; 2598 tqpair->pdu_in_progress->dif_ctx = &tcp_req->req.dif.dif_ctx; 2599 } 2600 2601 /* The next state transition depends on the data transfer needs of this request. */ 2602 tcp_req->req.xfer = spdk_nvmf_req_get_xfer(&tcp_req->req); 2603 2604 if (spdk_unlikely(tcp_req->req.xfer == SPDK_NVME_DATA_BIDIRECTIONAL)) { 2605 tcp_req->req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 2606 tcp_req->req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE; 2607 tcp_req->req.rsp->nvme_cpl.cid = tcp_req->req.cmd->nvme_cmd.cid; 2608 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY); 2609 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE); 2610 SPDK_DEBUGLOG(nvmf_tcp, "Request %p: invalid xfer type (BIDIRECTIONAL)\n", tcp_req); 2611 break; 2612 } 2613 2614 /* If no data to transfer, ready to execute. */ 2615 if (tcp_req->req.xfer == SPDK_NVME_DATA_NONE) { 2616 /* Reset the tqpair receiving pdu state */ 2617 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY); 2618 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE); 2619 break; 2620 } 2621 2622 nvmf_tcp_set_in_capsule_data(tqpair, tcp_req); 2623 2624 if (!tcp_req->has_in_capsule_data) { 2625 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY); 2626 } 2627 2628 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_NEED_BUFFER); 2629 STAILQ_INSERT_TAIL(&group->pending_buf_queue, &tcp_req->req, buf_link); 2630 break; 2631 case TCP_REQUEST_STATE_NEED_BUFFER: 2632 spdk_trace_record(TRACE_TCP_REQUEST_STATE_NEED_BUFFER, 0, 0, (uintptr_t)tcp_req, tqpair); 2633 2634 assert(tcp_req->req.xfer != SPDK_NVME_DATA_NONE); 2635 2636 if (!tcp_req->has_in_capsule_data && (&tcp_req->req != STAILQ_FIRST(&group->pending_buf_queue))) { 2637 SPDK_DEBUGLOG(nvmf_tcp, 2638 "Not the first element to wait for the buf for tcp_req(%p) on tqpair=%p\n", 2639 tcp_req, tqpair); 2640 /* This request needs to wait in line to obtain a buffer */ 2641 break; 2642 } 2643 2644 /* Try to get a data buffer */ 2645 rc = nvmf_tcp_req_parse_sgl(tcp_req, transport, group); 2646 if (rc < 0) { 2647 STAILQ_REMOVE_HEAD(&group->pending_buf_queue, buf_link); 2648 /* Reset the tqpair receiving pdu state */ 2649 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR); 2650 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE); 2651 tcp_req->req.rsp->nvme_cpl.cid = tcp_req->req.cmd->nvme_cmd.cid; 2652 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY); 2653 break; 2654 } 2655 2656 /* Get a zcopy buffer if the request can be serviced through zcopy */ 2657 if (spdk_nvmf_request_using_zcopy(&tcp_req->req)) { 2658 if (spdk_unlikely(tcp_req->req.dif_enabled)) { 2659 assert(tcp_req->req.dif.elba_length >= tcp_req->req.length); 2660 tcp_req->req.length = tcp_req->req.dif.elba_length; 2661 } 2662 2663 STAILQ_REMOVE(&group->pending_buf_queue, &tcp_req->req, spdk_nvmf_request, buf_link); 2664 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_AWAITING_ZCOPY_START); 2665 spdk_nvmf_request_zcopy_start(&tcp_req->req); 2666 break; 2667 } 2668 2669 if (!tcp_req->req.data) { 2670 SPDK_DEBUGLOG(nvmf_tcp, "No buffer allocated for tcp_req(%p) on tqpair(%p\n)", 2671 tcp_req, tqpair); 2672 /* No buffers available. */ 2673 break; 2674 } 2675 2676 STAILQ_REMOVE(&group->pending_buf_queue, &tcp_req->req, spdk_nvmf_request, buf_link); 2677 2678 /* If data is transferring from host to controller, we need to do a transfer from the host. */ 2679 if (tcp_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) { 2680 if (tcp_req->req.data_from_pool) { 2681 SPDK_DEBUGLOG(nvmf_tcp, "Sending R2T for tcp_req(%p) on tqpair=%p\n", tcp_req, tqpair); 2682 nvmf_tcp_send_r2t_pdu(tqpair, tcp_req); 2683 } else { 2684 struct nvme_tcp_pdu *pdu; 2685 2686 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER); 2687 2688 pdu = tqpair->pdu_in_progress; 2689 SPDK_DEBUGLOG(nvmf_tcp, "Not need to send r2t for tcp_req(%p) on tqpair=%p\n", tcp_req, 2690 tqpair); 2691 /* No need to send r2t, contained in the capsuled data */ 2692 nvme_tcp_pdu_set_data_buf(pdu, tcp_req->req.iov, tcp_req->req.iovcnt, 2693 0, tcp_req->req.length); 2694 nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD); 2695 } 2696 break; 2697 } 2698 2699 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE); 2700 break; 2701 case TCP_REQUEST_STATE_AWAITING_ZCOPY_START: 2702 spdk_trace_record(TRACE_TCP_REQUEST_STATE_AWAIT_ZCOPY_START, 0, 0, 2703 (uintptr_t)tcp_req, tqpair); 2704 /* Some external code must kick a request into TCP_REQUEST_STATE_ZCOPY_START_COMPLETED 2705 * to escape this state. */ 2706 break; 2707 case TCP_REQUEST_STATE_ZCOPY_START_COMPLETED: 2708 spdk_trace_record(TRACE_TCP_REQUEST_STATE_ZCOPY_START_COMPLETED, 0, 0, 2709 (uintptr_t)tcp_req, tqpair); 2710 if (spdk_unlikely(spdk_nvme_cpl_is_error(&tcp_req->req.rsp->nvme_cpl))) { 2711 SPDK_DEBUGLOG(nvmf_tcp, "Zero-copy start failed for tcp_req(%p) on tqpair=%p\n", 2712 tcp_req, tqpair); 2713 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE); 2714 break; 2715 } 2716 if (tcp_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) { 2717 SPDK_DEBUGLOG(nvmf_tcp, "Sending R2T for tcp_req(%p) on tqpair=%p\n", tcp_req, tqpair); 2718 nvmf_tcp_send_r2t_pdu(tqpair, tcp_req); 2719 } else { 2720 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_EXECUTED); 2721 } 2722 break; 2723 case TCP_REQUEST_STATE_AWAITING_R2T_ACK: 2724 spdk_trace_record(TRACE_TCP_REQUEST_STATE_AWAIT_R2T_ACK, 0, 0, (uintptr_t)tcp_req, 2725 tqpair); 2726 /* The R2T completion or the h2c data incoming will kick it out of this state. */ 2727 break; 2728 case TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER: 2729 2730 spdk_trace_record(TRACE_TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER, 0, 0, 2731 (uintptr_t)tcp_req, tqpair); 2732 /* Some external code must kick a request into TCP_REQUEST_STATE_READY_TO_EXECUTE 2733 * to escape this state. */ 2734 break; 2735 case TCP_REQUEST_STATE_READY_TO_EXECUTE: 2736 spdk_trace_record(TRACE_TCP_REQUEST_STATE_READY_TO_EXECUTE, 0, 0, (uintptr_t)tcp_req, 2737 tqpair); 2738 2739 if (spdk_unlikely(tcp_req->req.dif_enabled)) { 2740 assert(tcp_req->req.dif.elba_length >= tcp_req->req.length); 2741 tcp_req->req.length = tcp_req->req.dif.elba_length; 2742 } 2743 2744 if (!spdk_nvmf_request_using_zcopy(&tcp_req->req)) { 2745 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_EXECUTING); 2746 spdk_nvmf_request_exec(&tcp_req->req); 2747 } else { 2748 /* For zero-copy, only requests with data coming from host to the 2749 * controller can end up here. */ 2750 assert(tcp_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER); 2751 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_AWAITING_ZCOPY_COMMIT); 2752 spdk_nvmf_request_zcopy_end(&tcp_req->req, true); 2753 } 2754 break; 2755 case TCP_REQUEST_STATE_EXECUTING: 2756 spdk_trace_record(TRACE_TCP_REQUEST_STATE_EXECUTING, 0, 0, (uintptr_t)tcp_req, tqpair); 2757 /* Some external code must kick a request into TCP_REQUEST_STATE_EXECUTED 2758 * to escape this state. */ 2759 break; 2760 case TCP_REQUEST_STATE_AWAITING_ZCOPY_COMMIT: 2761 spdk_trace_record(TRACE_TCP_REQUEST_STATE_AWAIT_ZCOPY_COMMIT, 0, 0, 2762 (uintptr_t)tcp_req, tqpair); 2763 /* Some external code must kick a request into TCP_REQUEST_STATE_EXECUTED 2764 * to escape this state. */ 2765 break; 2766 case TCP_REQUEST_STATE_EXECUTED: 2767 spdk_trace_record(TRACE_TCP_REQUEST_STATE_EXECUTED, 0, 0, (uintptr_t)tcp_req, tqpair); 2768 2769 if (spdk_unlikely(tcp_req->req.dif_enabled)) { 2770 tcp_req->req.length = tcp_req->req.dif.orig_length; 2771 } 2772 2773 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE); 2774 break; 2775 case TCP_REQUEST_STATE_READY_TO_COMPLETE: 2776 spdk_trace_record(TRACE_TCP_REQUEST_STATE_READY_TO_COMPLETE, 0, 0, (uintptr_t)tcp_req, 2777 tqpair); 2778 rc = request_transfer_out(&tcp_req->req); 2779 assert(rc == 0); /* No good way to handle this currently */ 2780 break; 2781 case TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST: 2782 spdk_trace_record(TRACE_TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST, 0, 0, 2783 (uintptr_t)tcp_req, tqpair); 2784 /* Some external code must kick a request into TCP_REQUEST_STATE_COMPLETED 2785 * to escape this state. */ 2786 break; 2787 case TCP_REQUEST_STATE_AWAITING_ZCOPY_RELEASE: 2788 spdk_trace_record(TRACE_TCP_REQUEST_STATE_AWAIT_ZCOPY_RELEASE, 0, 0, 2789 (uintptr_t)tcp_req, tqpair); 2790 /* Some external code must kick a request into TCP_REQUEST_STATE_COMPLETED 2791 * to escape this state. */ 2792 break; 2793 case TCP_REQUEST_STATE_COMPLETED: 2794 spdk_trace_record(TRACE_TCP_REQUEST_STATE_COMPLETED, 0, 0, (uintptr_t)tcp_req, tqpair); 2795 if (tcp_req->req.data_from_pool) { 2796 spdk_nvmf_request_free_buffers(&tcp_req->req, group, transport); 2797 } else if (spdk_unlikely(tcp_req->has_in_capsule_data && 2798 (tcp_req->cmd.opc == SPDK_NVME_OPC_FABRIC || 2799 tqpair->qpair.qid == 0) && tcp_req->req.length > transport->opts.in_capsule_data_size)) { 2800 tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group); 2801 assert(tgroup->control_msg_list); 2802 SPDK_DEBUGLOG(nvmf_tcp, "Put buf to control msg list\n"); 2803 nvmf_tcp_control_msg_put(tgroup->control_msg_list, tcp_req->req.data); 2804 } else if (tcp_req->req.zcopy_bdev_io != NULL) { 2805 /* If the request has an unreleased zcopy bdev_io, it's either a 2806 * read or a failed write */ 2807 assert(spdk_nvmf_request_using_zcopy(&tcp_req->req)); 2808 assert(tcp_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST || 2809 spdk_nvme_cpl_is_error(&tcp_req->req.rsp->nvme_cpl)); 2810 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_AWAITING_ZCOPY_RELEASE); 2811 spdk_nvmf_request_zcopy_end(&tcp_req->req, false); 2812 break; 2813 } 2814 tcp_req->req.length = 0; 2815 tcp_req->req.iovcnt = 0; 2816 tcp_req->req.data = NULL; 2817 2818 nvmf_tcp_req_pdu_fini(tcp_req); 2819 2820 nvmf_tcp_req_put(tqpair, tcp_req); 2821 break; 2822 case TCP_REQUEST_NUM_STATES: 2823 default: 2824 assert(0); 2825 break; 2826 } 2827 2828 if (tcp_req->state != prev_state) { 2829 progress = true; 2830 } 2831 } while (tcp_req->state != prev_state); 2832 2833 return progress; 2834 } 2835 2836 static void 2837 nvmf_tcp_sock_cb(void *arg, struct spdk_sock_group *group, struct spdk_sock *sock) 2838 { 2839 struct spdk_nvmf_tcp_qpair *tqpair = arg; 2840 int rc; 2841 2842 assert(tqpair != NULL); 2843 rc = nvmf_tcp_sock_process(tqpair); 2844 2845 /* If there was a new socket error, disconnect */ 2846 if (rc < 0) { 2847 nvmf_tcp_qpair_disconnect(tqpair); 2848 } 2849 } 2850 2851 static int 2852 nvmf_tcp_poll_group_add(struct spdk_nvmf_transport_poll_group *group, 2853 struct spdk_nvmf_qpair *qpair) 2854 { 2855 struct spdk_nvmf_tcp_poll_group *tgroup; 2856 struct spdk_nvmf_tcp_qpair *tqpair; 2857 int rc; 2858 2859 tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group); 2860 tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair); 2861 2862 rc = nvmf_tcp_qpair_sock_init(tqpair); 2863 if (rc != 0) { 2864 SPDK_ERRLOG("Cannot set sock opt for tqpair=%p\n", tqpair); 2865 return -1; 2866 } 2867 2868 rc = nvmf_tcp_qpair_init(&tqpair->qpair); 2869 if (rc < 0) { 2870 SPDK_ERRLOG("Cannot init tqpair=%p\n", tqpair); 2871 return -1; 2872 } 2873 2874 rc = nvmf_tcp_qpair_init_mem_resource(tqpair); 2875 if (rc < 0) { 2876 SPDK_ERRLOG("Cannot init memory resource info for tqpair=%p\n", tqpair); 2877 return -1; 2878 } 2879 2880 rc = spdk_sock_group_add_sock(tgroup->sock_group, tqpair->sock, 2881 nvmf_tcp_sock_cb, tqpair); 2882 if (rc != 0) { 2883 SPDK_ERRLOG("Could not add sock to sock_group: %s (%d)\n", 2884 spdk_strerror(errno), errno); 2885 return -1; 2886 } 2887 2888 tqpair->group = tgroup; 2889 nvmf_tcp_qpair_set_state(tqpair, NVME_TCP_QPAIR_STATE_INVALID); 2890 TAILQ_INSERT_TAIL(&tgroup->qpairs, tqpair, link); 2891 2892 return 0; 2893 } 2894 2895 static int 2896 nvmf_tcp_poll_group_remove(struct spdk_nvmf_transport_poll_group *group, 2897 struct spdk_nvmf_qpair *qpair) 2898 { 2899 struct spdk_nvmf_tcp_poll_group *tgroup; 2900 struct spdk_nvmf_tcp_qpair *tqpair; 2901 int rc; 2902 2903 tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group); 2904 tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair); 2905 2906 assert(tqpair->group == tgroup); 2907 2908 SPDK_DEBUGLOG(nvmf_tcp, "remove tqpair=%p from the tgroup=%p\n", tqpair, tgroup); 2909 if (tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_REQ) { 2910 TAILQ_REMOVE(&tgroup->await_req, tqpair, link); 2911 } else { 2912 TAILQ_REMOVE(&tgroup->qpairs, tqpair, link); 2913 } 2914 2915 rc = spdk_sock_group_remove_sock(tgroup->sock_group, tqpair->sock); 2916 if (rc != 0) { 2917 SPDK_ERRLOG("Could not remove sock from sock_group: %s (%d)\n", 2918 spdk_strerror(errno), errno); 2919 } 2920 2921 return rc; 2922 } 2923 2924 static int 2925 nvmf_tcp_req_complete(struct spdk_nvmf_request *req) 2926 { 2927 struct spdk_nvmf_tcp_transport *ttransport; 2928 struct spdk_nvmf_tcp_req *tcp_req; 2929 2930 ttransport = SPDK_CONTAINEROF(req->qpair->transport, struct spdk_nvmf_tcp_transport, transport); 2931 tcp_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_tcp_req, req); 2932 2933 switch (tcp_req->state) { 2934 case TCP_REQUEST_STATE_EXECUTING: 2935 case TCP_REQUEST_STATE_AWAITING_ZCOPY_COMMIT: 2936 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_EXECUTED); 2937 break; 2938 case TCP_REQUEST_STATE_AWAITING_ZCOPY_START: 2939 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_ZCOPY_START_COMPLETED); 2940 break; 2941 case TCP_REQUEST_STATE_AWAITING_ZCOPY_RELEASE: 2942 nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_COMPLETED); 2943 break; 2944 default: 2945 assert(0 && "Unexpected request state"); 2946 break; 2947 } 2948 2949 nvmf_tcp_req_process(ttransport, tcp_req); 2950 2951 return 0; 2952 } 2953 2954 static void 2955 nvmf_tcp_close_qpair(struct spdk_nvmf_qpair *qpair, 2956 spdk_nvmf_transport_qpair_fini_cb cb_fn, void *cb_arg) 2957 { 2958 struct spdk_nvmf_tcp_qpair *tqpair; 2959 2960 SPDK_DEBUGLOG(nvmf_tcp, "Qpair: %p\n", qpair); 2961 2962 tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair); 2963 nvmf_tcp_qpair_set_state(tqpair, NVME_TCP_QPAIR_STATE_EXITED); 2964 nvmf_tcp_qpair_destroy(tqpair); 2965 2966 if (cb_fn) { 2967 cb_fn(cb_arg); 2968 } 2969 } 2970 2971 static int 2972 nvmf_tcp_poll_group_poll(struct spdk_nvmf_transport_poll_group *group) 2973 { 2974 struct spdk_nvmf_tcp_poll_group *tgroup; 2975 int rc; 2976 struct spdk_nvmf_request *req, *req_tmp; 2977 struct spdk_nvmf_tcp_req *tcp_req; 2978 struct spdk_nvmf_tcp_qpair *tqpair, *tqpair_tmp; 2979 struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF(group->transport, 2980 struct spdk_nvmf_tcp_transport, transport); 2981 2982 tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group); 2983 2984 if (spdk_unlikely(TAILQ_EMPTY(&tgroup->qpairs) && TAILQ_EMPTY(&tgroup->await_req))) { 2985 return 0; 2986 } 2987 2988 STAILQ_FOREACH_SAFE(req, &group->pending_buf_queue, buf_link, req_tmp) { 2989 tcp_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_tcp_req, req); 2990 if (nvmf_tcp_req_process(ttransport, tcp_req) == false) { 2991 break; 2992 } 2993 } 2994 2995 rc = spdk_sock_group_poll(tgroup->sock_group); 2996 if (rc < 0) { 2997 SPDK_ERRLOG("Failed to poll sock_group=%p\n", tgroup->sock_group); 2998 } 2999 3000 TAILQ_FOREACH_SAFE(tqpair, &tgroup->await_req, link, tqpair_tmp) { 3001 nvmf_tcp_sock_process(tqpair); 3002 } 3003 3004 return rc; 3005 } 3006 3007 static int 3008 nvmf_tcp_qpair_get_trid(struct spdk_nvmf_qpair *qpair, 3009 struct spdk_nvme_transport_id *trid, bool peer) 3010 { 3011 struct spdk_nvmf_tcp_qpair *tqpair; 3012 uint16_t port; 3013 3014 tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair); 3015 spdk_nvme_trid_populate_transport(trid, SPDK_NVME_TRANSPORT_TCP); 3016 3017 if (peer) { 3018 snprintf(trid->traddr, sizeof(trid->traddr), "%s", tqpair->initiator_addr); 3019 port = tqpair->initiator_port; 3020 } else { 3021 snprintf(trid->traddr, sizeof(trid->traddr), "%s", tqpair->target_addr); 3022 port = tqpair->target_port; 3023 } 3024 3025 if (spdk_sock_is_ipv4(tqpair->sock)) { 3026 trid->adrfam = SPDK_NVMF_ADRFAM_IPV4; 3027 } else if (spdk_sock_is_ipv6(tqpair->sock)) { 3028 trid->adrfam = SPDK_NVMF_ADRFAM_IPV6; 3029 } else { 3030 return -1; 3031 } 3032 3033 snprintf(trid->trsvcid, sizeof(trid->trsvcid), "%d", port); 3034 return 0; 3035 } 3036 3037 static int 3038 nvmf_tcp_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair, 3039 struct spdk_nvme_transport_id *trid) 3040 { 3041 return nvmf_tcp_qpair_get_trid(qpair, trid, 0); 3042 } 3043 3044 static int 3045 nvmf_tcp_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair, 3046 struct spdk_nvme_transport_id *trid) 3047 { 3048 return nvmf_tcp_qpair_get_trid(qpair, trid, 1); 3049 } 3050 3051 static int 3052 nvmf_tcp_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair, 3053 struct spdk_nvme_transport_id *trid) 3054 { 3055 return nvmf_tcp_qpair_get_trid(qpair, trid, 0); 3056 } 3057 3058 static void 3059 nvmf_tcp_req_set_abort_status(struct spdk_nvmf_request *req, 3060 struct spdk_nvmf_tcp_req *tcp_req_to_abort) 3061 { 3062 tcp_req_to_abort->req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 3063 tcp_req_to_abort->req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 3064 tcp_req_to_abort->req.rsp->nvme_cpl.cid = tcp_req_to_abort->req.cmd->nvme_cmd.cid; 3065 3066 nvmf_tcp_req_set_state(tcp_req_to_abort, TCP_REQUEST_STATE_READY_TO_COMPLETE); 3067 3068 req->rsp->nvme_cpl.cdw0 &= ~1U; /* Command was successfully aborted. */ 3069 } 3070 3071 static int 3072 _nvmf_tcp_qpair_abort_request(void *ctx) 3073 { 3074 struct spdk_nvmf_request *req = ctx; 3075 struct spdk_nvmf_tcp_req *tcp_req_to_abort = SPDK_CONTAINEROF(req->req_to_abort, 3076 struct spdk_nvmf_tcp_req, req); 3077 struct spdk_nvmf_tcp_qpair *tqpair = SPDK_CONTAINEROF(req->req_to_abort->qpair, 3078 struct spdk_nvmf_tcp_qpair, qpair); 3079 struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF(tqpair->qpair.transport, 3080 struct spdk_nvmf_tcp_transport, transport); 3081 int rc; 3082 3083 spdk_poller_unregister(&req->poller); 3084 3085 switch (tcp_req_to_abort->state) { 3086 case TCP_REQUEST_STATE_EXECUTING: 3087 case TCP_REQUEST_STATE_AWAITING_ZCOPY_START: 3088 case TCP_REQUEST_STATE_AWAITING_ZCOPY_COMMIT: 3089 rc = nvmf_ctrlr_abort_request(req); 3090 if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS) { 3091 return SPDK_POLLER_BUSY; 3092 } 3093 break; 3094 3095 case TCP_REQUEST_STATE_NEED_BUFFER: 3096 STAILQ_REMOVE(&tqpair->group->group.pending_buf_queue, 3097 &tcp_req_to_abort->req, spdk_nvmf_request, buf_link); 3098 3099 nvmf_tcp_req_set_abort_status(req, tcp_req_to_abort); 3100 nvmf_tcp_req_process(ttransport, tcp_req_to_abort); 3101 break; 3102 3103 case TCP_REQUEST_STATE_AWAITING_R2T_ACK: 3104 case TCP_REQUEST_STATE_ZCOPY_START_COMPLETED: 3105 nvmf_tcp_req_set_abort_status(req, tcp_req_to_abort); 3106 break; 3107 3108 case TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER: 3109 if (spdk_get_ticks() < req->timeout_tsc) { 3110 req->poller = SPDK_POLLER_REGISTER(_nvmf_tcp_qpair_abort_request, req, 0); 3111 return SPDK_POLLER_BUSY; 3112 } 3113 break; 3114 3115 default: 3116 break; 3117 } 3118 3119 spdk_nvmf_request_complete(req); 3120 return SPDK_POLLER_BUSY; 3121 } 3122 3123 static void 3124 nvmf_tcp_qpair_abort_request(struct spdk_nvmf_qpair *qpair, 3125 struct spdk_nvmf_request *req) 3126 { 3127 struct spdk_nvmf_tcp_qpair *tqpair; 3128 struct spdk_nvmf_tcp_transport *ttransport; 3129 struct spdk_nvmf_transport *transport; 3130 uint16_t cid; 3131 uint32_t i; 3132 struct spdk_nvmf_tcp_req *tcp_req_to_abort = NULL; 3133 3134 tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair); 3135 ttransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_tcp_transport, transport); 3136 transport = &ttransport->transport; 3137 3138 cid = req->cmd->nvme_cmd.cdw10_bits.abort.cid; 3139 3140 for (i = 0; i < tqpair->resource_count; i++) { 3141 if (tqpair->reqs[i].state != TCP_REQUEST_STATE_FREE && 3142 tqpair->reqs[i].req.cmd->nvme_cmd.cid == cid) { 3143 tcp_req_to_abort = &tqpair->reqs[i]; 3144 break; 3145 } 3146 } 3147 3148 spdk_trace_record(TRACE_TCP_QP_ABORT_REQ, 0, 0, (uintptr_t)req, tqpair); 3149 3150 if (tcp_req_to_abort == NULL) { 3151 spdk_nvmf_request_complete(req); 3152 return; 3153 } 3154 3155 req->req_to_abort = &tcp_req_to_abort->req; 3156 req->timeout_tsc = spdk_get_ticks() + 3157 transport->opts.abort_timeout_sec * spdk_get_ticks_hz(); 3158 req->poller = NULL; 3159 3160 _nvmf_tcp_qpair_abort_request(req); 3161 } 3162 3163 #define SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH 128 3164 #define SPDK_NVMF_TCP_DEFAULT_AQ_DEPTH 128 3165 #define SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR 128 3166 #define SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE 4096 3167 #define SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE 131072 3168 #define SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE 131072 3169 #define SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS 511 3170 #define SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE 32 3171 #define SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP false 3172 #define SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC 1 3173 3174 static void 3175 nvmf_tcp_opts_init(struct spdk_nvmf_transport_opts *opts) 3176 { 3177 opts->max_queue_depth = SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH; 3178 opts->max_qpairs_per_ctrlr = SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR; 3179 opts->in_capsule_data_size = SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE; 3180 opts->max_io_size = SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE; 3181 opts->io_unit_size = SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE; 3182 opts->max_aq_depth = SPDK_NVMF_TCP_DEFAULT_AQ_DEPTH; 3183 opts->num_shared_buffers = SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS; 3184 opts->buf_cache_size = SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE; 3185 opts->dif_insert_or_strip = SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP; 3186 opts->abort_timeout_sec = SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC; 3187 opts->transport_specific = NULL; 3188 } 3189 3190 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp = { 3191 .name = "TCP", 3192 .type = SPDK_NVME_TRANSPORT_TCP, 3193 .opts_init = nvmf_tcp_opts_init, 3194 .create = nvmf_tcp_create, 3195 .dump_opts = nvmf_tcp_dump_opts, 3196 .destroy = nvmf_tcp_destroy, 3197 3198 .listen = nvmf_tcp_listen, 3199 .stop_listen = nvmf_tcp_stop_listen, 3200 3201 .listener_discover = nvmf_tcp_discover, 3202 3203 .poll_group_create = nvmf_tcp_poll_group_create, 3204 .get_optimal_poll_group = nvmf_tcp_get_optimal_poll_group, 3205 .poll_group_destroy = nvmf_tcp_poll_group_destroy, 3206 .poll_group_add = nvmf_tcp_poll_group_add, 3207 .poll_group_remove = nvmf_tcp_poll_group_remove, 3208 .poll_group_poll = nvmf_tcp_poll_group_poll, 3209 3210 .req_free = nvmf_tcp_req_free, 3211 .req_complete = nvmf_tcp_req_complete, 3212 3213 .qpair_fini = nvmf_tcp_close_qpair, 3214 .qpair_get_local_trid = nvmf_tcp_qpair_get_local_trid, 3215 .qpair_get_peer_trid = nvmf_tcp_qpair_get_peer_trid, 3216 .qpair_get_listen_trid = nvmf_tcp_qpair_get_listen_trid, 3217 .qpair_abort_request = nvmf_tcp_qpair_abort_request, 3218 }; 3219 3220 SPDK_NVMF_TRANSPORT_REGISTER(tcp, &spdk_nvmf_transport_tcp); 3221 SPDK_LOG_REGISTER_COMPONENT(nvmf_tcp) 3222