1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. All rights reserved. 5 * Copyright (c) 2019, 2020 Mellanox Technologies LTD. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include "spdk/stdinc.h" 35 #include "spdk/crc32.h" 36 #include "spdk/endian.h" 37 #include "spdk/assert.h" 38 #include "spdk/thread.h" 39 #include "spdk/nvmf_transport.h" 40 #include "spdk/sock.h" 41 #include "spdk/string.h" 42 #include "spdk/trace.h" 43 #include "spdk/util.h" 44 45 #include "spdk_internal/assert.h" 46 #include "spdk_internal/log.h" 47 #include "spdk_internal/nvme_tcp.h" 48 49 #define NVMF_TCP_MAX_ACCEPT_SOCK_ONE_TIME 16 50 #define SPDK_NVMF_TCP_DEFAULT_MAX_SOCK_PRIORITY 6 51 #define SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR 4 52 53 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp; 54 55 /* spdk nvmf related structure */ 56 enum spdk_nvmf_tcp_req_state { 57 58 /* The request is not currently in use */ 59 TCP_REQUEST_STATE_FREE = 0, 60 61 /* Initial state when request first received */ 62 TCP_REQUEST_STATE_NEW, 63 64 /* The request is queued until a data buffer is available. */ 65 TCP_REQUEST_STATE_NEED_BUFFER, 66 67 /* The request is currently transferring data from the host to the controller. */ 68 TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER, 69 70 /* The request is waiting for the R2T send acknowledgement. */ 71 TCP_REQUEST_STATE_AWAITING_R2T_ACK, 72 73 /* The request is ready to execute at the block device */ 74 TCP_REQUEST_STATE_READY_TO_EXECUTE, 75 76 /* The request is currently executing at the block device */ 77 TCP_REQUEST_STATE_EXECUTING, 78 79 /* The request finished executing at the block device */ 80 TCP_REQUEST_STATE_EXECUTED, 81 82 /* The request is ready to send a completion */ 83 TCP_REQUEST_STATE_READY_TO_COMPLETE, 84 85 /* The request is currently transferring final pdus from the controller to the host. */ 86 TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST, 87 88 /* The request completed and can be marked free. */ 89 TCP_REQUEST_STATE_COMPLETED, 90 91 /* Terminator */ 92 TCP_REQUEST_NUM_STATES, 93 }; 94 95 static const char *spdk_nvmf_tcp_term_req_fes_str[] = { 96 "Invalid PDU Header Field", 97 "PDU Sequence Error", 98 "Header Digiest Error", 99 "Data Transfer Out of Range", 100 "R2T Limit Exceeded", 101 "Unsupported parameter", 102 }; 103 104 #define OBJECT_NVMF_TCP_IO 0x80 105 106 #define TRACE_GROUP_NVMF_TCP 0x5 107 #define TRACE_TCP_REQUEST_STATE_NEW SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0x0) 108 #define TRACE_TCP_REQUEST_STATE_NEED_BUFFER SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0x1) 109 #define TRACE_TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0x2) 110 #define TRACE_TCP_REQUEST_STATE_READY_TO_EXECUTE SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0x3) 111 #define TRACE_TCP_REQUEST_STATE_EXECUTING SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0x4) 112 #define TRACE_TCP_REQUEST_STATE_EXECUTED SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0x5) 113 #define TRACE_TCP_REQUEST_STATE_READY_TO_COMPLETE SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0x6) 114 #define TRACE_TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0x7) 115 #define TRACE_TCP_REQUEST_STATE_COMPLETED SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0x8) 116 #define TRACE_TCP_FLUSH_WRITEBUF_START SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0x9) 117 #define TRACE_TCP_FLUSH_WRITEBUF_DONE SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0xA) 118 #define TRACE_TCP_READ_FROM_SOCKET_DONE SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0xB) 119 #define TRACE_TCP_REQUEST_STATE_AWAIT_R2T_ACK SPDK_TPOINT_ID(TRACE_GROUP_NVMF_TCP, 0xC) 120 121 SPDK_TRACE_REGISTER_FN(nvmf_tcp_trace, "nvmf_tcp", TRACE_GROUP_NVMF_TCP) 122 { 123 spdk_trace_register_object(OBJECT_NVMF_TCP_IO, 'r'); 124 spdk_trace_register_description("TCP_REQ_NEW", 125 TRACE_TCP_REQUEST_STATE_NEW, 126 OWNER_NONE, OBJECT_NVMF_TCP_IO, 1, 1, ""); 127 spdk_trace_register_description("TCP_REQ_NEED_BUFFER", 128 TRACE_TCP_REQUEST_STATE_NEED_BUFFER, 129 OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 1, ""); 130 spdk_trace_register_description("TCP_REQ_TX_H_TO_C", 131 TRACE_TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER, 132 OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 1, ""); 133 spdk_trace_register_description("TCP_REQ_RDY_TO_EXECUTE", 134 TRACE_TCP_REQUEST_STATE_READY_TO_EXECUTE, 135 OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 1, ""); 136 spdk_trace_register_description("TCP_REQ_EXECUTING", 137 TRACE_TCP_REQUEST_STATE_EXECUTING, 138 OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 1, ""); 139 spdk_trace_register_description("TCP_REQ_EXECUTED", 140 TRACE_TCP_REQUEST_STATE_EXECUTED, 141 OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 1, ""); 142 spdk_trace_register_description("TCP_REQ_RDY_TO_COMPLETE", 143 TRACE_TCP_REQUEST_STATE_READY_TO_COMPLETE, 144 OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 1, ""); 145 spdk_trace_register_description("TCP_REQ_TRANSFER_C2H", 146 TRACE_TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST, 147 OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 1, ""); 148 spdk_trace_register_description("TCP_REQ_COMPLETED", 149 TRACE_TCP_REQUEST_STATE_COMPLETED, 150 OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 1, ""); 151 spdk_trace_register_description("TCP_WRITE_START", 152 TRACE_TCP_FLUSH_WRITEBUF_START, 153 OWNER_NONE, OBJECT_NONE, 0, 0, ""); 154 spdk_trace_register_description("TCP_WRITE_DONE", 155 TRACE_TCP_FLUSH_WRITEBUF_DONE, 156 OWNER_NONE, OBJECT_NONE, 0, 0, ""); 157 spdk_trace_register_description("TCP_READ_DONE", 158 TRACE_TCP_READ_FROM_SOCKET_DONE, 159 OWNER_NONE, OBJECT_NONE, 0, 0, ""); 160 spdk_trace_register_description("TCP_REQ_AWAIT_R2T_ACK", 161 TRACE_TCP_REQUEST_STATE_AWAIT_R2T_ACK, 162 OWNER_NONE, OBJECT_NVMF_TCP_IO, 0, 1, ""); 163 } 164 165 struct spdk_nvmf_tcp_req { 166 struct spdk_nvmf_request req; 167 struct spdk_nvme_cpl rsp; 168 struct spdk_nvme_cmd cmd; 169 170 /* A PDU that can be used for sending responses. This is 171 * not the incoming PDU! */ 172 struct nvme_tcp_pdu *pdu; 173 174 /* 175 * The PDU for a request may be used multiple times in serial over 176 * the request's lifetime. For example, first to send an R2T, then 177 * to send a completion. To catch mistakes where the PDU is used 178 * twice at the same time, add a debug flag here for init/fini. 179 */ 180 bool pdu_in_use; 181 182 /* In-capsule data buffer */ 183 uint8_t *buf; 184 185 bool has_incapsule_data; 186 187 /* transfer_tag */ 188 uint16_t ttag; 189 190 enum spdk_nvmf_tcp_req_state state; 191 192 /* 193 * h2c_offset is used when we receive the h2c_data PDU. 194 */ 195 uint32_t h2c_offset; 196 197 STAILQ_ENTRY(spdk_nvmf_tcp_req) link; 198 TAILQ_ENTRY(spdk_nvmf_tcp_req) state_link; 199 }; 200 201 struct spdk_nvmf_tcp_qpair { 202 struct spdk_nvmf_qpair qpair; 203 struct spdk_nvmf_tcp_poll_group *group; 204 struct spdk_nvmf_tcp_port *port; 205 struct spdk_sock *sock; 206 207 enum nvme_tcp_pdu_recv_state recv_state; 208 enum nvme_tcp_qpair_state state; 209 210 /* PDU being actively received */ 211 struct nvme_tcp_pdu pdu_in_progress; 212 uint32_t recv_buf_size; 213 214 /* This is a spare PDU used for sending special management 215 * operations. Primarily, this is used for the initial 216 * connection response and c2h termination request. */ 217 struct nvme_tcp_pdu mgmt_pdu; 218 219 TAILQ_HEAD(, nvme_tcp_pdu) send_queue; 220 221 /* Arrays of in-capsule buffers, requests, and pdus. 222 * Each array is 'resource_count' number of elements */ 223 void *bufs; 224 struct spdk_nvmf_tcp_req *reqs; 225 struct nvme_tcp_pdu *pdus; 226 uint32_t resource_count; 227 228 /* Queues to track the requests in all states */ 229 TAILQ_HEAD(, spdk_nvmf_tcp_req) state_queue[TCP_REQUEST_NUM_STATES]; 230 /* Number of requests in each state */ 231 uint32_t state_cntr[TCP_REQUEST_NUM_STATES]; 232 233 uint8_t cpda; 234 235 bool host_hdgst_enable; 236 bool host_ddgst_enable; 237 238 /* IP address */ 239 char initiator_addr[SPDK_NVMF_TRADDR_MAX_LEN]; 240 char target_addr[SPDK_NVMF_TRADDR_MAX_LEN]; 241 242 /* IP port */ 243 uint16_t initiator_port; 244 uint16_t target_port; 245 246 /* Timer used to destroy qpair after detecting transport error issue if initiator does 247 * not close the connection. 248 */ 249 struct spdk_poller *timeout_poller; 250 251 TAILQ_ENTRY(spdk_nvmf_tcp_qpair) link; 252 }; 253 254 struct spdk_nvmf_tcp_poll_group { 255 struct spdk_nvmf_transport_poll_group group; 256 struct spdk_sock_group *sock_group; 257 258 TAILQ_HEAD(, spdk_nvmf_tcp_qpair) qpairs; 259 TAILQ_HEAD(, spdk_nvmf_tcp_qpair) await_req; 260 }; 261 262 struct spdk_nvmf_tcp_port { 263 const struct spdk_nvme_transport_id *trid; 264 struct spdk_sock *listen_sock; 265 TAILQ_ENTRY(spdk_nvmf_tcp_port) link; 266 }; 267 268 struct spdk_nvmf_tcp_transport { 269 struct spdk_nvmf_transport transport; 270 271 pthread_mutex_t lock; 272 273 TAILQ_HEAD(, spdk_nvmf_tcp_port) ports; 274 }; 275 276 static bool spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport, 277 struct spdk_nvmf_tcp_req *tcp_req); 278 279 static void 280 spdk_nvmf_tcp_req_set_state(struct spdk_nvmf_tcp_req *tcp_req, 281 enum spdk_nvmf_tcp_req_state state) 282 { 283 struct spdk_nvmf_qpair *qpair; 284 struct spdk_nvmf_tcp_qpair *tqpair; 285 286 qpair = tcp_req->req.qpair; 287 tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair); 288 289 TAILQ_REMOVE(&tqpair->state_queue[tcp_req->state], tcp_req, state_link); 290 assert(tqpair->state_cntr[tcp_req->state] > 0); 291 tqpair->state_cntr[tcp_req->state]--; 292 293 TAILQ_INSERT_TAIL(&tqpair->state_queue[state], tcp_req, state_link); 294 tqpair->state_cntr[state]++; 295 296 tcp_req->state = state; 297 } 298 299 static inline struct nvme_tcp_pdu * 300 nvmf_tcp_req_pdu_init(struct spdk_nvmf_tcp_req *tcp_req) 301 { 302 assert(tcp_req->pdu_in_use == false); 303 tcp_req->pdu_in_use = true; 304 305 memset(tcp_req->pdu, 0, sizeof(*tcp_req->pdu)); 306 tcp_req->pdu->qpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct spdk_nvmf_tcp_qpair, qpair); 307 308 return tcp_req->pdu; 309 } 310 311 static inline void 312 nvmf_tcp_req_pdu_fini(struct spdk_nvmf_tcp_req *tcp_req) 313 { 314 tcp_req->pdu_in_use = false; 315 } 316 317 static struct spdk_nvmf_tcp_req * 318 spdk_nvmf_tcp_req_get(struct spdk_nvmf_tcp_qpair *tqpair) 319 { 320 struct spdk_nvmf_tcp_req *tcp_req; 321 322 tcp_req = TAILQ_FIRST(&tqpair->state_queue[TCP_REQUEST_STATE_FREE]); 323 if (!tcp_req) { 324 return NULL; 325 } 326 327 memset(&tcp_req->rsp, 0, sizeof(tcp_req->rsp)); 328 tcp_req->h2c_offset = 0; 329 tcp_req->has_incapsule_data = false; 330 tcp_req->req.dif.dif_insert_or_strip = false; 331 332 spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_NEW); 333 return tcp_req; 334 } 335 336 static void 337 nvmf_tcp_request_free(struct spdk_nvmf_tcp_req *tcp_req) 338 { 339 struct spdk_nvmf_tcp_transport *ttransport; 340 341 assert(tcp_req != NULL); 342 343 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "tcp_req=%p will be freed\n", tcp_req); 344 ttransport = SPDK_CONTAINEROF(tcp_req->req.qpair->transport, 345 struct spdk_nvmf_tcp_transport, transport); 346 spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_COMPLETED); 347 spdk_nvmf_tcp_req_process(ttransport, tcp_req); 348 } 349 350 static int 351 spdk_nvmf_tcp_req_free(struct spdk_nvmf_request *req) 352 { 353 struct spdk_nvmf_tcp_req *tcp_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_tcp_req, req); 354 355 nvmf_tcp_request_free(tcp_req); 356 357 return 0; 358 } 359 360 static void 361 spdk_nvmf_tcp_drain_state_queue(struct spdk_nvmf_tcp_qpair *tqpair, 362 enum spdk_nvmf_tcp_req_state state) 363 { 364 struct spdk_nvmf_tcp_req *tcp_req, *req_tmp; 365 366 TAILQ_FOREACH_SAFE(tcp_req, &tqpair->state_queue[state], state_link, req_tmp) { 367 nvmf_tcp_request_free(tcp_req); 368 } 369 } 370 371 static void 372 spdk_nvmf_tcp_cleanup_all_states(struct spdk_nvmf_tcp_qpair *tqpair) 373 { 374 struct spdk_nvmf_tcp_req *tcp_req, *req_tmp; 375 376 assert(TAILQ_EMPTY(&tqpair->send_queue)); 377 378 spdk_nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST); 379 spdk_nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_NEW); 380 381 /* Wipe the requests waiting for buffer from the global list */ 382 TAILQ_FOREACH_SAFE(tcp_req, &tqpair->state_queue[TCP_REQUEST_STATE_NEED_BUFFER], state_link, 383 req_tmp) { 384 STAILQ_REMOVE(&tqpair->group->group.pending_buf_queue, &tcp_req->req, 385 spdk_nvmf_request, buf_link); 386 } 387 388 spdk_nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_NEED_BUFFER); 389 spdk_nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_EXECUTING); 390 spdk_nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER); 391 spdk_nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_AWAITING_R2T_ACK); 392 } 393 394 static void 395 nvmf_tcp_dump_qpair_req_contents(struct spdk_nvmf_tcp_qpair *tqpair) 396 { 397 int i; 398 struct spdk_nvmf_tcp_req *tcp_req; 399 400 SPDK_ERRLOG("Dumping contents of queue pair (QID %d)\n", tqpair->qpair.qid); 401 for (i = 1; i < TCP_REQUEST_NUM_STATES; i++) { 402 SPDK_ERRLOG("\tNum of requests in state[%d] = %u\n", i, tqpair->state_cntr[i]); 403 TAILQ_FOREACH(tcp_req, &tqpair->state_queue[i], state_link) { 404 SPDK_ERRLOG("\t\tRequest Data From Pool: %d\n", tcp_req->req.data_from_pool); 405 SPDK_ERRLOG("\t\tRequest opcode: %d\n", tcp_req->req.cmd->nvmf_cmd.opcode); 406 } 407 } 408 } 409 410 static void 411 spdk_nvmf_tcp_qpair_destroy(struct spdk_nvmf_tcp_qpair *tqpair) 412 { 413 int err = 0; 414 415 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "enter\n"); 416 417 err = spdk_sock_close(&tqpair->sock); 418 assert(err == 0); 419 spdk_nvmf_tcp_cleanup_all_states(tqpair); 420 421 if (tqpair->state_cntr[TCP_REQUEST_STATE_FREE] != tqpair->resource_count) { 422 SPDK_ERRLOG("tqpair(%p) free tcp request num is %u but should be %u\n", tqpair, 423 tqpair->state_cntr[TCP_REQUEST_STATE_FREE], 424 tqpair->resource_count); 425 err++; 426 } 427 428 if (err > 0) { 429 nvmf_tcp_dump_qpair_req_contents(tqpair); 430 } 431 432 spdk_dma_free(tqpair->pdus); 433 free(tqpair->reqs); 434 spdk_free(tqpair->bufs); 435 free(tqpair); 436 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "Leave\n"); 437 } 438 439 static int 440 spdk_nvmf_tcp_destroy(struct spdk_nvmf_transport *transport) 441 { 442 struct spdk_nvmf_tcp_transport *ttransport; 443 444 assert(transport != NULL); 445 ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport); 446 447 pthread_mutex_destroy(&ttransport->lock); 448 free(ttransport); 449 return 0; 450 } 451 452 static struct spdk_nvmf_transport * 453 spdk_nvmf_tcp_create(struct spdk_nvmf_transport_opts *opts) 454 { 455 struct spdk_nvmf_tcp_transport *ttransport; 456 uint32_t sge_count; 457 uint32_t min_shared_buffers; 458 459 ttransport = calloc(1, sizeof(*ttransport)); 460 if (!ttransport) { 461 return NULL; 462 } 463 464 TAILQ_INIT(&ttransport->ports); 465 466 ttransport->transport.ops = &spdk_nvmf_transport_tcp; 467 468 SPDK_NOTICELOG("*** TCP Transport Init ***\n"); 469 470 SPDK_INFOLOG(SPDK_LOG_NVMF_TCP, "*** TCP Transport Init ***\n" 471 " Transport opts: max_ioq_depth=%d, max_io_size=%d,\n" 472 " max_qpairs_per_ctrlr=%d, io_unit_size=%d,\n" 473 " in_capsule_data_size=%d, max_aq_depth=%d\n" 474 " num_shared_buffers=%d, c2h_success=%d,\n" 475 " dif_insert_or_strip=%d, sock_priority=%d\n", 476 opts->max_queue_depth, 477 opts->max_io_size, 478 opts->max_qpairs_per_ctrlr, 479 opts->io_unit_size, 480 opts->in_capsule_data_size, 481 opts->max_aq_depth, 482 opts->num_shared_buffers, 483 opts->c2h_success, 484 opts->dif_insert_or_strip, 485 opts->sock_priority); 486 487 if (opts->sock_priority > SPDK_NVMF_TCP_DEFAULT_MAX_SOCK_PRIORITY) { 488 SPDK_ERRLOG("Unsupported socket_priority=%d, the current range is: 0 to %d\n" 489 "you can use man 7 socket to view the range of priority under SO_PRIORITY item\n", 490 opts->sock_priority, SPDK_NVMF_TCP_DEFAULT_MAX_SOCK_PRIORITY); 491 free(ttransport); 492 return NULL; 493 } 494 495 /* I/O unit size cannot be larger than max I/O size */ 496 if (opts->io_unit_size > opts->max_io_size) { 497 opts->io_unit_size = opts->max_io_size; 498 } 499 500 sge_count = opts->max_io_size / opts->io_unit_size; 501 if (sge_count > SPDK_NVMF_MAX_SGL_ENTRIES) { 502 SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size); 503 free(ttransport); 504 return NULL; 505 } 506 507 min_shared_buffers = spdk_thread_get_count() * opts->buf_cache_size; 508 if (min_shared_buffers > opts->num_shared_buffers) { 509 SPDK_ERRLOG("There are not enough buffers to satisfy" 510 "per-poll group caches for each thread. (%" PRIu32 ")" 511 "supplied. (%" PRIu32 ") required\n", opts->num_shared_buffers, min_shared_buffers); 512 SPDK_ERRLOG("Please specify a larger number of shared buffers\n"); 513 spdk_nvmf_tcp_destroy(&ttransport->transport); 514 return NULL; 515 } 516 517 spdk_nvmf_ctrlr_data_init(opts, &ttransport->transport.cdata); 518 519 pthread_mutex_init(&ttransport->lock, NULL); 520 521 return &ttransport->transport; 522 } 523 524 static int 525 _spdk_nvmf_tcp_trsvcid_to_int(const char *trsvcid) 526 { 527 unsigned long long ull; 528 char *end = NULL; 529 530 ull = strtoull(trsvcid, &end, 10); 531 if (end == NULL || end == trsvcid || *end != '\0') { 532 return -1; 533 } 534 535 /* Valid TCP/IP port numbers are in [0, 65535] */ 536 if (ull > 65535) { 537 return -1; 538 } 539 540 return (int)ull; 541 } 542 543 /** 544 * Canonicalize a listen address trid. 545 */ 546 static int 547 _spdk_nvmf_tcp_canon_listen_trid(struct spdk_nvme_transport_id *canon_trid, 548 const struct spdk_nvme_transport_id *trid) 549 { 550 int trsvcid_int; 551 552 trsvcid_int = _spdk_nvmf_tcp_trsvcid_to_int(trid->trsvcid); 553 if (trsvcid_int < 0) { 554 return -EINVAL; 555 } 556 557 memset(canon_trid, 0, sizeof(*canon_trid)); 558 spdk_nvme_trid_populate_transport(canon_trid, SPDK_NVME_TRANSPORT_TCP); 559 canon_trid->adrfam = trid->adrfam; 560 snprintf(canon_trid->traddr, sizeof(canon_trid->traddr), "%s", trid->traddr); 561 snprintf(canon_trid->trsvcid, sizeof(canon_trid->trsvcid), "%d", trsvcid_int); 562 563 return 0; 564 } 565 566 /** 567 * Find an existing listening port. 568 * 569 * Caller must hold ttransport->lock. 570 */ 571 static struct spdk_nvmf_tcp_port * 572 _spdk_nvmf_tcp_find_port(struct spdk_nvmf_tcp_transport *ttransport, 573 const struct spdk_nvme_transport_id *trid) 574 { 575 struct spdk_nvme_transport_id canon_trid; 576 struct spdk_nvmf_tcp_port *port; 577 578 if (_spdk_nvmf_tcp_canon_listen_trid(&canon_trid, trid) != 0) { 579 return NULL; 580 } 581 582 TAILQ_FOREACH(port, &ttransport->ports, link) { 583 if (spdk_nvme_transport_id_compare(&canon_trid, port->trid) == 0) { 584 return port; 585 } 586 } 587 588 return NULL; 589 } 590 591 static int 592 spdk_nvmf_tcp_listen(struct spdk_nvmf_transport *transport, 593 const struct spdk_nvme_transport_id *trid) 594 { 595 struct spdk_nvmf_tcp_transport *ttransport; 596 struct spdk_nvmf_tcp_port *port; 597 int trsvcid_int; 598 uint8_t adrfam; 599 struct spdk_sock_opts opts; 600 601 ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport); 602 603 trsvcid_int = _spdk_nvmf_tcp_trsvcid_to_int(trid->trsvcid); 604 if (trsvcid_int < 0) { 605 SPDK_ERRLOG("Invalid trsvcid '%s'\n", trid->trsvcid); 606 return -EINVAL; 607 } 608 609 pthread_mutex_lock(&ttransport->lock); 610 port = calloc(1, sizeof(*port)); 611 if (!port) { 612 SPDK_ERRLOG("Port allocation failed\n"); 613 pthread_mutex_unlock(&ttransport->lock); 614 return -ENOMEM; 615 } 616 617 port->trid = trid; 618 opts.opts_size = sizeof(opts); 619 spdk_sock_get_default_opts(&opts); 620 opts.priority = transport->opts.sock_priority; 621 port->listen_sock = spdk_sock_listen_ext(trid->traddr, trsvcid_int, 622 NULL, &opts); 623 if (port->listen_sock == NULL) { 624 SPDK_ERRLOG("spdk_sock_listen(%s, %d) failed: %s (%d)\n", 625 trid->traddr, trsvcid_int, 626 spdk_strerror(errno), errno); 627 free(port); 628 pthread_mutex_unlock(&ttransport->lock); 629 return -errno; 630 } 631 632 if (spdk_sock_is_ipv4(port->listen_sock)) { 633 adrfam = SPDK_NVMF_ADRFAM_IPV4; 634 } else if (spdk_sock_is_ipv6(port->listen_sock)) { 635 adrfam = SPDK_NVMF_ADRFAM_IPV6; 636 } else { 637 SPDK_ERRLOG("Unhandled socket type\n"); 638 adrfam = 0; 639 } 640 641 if (adrfam != trid->adrfam) { 642 SPDK_ERRLOG("Socket address family mismatch\n"); 643 spdk_sock_close(&port->listen_sock); 644 free(port); 645 pthread_mutex_unlock(&ttransport->lock); 646 return -EINVAL; 647 } 648 649 SPDK_NOTICELOG("*** NVMe/TCP Target Listening on %s port %s ***\n", 650 trid->traddr, trid->trsvcid); 651 652 TAILQ_INSERT_TAIL(&ttransport->ports, port, link); 653 pthread_mutex_unlock(&ttransport->lock); 654 return 0; 655 } 656 657 static void 658 spdk_nvmf_tcp_stop_listen(struct spdk_nvmf_transport *transport, 659 const struct spdk_nvme_transport_id *trid) 660 { 661 struct spdk_nvmf_tcp_transport *ttransport; 662 struct spdk_nvmf_tcp_port *port; 663 664 ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport); 665 666 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "Removing listen address %s port %s\n", 667 trid->traddr, trid->trsvcid); 668 669 pthread_mutex_lock(&ttransport->lock); 670 port = _spdk_nvmf_tcp_find_port(ttransport, trid); 671 if (port) { 672 TAILQ_REMOVE(&ttransport->ports, port, link); 673 spdk_sock_close(&port->listen_sock); 674 free(port); 675 } 676 677 pthread_mutex_unlock(&ttransport->lock); 678 } 679 680 static void spdk_nvmf_tcp_qpair_set_recv_state(struct spdk_nvmf_tcp_qpair *tqpair, 681 enum nvme_tcp_pdu_recv_state state); 682 683 static void 684 spdk_nvmf_tcp_qpair_disconnect(struct spdk_nvmf_tcp_qpair *tqpair) 685 { 686 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "Disconnecting qpair %p\n", tqpair); 687 688 if (tqpair->state <= NVME_TCP_QPAIR_STATE_RUNNING) { 689 tqpair->state = NVME_TCP_QPAIR_STATE_EXITING; 690 spdk_nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR); 691 spdk_poller_unregister(&tqpair->timeout_poller); 692 693 /* This will end up calling spdk_nvmf_tcp_close_qpair */ 694 spdk_nvmf_qpair_disconnect(&tqpair->qpair, NULL, NULL); 695 } 696 } 697 698 static void 699 _pdu_write_done(void *_pdu, int err) 700 { 701 struct nvme_tcp_pdu *pdu = _pdu; 702 struct spdk_nvmf_tcp_qpair *tqpair = pdu->qpair; 703 704 TAILQ_REMOVE(&tqpair->send_queue, pdu, tailq); 705 706 if (err != 0) { 707 spdk_nvmf_tcp_qpair_disconnect(tqpair); 708 return; 709 } 710 711 assert(pdu->cb_fn != NULL); 712 pdu->cb_fn(pdu->cb_arg); 713 } 714 715 static void 716 spdk_nvmf_tcp_qpair_write_pdu(struct spdk_nvmf_tcp_qpair *tqpair, 717 struct nvme_tcp_pdu *pdu, 718 nvme_tcp_qpair_xfer_complete_cb cb_fn, 719 void *cb_arg) 720 { 721 int hlen; 722 uint32_t crc32c; 723 uint32_t mapped_length = 0; 724 ssize_t rc; 725 726 assert(&tqpair->pdu_in_progress != pdu); 727 728 hlen = pdu->hdr.common.hlen; 729 730 /* Header Digest */ 731 if (g_nvme_tcp_hdgst[pdu->hdr.common.pdu_type] && tqpair->host_hdgst_enable) { 732 crc32c = nvme_tcp_pdu_calc_header_digest(pdu); 733 MAKE_DIGEST_WORD((uint8_t *)pdu->hdr.raw + hlen, crc32c); 734 } 735 736 /* Data Digest */ 737 if (pdu->data_len > 0 && g_nvme_tcp_ddgst[pdu->hdr.common.pdu_type] && tqpair->host_ddgst_enable) { 738 crc32c = nvme_tcp_pdu_calc_data_digest(pdu); 739 MAKE_DIGEST_WORD(pdu->data_digest, crc32c); 740 } 741 742 pdu->cb_fn = cb_fn; 743 pdu->cb_arg = cb_arg; 744 745 pdu->sock_req.iovcnt = nvme_tcp_build_iovs(pdu->iov, SPDK_COUNTOF(pdu->iov), pdu, 746 tqpair->host_hdgst_enable, tqpair->host_ddgst_enable, 747 &mapped_length); 748 pdu->sock_req.cb_fn = _pdu_write_done; 749 pdu->sock_req.cb_arg = pdu; 750 TAILQ_INSERT_TAIL(&tqpair->send_queue, pdu, tailq); 751 if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP || 752 pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ) { 753 rc = spdk_sock_writev(tqpair->sock, pdu->iov, pdu->sock_req.iovcnt); 754 if (rc == mapped_length) { 755 _pdu_write_done(pdu, 0); 756 } else { 757 SPDK_ERRLOG("IC_RESP or TERM_REQ could not write to socket.\n"); 758 _pdu_write_done(pdu, -1); 759 } 760 } else { 761 spdk_sock_writev_async(tqpair->sock, &pdu->sock_req); 762 } 763 } 764 765 static int 766 spdk_nvmf_tcp_qpair_init_mem_resource(struct spdk_nvmf_tcp_qpair *tqpair) 767 { 768 uint32_t i; 769 struct spdk_nvmf_transport_opts *opts; 770 uint32_t in_capsule_data_size; 771 772 opts = &tqpair->qpair.transport->opts; 773 774 in_capsule_data_size = opts->in_capsule_data_size; 775 if (opts->dif_insert_or_strip) { 776 in_capsule_data_size = SPDK_BDEV_BUF_SIZE_WITH_MD(in_capsule_data_size); 777 } 778 779 tqpair->resource_count = opts->max_queue_depth; 780 781 tqpair->mgmt_pdu.qpair = tqpair; 782 783 tqpair->reqs = calloc(tqpair->resource_count, sizeof(*tqpair->reqs)); 784 if (!tqpair->reqs) { 785 SPDK_ERRLOG("Unable to allocate reqs on tqpair=%p\n", tqpair); 786 return -1; 787 } 788 789 if (in_capsule_data_size) { 790 tqpair->bufs = spdk_zmalloc(tqpair->resource_count * in_capsule_data_size, 0x1000, 791 NULL, SPDK_ENV_LCORE_ID_ANY, 792 SPDK_MALLOC_DMA); 793 if (!tqpair->bufs) { 794 SPDK_ERRLOG("Unable to allocate bufs on tqpair=%p.\n", tqpair); 795 return -1; 796 } 797 } 798 799 tqpair->pdus = spdk_dma_malloc(tqpair->resource_count * sizeof(*tqpair->pdus), 0x1000, NULL); 800 if (!tqpair->pdus) { 801 SPDK_ERRLOG("Unable to allocate pdu pool on tqpair =%p.\n", tqpair); 802 return -1; 803 } 804 805 for (i = 0; i < tqpair->resource_count; i++) { 806 struct spdk_nvmf_tcp_req *tcp_req = &tqpair->reqs[i]; 807 808 tcp_req->ttag = i + 1; 809 tcp_req->req.qpair = &tqpair->qpair; 810 811 tcp_req->pdu = &tqpair->pdus[i]; 812 tcp_req->pdu->qpair = tqpair; 813 814 /* Set up memory to receive commands */ 815 if (tqpair->bufs) { 816 tcp_req->buf = (void *)((uintptr_t)tqpair->bufs + (i * in_capsule_data_size)); 817 } 818 819 /* Set the cmdn and rsp */ 820 tcp_req->req.rsp = (union nvmf_c2h_msg *)&tcp_req->rsp; 821 tcp_req->req.cmd = (union nvmf_h2c_msg *)&tcp_req->cmd; 822 823 /* Initialize request state to FREE */ 824 tcp_req->state = TCP_REQUEST_STATE_FREE; 825 TAILQ_INSERT_TAIL(&tqpair->state_queue[tcp_req->state], tcp_req, state_link); 826 tqpair->state_cntr[TCP_REQUEST_STATE_FREE]++; 827 } 828 829 tqpair->recv_buf_size = (in_capsule_data_size + sizeof(struct spdk_nvme_tcp_cmd) + 2 * 830 SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR; 831 832 return 0; 833 } 834 835 static int 836 spdk_nvmf_tcp_qpair_init(struct spdk_nvmf_qpair *qpair) 837 { 838 struct spdk_nvmf_tcp_qpair *tqpair; 839 int i; 840 841 tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair); 842 843 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "New TCP Connection: %p\n", qpair); 844 845 TAILQ_INIT(&tqpair->send_queue); 846 847 /* Initialise request state queues of the qpair */ 848 for (i = TCP_REQUEST_STATE_FREE; i < TCP_REQUEST_NUM_STATES; i++) { 849 TAILQ_INIT(&tqpair->state_queue[i]); 850 } 851 852 tqpair->host_hdgst_enable = true; 853 tqpair->host_ddgst_enable = true; 854 855 return 0; 856 } 857 858 static int 859 spdk_nvmf_tcp_qpair_sock_init(struct spdk_nvmf_tcp_qpair *tqpair) 860 { 861 int rc; 862 863 /* set low water mark */ 864 rc = spdk_sock_set_recvlowat(tqpair->sock, sizeof(struct spdk_nvme_tcp_common_pdu_hdr)); 865 if (rc != 0) { 866 SPDK_ERRLOG("spdk_sock_set_recvlowat() failed\n"); 867 return rc; 868 } 869 870 return 0; 871 } 872 873 static void 874 _spdk_nvmf_tcp_handle_connect(struct spdk_nvmf_transport *transport, 875 struct spdk_nvmf_tcp_port *port, 876 struct spdk_sock *sock, 877 new_qpair_fn cb_fn, void *cb_arg) 878 { 879 struct spdk_nvmf_tcp_qpair *tqpair; 880 int rc; 881 882 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "New connection accepted on %s port %s\n", 883 port->trid->traddr, port->trid->trsvcid); 884 885 tqpair = calloc(1, sizeof(struct spdk_nvmf_tcp_qpair)); 886 if (tqpair == NULL) { 887 SPDK_ERRLOG("Could not allocate new connection.\n"); 888 spdk_sock_close(&sock); 889 return; 890 } 891 892 tqpair->sock = sock; 893 tqpair->state_cntr[TCP_REQUEST_STATE_FREE] = 0; 894 tqpair->port = port; 895 tqpair->qpair.transport = transport; 896 897 rc = spdk_sock_getaddr(tqpair->sock, tqpair->target_addr, 898 sizeof(tqpair->target_addr), &tqpair->target_port, 899 tqpair->initiator_addr, sizeof(tqpair->initiator_addr), 900 &tqpair->initiator_port); 901 if (rc < 0) { 902 SPDK_ERRLOG("spdk_sock_getaddr() failed of tqpair=%p\n", tqpair); 903 spdk_nvmf_tcp_qpair_destroy(tqpair); 904 return; 905 } 906 907 cb_fn(&tqpair->qpair, cb_arg); 908 } 909 910 static void 911 spdk_nvmf_tcp_port_accept(struct spdk_nvmf_transport *transport, struct spdk_nvmf_tcp_port *port, 912 new_qpair_fn cb_fn, void *cb_arg) 913 { 914 struct spdk_sock *sock; 915 int i; 916 917 for (i = 0; i < NVMF_TCP_MAX_ACCEPT_SOCK_ONE_TIME; i++) { 918 sock = spdk_sock_accept(port->listen_sock); 919 if (sock == NULL) { 920 break; 921 } 922 _spdk_nvmf_tcp_handle_connect(transport, port, sock, cb_fn, cb_arg); 923 } 924 } 925 926 static void 927 spdk_nvmf_tcp_accept(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn, void *cb_arg) 928 { 929 struct spdk_nvmf_tcp_transport *ttransport; 930 struct spdk_nvmf_tcp_port *port; 931 932 ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport); 933 934 TAILQ_FOREACH(port, &ttransport->ports, link) { 935 spdk_nvmf_tcp_port_accept(transport, port, cb_fn, cb_arg); 936 } 937 } 938 939 static void 940 spdk_nvmf_tcp_discover(struct spdk_nvmf_transport *transport, 941 struct spdk_nvme_transport_id *trid, 942 struct spdk_nvmf_discovery_log_page_entry *entry) 943 { 944 entry->trtype = SPDK_NVMF_TRTYPE_TCP; 945 entry->adrfam = trid->adrfam; 946 entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_REQUIRED; 947 948 spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' '); 949 spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' '); 950 951 entry->tsas.tcp.sectype = SPDK_NVME_TCP_SECURITY_NONE; 952 } 953 954 static struct spdk_nvmf_transport_poll_group * 955 spdk_nvmf_tcp_poll_group_create(struct spdk_nvmf_transport *transport) 956 { 957 struct spdk_nvmf_tcp_poll_group *tgroup; 958 959 tgroup = calloc(1, sizeof(*tgroup)); 960 if (!tgroup) { 961 return NULL; 962 } 963 964 tgroup->sock_group = spdk_sock_group_create(&tgroup->group); 965 if (!tgroup->sock_group) { 966 goto cleanup; 967 } 968 969 TAILQ_INIT(&tgroup->qpairs); 970 TAILQ_INIT(&tgroup->await_req); 971 972 return &tgroup->group; 973 974 cleanup: 975 free(tgroup); 976 return NULL; 977 } 978 979 static struct spdk_nvmf_transport_poll_group * 980 spdk_nvmf_tcp_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair) 981 { 982 struct spdk_nvmf_tcp_qpair *tqpair; 983 struct spdk_sock_group *group = NULL; 984 int rc; 985 986 tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair); 987 rc = spdk_sock_get_optimal_sock_group(tqpair->sock, &group); 988 if (!rc && group != NULL) { 989 return spdk_sock_group_get_ctx(group); 990 } 991 992 return NULL; 993 } 994 995 static void 996 spdk_nvmf_tcp_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group) 997 { 998 struct spdk_nvmf_tcp_poll_group *tgroup; 999 1000 tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group); 1001 spdk_sock_group_close(&tgroup->sock_group); 1002 1003 free(tgroup); 1004 } 1005 1006 static void 1007 spdk_nvmf_tcp_qpair_set_recv_state(struct spdk_nvmf_tcp_qpair *tqpair, 1008 enum nvme_tcp_pdu_recv_state state) 1009 { 1010 if (tqpair->recv_state == state) { 1011 SPDK_ERRLOG("The recv state of tqpair=%p is same with the state(%d) to be set\n", 1012 tqpair, state); 1013 return; 1014 } 1015 1016 if (tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_REQ) { 1017 /* When leaving the await req state, move the qpair to the main list */ 1018 TAILQ_REMOVE(&tqpair->group->await_req, tqpair, link); 1019 TAILQ_INSERT_TAIL(&tqpair->group->qpairs, tqpair, link); 1020 } 1021 1022 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "tqpair(%p) recv state=%d\n", tqpair, state); 1023 tqpair->recv_state = state; 1024 1025 switch (state) { 1026 case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH: 1027 case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH: 1028 case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD: 1029 break; 1030 case NVME_TCP_PDU_RECV_STATE_AWAIT_REQ: 1031 TAILQ_REMOVE(&tqpair->group->qpairs, tqpair, link); 1032 TAILQ_INSERT_TAIL(&tqpair->group->await_req, tqpair, link); 1033 break; 1034 case NVME_TCP_PDU_RECV_STATE_ERROR: 1035 case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY: 1036 memset(&tqpair->pdu_in_progress, 0, sizeof(tqpair->pdu_in_progress)); 1037 break; 1038 default: 1039 SPDK_ERRLOG("The state(%d) is invalid\n", state); 1040 abort(); 1041 break; 1042 } 1043 } 1044 1045 static int 1046 spdk_nvmf_tcp_qpair_handle_timeout(void *ctx) 1047 { 1048 struct spdk_nvmf_tcp_qpair *tqpair = ctx; 1049 1050 assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_ERROR); 1051 1052 SPDK_ERRLOG("No pdu coming for tqpair=%p within %d seconds\n", tqpair, 1053 SPDK_NVME_TCP_QPAIR_EXIT_TIMEOUT); 1054 1055 spdk_nvmf_tcp_qpair_disconnect(tqpair); 1056 return 0; 1057 } 1058 1059 static void 1060 spdk_nvmf_tcp_send_c2h_term_req_complete(void *cb_arg) 1061 { 1062 struct spdk_nvmf_tcp_qpair *tqpair = (struct spdk_nvmf_tcp_qpair *)cb_arg; 1063 1064 if (!tqpair->timeout_poller) { 1065 tqpair->timeout_poller = SPDK_POLLER_REGISTER(spdk_nvmf_tcp_qpair_handle_timeout, tqpair, 1066 SPDK_NVME_TCP_QPAIR_EXIT_TIMEOUT * 1000000); 1067 } 1068 } 1069 1070 static void 1071 spdk_nvmf_tcp_send_c2h_term_req(struct spdk_nvmf_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu, 1072 enum spdk_nvme_tcp_term_req_fes fes, uint32_t error_offset) 1073 { 1074 struct nvme_tcp_pdu *rsp_pdu; 1075 struct spdk_nvme_tcp_term_req_hdr *c2h_term_req; 1076 uint32_t c2h_term_req_hdr_len = sizeof(*c2h_term_req); 1077 uint32_t copy_len; 1078 1079 rsp_pdu = &tqpair->mgmt_pdu; 1080 1081 c2h_term_req = &rsp_pdu->hdr.term_req; 1082 c2h_term_req->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ; 1083 c2h_term_req->common.hlen = c2h_term_req_hdr_len; 1084 1085 if ((fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD) || 1086 (fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER)) { 1087 DSET32(&c2h_term_req->fei, error_offset); 1088 } 1089 1090 copy_len = spdk_min(pdu->hdr.common.hlen, SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE); 1091 1092 /* Copy the error info into the buffer */ 1093 memcpy((uint8_t *)rsp_pdu->hdr.raw + c2h_term_req_hdr_len, pdu->hdr.raw, copy_len); 1094 nvme_tcp_pdu_set_data(rsp_pdu, (uint8_t *)rsp_pdu->hdr.raw + c2h_term_req_hdr_len, copy_len); 1095 1096 /* Contain the header of the wrong received pdu */ 1097 c2h_term_req->common.plen = c2h_term_req->common.hlen + copy_len; 1098 spdk_nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR); 1099 spdk_nvmf_tcp_qpair_write_pdu(tqpair, rsp_pdu, spdk_nvmf_tcp_send_c2h_term_req_complete, tqpair); 1100 } 1101 1102 static void 1103 spdk_nvmf_tcp_capsule_cmd_hdr_handle(struct spdk_nvmf_tcp_transport *ttransport, 1104 struct spdk_nvmf_tcp_qpair *tqpair, 1105 struct nvme_tcp_pdu *pdu) 1106 { 1107 struct spdk_nvmf_tcp_req *tcp_req; 1108 1109 assert(pdu->psh_valid_bytes == pdu->psh_len); 1110 assert(pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD); 1111 1112 tcp_req = spdk_nvmf_tcp_req_get(tqpair); 1113 if (!tcp_req) { 1114 /* Directly return and make the allocation retry again */ 1115 if (tqpair->state_cntr[TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST] > 0) { 1116 return; 1117 } 1118 1119 /* The host sent more commands than the maximum queue depth. */ 1120 SPDK_ERRLOG("Cannot allocate tcp_req on tqpair=%p\n", tqpair); 1121 spdk_nvmf_tcp_qpair_disconnect(tqpair); 1122 return; 1123 } 1124 1125 pdu->req = tcp_req; 1126 assert(tcp_req->state == TCP_REQUEST_STATE_NEW); 1127 spdk_nvmf_tcp_req_process(ttransport, tcp_req); 1128 } 1129 1130 static void 1131 spdk_nvmf_tcp_capsule_cmd_payload_handle(struct spdk_nvmf_tcp_transport *ttransport, 1132 struct spdk_nvmf_tcp_qpair *tqpair, 1133 struct nvme_tcp_pdu *pdu) 1134 { 1135 struct spdk_nvmf_tcp_req *tcp_req; 1136 struct spdk_nvme_tcp_cmd *capsule_cmd; 1137 uint32_t error_offset = 0; 1138 enum spdk_nvme_tcp_term_req_fes fes; 1139 1140 capsule_cmd = &pdu->hdr.capsule_cmd; 1141 tcp_req = pdu->req; 1142 assert(tcp_req != NULL); 1143 if (capsule_cmd->common.pdo > SPDK_NVME_TCP_PDU_PDO_MAX_OFFSET) { 1144 SPDK_ERRLOG("Expected ICReq capsule_cmd pdu offset <= %d, got %c\n", 1145 SPDK_NVME_TCP_PDU_PDO_MAX_OFFSET, capsule_cmd->common.pdo); 1146 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 1147 error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdo); 1148 goto err; 1149 } 1150 1151 spdk_nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY); 1152 spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE); 1153 spdk_nvmf_tcp_req_process(ttransport, tcp_req); 1154 1155 return; 1156 err: 1157 spdk_nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset); 1158 } 1159 1160 static int 1161 nvmf_tcp_find_req_in_state(struct spdk_nvmf_tcp_qpair *tqpair, 1162 enum spdk_nvmf_tcp_req_state state, 1163 uint16_t cid, uint16_t tag, 1164 struct spdk_nvmf_tcp_req **req) 1165 { 1166 struct spdk_nvmf_tcp_req *tcp_req = NULL; 1167 1168 TAILQ_FOREACH(tcp_req, &tqpair->state_queue[state], state_link) { 1169 if (tcp_req->req.cmd->nvme_cmd.cid != cid) { 1170 continue; 1171 } 1172 1173 if (tcp_req->ttag == tag) { 1174 *req = tcp_req; 1175 return 0; 1176 } 1177 1178 *req = NULL; 1179 return -1; 1180 } 1181 1182 /* Didn't find it, but not an error */ 1183 *req = NULL; 1184 return 0; 1185 } 1186 1187 static void 1188 spdk_nvmf_tcp_h2c_data_hdr_handle(struct spdk_nvmf_tcp_transport *ttransport, 1189 struct spdk_nvmf_tcp_qpair *tqpair, 1190 struct nvme_tcp_pdu *pdu) 1191 { 1192 struct spdk_nvmf_tcp_req *tcp_req; 1193 uint32_t error_offset = 0; 1194 enum spdk_nvme_tcp_term_req_fes fes = 0; 1195 struct spdk_nvme_tcp_h2c_data_hdr *h2c_data; 1196 int rc; 1197 1198 h2c_data = &pdu->hdr.h2c_data; 1199 1200 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "tqpair=%p, r2t_info: datao=%u, datal=%u, cccid=%u, ttag=%u\n", 1201 tqpair, h2c_data->datao, h2c_data->datal, h2c_data->cccid, h2c_data->ttag); 1202 1203 rc = nvmf_tcp_find_req_in_state(tqpair, TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER, 1204 h2c_data->cccid, h2c_data->ttag, &tcp_req); 1205 if (rc == 0 && tcp_req == NULL) { 1206 rc = nvmf_tcp_find_req_in_state(tqpair, TCP_REQUEST_STATE_AWAITING_R2T_ACK, h2c_data->cccid, 1207 h2c_data->ttag, &tcp_req); 1208 } 1209 1210 if (!tcp_req) { 1211 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "tcp_req is not found for tqpair=%p\n", tqpair); 1212 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER; 1213 if (rc == 0) { 1214 error_offset = offsetof(struct spdk_nvme_tcp_h2c_data_hdr, cccid); 1215 } else { 1216 error_offset = offsetof(struct spdk_nvme_tcp_h2c_data_hdr, ttag); 1217 } 1218 goto err; 1219 } 1220 1221 if (tcp_req->h2c_offset != h2c_data->datao) { 1222 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, 1223 "tcp_req(%p), tqpair=%p, expected data offset %u, but data offset is %u\n", 1224 tcp_req, tqpair, tcp_req->h2c_offset, h2c_data->datao); 1225 fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE; 1226 goto err; 1227 } 1228 1229 if ((h2c_data->datao + h2c_data->datal) > tcp_req->req.length) { 1230 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, 1231 "tcp_req(%p), tqpair=%p, (datao=%u + datal=%u) execeeds requested length=%u\n", 1232 tcp_req, tqpair, h2c_data->datao, h2c_data->datal, tcp_req->req.length); 1233 fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE; 1234 goto err; 1235 } 1236 1237 pdu->req = tcp_req; 1238 1239 if (spdk_unlikely(tcp_req->req.dif.dif_insert_or_strip)) { 1240 pdu->dif_ctx = &tcp_req->req.dif.dif_ctx; 1241 } 1242 1243 nvme_tcp_pdu_set_data_buf(pdu, tcp_req->req.iov, tcp_req->req.iovcnt, 1244 h2c_data->datao, h2c_data->datal); 1245 spdk_nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD); 1246 return; 1247 1248 err: 1249 spdk_nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset); 1250 } 1251 1252 static void 1253 spdk_nvmf_tcp_pdu_cmd_complete(void *cb_arg) 1254 { 1255 struct spdk_nvmf_tcp_req *tcp_req = cb_arg; 1256 nvmf_tcp_request_free(tcp_req); 1257 } 1258 1259 static void 1260 spdk_nvmf_tcp_send_capsule_resp_pdu(struct spdk_nvmf_tcp_req *tcp_req, 1261 struct spdk_nvmf_tcp_qpair *tqpair) 1262 { 1263 struct nvme_tcp_pdu *rsp_pdu; 1264 struct spdk_nvme_tcp_rsp *capsule_resp; 1265 1266 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "enter, tqpair=%p\n", tqpair); 1267 1268 rsp_pdu = nvmf_tcp_req_pdu_init(tcp_req); 1269 assert(rsp_pdu != NULL); 1270 1271 capsule_resp = &rsp_pdu->hdr.capsule_resp; 1272 capsule_resp->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP; 1273 capsule_resp->common.plen = capsule_resp->common.hlen = sizeof(*capsule_resp); 1274 capsule_resp->rccqe = tcp_req->req.rsp->nvme_cpl; 1275 if (tqpair->host_hdgst_enable) { 1276 capsule_resp->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF; 1277 capsule_resp->common.plen += SPDK_NVME_TCP_DIGEST_LEN; 1278 } 1279 1280 spdk_nvmf_tcp_qpair_write_pdu(tqpair, rsp_pdu, spdk_nvmf_tcp_pdu_cmd_complete, tcp_req); 1281 } 1282 1283 static void 1284 spdk_nvmf_tcp_pdu_c2h_data_complete(void *cb_arg) 1285 { 1286 struct spdk_nvmf_tcp_req *tcp_req = cb_arg; 1287 struct spdk_nvmf_tcp_qpair *tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair, 1288 struct spdk_nvmf_tcp_qpair, qpair); 1289 1290 assert(tqpair != NULL); 1291 if (tqpair->qpair.transport->opts.c2h_success) { 1292 nvmf_tcp_request_free(tcp_req); 1293 } else { 1294 nvmf_tcp_req_pdu_fini(tcp_req); 1295 spdk_nvmf_tcp_send_capsule_resp_pdu(tcp_req, tqpair); 1296 } 1297 } 1298 1299 static void 1300 spdk_nvmf_tcp_r2t_complete(void *cb_arg) 1301 { 1302 struct spdk_nvmf_tcp_req *tcp_req = cb_arg; 1303 struct spdk_nvmf_tcp_transport *ttransport; 1304 1305 nvmf_tcp_req_pdu_fini(tcp_req); 1306 1307 ttransport = SPDK_CONTAINEROF(tcp_req->req.qpair->transport, 1308 struct spdk_nvmf_tcp_transport, transport); 1309 1310 spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER); 1311 1312 if (tcp_req->h2c_offset == tcp_req->req.length) { 1313 spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE); 1314 spdk_nvmf_tcp_req_process(ttransport, tcp_req); 1315 } 1316 } 1317 1318 static void 1319 spdk_nvmf_tcp_send_r2t_pdu(struct spdk_nvmf_tcp_qpair *tqpair, 1320 struct spdk_nvmf_tcp_req *tcp_req) 1321 { 1322 struct nvme_tcp_pdu *rsp_pdu; 1323 struct spdk_nvme_tcp_r2t_hdr *r2t; 1324 1325 rsp_pdu = nvmf_tcp_req_pdu_init(tcp_req); 1326 assert(rsp_pdu != NULL); 1327 1328 r2t = &rsp_pdu->hdr.r2t; 1329 r2t->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_R2T; 1330 r2t->common.plen = r2t->common.hlen = sizeof(*r2t); 1331 1332 if (tqpair->host_hdgst_enable) { 1333 r2t->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF; 1334 r2t->common.plen += SPDK_NVME_TCP_DIGEST_LEN; 1335 } 1336 1337 r2t->cccid = tcp_req->req.cmd->nvme_cmd.cid; 1338 r2t->ttag = tcp_req->ttag; 1339 r2t->r2to = tcp_req->h2c_offset; 1340 r2t->r2tl = tcp_req->req.length; 1341 1342 spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_AWAITING_R2T_ACK); 1343 1344 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, 1345 "tcp_req(%p) on tqpair(%p), r2t_info: cccid=%u, ttag=%u, r2to=%u, r2tl=%u\n", 1346 tcp_req, tqpair, r2t->cccid, r2t->ttag, r2t->r2to, r2t->r2tl); 1347 spdk_nvmf_tcp_qpair_write_pdu(tqpair, rsp_pdu, spdk_nvmf_tcp_r2t_complete, tcp_req); 1348 } 1349 1350 static void 1351 spdk_nvmf_tcp_h2c_data_payload_handle(struct spdk_nvmf_tcp_transport *ttransport, 1352 struct spdk_nvmf_tcp_qpair *tqpair, 1353 struct nvme_tcp_pdu *pdu) 1354 { 1355 struct spdk_nvmf_tcp_req *tcp_req; 1356 1357 tcp_req = pdu->req; 1358 assert(tcp_req != NULL); 1359 1360 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "enter\n"); 1361 1362 tcp_req->h2c_offset += pdu->data_len; 1363 1364 spdk_nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY); 1365 1366 /* Wait for all of the data to arrive AND for the initial R2T PDU send to be 1367 * acknowledged before moving on. */ 1368 if (tcp_req->h2c_offset == tcp_req->req.length && 1369 tcp_req->state == TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER) { 1370 spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE); 1371 spdk_nvmf_tcp_req_process(ttransport, tcp_req); 1372 } 1373 } 1374 1375 static void 1376 spdk_nvmf_tcp_h2c_term_req_dump(struct spdk_nvme_tcp_term_req_hdr *h2c_term_req) 1377 { 1378 SPDK_ERRLOG("Error info of pdu(%p): %s\n", h2c_term_req, 1379 spdk_nvmf_tcp_term_req_fes_str[h2c_term_req->fes]); 1380 if ((h2c_term_req->fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD) || 1381 (h2c_term_req->fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER)) { 1382 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "The offset from the start of the PDU header is %u\n", 1383 DGET32(h2c_term_req->fei)); 1384 } 1385 } 1386 1387 static void 1388 spdk_nvmf_tcp_h2c_term_req_hdr_handle(struct spdk_nvmf_tcp_qpair *tqpair, 1389 struct nvme_tcp_pdu *pdu) 1390 { 1391 struct spdk_nvme_tcp_term_req_hdr *h2c_term_req = &pdu->hdr.term_req; 1392 uint32_t error_offset = 0; 1393 enum spdk_nvme_tcp_term_req_fes fes; 1394 1395 1396 if (h2c_term_req->fes > SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER) { 1397 SPDK_ERRLOG("Fatal Error Stauts(FES) is unknown for h2c_term_req pdu=%p\n", pdu); 1398 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 1399 error_offset = offsetof(struct spdk_nvme_tcp_term_req_hdr, fes); 1400 goto end; 1401 } 1402 1403 /* set the data buffer */ 1404 nvme_tcp_pdu_set_data(pdu, (uint8_t *)pdu->hdr.raw + h2c_term_req->common.hlen, 1405 h2c_term_req->common.plen - h2c_term_req->common.hlen); 1406 spdk_nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD); 1407 return; 1408 end: 1409 spdk_nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset); 1410 } 1411 1412 static void 1413 spdk_nvmf_tcp_h2c_term_req_payload_handle(struct spdk_nvmf_tcp_qpair *tqpair, 1414 struct nvme_tcp_pdu *pdu) 1415 { 1416 struct spdk_nvme_tcp_term_req_hdr *h2c_term_req = &pdu->hdr.term_req; 1417 1418 spdk_nvmf_tcp_h2c_term_req_dump(h2c_term_req); 1419 spdk_nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR); 1420 } 1421 1422 static void 1423 spdk_nvmf_tcp_pdu_payload_handle(struct spdk_nvmf_tcp_qpair *tqpair, 1424 struct spdk_nvmf_tcp_transport *ttransport) 1425 { 1426 int rc = 0; 1427 struct nvme_tcp_pdu *pdu; 1428 uint32_t crc32c, error_offset = 0; 1429 enum spdk_nvme_tcp_term_req_fes fes; 1430 1431 assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD); 1432 pdu = &tqpair->pdu_in_progress; 1433 1434 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "enter\n"); 1435 /* check data digest if need */ 1436 if (pdu->ddgst_enable) { 1437 crc32c = nvme_tcp_pdu_calc_data_digest(pdu); 1438 rc = MATCH_DIGEST_WORD(pdu->data_digest, crc32c); 1439 if (rc == 0) { 1440 SPDK_ERRLOG("Data digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu); 1441 fes = SPDK_NVME_TCP_TERM_REQ_FES_HDGST_ERROR; 1442 spdk_nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset); 1443 return; 1444 1445 } 1446 } 1447 1448 switch (pdu->hdr.common.pdu_type) { 1449 case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD: 1450 spdk_nvmf_tcp_capsule_cmd_payload_handle(ttransport, tqpair, pdu); 1451 break; 1452 case SPDK_NVME_TCP_PDU_TYPE_H2C_DATA: 1453 spdk_nvmf_tcp_h2c_data_payload_handle(ttransport, tqpair, pdu); 1454 break; 1455 1456 case SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ: 1457 spdk_nvmf_tcp_h2c_term_req_payload_handle(tqpair, pdu); 1458 break; 1459 1460 default: 1461 /* The code should not go to here */ 1462 SPDK_ERRLOG("The code should not go to here\n"); 1463 break; 1464 } 1465 } 1466 1467 static void 1468 spdk_nvmf_tcp_send_icresp_complete(void *cb_arg) 1469 { 1470 struct spdk_nvmf_tcp_qpair *tqpair = cb_arg; 1471 1472 tqpair->state = NVME_TCP_QPAIR_STATE_RUNNING; 1473 } 1474 1475 static void 1476 spdk_nvmf_tcp_icreq_handle(struct spdk_nvmf_tcp_transport *ttransport, 1477 struct spdk_nvmf_tcp_qpair *tqpair, 1478 struct nvme_tcp_pdu *pdu) 1479 { 1480 struct spdk_nvme_tcp_ic_req *ic_req = &pdu->hdr.ic_req; 1481 struct nvme_tcp_pdu *rsp_pdu; 1482 struct spdk_nvme_tcp_ic_resp *ic_resp; 1483 uint32_t error_offset = 0; 1484 enum spdk_nvme_tcp_term_req_fes fes; 1485 1486 /* Only PFV 0 is defined currently */ 1487 if (ic_req->pfv != 0) { 1488 SPDK_ERRLOG("Expected ICReq PFV %u, got %u\n", 0u, ic_req->pfv); 1489 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 1490 error_offset = offsetof(struct spdk_nvme_tcp_ic_req, pfv); 1491 goto end; 1492 } 1493 1494 /* MAXR2T is 0's based */ 1495 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "maxr2t =%u\n", (ic_req->maxr2t + 1u)); 1496 1497 tqpair->host_hdgst_enable = ic_req->dgst.bits.hdgst_enable ? true : false; 1498 if (!tqpair->host_hdgst_enable) { 1499 tqpair->recv_buf_size -= SPDK_NVME_TCP_DIGEST_LEN * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR; 1500 } 1501 1502 tqpair->host_ddgst_enable = ic_req->dgst.bits.ddgst_enable ? true : false; 1503 if (!tqpair->host_ddgst_enable) { 1504 tqpair->recv_buf_size -= SPDK_NVME_TCP_DIGEST_LEN * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR; 1505 } 1506 1507 /* Now that we know whether digests are enabled, properly size the receive buffer */ 1508 if (spdk_sock_set_recvbuf(tqpair->sock, tqpair->recv_buf_size) < 0) { 1509 SPDK_WARNLOG("Unable to allocate enough memory for receive buffer on tqpair=%p with size=%d\n", 1510 tqpair, 1511 tqpair->recv_buf_size); 1512 /* Not fatal. */ 1513 } 1514 1515 tqpair->cpda = spdk_min(ic_req->hpda, SPDK_NVME_TCP_CPDA_MAX); 1516 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "cpda of tqpair=(%p) is : %u\n", tqpair, tqpair->cpda); 1517 1518 rsp_pdu = &tqpair->mgmt_pdu; 1519 1520 ic_resp = &rsp_pdu->hdr.ic_resp; 1521 ic_resp->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP; 1522 ic_resp->common.hlen = ic_resp->common.plen = sizeof(*ic_resp); 1523 ic_resp->pfv = 0; 1524 ic_resp->cpda = tqpair->cpda; 1525 ic_resp->maxh2cdata = ttransport->transport.opts.max_io_size; 1526 ic_resp->dgst.bits.hdgst_enable = tqpair->host_hdgst_enable ? 1 : 0; 1527 ic_resp->dgst.bits.ddgst_enable = tqpair->host_ddgst_enable ? 1 : 0; 1528 1529 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "host_hdgst_enable: %u\n", tqpair->host_hdgst_enable); 1530 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "host_ddgst_enable: %u\n", tqpair->host_ddgst_enable); 1531 1532 tqpair->state = NVME_TCP_QPAIR_STATE_INITIALIZING; 1533 spdk_nvmf_tcp_qpair_write_pdu(tqpair, rsp_pdu, spdk_nvmf_tcp_send_icresp_complete, tqpair); 1534 spdk_nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY); 1535 return; 1536 end: 1537 spdk_nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset); 1538 } 1539 1540 static void 1541 spdk_nvmf_tcp_pdu_psh_handle(struct spdk_nvmf_tcp_qpair *tqpair, 1542 struct spdk_nvmf_tcp_transport *ttransport) 1543 { 1544 struct nvme_tcp_pdu *pdu; 1545 int rc; 1546 uint32_t crc32c, error_offset = 0; 1547 enum spdk_nvme_tcp_term_req_fes fes; 1548 1549 assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH); 1550 pdu = &tqpair->pdu_in_progress; 1551 1552 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "pdu type of tqpair(%p) is %d\n", tqpair, 1553 pdu->hdr.common.pdu_type); 1554 /* check header digest if needed */ 1555 if (pdu->has_hdgst) { 1556 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "Compare the header of pdu=%p on tqpair=%p\n", pdu, tqpair); 1557 crc32c = nvme_tcp_pdu_calc_header_digest(pdu); 1558 rc = MATCH_DIGEST_WORD((uint8_t *)pdu->hdr.raw + pdu->hdr.common.hlen, crc32c); 1559 if (rc == 0) { 1560 SPDK_ERRLOG("Header digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu); 1561 fes = SPDK_NVME_TCP_TERM_REQ_FES_HDGST_ERROR; 1562 spdk_nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset); 1563 return; 1564 1565 } 1566 } 1567 1568 switch (pdu->hdr.common.pdu_type) { 1569 case SPDK_NVME_TCP_PDU_TYPE_IC_REQ: 1570 spdk_nvmf_tcp_icreq_handle(ttransport, tqpair, pdu); 1571 break; 1572 case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD: 1573 spdk_nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_REQ); 1574 break; 1575 case SPDK_NVME_TCP_PDU_TYPE_H2C_DATA: 1576 spdk_nvmf_tcp_h2c_data_hdr_handle(ttransport, tqpair, pdu); 1577 break; 1578 1579 case SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ: 1580 spdk_nvmf_tcp_h2c_term_req_hdr_handle(tqpair, pdu); 1581 break; 1582 1583 default: 1584 SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", tqpair->pdu_in_progress.hdr.common.pdu_type); 1585 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 1586 error_offset = 1; 1587 spdk_nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset); 1588 break; 1589 } 1590 } 1591 1592 static void 1593 spdk_nvmf_tcp_pdu_ch_handle(struct spdk_nvmf_tcp_qpair *tqpair) 1594 { 1595 struct nvme_tcp_pdu *pdu; 1596 uint32_t error_offset = 0; 1597 enum spdk_nvme_tcp_term_req_fes fes; 1598 uint8_t expected_hlen, pdo; 1599 bool plen_error = false, pdo_error = false; 1600 1601 assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH); 1602 pdu = &tqpair->pdu_in_progress; 1603 1604 if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ) { 1605 if (tqpair->state != NVME_TCP_QPAIR_STATE_INVALID) { 1606 SPDK_ERRLOG("Already received ICreq PDU, and reject this pdu=%p\n", pdu); 1607 fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR; 1608 goto err; 1609 } 1610 expected_hlen = sizeof(struct spdk_nvme_tcp_ic_req); 1611 if (pdu->hdr.common.plen != expected_hlen) { 1612 plen_error = true; 1613 } 1614 } else { 1615 if (tqpair->state != NVME_TCP_QPAIR_STATE_RUNNING) { 1616 SPDK_ERRLOG("The TCP/IP connection is not negotitated\n"); 1617 fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR; 1618 goto err; 1619 } 1620 1621 switch (pdu->hdr.common.pdu_type) { 1622 case SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD: 1623 expected_hlen = sizeof(struct spdk_nvme_tcp_cmd); 1624 pdo = pdu->hdr.common.pdo; 1625 if ((tqpair->cpda != 0) && (pdo != ((tqpair->cpda + 1) << 2))) { 1626 pdo_error = true; 1627 break; 1628 } 1629 1630 if (pdu->hdr.common.plen < expected_hlen) { 1631 plen_error = true; 1632 } 1633 break; 1634 case SPDK_NVME_TCP_PDU_TYPE_H2C_DATA: 1635 expected_hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr); 1636 pdo = pdu->hdr.common.pdo; 1637 if ((tqpair->cpda != 0) && (pdo != ((tqpair->cpda + 1) << 2))) { 1638 pdo_error = true; 1639 break; 1640 } 1641 if (pdu->hdr.common.plen < expected_hlen) { 1642 plen_error = true; 1643 } 1644 break; 1645 1646 case SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ: 1647 expected_hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr); 1648 if ((pdu->hdr.common.plen <= expected_hlen) || 1649 (pdu->hdr.common.plen > SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE)) { 1650 plen_error = true; 1651 } 1652 break; 1653 1654 default: 1655 SPDK_ERRLOG("Unexpected PDU type 0x%02x\n", pdu->hdr.common.pdu_type); 1656 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 1657 error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdu_type); 1658 goto err; 1659 } 1660 } 1661 1662 if (pdu->hdr.common.hlen != expected_hlen) { 1663 SPDK_ERRLOG("PDU type=0x%02x, Expected ICReq header length %u, got %u on tqpair=%p\n", 1664 pdu->hdr.common.pdu_type, 1665 expected_hlen, pdu->hdr.common.hlen, tqpair); 1666 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 1667 error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, hlen); 1668 goto err; 1669 } else if (pdo_error) { 1670 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 1671 error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, pdo); 1672 } else if (plen_error) { 1673 fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD; 1674 error_offset = offsetof(struct spdk_nvme_tcp_common_pdu_hdr, plen); 1675 goto err; 1676 } else { 1677 spdk_nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH); 1678 nvme_tcp_pdu_calc_psh_len(&tqpair->pdu_in_progress, tqpair->host_hdgst_enable); 1679 return; 1680 } 1681 err: 1682 spdk_nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset); 1683 } 1684 1685 static int 1686 nvmf_tcp_pdu_payload_insert_dif(struct nvme_tcp_pdu *pdu, uint32_t read_offset, 1687 int read_len) 1688 { 1689 int rc; 1690 1691 rc = spdk_dif_generate_stream(pdu->data_iov, pdu->data_iovcnt, 1692 read_offset, read_len, pdu->dif_ctx); 1693 if (rc != 0) { 1694 SPDK_ERRLOG("DIF generate failed\n"); 1695 } 1696 1697 return rc; 1698 } 1699 1700 static int 1701 spdk_nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair) 1702 { 1703 int rc = 0; 1704 struct nvme_tcp_pdu *pdu; 1705 enum nvme_tcp_pdu_recv_state prev_state; 1706 uint32_t data_len; 1707 struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF(tqpair->qpair.transport, 1708 struct spdk_nvmf_tcp_transport, transport); 1709 1710 /* The loop here is to allow for several back-to-back state changes. */ 1711 do { 1712 prev_state = tqpair->recv_state; 1713 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "tqpair(%p) recv pdu entering state %d\n", tqpair, prev_state); 1714 1715 pdu = &tqpair->pdu_in_progress; 1716 switch (tqpair->recv_state) { 1717 /* Wait for the common header */ 1718 case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY: 1719 case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH: 1720 if (spdk_unlikely(tqpair->state == NVME_TCP_QPAIR_STATE_INITIALIZING)) { 1721 return rc; 1722 } 1723 1724 rc = nvme_tcp_read_data(tqpair->sock, 1725 sizeof(struct spdk_nvme_tcp_common_pdu_hdr) - pdu->ch_valid_bytes, 1726 (void *)&pdu->hdr.common + pdu->ch_valid_bytes); 1727 if (rc < 0) { 1728 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "will disconnect tqpair=%p\n", tqpair); 1729 return NVME_TCP_PDU_FATAL; 1730 } else if (rc > 0) { 1731 pdu->ch_valid_bytes += rc; 1732 spdk_trace_record(TRACE_TCP_READ_FROM_SOCKET_DONE, 0, rc, 0, 0); 1733 if (spdk_likely(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY)) { 1734 spdk_nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH); 1735 } 1736 } 1737 1738 if (pdu->ch_valid_bytes < sizeof(struct spdk_nvme_tcp_common_pdu_hdr)) { 1739 return NVME_TCP_PDU_IN_PROGRESS; 1740 } 1741 1742 /* The command header of this PDU has now been read from the socket. */ 1743 spdk_nvmf_tcp_pdu_ch_handle(tqpair); 1744 break; 1745 /* Wait for the pdu specific header */ 1746 case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH: 1747 rc = nvme_tcp_read_data(tqpair->sock, 1748 pdu->psh_len - pdu->psh_valid_bytes, 1749 (void *)&pdu->hdr.raw + sizeof(struct spdk_nvme_tcp_common_pdu_hdr) + pdu->psh_valid_bytes); 1750 if (rc < 0) { 1751 return NVME_TCP_PDU_FATAL; 1752 } else if (rc > 0) { 1753 spdk_trace_record(TRACE_TCP_READ_FROM_SOCKET_DONE, 1754 0, rc, 0, 0); 1755 pdu->psh_valid_bytes += rc; 1756 } 1757 1758 if (pdu->psh_valid_bytes < pdu->psh_len) { 1759 return NVME_TCP_PDU_IN_PROGRESS; 1760 } 1761 1762 /* All header(ch, psh, head digist) of this PDU has now been read from the socket. */ 1763 spdk_nvmf_tcp_pdu_psh_handle(tqpair, ttransport); 1764 break; 1765 /* Wait for the req slot */ 1766 case NVME_TCP_PDU_RECV_STATE_AWAIT_REQ: 1767 spdk_nvmf_tcp_capsule_cmd_hdr_handle(ttransport, tqpair, pdu); 1768 break; 1769 case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD: 1770 /* check whether the data is valid, if not we just return */ 1771 if (!pdu->data_len) { 1772 return NVME_TCP_PDU_IN_PROGRESS; 1773 } 1774 1775 data_len = pdu->data_len; 1776 /* data digest */ 1777 if (spdk_unlikely((pdu->hdr.common.pdu_type != SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ) && 1778 tqpair->host_ddgst_enable)) { 1779 data_len += SPDK_NVME_TCP_DIGEST_LEN; 1780 pdu->ddgst_enable = true; 1781 } 1782 1783 rc = nvme_tcp_read_payload_data(tqpair->sock, pdu); 1784 if (rc < 0) { 1785 return NVME_TCP_PDU_IN_PROGRESS; 1786 } 1787 pdu->readv_offset += rc; 1788 1789 if (spdk_unlikely(pdu->dif_ctx != NULL)) { 1790 rc = nvmf_tcp_pdu_payload_insert_dif(pdu, pdu->readv_offset - rc, rc); 1791 if (rc != 0) { 1792 return NVME_TCP_PDU_FATAL; 1793 } 1794 } 1795 1796 if (pdu->readv_offset < data_len) { 1797 return NVME_TCP_PDU_IN_PROGRESS; 1798 } 1799 1800 /* All of this PDU has now been read from the socket. */ 1801 spdk_nvmf_tcp_pdu_payload_handle(tqpair, ttransport); 1802 break; 1803 case NVME_TCP_PDU_RECV_STATE_ERROR: 1804 if (!spdk_sock_is_connected(tqpair->sock)) { 1805 return NVME_TCP_PDU_FATAL; 1806 } 1807 break; 1808 default: 1809 assert(0); 1810 SPDK_ERRLOG("code should not come to here"); 1811 break; 1812 } 1813 } while (tqpair->recv_state != prev_state); 1814 1815 return rc; 1816 } 1817 1818 static int 1819 spdk_nvmf_tcp_req_parse_sgl(struct spdk_nvmf_tcp_req *tcp_req, 1820 struct spdk_nvmf_transport *transport, 1821 struct spdk_nvmf_transport_poll_group *group) 1822 { 1823 struct spdk_nvmf_request *req = &tcp_req->req; 1824 struct spdk_nvme_cmd *cmd; 1825 struct spdk_nvme_cpl *rsp; 1826 struct spdk_nvme_sgl_descriptor *sgl; 1827 uint32_t length; 1828 1829 cmd = &req->cmd->nvme_cmd; 1830 rsp = &req->rsp->nvme_cpl; 1831 sgl = &cmd->dptr.sgl1; 1832 1833 length = sgl->unkeyed.length; 1834 1835 if (sgl->generic.type == SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK && 1836 sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_TRANSPORT) { 1837 if (length > transport->opts.max_io_size) { 1838 SPDK_ERRLOG("SGL length 0x%x exceeds max io size 0x%x\n", 1839 length, transport->opts.max_io_size); 1840 rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID; 1841 return -1; 1842 } 1843 1844 /* fill request length and populate iovs */ 1845 req->length = length; 1846 1847 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "Data requested length= 0x%x\n", length); 1848 1849 if (spdk_unlikely(req->dif.dif_insert_or_strip)) { 1850 req->dif.orig_length = length; 1851 length = spdk_dif_get_length_with_md(length, &req->dif.dif_ctx); 1852 req->dif.elba_length = length; 1853 } 1854 1855 if (spdk_nvmf_request_get_buffers(req, group, transport, length)) { 1856 /* No available buffers. Queue this request up. */ 1857 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "No available large data buffers. Queueing request %p\n", 1858 tcp_req); 1859 return 0; 1860 } 1861 1862 /* backward compatible */ 1863 req->data = req->iov[0].iov_base; 1864 1865 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "Request %p took %d buffer/s from central pool, and data=%p\n", 1866 tcp_req, req->iovcnt, req->data); 1867 1868 return 0; 1869 } else if (sgl->generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK && 1870 sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET) { 1871 uint64_t offset = sgl->address; 1872 uint32_t max_len = transport->opts.in_capsule_data_size; 1873 1874 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "In-capsule data: offset 0x%" PRIx64 ", length 0x%x\n", 1875 offset, length); 1876 1877 if (offset > max_len) { 1878 SPDK_ERRLOG("In-capsule offset 0x%" PRIx64 " exceeds capsule length 0x%x\n", 1879 offset, max_len); 1880 rsp->status.sc = SPDK_NVME_SC_INVALID_SGL_OFFSET; 1881 return -1; 1882 } 1883 max_len -= (uint32_t)offset; 1884 1885 if (length > max_len) { 1886 SPDK_ERRLOG("In-capsule data length 0x%x exceeds capsule length 0x%x\n", 1887 length, max_len); 1888 rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID; 1889 return -1; 1890 } 1891 1892 req->data = tcp_req->buf + offset; 1893 req->data_from_pool = false; 1894 req->length = length; 1895 1896 if (spdk_unlikely(req->dif.dif_insert_or_strip)) { 1897 length = spdk_dif_get_length_with_md(length, &req->dif.dif_ctx); 1898 req->dif.elba_length = length; 1899 } 1900 1901 req->iov[0].iov_base = req->data; 1902 req->iov[0].iov_len = length; 1903 req->iovcnt = 1; 1904 1905 return 0; 1906 } 1907 1908 SPDK_ERRLOG("Invalid NVMf I/O Command SGL: Type 0x%x, Subtype 0x%x\n", 1909 sgl->generic.type, sgl->generic.subtype); 1910 rsp->status.sc = SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID; 1911 return -1; 1912 } 1913 1914 static inline enum spdk_nvme_media_error_status_code 1915 nvmf_tcp_dif_error_to_compl_status(uint8_t err_type) { 1916 enum spdk_nvme_media_error_status_code result; 1917 1918 switch (err_type) 1919 { 1920 case SPDK_DIF_REFTAG_ERROR: 1921 result = SPDK_NVME_SC_REFERENCE_TAG_CHECK_ERROR; 1922 break; 1923 case SPDK_DIF_APPTAG_ERROR: 1924 result = SPDK_NVME_SC_APPLICATION_TAG_CHECK_ERROR; 1925 break; 1926 case SPDK_DIF_GUARD_ERROR: 1927 result = SPDK_NVME_SC_GUARD_CHECK_ERROR; 1928 break; 1929 default: 1930 SPDK_UNREACHABLE(); 1931 break; 1932 } 1933 1934 return result; 1935 } 1936 1937 static void 1938 spdk_nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair, 1939 struct spdk_nvmf_tcp_req *tcp_req) 1940 { 1941 struct nvme_tcp_pdu *rsp_pdu; 1942 struct spdk_nvme_tcp_c2h_data_hdr *c2h_data; 1943 uint32_t plen, pdo, alignment; 1944 int rc; 1945 1946 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "enter\n"); 1947 1948 rsp_pdu = nvmf_tcp_req_pdu_init(tcp_req); 1949 assert(rsp_pdu != NULL); 1950 1951 c2h_data = &rsp_pdu->hdr.c2h_data; 1952 c2h_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_DATA; 1953 plen = c2h_data->common.hlen = sizeof(*c2h_data); 1954 1955 if (tqpair->host_hdgst_enable) { 1956 plen += SPDK_NVME_TCP_DIGEST_LEN; 1957 c2h_data->common.flags |= SPDK_NVME_TCP_CH_FLAGS_HDGSTF; 1958 } 1959 1960 /* set the psh */ 1961 c2h_data->cccid = tcp_req->req.cmd->nvme_cmd.cid; 1962 c2h_data->datal = tcp_req->req.length; 1963 c2h_data->datao = 0; 1964 1965 /* set the padding */ 1966 rsp_pdu->padding_len = 0; 1967 pdo = plen; 1968 if (tqpair->cpda) { 1969 alignment = (tqpair->cpda + 1) << 2; 1970 if (alignment > plen) { 1971 rsp_pdu->padding_len = alignment - plen; 1972 pdo = plen = alignment; 1973 } 1974 } 1975 1976 c2h_data->common.pdo = pdo; 1977 plen += c2h_data->datal; 1978 if (tqpair->host_ddgst_enable) { 1979 c2h_data->common.flags |= SPDK_NVME_TCP_CH_FLAGS_DDGSTF; 1980 plen += SPDK_NVME_TCP_DIGEST_LEN; 1981 } 1982 1983 c2h_data->common.plen = plen; 1984 1985 if (spdk_unlikely(tcp_req->req.dif.dif_insert_or_strip)) { 1986 rsp_pdu->dif_ctx = &tcp_req->req.dif.dif_ctx; 1987 } 1988 1989 nvme_tcp_pdu_set_data_buf(rsp_pdu, tcp_req->req.iov, tcp_req->req.iovcnt, 1990 c2h_data->datao, c2h_data->datal); 1991 1992 if (spdk_unlikely(tcp_req->req.dif.dif_insert_or_strip)) { 1993 struct spdk_nvme_cpl *rsp = &tcp_req->req.rsp->nvme_cpl; 1994 struct spdk_dif_error err_blk = {}; 1995 1996 rc = spdk_dif_verify_stream(rsp_pdu->data_iov, rsp_pdu->data_iovcnt, 1997 0, rsp_pdu->data_len, rsp_pdu->dif_ctx, &err_blk); 1998 if (rc != 0) { 1999 SPDK_ERRLOG("DIF error detected. type=%d, offset=%" PRIu32 "\n", 2000 err_blk.err_type, err_blk.err_offset); 2001 rsp->status.sct = SPDK_NVME_SCT_MEDIA_ERROR; 2002 rsp->status.sc = nvmf_tcp_dif_error_to_compl_status(err_blk.err_type); 2003 nvmf_tcp_req_pdu_fini(tcp_req); 2004 spdk_nvmf_tcp_send_capsule_resp_pdu(tcp_req, tqpair); 2005 return; 2006 } 2007 } 2008 2009 c2h_data->common.flags |= SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU; 2010 if (tqpair->qpair.transport->opts.c2h_success) { 2011 c2h_data->common.flags |= SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS; 2012 } 2013 2014 spdk_nvmf_tcp_qpair_write_pdu(tqpair, rsp_pdu, spdk_nvmf_tcp_pdu_c2h_data_complete, tcp_req); 2015 } 2016 2017 static int 2018 request_transfer_out(struct spdk_nvmf_request *req) 2019 { 2020 struct spdk_nvmf_tcp_req *tcp_req; 2021 struct spdk_nvmf_qpair *qpair; 2022 struct spdk_nvmf_tcp_qpair *tqpair; 2023 struct spdk_nvme_cpl *rsp; 2024 2025 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "enter\n"); 2026 2027 qpair = req->qpair; 2028 rsp = &req->rsp->nvme_cpl; 2029 tcp_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_tcp_req, req); 2030 2031 /* Advance our sq_head pointer */ 2032 if (qpair->sq_head == qpair->sq_head_max) { 2033 qpair->sq_head = 0; 2034 } else { 2035 qpair->sq_head++; 2036 } 2037 rsp->sqhd = qpair->sq_head; 2038 2039 tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct spdk_nvmf_tcp_qpair, qpair); 2040 spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST); 2041 if (rsp->status.sc == SPDK_NVME_SC_SUCCESS && req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) { 2042 spdk_nvmf_tcp_send_c2h_data(tqpair, tcp_req); 2043 } else { 2044 spdk_nvmf_tcp_send_capsule_resp_pdu(tcp_req, tqpair); 2045 } 2046 2047 return 0; 2048 } 2049 2050 static void 2051 spdk_nvmf_tcp_set_incapsule_data(struct spdk_nvmf_tcp_qpair *tqpair, 2052 struct spdk_nvmf_tcp_req *tcp_req) 2053 { 2054 struct nvme_tcp_pdu *pdu; 2055 uint32_t plen = 0; 2056 2057 pdu = &tqpair->pdu_in_progress; 2058 plen = pdu->hdr.common.hlen; 2059 2060 if (tqpair->host_hdgst_enable) { 2061 plen += SPDK_NVME_TCP_DIGEST_LEN; 2062 } 2063 2064 if (pdu->hdr.common.plen != plen) { 2065 tcp_req->has_incapsule_data = true; 2066 } 2067 } 2068 2069 static bool 2070 spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport, 2071 struct spdk_nvmf_tcp_req *tcp_req) 2072 { 2073 struct spdk_nvmf_tcp_qpair *tqpair; 2074 int rc; 2075 enum spdk_nvmf_tcp_req_state prev_state; 2076 bool progress = false; 2077 struct spdk_nvmf_transport *transport = &ttransport->transport; 2078 struct spdk_nvmf_transport_poll_group *group; 2079 2080 tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct spdk_nvmf_tcp_qpair, qpair); 2081 group = &tqpair->group->group; 2082 assert(tcp_req->state != TCP_REQUEST_STATE_FREE); 2083 2084 /* The loop here is to allow for several back-to-back state changes. */ 2085 do { 2086 prev_state = tcp_req->state; 2087 2088 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "Request %p entering state %d on tqpair=%p\n", tcp_req, prev_state, 2089 tqpair); 2090 2091 switch (tcp_req->state) { 2092 case TCP_REQUEST_STATE_FREE: 2093 /* Some external code must kick a request into TCP_REQUEST_STATE_NEW 2094 * to escape this state. */ 2095 break; 2096 case TCP_REQUEST_STATE_NEW: 2097 spdk_trace_record(TRACE_TCP_REQUEST_STATE_NEW, 0, 0, (uintptr_t)tcp_req, 0); 2098 2099 /* copy the cmd from the receive pdu */ 2100 tcp_req->cmd = tqpair->pdu_in_progress.hdr.capsule_cmd.ccsqe; 2101 2102 if (spdk_unlikely(spdk_nvmf_request_get_dif_ctx(&tcp_req->req, &tcp_req->req.dif.dif_ctx))) { 2103 tcp_req->req.dif.dif_insert_or_strip = true; 2104 tqpair->pdu_in_progress.dif_ctx = &tcp_req->req.dif.dif_ctx; 2105 } 2106 2107 /* The next state transition depends on the data transfer needs of this request. */ 2108 tcp_req->req.xfer = spdk_nvmf_req_get_xfer(&tcp_req->req); 2109 2110 /* If no data to transfer, ready to execute. */ 2111 if (tcp_req->req.xfer == SPDK_NVME_DATA_NONE) { 2112 /* Reset the tqpair receving pdu state */ 2113 spdk_nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY); 2114 spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE); 2115 break; 2116 } 2117 2118 spdk_nvmf_tcp_set_incapsule_data(tqpair, tcp_req); 2119 2120 if (!tcp_req->has_incapsule_data) { 2121 spdk_nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY); 2122 } 2123 2124 spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_NEED_BUFFER); 2125 STAILQ_INSERT_TAIL(&group->pending_buf_queue, &tcp_req->req, buf_link); 2126 break; 2127 case TCP_REQUEST_STATE_NEED_BUFFER: 2128 spdk_trace_record(TRACE_TCP_REQUEST_STATE_NEED_BUFFER, 0, 0, (uintptr_t)tcp_req, 0); 2129 2130 assert(tcp_req->req.xfer != SPDK_NVME_DATA_NONE); 2131 2132 if (!tcp_req->has_incapsule_data && (&tcp_req->req != STAILQ_FIRST(&group->pending_buf_queue))) { 2133 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, 2134 "Not the first element to wait for the buf for tcp_req(%p) on tqpair=%p\n", 2135 tcp_req, tqpair); 2136 /* This request needs to wait in line to obtain a buffer */ 2137 break; 2138 } 2139 2140 /* Try to get a data buffer */ 2141 rc = spdk_nvmf_tcp_req_parse_sgl(tcp_req, transport, group); 2142 if (rc < 0) { 2143 STAILQ_REMOVE_HEAD(&group->pending_buf_queue, buf_link); 2144 /* Reset the tqpair receving pdu state */ 2145 spdk_nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR); 2146 spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE); 2147 break; 2148 } 2149 2150 if (!tcp_req->req.data) { 2151 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "No buffer allocated for tcp_req(%p) on tqpair(%p\n)", 2152 tcp_req, tqpair); 2153 /* No buffers available. */ 2154 break; 2155 } 2156 2157 STAILQ_REMOVE(&group->pending_buf_queue, &tcp_req->req, spdk_nvmf_request, buf_link); 2158 2159 /* If data is transferring from host to controller, we need to do a transfer from the host. */ 2160 if (tcp_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) { 2161 if (tcp_req->req.data_from_pool) { 2162 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "Sending R2T for tcp_req(%p) on tqpair=%p\n", tcp_req, tqpair); 2163 spdk_nvmf_tcp_send_r2t_pdu(tqpair, tcp_req); 2164 } else { 2165 struct nvme_tcp_pdu *pdu; 2166 2167 spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER); 2168 2169 pdu = &tqpair->pdu_in_progress; 2170 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "Not need to send r2t for tcp_req(%p) on tqpair=%p\n", tcp_req, 2171 tqpair); 2172 /* No need to send r2t, contained in the capsuled data */ 2173 nvme_tcp_pdu_set_data_buf(pdu, tcp_req->req.iov, tcp_req->req.iovcnt, 2174 0, tcp_req->req.length); 2175 spdk_nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD); 2176 } 2177 break; 2178 } 2179 2180 spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE); 2181 break; 2182 case TCP_REQUEST_STATE_AWAITING_R2T_ACK: 2183 spdk_trace_record(TRACE_TCP_REQUEST_STATE_AWAIT_R2T_ACK, 0, 0, (uintptr_t)tcp_req, 0); 2184 /* The R2T completion or the h2c data incoming will kick it out of this state. */ 2185 break; 2186 case TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER: 2187 2188 spdk_trace_record(TRACE_TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER, 0, 0, 2189 (uintptr_t)tcp_req, 0); 2190 /* Some external code must kick a request into TCP_REQUEST_STATE_READY_TO_EXECUTE 2191 * to escape this state. */ 2192 break; 2193 case TCP_REQUEST_STATE_READY_TO_EXECUTE: 2194 spdk_trace_record(TRACE_TCP_REQUEST_STATE_READY_TO_EXECUTE, 0, 0, (uintptr_t)tcp_req, 0); 2195 2196 if (spdk_unlikely(tcp_req->req.dif.dif_insert_or_strip)) { 2197 assert(tcp_req->req.dif.elba_length >= tcp_req->req.length); 2198 tcp_req->req.length = tcp_req->req.dif.elba_length; 2199 } 2200 2201 spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_EXECUTING); 2202 spdk_nvmf_request_exec(&tcp_req->req); 2203 break; 2204 case TCP_REQUEST_STATE_EXECUTING: 2205 spdk_trace_record(TRACE_TCP_REQUEST_STATE_EXECUTING, 0, 0, (uintptr_t)tcp_req, 0); 2206 /* Some external code must kick a request into TCP_REQUEST_STATE_EXECUTED 2207 * to escape this state. */ 2208 break; 2209 case TCP_REQUEST_STATE_EXECUTED: 2210 spdk_trace_record(TRACE_TCP_REQUEST_STATE_EXECUTED, 0, 0, (uintptr_t)tcp_req, 0); 2211 2212 if (spdk_unlikely(tcp_req->req.dif.dif_insert_or_strip)) { 2213 tcp_req->req.length = tcp_req->req.dif.orig_length; 2214 } 2215 2216 spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE); 2217 break; 2218 case TCP_REQUEST_STATE_READY_TO_COMPLETE: 2219 spdk_trace_record(TRACE_TCP_REQUEST_STATE_READY_TO_COMPLETE, 0, 0, (uintptr_t)tcp_req, 0); 2220 rc = request_transfer_out(&tcp_req->req); 2221 assert(rc == 0); /* No good way to handle this currently */ 2222 break; 2223 case TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST: 2224 spdk_trace_record(TRACE_TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST, 0, 0, 2225 (uintptr_t)tcp_req, 2226 0); 2227 /* Some external code must kick a request into TCP_REQUEST_STATE_COMPLETED 2228 * to escape this state. */ 2229 break; 2230 case TCP_REQUEST_STATE_COMPLETED: 2231 spdk_trace_record(TRACE_TCP_REQUEST_STATE_COMPLETED, 0, 0, (uintptr_t)tcp_req, 0); 2232 if (tcp_req->req.data_from_pool) { 2233 spdk_nvmf_request_free_buffers(&tcp_req->req, group, transport); 2234 } 2235 tcp_req->req.length = 0; 2236 tcp_req->req.iovcnt = 0; 2237 tcp_req->req.data = NULL; 2238 2239 nvmf_tcp_req_pdu_fini(tcp_req); 2240 2241 spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_FREE); 2242 break; 2243 case TCP_REQUEST_NUM_STATES: 2244 default: 2245 assert(0); 2246 break; 2247 } 2248 2249 if (tcp_req->state != prev_state) { 2250 progress = true; 2251 } 2252 } while (tcp_req->state != prev_state); 2253 2254 return progress; 2255 } 2256 2257 static void 2258 spdk_nvmf_tcp_sock_cb(void *arg, struct spdk_sock_group *group, struct spdk_sock *sock) 2259 { 2260 struct spdk_nvmf_tcp_qpair *tqpair = arg; 2261 int rc; 2262 2263 assert(tqpair != NULL); 2264 rc = spdk_nvmf_tcp_sock_process(tqpair); 2265 2266 /* If there was a new socket error, disconnect */ 2267 if (rc < 0) { 2268 spdk_nvmf_tcp_qpair_disconnect(tqpair); 2269 } 2270 } 2271 2272 static int 2273 spdk_nvmf_tcp_poll_group_add(struct spdk_nvmf_transport_poll_group *group, 2274 struct spdk_nvmf_qpair *qpair) 2275 { 2276 struct spdk_nvmf_tcp_poll_group *tgroup; 2277 struct spdk_nvmf_tcp_qpair *tqpair; 2278 int rc; 2279 2280 tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group); 2281 tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair); 2282 2283 rc = spdk_sock_group_add_sock(tgroup->sock_group, tqpair->sock, 2284 spdk_nvmf_tcp_sock_cb, tqpair); 2285 if (rc != 0) { 2286 SPDK_ERRLOG("Could not add sock to sock_group: %s (%d)\n", 2287 spdk_strerror(errno), errno); 2288 return -1; 2289 } 2290 2291 rc = spdk_nvmf_tcp_qpair_sock_init(tqpair); 2292 if (rc != 0) { 2293 SPDK_ERRLOG("Cannot set sock opt for tqpair=%p\n", tqpair); 2294 return -1; 2295 } 2296 2297 rc = spdk_nvmf_tcp_qpair_init(&tqpair->qpair); 2298 if (rc < 0) { 2299 SPDK_ERRLOG("Cannot init tqpair=%p\n", tqpair); 2300 return -1; 2301 } 2302 2303 rc = spdk_nvmf_tcp_qpair_init_mem_resource(tqpair); 2304 if (rc < 0) { 2305 SPDK_ERRLOG("Cannot init memory resource info for tqpair=%p\n", tqpair); 2306 return -1; 2307 } 2308 2309 tqpair->group = tgroup; 2310 tqpair->state = NVME_TCP_QPAIR_STATE_INVALID; 2311 TAILQ_INSERT_TAIL(&tgroup->qpairs, tqpair, link); 2312 2313 return 0; 2314 } 2315 2316 static int 2317 spdk_nvmf_tcp_poll_group_remove(struct spdk_nvmf_transport_poll_group *group, 2318 struct spdk_nvmf_qpair *qpair) 2319 { 2320 struct spdk_nvmf_tcp_poll_group *tgroup; 2321 struct spdk_nvmf_tcp_qpair *tqpair; 2322 int rc; 2323 2324 tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group); 2325 tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair); 2326 2327 assert(tqpair->group == tgroup); 2328 2329 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "remove tqpair=%p from the tgroup=%p\n", tqpair, tgroup); 2330 if (tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_REQ) { 2331 TAILQ_REMOVE(&tgroup->await_req, tqpair, link); 2332 } else { 2333 TAILQ_REMOVE(&tgroup->qpairs, tqpair, link); 2334 } 2335 2336 rc = spdk_sock_group_remove_sock(tgroup->sock_group, tqpair->sock); 2337 if (rc != 0) { 2338 SPDK_ERRLOG("Could not remove sock from sock_group: %s (%d)\n", 2339 spdk_strerror(errno), errno); 2340 } 2341 2342 return rc; 2343 } 2344 2345 static int 2346 spdk_nvmf_tcp_req_complete(struct spdk_nvmf_request *req) 2347 { 2348 struct spdk_nvmf_tcp_transport *ttransport; 2349 struct spdk_nvmf_tcp_req *tcp_req; 2350 2351 ttransport = SPDK_CONTAINEROF(req->qpair->transport, struct spdk_nvmf_tcp_transport, transport); 2352 tcp_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_tcp_req, req); 2353 2354 spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_EXECUTED); 2355 spdk_nvmf_tcp_req_process(ttransport, tcp_req); 2356 2357 return 0; 2358 } 2359 2360 static void 2361 spdk_nvmf_tcp_close_qpair(struct spdk_nvmf_qpair *qpair) 2362 { 2363 struct spdk_nvmf_tcp_qpair *tqpair; 2364 2365 SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "Qpair: %p\n", qpair); 2366 2367 tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair); 2368 tqpair->state = NVME_TCP_QPAIR_STATE_EXITED; 2369 spdk_nvmf_tcp_qpair_destroy(tqpair); 2370 } 2371 2372 static int 2373 spdk_nvmf_tcp_poll_group_poll(struct spdk_nvmf_transport_poll_group *group) 2374 { 2375 struct spdk_nvmf_tcp_poll_group *tgroup; 2376 int rc; 2377 struct spdk_nvmf_request *req, *req_tmp; 2378 struct spdk_nvmf_tcp_req *tcp_req; 2379 struct spdk_nvmf_tcp_qpair *tqpair, *tqpair_tmp; 2380 struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF(group->transport, 2381 struct spdk_nvmf_tcp_transport, transport); 2382 2383 tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group); 2384 2385 if (spdk_unlikely(TAILQ_EMPTY(&tgroup->qpairs) && TAILQ_EMPTY(&tgroup->await_req))) { 2386 return 0; 2387 } 2388 2389 STAILQ_FOREACH_SAFE(req, &group->pending_buf_queue, buf_link, req_tmp) { 2390 tcp_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_tcp_req, req); 2391 if (spdk_nvmf_tcp_req_process(ttransport, tcp_req) == false) { 2392 break; 2393 } 2394 } 2395 2396 rc = spdk_sock_group_poll(tgroup->sock_group); 2397 if (rc < 0) { 2398 SPDK_ERRLOG("Failed to poll sock_group=%p\n", tgroup->sock_group); 2399 } 2400 2401 TAILQ_FOREACH_SAFE(tqpair, &tgroup->await_req, link, tqpair_tmp) { 2402 spdk_nvmf_tcp_sock_process(tqpair); 2403 } 2404 2405 return rc; 2406 } 2407 2408 static int 2409 spdk_nvmf_tcp_qpair_get_trid(struct spdk_nvmf_qpair *qpair, 2410 struct spdk_nvme_transport_id *trid, bool peer) 2411 { 2412 struct spdk_nvmf_tcp_qpair *tqpair; 2413 uint16_t port; 2414 2415 tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair); 2416 spdk_nvme_trid_populate_transport(trid, SPDK_NVME_TRANSPORT_TCP); 2417 2418 if (peer) { 2419 snprintf(trid->traddr, sizeof(trid->traddr), "%s", tqpair->initiator_addr); 2420 port = tqpair->initiator_port; 2421 } else { 2422 snprintf(trid->traddr, sizeof(trid->traddr), "%s", tqpair->target_addr); 2423 port = tqpair->target_port; 2424 } 2425 2426 if (spdk_sock_is_ipv4(tqpair->sock)) { 2427 trid->adrfam = SPDK_NVMF_ADRFAM_IPV4; 2428 } else if (spdk_sock_is_ipv6(tqpair->sock)) { 2429 trid->adrfam = SPDK_NVMF_ADRFAM_IPV6; 2430 } else { 2431 return -1; 2432 } 2433 2434 snprintf(trid->trsvcid, sizeof(trid->trsvcid), "%d", port); 2435 return 0; 2436 } 2437 2438 static int 2439 spdk_nvmf_tcp_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair, 2440 struct spdk_nvme_transport_id *trid) 2441 { 2442 return spdk_nvmf_tcp_qpair_get_trid(qpair, trid, 0); 2443 } 2444 2445 static int 2446 spdk_nvmf_tcp_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair, 2447 struct spdk_nvme_transport_id *trid) 2448 { 2449 return spdk_nvmf_tcp_qpair_get_trid(qpair, trid, 1); 2450 } 2451 2452 static int 2453 spdk_nvmf_tcp_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair, 2454 struct spdk_nvme_transport_id *trid) 2455 { 2456 return spdk_nvmf_tcp_qpair_get_trid(qpair, trid, 0); 2457 } 2458 2459 #define SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH 128 2460 #define SPDK_NVMF_TCP_DEFAULT_AQ_DEPTH 128 2461 #define SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR 128 2462 #define SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE 4096 2463 #define SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE 131072 2464 #define SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE 131072 2465 #define SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS 511 2466 #define SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE 32 2467 #define SPDK_NVMF_TCP_DEFAULT_SUCCESS_OPTIMIZATION true 2468 #define SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP false 2469 #define SPDK_NVMF_TCP_DEFAULT_SOCK_PRIORITY 0 2470 2471 static void 2472 spdk_nvmf_tcp_opts_init(struct spdk_nvmf_transport_opts *opts) 2473 { 2474 opts->max_queue_depth = SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH; 2475 opts->max_qpairs_per_ctrlr = SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR; 2476 opts->in_capsule_data_size = SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE; 2477 opts->max_io_size = SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE; 2478 opts->io_unit_size = SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE; 2479 opts->max_aq_depth = SPDK_NVMF_TCP_DEFAULT_AQ_DEPTH; 2480 opts->num_shared_buffers = SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS; 2481 opts->buf_cache_size = SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE; 2482 opts->c2h_success = SPDK_NVMF_TCP_DEFAULT_SUCCESS_OPTIMIZATION; 2483 opts->dif_insert_or_strip = SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP; 2484 opts->sock_priority = SPDK_NVMF_TCP_DEFAULT_SOCK_PRIORITY; 2485 } 2486 2487 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp = { 2488 .name = "TCP", 2489 .type = SPDK_NVME_TRANSPORT_TCP, 2490 .opts_init = spdk_nvmf_tcp_opts_init, 2491 .create = spdk_nvmf_tcp_create, 2492 .destroy = spdk_nvmf_tcp_destroy, 2493 2494 .listen = spdk_nvmf_tcp_listen, 2495 .stop_listen = spdk_nvmf_tcp_stop_listen, 2496 .accept = spdk_nvmf_tcp_accept, 2497 2498 .listener_discover = spdk_nvmf_tcp_discover, 2499 2500 .poll_group_create = spdk_nvmf_tcp_poll_group_create, 2501 .get_optimal_poll_group = spdk_nvmf_tcp_get_optimal_poll_group, 2502 .poll_group_destroy = spdk_nvmf_tcp_poll_group_destroy, 2503 .poll_group_add = spdk_nvmf_tcp_poll_group_add, 2504 .poll_group_remove = spdk_nvmf_tcp_poll_group_remove, 2505 .poll_group_poll = spdk_nvmf_tcp_poll_group_poll, 2506 2507 .req_free = spdk_nvmf_tcp_req_free, 2508 .req_complete = spdk_nvmf_tcp_req_complete, 2509 2510 .qpair_fini = spdk_nvmf_tcp_close_qpair, 2511 .qpair_get_local_trid = spdk_nvmf_tcp_qpair_get_local_trid, 2512 .qpair_get_peer_trid = spdk_nvmf_tcp_qpair_get_peer_trid, 2513 .qpair_get_listen_trid = spdk_nvmf_tcp_qpair_get_listen_trid, 2514 }; 2515 2516 SPDK_NVMF_TRANSPORT_REGISTER(tcp, &spdk_nvmf_transport_tcp); 2517 SPDK_LOG_REGISTER_COMPONENT("nvmf_tcp", SPDK_LOG_NVMF_TCP) 2518