/*-
 * BSD LICENSE
 *
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include <infiniband/verbs.h>
#include <rdma/rdma_cma.h>
#include <rdma/rdma_verbs.h>

#include "nvmf_internal.h"
#include "transport.h"

#include "spdk/config.h"
#include "spdk/assert.h"
#include "spdk/thread.h"
#include "spdk/nvmf.h"
#include "spdk/nvmf_spec.h"
#include "spdk/string.h"
#include "spdk/trace.h"
#include "spdk/util.h"

#include "spdk_internal/log.h"

/*
 * RDMA Connection Resource Defaults
 */
#define NVMF_DEFAULT_TX_SGE		1
#define NVMF_DEFAULT_RX_SGE		2
#define NVMF_DEFAULT_DATA_SGE		16

/* The RDMA completion queue size */
#define NVMF_RDMA_CQ_SIZE	4096

/* The AIO backend requires block-size-aligned data buffers;
 * an extra 4 KiB of alignment should work for most devices.
 */
#define SHIFT_4KB			12
#define NVMF_DATA_BUFFER_ALIGNMENT	(1 << SHIFT_4KB)
#define NVMF_DATA_BUFFER_MASK		(NVMF_DATA_BUFFER_ALIGNMENT - 1)
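/* Worked example (illustration only, not used directly by these macros):
 * buffers drawn from the data pool are later aligned up to this 4 KiB boundary
 * with the usual mask arithmetic, e.g.
 *
 *	aligned = (void *)(((uintptr_t)buf + NVMF_DATA_BUFFER_MASK) & ~NVMF_DATA_BUFFER_MASK);
 *
 * so a raw pointer such as 0x1234 rounds up to 0x2000. This mirrors the
 * expression used in spdk_nvmf_rdma_request_fill_iovs() below.
 */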
enum spdk_nvmf_rdma_request_state {
	/* The request is not currently in use */
	RDMA_REQUEST_STATE_FREE = 0,

	/* Initial state when request first received */
	RDMA_REQUEST_STATE_NEW,

	/* The request is queued until a data buffer is available. */
	RDMA_REQUEST_STATE_NEED_BUFFER,

	/* The request is waiting on RDMA queue depth availability
	 * to transfer data between the host and the controller.
	 */
	RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING,

	/* The request is currently transferring data from the host to the controller. */
	RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER,

	/* The request is ready to execute at the block device */
	RDMA_REQUEST_STATE_READY_TO_EXECUTE,

	/* The request is currently executing at the block device */
	RDMA_REQUEST_STATE_EXECUTING,

	/* The request finished executing at the block device */
	RDMA_REQUEST_STATE_EXECUTED,

	/* The request is ready to send a completion */
	RDMA_REQUEST_STATE_READY_TO_COMPLETE,

	/* The request is currently transferring data from the controller to the host. */
	RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST,

	/* The request currently has an outstanding completion without an
	 * associated data transfer.
	 */
	RDMA_REQUEST_STATE_COMPLETING,

	/* The request completed and can be marked free. */
	RDMA_REQUEST_STATE_COMPLETED,

	/* Terminator */
	RDMA_REQUEST_NUM_STATES,
};

#define OBJECT_NVMF_RDMA_IO	0x40

#define TRACE_GROUP_NVMF_RDMA	0x4
#define TRACE_RDMA_REQUEST_STATE_NEW					SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x0)
#define TRACE_RDMA_REQUEST_STATE_NEED_BUFFER				SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x1)
#define TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING			SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x2)
#define TRACE_RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER	SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x3)
#define TRACE_RDMA_REQUEST_STATE_READY_TO_EXECUTE			SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x4)
#define TRACE_RDMA_REQUEST_STATE_EXECUTING				SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x5)
#define TRACE_RDMA_REQUEST_STATE_EXECUTED				SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x6)
#define TRACE_RDMA_REQUEST_STATE_READY_TO_COMPLETE			SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x7)
#define TRACE_RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST	SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x8)
#define TRACE_RDMA_REQUEST_STATE_COMPLETING				SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x9)
#define TRACE_RDMA_REQUEST_STATE_COMPLETED				SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xA)
#define TRACE_RDMA_QP_CREATE						SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xB)
#define TRACE_RDMA_IBV_ASYNC_EVENT					SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xC)
#define TRACE_RDMA_CM_ASYNC_EVENT					SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xD)
#define TRACE_RDMA_QP_STATE_CHANGE					SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xE)
#define TRACE_RDMA_QP_DISCONNECT					SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xF)
#define TRACE_RDMA_QP_DESTROY						SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x10)
SPDK_TRACE_REGISTER_FN(nvmf_trace)
{
	spdk_trace_register_object(OBJECT_NVMF_RDMA_IO, 'r');
	spdk_trace_register_description("RDMA_REQ_NEW", "",
					TRACE_RDMA_REQUEST_STATE_NEW,
					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 1, 1, "cmid: ");
	spdk_trace_register_description("RDMA_REQ_NEED_BUFFER", "",
					TRACE_RDMA_REQUEST_STATE_NEED_BUFFER,
					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid: ");
	spdk_trace_register_description("RDMA_REQ_TX_PENDING_H_TO_C", "",
					TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING,
					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid: ");
	spdk_trace_register_description("RDMA_REQ_TX_H_TO_C", "",
					TRACE_RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER,
					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid: ");
	spdk_trace_register_description("RDMA_REQ_RDY_TO_EXECUTE", "",
					TRACE_RDMA_REQUEST_STATE_READY_TO_EXECUTE,
					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid: ");
	spdk_trace_register_description("RDMA_REQ_EXECUTING", "",
					TRACE_RDMA_REQUEST_STATE_EXECUTING,
					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid: ");
	spdk_trace_register_description("RDMA_REQ_EXECUTED", "",
					TRACE_RDMA_REQUEST_STATE_EXECUTED,
					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid: ");
	spdk_trace_register_description("RDMA_REQ_RDY_TO_COMPLETE", "",
					TRACE_RDMA_REQUEST_STATE_READY_TO_COMPLETE,
					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid: ");
	spdk_trace_register_description("RDMA_REQ_COMPLETING_CONTROLLER_TO_HOST", "",
					TRACE_RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST,
					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid: ");
	spdk_trace_register_description("RDMA_REQ_COMPLETING_INCAPSULE", "",
					TRACE_RDMA_REQUEST_STATE_COMPLETING,
					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid: ");
	spdk_trace_register_description("RDMA_REQ_COMPLETED", "",
					TRACE_RDMA_REQUEST_STATE_COMPLETED,
					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid: ");

	spdk_trace_register_description("RDMA_QP_CREATE", "", TRACE_RDMA_QP_CREATE,
					OWNER_NONE, OBJECT_NONE, 0, 0, "");
	spdk_trace_register_description("RDMA_IBV_ASYNC_EVENT", "", TRACE_RDMA_IBV_ASYNC_EVENT,
					OWNER_NONE, OBJECT_NONE, 0, 0, "type: ");
	spdk_trace_register_description("RDMA_CM_ASYNC_EVENT", "", TRACE_RDMA_CM_ASYNC_EVENT,
					OWNER_NONE, OBJECT_NONE, 0, 0, "type: ");
	spdk_trace_register_description("RDMA_QP_STATE_CHANGE", "", TRACE_RDMA_QP_STATE_CHANGE,
					OWNER_NONE, OBJECT_NONE, 0, 1, "state: ");
	spdk_trace_register_description("RDMA_QP_DISCONNECT", "", TRACE_RDMA_QP_DISCONNECT,
					OWNER_NONE, OBJECT_NONE, 0, 0, "");
	spdk_trace_register_description("RDMA_QP_DESTROY", "", TRACE_RDMA_QP_DESTROY,
					OWNER_NONE, OBJECT_NONE, 0, 0, "");
}
/* This structure holds commands as they are received off the wire.
 * It must be dynamically paired with a full request object
 * (spdk_nvmf_rdma_request) to service a request. It is separate
 * from the request because RDMA does not appear to order
 * completions, so occasionally we'll get a new incoming
 * command when there aren't any free request objects.
 */
struct spdk_nvmf_rdma_recv {
	struct ibv_recv_wr			wr;
	struct ibv_sge				sgl[NVMF_DEFAULT_RX_SGE];

	struct spdk_nvmf_rdma_qpair		*qpair;

	/* In-capsule data buffer */
	uint8_t					*buf;

	TAILQ_ENTRY(spdk_nvmf_rdma_recv)	link;
};

struct spdk_nvmf_rdma_request {
	struct spdk_nvmf_request		req;
	bool					data_from_pool;

	enum spdk_nvmf_rdma_request_state	state;

	struct spdk_nvmf_rdma_recv		*recv;

	struct {
		struct ibv_send_wr		wr;
		struct ibv_sge			sgl[NVMF_DEFAULT_TX_SGE];
	} rsp;

	struct {
		struct ibv_send_wr		wr;
		struct ibv_sge			sgl[SPDK_NVMF_MAX_SGL_ENTRIES];
		void				*buffers[SPDK_NVMF_MAX_SGL_ENTRIES];
	} data;

	TAILQ_ENTRY(spdk_nvmf_rdma_request)	link;
	TAILQ_ENTRY(spdk_nvmf_rdma_request)	state_link;
};
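/* How the work requests above are used (summary of logic later in this file):
 * rsp.wr carries the RDMA SEND of the completion capsule, while data.wr is
 * reused for either an RDMA READ (host-to-controller transfer) or an RDMA
 * WRITE (controller-to-host transfer). For a controller-to-host transfer the
 * WRITE is chained ahead of the SEND by pointing data.wr.next at rsp.wr; see
 * request_transfer_in() and request_transfer_out().
 */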
struct spdk_nvmf_rdma_qpair {
	struct spdk_nvmf_qpair			qpair;

	struct spdk_nvmf_rdma_port		*port;
	struct spdk_nvmf_rdma_poller		*poller;

	struct rdma_cm_id			*cm_id;
	struct rdma_cm_id			*listen_id;

	/* The maximum number of I/O outstanding on this connection at one time */
	uint16_t				max_queue_depth;

	/* The maximum number of active RDMA READ and WRITE operations at one time */
	uint16_t				max_rw_depth;

	/* Receives that are waiting for a request object */
	TAILQ_HEAD(, spdk_nvmf_rdma_recv)	incoming_queue;

	/* Queues to track the requests in all states */
	TAILQ_HEAD(, spdk_nvmf_rdma_request)	state_queue[RDMA_REQUEST_NUM_STATES];

	/* Number of requests in each state */
	uint32_t				state_cntr[RDMA_REQUEST_NUM_STATES];

	int					max_sge;

	/* Array of size "max_queue_depth" containing RDMA requests. */
	struct spdk_nvmf_rdma_request		*reqs;

	/* Array of size "max_queue_depth" containing RDMA recvs. */
	struct spdk_nvmf_rdma_recv		*recvs;

	/* Array of size "max_queue_depth" containing 64 byte capsules
	 * used for receive.
	 */
	union nvmf_h2c_msg			*cmds;
	struct ibv_mr				*cmds_mr;

	/* Array of size "max_queue_depth" containing 16 byte completions
	 * to be sent back to the user.
	 */
	union nvmf_c2h_msg			*cpls;
	struct ibv_mr				*cpls_mr;

	/* Array of size "max_queue_depth * InCapsuleDataSize" containing
	 * buffers to be used for in capsule data.
	 */
	void					*bufs;
	struct ibv_mr				*bufs_mr;

	TAILQ_ENTRY(spdk_nvmf_rdma_qpair)	link;

	/* Mgmt channel */
	struct spdk_io_channel			*mgmt_channel;
	struct spdk_nvmf_rdma_mgmt_channel	*ch;

	/* IBV queue pair attributes: they are used to manage
	 * qp state and recover from errors.
	 */
	struct ibv_qp_init_attr			ibv_init_attr;
	struct ibv_qp_attr			ibv_attr;

	bool					qpair_disconnected;

	/* Reference counter for how many unprocessed messages
	 * from other threads are currently outstanding. The
	 * qpair cannot be destroyed until this is 0. This is
	 * atomically incremented from any thread, but only
	 * decremented and read from the thread that owns this
	 * qpair.
	 */
	uint32_t				refcnt;
};

struct spdk_nvmf_rdma_poller {
	struct spdk_nvmf_rdma_device		*device;
	struct spdk_nvmf_rdma_poll_group	*group;

	struct ibv_cq				*cq;

	TAILQ_HEAD(, spdk_nvmf_rdma_qpair)	qpairs;

	TAILQ_ENTRY(spdk_nvmf_rdma_poller)	link;
};

struct spdk_nvmf_rdma_poll_group {
	struct spdk_nvmf_transport_poll_group	group;

	TAILQ_HEAD(, spdk_nvmf_rdma_poller)	pollers;
};

/* Assuming rdma_cm uses just one protection domain per ibv_context. */
struct spdk_nvmf_rdma_device {
	struct ibv_device_attr			attr;
	struct ibv_context			*context;

	struct spdk_mem_map			*map;
	struct ibv_pd				*pd;

	TAILQ_ENTRY(spdk_nvmf_rdma_device)	link;
};

struct spdk_nvmf_rdma_port {
	struct spdk_nvme_transport_id		trid;
	struct rdma_cm_id			*id;
	struct spdk_nvmf_rdma_device		*device;
	uint32_t				ref;
	TAILQ_ENTRY(spdk_nvmf_rdma_port)	link;
};

struct spdk_nvmf_rdma_transport {
	struct spdk_nvmf_transport		transport;

	struct rdma_event_channel		*event_channel;

	struct spdk_mempool			*data_buf_pool;

	pthread_mutex_t				lock;

	/* fields used to poll RDMA/IB events */
	nfds_t					npoll_fds;
	struct pollfd				*poll_fds;

	TAILQ_HEAD(, spdk_nvmf_rdma_device)	devices;
	TAILQ_HEAD(, spdk_nvmf_rdma_port)	ports;
};

struct spdk_nvmf_rdma_mgmt_channel {
	/* Requests that are waiting to obtain a data buffer */
	TAILQ_HEAD(, spdk_nvmf_rdma_request)	pending_data_buf_queue;
};

static inline void
spdk_nvmf_rdma_qpair_inc_refcnt(struct spdk_nvmf_rdma_qpair *rqpair)
{
	__sync_fetch_and_add(&rqpair->refcnt, 1);
}

static inline uint32_t
spdk_nvmf_rdma_qpair_dec_refcnt(struct spdk_nvmf_rdma_qpair *rqpair)
{
	uint32_t old_refcnt, new_refcnt;

	do {
		old_refcnt = rqpair->refcnt;
		assert(old_refcnt > 0);
		new_refcnt = old_refcnt - 1;
	} while (__sync_bool_compare_and_swap(&rqpair->refcnt, old_refcnt, new_refcnt) == false);

	return new_refcnt;
}

/* API to IBV QueuePair */
static const char *str_ibv_qp_state[] = {
	"IBV_QPS_RESET",
	"IBV_QPS_INIT",
	"IBV_QPS_RTR",
	"IBV_QPS_RTS",
	"IBV_QPS_SQD",
	"IBV_QPS_SQE",
	"IBV_QPS_ERR"
};

static enum ibv_qp_state
spdk_nvmf_rdma_update_ibv_state(struct spdk_nvmf_rdma_qpair *rqpair) {
	enum ibv_qp_state old_state, new_state;
	int rc;

	/* All the attributes needed for recovery */
	static int spdk_nvmf_ibv_attr_mask =
		IBV_QP_STATE |
		IBV_QP_PKEY_INDEX |
		IBV_QP_PORT |
		IBV_QP_ACCESS_FLAGS |
		IBV_QP_AV |
		IBV_QP_PATH_MTU |
		IBV_QP_DEST_QPN |
		IBV_QP_RQ_PSN |
		IBV_QP_MAX_DEST_RD_ATOMIC |
		IBV_QP_MIN_RNR_TIMER |
		IBV_QP_SQ_PSN |
		IBV_QP_TIMEOUT |
		IBV_QP_RETRY_CNT |
		IBV_QP_RNR_RETRY |
		IBV_QP_MAX_QP_RD_ATOMIC;

	old_state = rqpair->ibv_attr.qp_state;
	rc = ibv_query_qp(rqpair->cm_id->qp, &rqpair->ibv_attr,
			  spdk_nvmf_ibv_attr_mask, &rqpair->ibv_init_attr);

	if (rc) {
		SPDK_ERRLOG("Failed to get updated RDMA queue pair state!\n");
		assert(false);
	}

	new_state = rqpair->ibv_attr.qp_state;
	if (old_state != new_state) {
		spdk_trace_record(TRACE_RDMA_QP_STATE_CHANGE, 0, 0,
				  (uintptr_t)rqpair->cm_id, new_state);
	}
	return new_state;
}
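/* Usage sketch (illustrative only; this exact call does not appear at this
 * point in the file): a caller that wants all outstanding work requests
 * flushed, for example after a fatal completion error, could move the queue
 * pair into the error state with the helper defined below:
 *
 *	spdk_nvmf_rdma_set_ibv_state(rqpair, IBV_QPS_ERR);
 *
 * and later walk it back through IBV_QPS_RESET, IBV_QPS_INIT, IBV_QPS_RTR and
 * IBV_QPS_RTS, which is the standard RC queue pair recovery sequence that the
 * per-state attribute masks in attr_mask_rc below are written for.
 */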
static int
spdk_nvmf_rdma_set_ibv_state(struct spdk_nvmf_rdma_qpair *rqpair,
			     enum ibv_qp_state new_state)
{
	int rc;
	enum ibv_qp_state state;
	static int attr_mask_rc[] = {
		[IBV_QPS_RESET] = IBV_QP_STATE,
		[IBV_QPS_INIT] = (IBV_QP_STATE |
				  IBV_QP_PKEY_INDEX |
				  IBV_QP_PORT |
				  IBV_QP_ACCESS_FLAGS),
		[IBV_QPS_RTR] = (IBV_QP_STATE |
				 IBV_QP_AV |
				 IBV_QP_PATH_MTU |
				 IBV_QP_DEST_QPN |
				 IBV_QP_RQ_PSN |
				 IBV_QP_MAX_DEST_RD_ATOMIC |
				 IBV_QP_MIN_RNR_TIMER),
		[IBV_QPS_RTS] = (IBV_QP_STATE |
				 IBV_QP_SQ_PSN |
				 IBV_QP_TIMEOUT |
				 IBV_QP_RETRY_CNT |
				 IBV_QP_RNR_RETRY |
				 IBV_QP_MAX_QP_RD_ATOMIC),
		[IBV_QPS_SQD] = IBV_QP_STATE,
		[IBV_QPS_SQE] = IBV_QP_STATE,
		[IBV_QPS_ERR] = IBV_QP_STATE,
	};

	switch (new_state) {
	case IBV_QPS_RESET:
	case IBV_QPS_INIT:
	case IBV_QPS_RTR:
	case IBV_QPS_RTS:
	case IBV_QPS_SQD:
	case IBV_QPS_SQE:
	case IBV_QPS_ERR:
		break;
	default:
		SPDK_ERRLOG("QP#%d: bad state requested: %u\n",
			    rqpair->qpair.qid, new_state);
		return -1;
	}
	rqpair->ibv_attr.cur_qp_state = rqpair->ibv_attr.qp_state;
	rqpair->ibv_attr.qp_state = new_state;
	rqpair->ibv_attr.ah_attr.port_num = rqpair->ibv_attr.port_num;

	rc = ibv_modify_qp(rqpair->cm_id->qp, &rqpair->ibv_attr,
			   attr_mask_rc[new_state]);

	if (rc) {
		SPDK_ERRLOG("QP#%d: failed to set state to: %s, %d (%s)\n",
			    rqpair->qpair.qid, str_ibv_qp_state[new_state], errno, strerror(errno));
		return rc;
	}

	state = spdk_nvmf_rdma_update_ibv_state(rqpair);

	if (state != new_state) {
		SPDK_ERRLOG("QP#%d: expected state: %s, actual state: %s\n",
			    rqpair->qpair.qid, str_ibv_qp_state[new_state],
			    str_ibv_qp_state[state]);
		return -1;
	}
	SPDK_NOTICELOG("IBV QP#%u changed to: %s\n", rqpair->qpair.qid,
		       str_ibv_qp_state[state]);
	return 0;
}

static void
spdk_nvmf_rdma_request_set_state(struct spdk_nvmf_rdma_request *rdma_req,
				 enum spdk_nvmf_rdma_request_state state)
{
	struct spdk_nvmf_qpair		*qpair;
	struct spdk_nvmf_rdma_qpair	*rqpair;

	qpair = rdma_req->req.qpair;
	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);

	TAILQ_REMOVE(&rqpair->state_queue[rdma_req->state], rdma_req, state_link);
	rqpair->state_cntr[rdma_req->state]--;

	rdma_req->state = state;

	TAILQ_INSERT_TAIL(&rqpair->state_queue[rdma_req->state], rdma_req, state_link);
	rqpair->state_cntr[rdma_req->state]++;
}

static int
spdk_nvmf_rdma_mgmt_channel_create(void *io_device, void *ctx_buf)
{
	struct spdk_nvmf_rdma_mgmt_channel *ch = ctx_buf;

	TAILQ_INIT(&ch->pending_data_buf_queue);
	return 0;
}

static void
spdk_nvmf_rdma_mgmt_channel_destroy(void *io_device, void *ctx_buf)
{
	struct spdk_nvmf_rdma_mgmt_channel *ch = ctx_buf;

	if (!TAILQ_EMPTY(&ch->pending_data_buf_queue)) {
		SPDK_ERRLOG("Pending I/O list wasn't empty on channel destruction\n");
	}
}

static int
spdk_nvmf_rdma_cur_rw_depth(struct spdk_nvmf_rdma_qpair *rqpair)
{
	return rqpair->state_cntr[RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER] +
	       rqpair->state_cntr[RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST];
}

static int
spdk_nvmf_rdma_cur_queue_depth(struct spdk_nvmf_rdma_qpair *rqpair)
{
	return rqpair->max_queue_depth -
	       rqpair->state_cntr[RDMA_REQUEST_STATE_FREE];
}

static void
spdk_nvmf_rdma_qpair_destroy(struct spdk_nvmf_rdma_qpair *rqpair)
{
	spdk_trace_record(TRACE_RDMA_QP_DESTROY, 0, 0, (uintptr_t)rqpair->cm_id, 0);
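	/* Teardown is deferred while work is outstanding: if any request is not
	 * in the FREE state, or other threads still hold references
	 * (refcnt > 0), the qpair is only flagged as disconnected here and is
	 * freed on a later call.
	 */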
	if (spdk_nvmf_rdma_cur_queue_depth(rqpair)) {
		rqpair->qpair_disconnected = true;
		return;
	}

	if (rqpair->refcnt > 0) {
		return;
	}

	if (rqpair->poller) {
		TAILQ_REMOVE(&rqpair->poller->qpairs, rqpair, link);
	}

	if (rqpair->cmds_mr) {
		ibv_dereg_mr(rqpair->cmds_mr);
	}

	if (rqpair->cpls_mr) {
		ibv_dereg_mr(rqpair->cpls_mr);
	}

	if (rqpair->bufs_mr) {
		ibv_dereg_mr(rqpair->bufs_mr);
	}

	if (rqpair->cm_id) {
		rdma_destroy_qp(rqpair->cm_id);
		rdma_destroy_id(rqpair->cm_id);
	}

	if (rqpair->mgmt_channel) {
		spdk_put_io_channel(rqpair->mgmt_channel);
	}

	/* Free all memory */
	spdk_dma_free(rqpair->cmds);
	spdk_dma_free(rqpair->cpls);
	spdk_dma_free(rqpair->bufs);
	free(rqpair->reqs);
	free(rqpair->recvs);
	free(rqpair);
}

static int
spdk_nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
{
	struct spdk_nvmf_rdma_transport	*rtransport;
	struct spdk_nvmf_rdma_qpair	*rqpair;
	int				rc, i;
	struct spdk_nvmf_rdma_recv	*rdma_recv;
	struct spdk_nvmf_rdma_request	*rdma_req;
	struct spdk_nvmf_transport	*transport;

	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
	rtransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_rdma_transport, transport);
	transport = &rtransport->transport;

	memset(&rqpair->ibv_init_attr, 0, sizeof(struct ibv_qp_init_attr));
	rqpair->ibv_init_attr.qp_context	= rqpair;
	rqpair->ibv_init_attr.qp_type		= IBV_QPT_RC;
	rqpair->ibv_init_attr.send_cq		= rqpair->poller->cq;
	rqpair->ibv_init_attr.recv_cq		= rqpair->poller->cq;
	rqpair->ibv_init_attr.cap.max_send_wr	= rqpair->max_queue_depth * 2; /* SEND, READ, and WRITE operations */
	rqpair->ibv_init_attr.cap.max_recv_wr	= rqpair->max_queue_depth; /* RECV operations */
	rqpair->ibv_init_attr.cap.max_send_sge	= rqpair->max_sge;
	rqpair->ibv_init_attr.cap.max_recv_sge	= NVMF_DEFAULT_RX_SGE;

	rc = rdma_create_qp(rqpair->cm_id, rqpair->port->device->pd, &rqpair->ibv_init_attr);
	if (rc) {
		SPDK_ERRLOG("rdma_create_qp failed: errno %d: %s\n", errno, spdk_strerror(errno));
		rdma_destroy_id(rqpair->cm_id);
		rqpair->cm_id = NULL;
		spdk_nvmf_rdma_qpair_destroy(rqpair);
		return -1;
	}

	spdk_trace_record(TRACE_RDMA_QP_CREATE, 0, 0, (uintptr_t)rqpair->cm_id, 0);
	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "New RDMA Connection: %p\n", qpair);

	rqpair->reqs = calloc(rqpair->max_queue_depth, sizeof(*rqpair->reqs));
	rqpair->recvs = calloc(rqpair->max_queue_depth, sizeof(*rqpair->recvs));
	rqpair->cmds = spdk_dma_zmalloc(rqpair->max_queue_depth * sizeof(*rqpair->cmds),
					0x1000, NULL);
	rqpair->cpls = spdk_dma_zmalloc(rqpair->max_queue_depth * sizeof(*rqpair->cpls),
					0x1000, NULL);

	if (transport->opts.in_capsule_data_size > 0) {
		rqpair->bufs = spdk_dma_zmalloc(rqpair->max_queue_depth *
						transport->opts.in_capsule_data_size,
						0x1000, NULL);
	}

	if (!rqpair->reqs || !rqpair->recvs || !rqpair->cmds ||
	    !rqpair->cpls || (transport->opts.in_capsule_data_size && !rqpair->bufs)) {
		SPDK_ERRLOG("Unable to allocate sufficient memory for RDMA queue.\n");
		spdk_nvmf_rdma_qpair_destroy(rqpair);
		return -1;
	}

	rqpair->cmds_mr = ibv_reg_mr(rqpair->cm_id->pd, rqpair->cmds,
				     rqpair->max_queue_depth * sizeof(*rqpair->cmds),
				     IBV_ACCESS_LOCAL_WRITE);
	rqpair->cpls_mr = ibv_reg_mr(rqpair->cm_id->pd, rqpair->cpls,
				     rqpair->max_queue_depth * sizeof(*rqpair->cpls),
				     0);
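	/* Access flags above: the command array needs IBV_ACCESS_LOCAL_WRITE so
	 * the HCA can scatter incoming capsules into it on RECV completion; the
	 * completion array is only read locally as the source of SENDs, so it
	 * needs no access flags at all.
	 */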
	if (transport->opts.in_capsule_data_size) {
		rqpair->bufs_mr = ibv_reg_mr(rqpair->cm_id->pd, rqpair->bufs,
					     rqpair->max_queue_depth *
					     transport->opts.in_capsule_data_size,
					     IBV_ACCESS_LOCAL_WRITE |
					     IBV_ACCESS_REMOTE_WRITE);
	}

	if (!rqpair->cmds_mr || !rqpair->cpls_mr || (transport->opts.in_capsule_data_size &&
			!rqpair->bufs_mr)) {
		SPDK_ERRLOG("Unable to register required memory for RDMA queue.\n");
		spdk_nvmf_rdma_qpair_destroy(rqpair);
		return -1;
	}
	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Command Array: %p Length: %lx LKey: %x\n",
		      rqpair->cmds, rqpair->max_queue_depth * sizeof(*rqpair->cmds), rqpair->cmds_mr->lkey);
	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Completion Array: %p Length: %lx LKey: %x\n",
		      rqpair->cpls, rqpair->max_queue_depth * sizeof(*rqpair->cpls), rqpair->cpls_mr->lkey);
	if (rqpair->bufs && rqpair->bufs_mr) {
		SPDK_DEBUGLOG(SPDK_LOG_RDMA, "In Capsule Data Array: %p Length: %x LKey: %x\n",
			      rqpair->bufs, rqpair->max_queue_depth *
			      transport->opts.in_capsule_data_size, rqpair->bufs_mr->lkey);
	}

	/* Initialise request state queues and counters of the queue pair */
	for (i = RDMA_REQUEST_STATE_FREE; i < RDMA_REQUEST_NUM_STATES; i++) {
		TAILQ_INIT(&rqpair->state_queue[i]);
		rqpair->state_cntr[i] = 0;
	}

	for (i = 0; i < rqpair->max_queue_depth; i++) {
		struct ibv_recv_wr *bad_wr = NULL;

		rdma_recv = &rqpair->recvs[i];
		rdma_recv->qpair = rqpair;

		/* Set up memory to receive commands */
		if (rqpair->bufs) {
			rdma_recv->buf = (void *)((uintptr_t)rqpair->bufs + (i *
						  transport->opts.in_capsule_data_size));
		}

		rdma_recv->sgl[0].addr = (uintptr_t)&rqpair->cmds[i];
		rdma_recv->sgl[0].length = sizeof(rqpair->cmds[i]);
		rdma_recv->sgl[0].lkey = rqpair->cmds_mr->lkey;
		rdma_recv->wr.num_sge = 1;

		if (rdma_recv->buf && rqpair->bufs_mr) {
			rdma_recv->sgl[1].addr = (uintptr_t)rdma_recv->buf;
			rdma_recv->sgl[1].length = transport->opts.in_capsule_data_size;
			rdma_recv->sgl[1].lkey = rqpair->bufs_mr->lkey;
			rdma_recv->wr.num_sge++;
		}

		rdma_recv->wr.wr_id = (uintptr_t)rdma_recv;
		rdma_recv->wr.sg_list = rdma_recv->sgl;

		rc = ibv_post_recv(rqpair->cm_id->qp, &rdma_recv->wr, &bad_wr);
		if (rc) {
			SPDK_ERRLOG("Unable to post capsule for RDMA RECV\n");
			spdk_nvmf_rdma_qpair_destroy(rqpair);
			return -1;
		}
	}

	for (i = 0; i < rqpair->max_queue_depth; i++) {
		rdma_req = &rqpair->reqs[i];

		rdma_req->req.qpair = &rqpair->qpair;
		rdma_req->req.cmd = NULL;

		/* Set up memory to send responses */
		rdma_req->req.rsp = &rqpair->cpls[i];

		rdma_req->rsp.sgl[0].addr = (uintptr_t)&rqpair->cpls[i];
		rdma_req->rsp.sgl[0].length = sizeof(rqpair->cpls[i]);
		rdma_req->rsp.sgl[0].lkey = rqpair->cpls_mr->lkey;

		rdma_req->rsp.wr.wr_id = (uintptr_t)rdma_req;
		rdma_req->rsp.wr.next = NULL;
		rdma_req->rsp.wr.opcode = IBV_WR_SEND;
		rdma_req->rsp.wr.send_flags = IBV_SEND_SIGNALED;
		rdma_req->rsp.wr.sg_list = rdma_req->rsp.sgl;
		rdma_req->rsp.wr.num_sge = SPDK_COUNTOF(rdma_req->rsp.sgl);

		/* Set up memory for data buffers */
		rdma_req->data.wr.wr_id = (uint64_t)rdma_req;
		rdma_req->data.wr.next = NULL;
		rdma_req->data.wr.send_flags = IBV_SEND_SIGNALED;
		rdma_req->data.wr.sg_list = rdma_req->data.sgl;
		rdma_req->data.wr.num_sge = SPDK_COUNTOF(rdma_req->data.sgl);

		/* Initialize
request state to FREE */ 762 rdma_req->state = RDMA_REQUEST_STATE_FREE; 763 TAILQ_INSERT_TAIL(&rqpair->state_queue[rdma_req->state], rdma_req, state_link); 764 rqpair->state_cntr[rdma_req->state]++; 765 } 766 767 return 0; 768 } 769 770 static int 771 request_transfer_in(struct spdk_nvmf_request *req) 772 { 773 int rc; 774 struct spdk_nvmf_rdma_request *rdma_req; 775 struct spdk_nvmf_qpair *qpair; 776 struct spdk_nvmf_rdma_qpair *rqpair; 777 struct ibv_send_wr *bad_wr = NULL; 778 779 qpair = req->qpair; 780 rdma_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_rdma_request, req); 781 rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair); 782 783 assert(req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER); 784 785 SPDK_DEBUGLOG(SPDK_LOG_RDMA, "RDMA READ POSTED. Request: %p Connection: %p\n", req, qpair); 786 787 rdma_req->data.wr.opcode = IBV_WR_RDMA_READ; 788 rdma_req->data.wr.next = NULL; 789 rc = ibv_post_send(rqpair->cm_id->qp, &rdma_req->data.wr, &bad_wr); 790 if (rc) { 791 SPDK_ERRLOG("Unable to transfer data from host to target\n"); 792 return -1; 793 } 794 return 0; 795 } 796 797 static int 798 request_transfer_out(struct spdk_nvmf_request *req, int *data_posted) 799 { 800 int rc; 801 struct spdk_nvmf_rdma_request *rdma_req; 802 struct spdk_nvmf_qpair *qpair; 803 struct spdk_nvmf_rdma_qpair *rqpair; 804 struct spdk_nvme_cpl *rsp; 805 struct ibv_recv_wr *bad_recv_wr = NULL; 806 struct ibv_send_wr *send_wr, *bad_send_wr = NULL; 807 808 *data_posted = 0; 809 qpair = req->qpair; 810 rsp = &req->rsp->nvme_cpl; 811 rdma_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_rdma_request, req); 812 rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair); 813 814 /* Advance our sq_head pointer */ 815 if (qpair->sq_head == qpair->sq_head_max) { 816 qpair->sq_head = 0; 817 } else { 818 qpair->sq_head++; 819 } 820 rsp->sqhd = qpair->sq_head; 821 822 /* Post the capsule to the recv buffer */ 823 assert(rdma_req->recv != NULL); 824 SPDK_DEBUGLOG(SPDK_LOG_RDMA, "RDMA RECV POSTED. Recv: %p Connection: %p\n", rdma_req->recv, 825 rqpair); 826 rc = ibv_post_recv(rqpair->cm_id->qp, &rdma_req->recv->wr, &bad_recv_wr); 827 if (rc) { 828 SPDK_ERRLOG("Unable to re-post rx descriptor\n"); 829 return rc; 830 } 831 rdma_req->recv = NULL; 832 833 /* Build the response which consists of an optional 834 * RDMA WRITE to transfer data, plus an RDMA SEND 835 * containing the response. 836 */ 837 send_wr = &rdma_req->rsp.wr; 838 839 if (rsp->status.sc == SPDK_NVME_SC_SUCCESS && 840 req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) { 841 SPDK_DEBUGLOG(SPDK_LOG_RDMA, "RDMA WRITE POSTED. Request: %p Connection: %p\n", req, qpair); 842 843 rdma_req->data.wr.opcode = IBV_WR_RDMA_WRITE; 844 845 rdma_req->data.wr.next = send_wr; 846 *data_posted = 1; 847 send_wr = &rdma_req->data.wr; 848 } 849 850 SPDK_DEBUGLOG(SPDK_LOG_RDMA, "RDMA SEND POSTED. 
Request: %p Connection: %p\n", req, qpair); 851 852 /* Send the completion */ 853 rc = ibv_post_send(rqpair->cm_id->qp, send_wr, &bad_send_wr); 854 if (rc) { 855 SPDK_ERRLOG("Unable to send response capsule\n"); 856 } 857 858 return rc; 859 } 860 861 static int 862 spdk_nvmf_rdma_event_accept(struct rdma_cm_id *id, struct spdk_nvmf_rdma_qpair *rqpair) 863 { 864 struct spdk_nvmf_rdma_accept_private_data accept_data; 865 struct rdma_conn_param ctrlr_event_data = {}; 866 int rc; 867 868 accept_data.recfmt = 0; 869 accept_data.crqsize = rqpair->max_queue_depth; 870 871 ctrlr_event_data.private_data = &accept_data; 872 ctrlr_event_data.private_data_len = sizeof(accept_data); 873 if (id->ps == RDMA_PS_TCP) { 874 ctrlr_event_data.responder_resources = 0; /* We accept 0 reads from the host */ 875 ctrlr_event_data.initiator_depth = rqpair->max_rw_depth; 876 } 877 878 rc = rdma_accept(id, &ctrlr_event_data); 879 if (rc) { 880 SPDK_ERRLOG("Error %d on rdma_accept\n", errno); 881 } else { 882 SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Sent back the accept\n"); 883 } 884 885 return rc; 886 } 887 888 static void 889 spdk_nvmf_rdma_event_reject(struct rdma_cm_id *id, enum spdk_nvmf_rdma_transport_error error) 890 { 891 struct spdk_nvmf_rdma_reject_private_data rej_data; 892 893 rej_data.recfmt = 0; 894 rej_data.sts = error; 895 896 rdma_reject(id, &rej_data, sizeof(rej_data)); 897 } 898 899 static int 900 nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *event, 901 new_qpair_fn cb_fn) 902 { 903 struct spdk_nvmf_rdma_transport *rtransport; 904 struct spdk_nvmf_rdma_qpair *rqpair = NULL; 905 struct spdk_nvmf_rdma_port *port; 906 struct rdma_conn_param *rdma_param = NULL; 907 const struct spdk_nvmf_rdma_request_private_data *private_data = NULL; 908 uint16_t max_queue_depth; 909 uint16_t max_rw_depth; 910 911 rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport); 912 913 assert(event->id != NULL); /* Impossible. Can't even reject the connection. */ 914 assert(event->id->verbs != NULL); /* Impossible. No way to handle this. */ 915 916 rdma_param = &event->param.conn; 917 if (rdma_param->private_data == NULL || 918 rdma_param->private_data_len < sizeof(struct spdk_nvmf_rdma_request_private_data)) { 919 SPDK_ERRLOG("connect request: no private data provided\n"); 920 spdk_nvmf_rdma_event_reject(event->id, SPDK_NVMF_RDMA_ERROR_INVALID_PRIVATE_DATA_LENGTH); 921 return -1; 922 } 923 924 private_data = rdma_param->private_data; 925 if (private_data->recfmt != 0) { 926 SPDK_ERRLOG("Received RDMA private data with RECFMT != 0\n"); 927 spdk_nvmf_rdma_event_reject(event->id, SPDK_NVMF_RDMA_ERROR_INVALID_RECFMT); 928 return -1; 929 } 930 931 SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Connect Recv on fabric intf name %s, dev_name %s\n", 932 event->id->verbs->device->name, event->id->verbs->device->dev_name); 933 934 port = event->listen_id->context; 935 SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Listen Id was %p with verbs %p. ListenAddr: %p\n", 936 event->listen_id, event->listen_id->verbs, port); 937 938 /* Figure out the supported queue depth. 
This is a multi-step process 939 * that takes into account hardware maximums, host provided values, 940 * and our target's internal memory limits */ 941 942 SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Calculating Queue Depth\n"); 943 944 /* Start with the maximum queue depth allowed by the target */ 945 max_queue_depth = rtransport->transport.opts.max_queue_depth; 946 max_rw_depth = rtransport->transport.opts.max_queue_depth; 947 SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Target Max Queue Depth: %d\n", 948 rtransport->transport.opts.max_queue_depth); 949 950 /* Next check the local NIC's hardware limitations */ 951 SPDK_DEBUGLOG(SPDK_LOG_RDMA, 952 "Local NIC Max Send/Recv Queue Depth: %d Max Read/Write Queue Depth: %d\n", 953 port->device->attr.max_qp_wr, port->device->attr.max_qp_rd_atom); 954 max_queue_depth = spdk_min(max_queue_depth, port->device->attr.max_qp_wr); 955 max_rw_depth = spdk_min(max_rw_depth, port->device->attr.max_qp_rd_atom); 956 957 /* Next check the remote NIC's hardware limitations */ 958 SPDK_DEBUGLOG(SPDK_LOG_RDMA, 959 "Host (Initiator) NIC Max Incoming RDMA R/W operations: %d Max Outgoing RDMA R/W operations: %d\n", 960 rdma_param->initiator_depth, rdma_param->responder_resources); 961 if (rdma_param->initiator_depth > 0) { 962 max_rw_depth = spdk_min(max_rw_depth, rdma_param->initiator_depth); 963 } 964 965 /* Finally check for the host software requested values, which are 966 * optional. */ 967 if (rdma_param->private_data != NULL && 968 rdma_param->private_data_len >= sizeof(struct spdk_nvmf_rdma_request_private_data)) { 969 SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Host Receive Queue Size: %d\n", private_data->hrqsize); 970 SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Host Send Queue Size: %d\n", private_data->hsqsize); 971 max_queue_depth = spdk_min(max_queue_depth, private_data->hrqsize); 972 max_queue_depth = spdk_min(max_queue_depth, private_data->hsqsize + 1); 973 } 974 975 SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Final Negotiated Queue Depth: %d R/W Depth: %d\n", 976 max_queue_depth, max_rw_depth); 977 978 rqpair = calloc(1, sizeof(struct spdk_nvmf_rdma_qpair)); 979 if (rqpair == NULL) { 980 SPDK_ERRLOG("Could not allocate new connection.\n"); 981 spdk_nvmf_rdma_event_reject(event->id, SPDK_NVMF_RDMA_ERROR_NO_RESOURCES); 982 return -1; 983 } 984 985 rqpair->port = port; 986 rqpair->max_queue_depth = max_queue_depth; 987 rqpair->max_rw_depth = max_rw_depth; 988 rqpair->cm_id = event->id; 989 rqpair->listen_id = event->listen_id; 990 rqpair->qpair.transport = transport; 991 rqpair->max_sge = spdk_min(port->device->attr.max_sge, SPDK_NVMF_MAX_SGL_ENTRIES); 992 TAILQ_INIT(&rqpair->incoming_queue); 993 event->id->context = &rqpair->qpair; 994 995 cb_fn(&rqpair->qpair); 996 997 return 0; 998 } 999 1000 static int 1001 spdk_nvmf_rdma_mem_notify(void *cb_ctx, struct spdk_mem_map *map, 1002 enum spdk_mem_map_notify_action action, 1003 void *vaddr, size_t size) 1004 { 1005 struct spdk_nvmf_rdma_device *device = cb_ctx; 1006 struct ibv_pd *pd = device->pd; 1007 struct ibv_mr *mr; 1008 1009 switch (action) { 1010 case SPDK_MEM_MAP_NOTIFY_REGISTER: 1011 mr = ibv_reg_mr(pd, vaddr, size, 1012 IBV_ACCESS_LOCAL_WRITE | 1013 IBV_ACCESS_REMOTE_READ | 1014 IBV_ACCESS_REMOTE_WRITE); 1015 if (mr == NULL) { 1016 SPDK_ERRLOG("ibv_reg_mr() failed\n"); 1017 return -1; 1018 } else { 1019 spdk_mem_map_set_translation(map, (uint64_t)vaddr, size, (uint64_t)mr); 1020 } 1021 break; 1022 case SPDK_MEM_MAP_NOTIFY_UNREGISTER: 1023 mr = (struct ibv_mr *)spdk_mem_map_translate(map, (uint64_t)vaddr, NULL); 1024 spdk_mem_map_clear_translation(map, 
(uint64_t)vaddr, size); 1025 if (mr) { 1026 ibv_dereg_mr(mr); 1027 } 1028 break; 1029 } 1030 1031 return 0; 1032 } 1033 1034 typedef enum spdk_nvme_data_transfer spdk_nvme_data_transfer_t; 1035 1036 static spdk_nvme_data_transfer_t 1037 spdk_nvmf_rdma_request_get_xfer(struct spdk_nvmf_rdma_request *rdma_req) 1038 { 1039 enum spdk_nvme_data_transfer xfer; 1040 struct spdk_nvme_cmd *cmd = &rdma_req->req.cmd->nvme_cmd; 1041 struct spdk_nvme_sgl_descriptor *sgl = &cmd->dptr.sgl1; 1042 1043 #ifdef SPDK_CONFIG_RDMA_SEND_WITH_INVAL 1044 rdma_req->rsp.wr.opcode = IBV_WR_SEND; 1045 rdma_req->rsp.wr.imm_data = 0; 1046 #endif 1047 1048 /* Figure out data transfer direction */ 1049 if (cmd->opc == SPDK_NVME_OPC_FABRIC) { 1050 xfer = spdk_nvme_opc_get_data_transfer(rdma_req->req.cmd->nvmf_cmd.fctype); 1051 } else { 1052 xfer = spdk_nvme_opc_get_data_transfer(cmd->opc); 1053 1054 /* Some admin commands are special cases */ 1055 if ((rdma_req->req.qpair->qid == 0) && 1056 ((cmd->opc == SPDK_NVME_OPC_GET_FEATURES) || 1057 (cmd->opc == SPDK_NVME_OPC_SET_FEATURES))) { 1058 switch (cmd->cdw10 & 0xff) { 1059 case SPDK_NVME_FEAT_LBA_RANGE_TYPE: 1060 case SPDK_NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION: 1061 case SPDK_NVME_FEAT_HOST_IDENTIFIER: 1062 break; 1063 default: 1064 xfer = SPDK_NVME_DATA_NONE; 1065 } 1066 } 1067 } 1068 1069 if (xfer == SPDK_NVME_DATA_NONE) { 1070 return xfer; 1071 } 1072 1073 /* Even for commands that may transfer data, they could have specified 0 length. 1074 * We want those to show up with xfer SPDK_NVME_DATA_NONE. 1075 */ 1076 switch (sgl->generic.type) { 1077 case SPDK_NVME_SGL_TYPE_DATA_BLOCK: 1078 case SPDK_NVME_SGL_TYPE_BIT_BUCKET: 1079 case SPDK_NVME_SGL_TYPE_SEGMENT: 1080 case SPDK_NVME_SGL_TYPE_LAST_SEGMENT: 1081 case SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK: 1082 if (sgl->unkeyed.length == 0) { 1083 xfer = SPDK_NVME_DATA_NONE; 1084 } 1085 break; 1086 case SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK: 1087 if (sgl->keyed.length == 0) { 1088 xfer = SPDK_NVME_DATA_NONE; 1089 } 1090 break; 1091 } 1092 1093 return xfer; 1094 } 1095 1096 static int 1097 spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport, 1098 struct spdk_nvmf_rdma_device *device, 1099 struct spdk_nvmf_rdma_request *rdma_req) 1100 { 1101 void *buf = NULL; 1102 uint32_t length = rdma_req->req.length; 1103 uint32_t i = 0; 1104 1105 rdma_req->req.iovcnt = 0; 1106 while (length) { 1107 buf = spdk_mempool_get(rtransport->data_buf_pool); 1108 if (!buf) { 1109 goto nomem; 1110 } 1111 1112 rdma_req->req.iov[i].iov_base = (void *)((uintptr_t)(buf + NVMF_DATA_BUFFER_MASK) & 1113 ~NVMF_DATA_BUFFER_MASK); 1114 rdma_req->req.iov[i].iov_len = spdk_min(length, rtransport->transport.opts.io_unit_size); 1115 rdma_req->req.iovcnt++; 1116 rdma_req->data.buffers[i] = buf; 1117 rdma_req->data.wr.sg_list[i].addr = (uintptr_t)(rdma_req->req.iov[i].iov_base); 1118 rdma_req->data.wr.sg_list[i].length = rdma_req->req.iov[i].iov_len; 1119 rdma_req->data.wr.sg_list[i].lkey = ((struct ibv_mr *)spdk_mem_map_translate(device->map, 1120 (uint64_t)buf, NULL))->lkey; 1121 1122 length -= rdma_req->req.iov[i].iov_len; 1123 i++; 1124 } 1125 1126 rdma_req->data_from_pool = true; 1127 1128 return 0; 1129 1130 nomem: 1131 while (i) { 1132 i--; 1133 spdk_mempool_put(rtransport->data_buf_pool, rdma_req->req.iov[i].iov_base); 1134 rdma_req->req.iov[i].iov_base = NULL; 1135 rdma_req->req.iov[i].iov_len = 0; 1136 1137 rdma_req->data.wr.sg_list[i].addr = 0; 1138 rdma_req->data.wr.sg_list[i].length = 0; 1139 rdma_req->data.wr.sg_list[i].lkey = 
0; 1140 } 1141 rdma_req->req.iovcnt = 0; 1142 return -ENOMEM; 1143 } 1144 1145 static int 1146 spdk_nvmf_rdma_request_parse_sgl(struct spdk_nvmf_rdma_transport *rtransport, 1147 struct spdk_nvmf_rdma_device *device, 1148 struct spdk_nvmf_rdma_request *rdma_req) 1149 { 1150 struct spdk_nvme_cmd *cmd; 1151 struct spdk_nvme_cpl *rsp; 1152 struct spdk_nvme_sgl_descriptor *sgl; 1153 1154 cmd = &rdma_req->req.cmd->nvme_cmd; 1155 rsp = &rdma_req->req.rsp->nvme_cpl; 1156 sgl = &cmd->dptr.sgl1; 1157 1158 if (sgl->generic.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK && 1159 (sgl->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS || 1160 sgl->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY)) { 1161 if (sgl->keyed.length > rtransport->transport.opts.max_io_size) { 1162 SPDK_ERRLOG("SGL length 0x%x exceeds max io size 0x%x\n", 1163 sgl->keyed.length, rtransport->transport.opts.max_io_size); 1164 rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID; 1165 return -1; 1166 } 1167 #ifdef SPDK_CONFIG_RDMA_SEND_WITH_INVAL 1168 if ((device->attr.device_cap_flags & IBV_DEVICE_MEM_MGT_EXTENSIONS) != 0) { 1169 if (sgl->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY) { 1170 rdma_req->rsp.wr.opcode = IBV_WR_SEND_WITH_INV; 1171 rdma_req->rsp.wr.imm_data = sgl->keyed.key; 1172 } 1173 } 1174 #endif 1175 1176 /* fill request length and populate iovs */ 1177 rdma_req->req.length = sgl->keyed.length; 1178 1179 if (spdk_nvmf_rdma_request_fill_iovs(rtransport, device, rdma_req) < 0) { 1180 /* No available buffers. Queue this request up. */ 1181 SPDK_DEBUGLOG(SPDK_LOG_RDMA, "No available large data buffers. Queueing request %p\n", rdma_req); 1182 return 0; 1183 } 1184 1185 /* backward compatible */ 1186 rdma_req->req.data = rdma_req->req.iov[0].iov_base; 1187 1188 /* rdma wr specifics */ 1189 rdma_req->data.wr.num_sge = rdma_req->req.iovcnt; 1190 rdma_req->data.wr.wr.rdma.rkey = sgl->keyed.key; 1191 rdma_req->data.wr.wr.rdma.remote_addr = sgl->address; 1192 1193 SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Request %p took %d buffer/s from central pool\n", rdma_req, 1194 rdma_req->req.iovcnt); 1195 1196 return 0; 1197 } else if (sgl->generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK && 1198 sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET) { 1199 uint64_t offset = sgl->address; 1200 uint32_t max_len = rtransport->transport.opts.in_capsule_data_size; 1201 1202 SPDK_DEBUGLOG(SPDK_LOG_NVMF, "In-capsule data: offset 0x%" PRIx64 ", length 0x%x\n", 1203 offset, sgl->unkeyed.length); 1204 1205 if (offset > max_len) { 1206 SPDK_ERRLOG("In-capsule offset 0x%" PRIx64 " exceeds capsule length 0x%x\n", 1207 offset, max_len); 1208 rsp->status.sc = SPDK_NVME_SC_INVALID_SGL_OFFSET; 1209 return -1; 1210 } 1211 max_len -= (uint32_t)offset; 1212 1213 if (sgl->unkeyed.length > max_len) { 1214 SPDK_ERRLOG("In-capsule data length 0x%x exceeds capsule length 0x%x\n", 1215 sgl->unkeyed.length, max_len); 1216 rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID; 1217 return -1; 1218 } 1219 1220 rdma_req->req.data = rdma_req->recv->buf + offset; 1221 rdma_req->data_from_pool = false; 1222 rdma_req->req.length = sgl->unkeyed.length; 1223 1224 rdma_req->req.iov[0].iov_base = rdma_req->req.data; 1225 rdma_req->req.iov[0].iov_len = rdma_req->req.length; 1226 rdma_req->req.iovcnt = 1; 1227 1228 return 0; 1229 } 1230 1231 SPDK_ERRLOG("Invalid NVMf I/O Command SGL: Type 0x%x, Subtype 0x%x\n", 1232 sgl->generic.type, sgl->generic.subtype); 1233 rsp->status.sc = SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID; 1234 return -1; 1235 } 1236 1237 static bool 1238 
spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport, 1239 struct spdk_nvmf_rdma_request *rdma_req) 1240 { 1241 struct spdk_nvmf_rdma_qpair *rqpair; 1242 struct spdk_nvmf_rdma_device *device; 1243 struct spdk_nvme_cpl *rsp = &rdma_req->req.rsp->nvme_cpl; 1244 int rc; 1245 struct spdk_nvmf_rdma_recv *rdma_recv; 1246 enum spdk_nvmf_rdma_request_state prev_state; 1247 bool progress = false; 1248 int data_posted; 1249 int cur_rdma_rw_depth; 1250 1251 rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair); 1252 device = rqpair->port->device; 1253 1254 assert(rdma_req->state != RDMA_REQUEST_STATE_FREE); 1255 1256 /* If the queue pair is in an error state, force the request to the completed state 1257 * to release resources. */ 1258 if (rqpair->ibv_attr.qp_state == IBV_QPS_ERR || rqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) { 1259 if (rdma_req->state == RDMA_REQUEST_STATE_NEED_BUFFER) { 1260 TAILQ_REMOVE(&rqpair->ch->pending_data_buf_queue, rdma_req, link); 1261 } 1262 spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED); 1263 } 1264 1265 /* The loop here is to allow for several back-to-back state changes. */ 1266 do { 1267 prev_state = rdma_req->state; 1268 1269 SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Request %p entering state %d\n", rdma_req, prev_state); 1270 1271 switch (rdma_req->state) { 1272 case RDMA_REQUEST_STATE_FREE: 1273 /* Some external code must kick a request into RDMA_REQUEST_STATE_NEW 1274 * to escape this state. */ 1275 break; 1276 case RDMA_REQUEST_STATE_NEW: 1277 spdk_trace_record(TRACE_RDMA_REQUEST_STATE_NEW, 0, 0, 1278 (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id); 1279 rdma_recv = rdma_req->recv; 1280 1281 /* The first element of the SGL is the NVMe command */ 1282 rdma_req->req.cmd = (union nvmf_h2c_msg *)rdma_recv->sgl[0].addr; 1283 memset(rdma_req->req.rsp, 0, sizeof(*rdma_req->req.rsp)); 1284 1285 TAILQ_REMOVE(&rqpair->incoming_queue, rdma_recv, link); 1286 1287 if (rqpair->ibv_attr.qp_state == IBV_QPS_ERR) { 1288 spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED); 1289 break; 1290 } 1291 1292 /* The next state transition depends on the data transfer needs of this request. */ 1293 rdma_req->req.xfer = spdk_nvmf_rdma_request_get_xfer(rdma_req); 1294 1295 /* If no data to transfer, ready to execute. */ 1296 if (rdma_req->req.xfer == SPDK_NVME_DATA_NONE) { 1297 spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_READY_TO_EXECUTE); 1298 break; 1299 } 1300 1301 spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_NEED_BUFFER); 1302 TAILQ_INSERT_TAIL(&rqpair->ch->pending_data_buf_queue, rdma_req, link); 1303 break; 1304 case RDMA_REQUEST_STATE_NEED_BUFFER: 1305 spdk_trace_record(TRACE_RDMA_REQUEST_STATE_NEED_BUFFER, 0, 0, 1306 (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id); 1307 1308 assert(rdma_req->req.xfer != SPDK_NVME_DATA_NONE); 1309 1310 if (rdma_req != TAILQ_FIRST(&rqpair->ch->pending_data_buf_queue)) { 1311 /* This request needs to wait in line to obtain a buffer */ 1312 break; 1313 } 1314 1315 /* Try to get a data buffer */ 1316 rc = spdk_nvmf_rdma_request_parse_sgl(rtransport, device, rdma_req); 1317 if (rc < 0) { 1318 TAILQ_REMOVE(&rqpair->ch->pending_data_buf_queue, rdma_req, link); 1319 rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 1320 spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_READY_TO_COMPLETE); 1321 break; 1322 } 1323 1324 if (!rdma_req->req.data) { 1325 /* No buffers available. 
*/ 1326 break; 1327 } 1328 1329 TAILQ_REMOVE(&rqpair->ch->pending_data_buf_queue, rdma_req, link); 1330 1331 /* If data is transferring from host to controller and the data didn't 1332 * arrive using in capsule data, we need to do a transfer from the host. 1333 */ 1334 if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER && rdma_req->data_from_pool) { 1335 spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING); 1336 break; 1337 } 1338 1339 spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_READY_TO_EXECUTE); 1340 break; 1341 case RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING: 1342 spdk_trace_record(TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING, 0, 0, 1343 (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id); 1344 1345 if (rdma_req != TAILQ_FIRST(&rqpair->state_queue[RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING])) { 1346 /* This request needs to wait in line to perform RDMA */ 1347 break; 1348 } 1349 cur_rdma_rw_depth = spdk_nvmf_rdma_cur_rw_depth(rqpair); 1350 1351 if (cur_rdma_rw_depth >= rqpair->max_rw_depth) { 1352 /* R/W queue is full, need to wait */ 1353 break; 1354 } 1355 1356 if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) { 1357 rc = request_transfer_in(&rdma_req->req); 1358 if (!rc) { 1359 spdk_nvmf_rdma_request_set_state(rdma_req, 1360 RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER); 1361 } else { 1362 rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 1363 spdk_nvmf_rdma_request_set_state(rdma_req, 1364 RDMA_REQUEST_STATE_READY_TO_COMPLETE); 1365 } 1366 } else if (rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) { 1367 /* The data transfer will be kicked off from 1368 * RDMA_REQUEST_STATE_READY_TO_COMPLETE state. 1369 */ 1370 spdk_nvmf_rdma_request_set_state(rdma_req, 1371 RDMA_REQUEST_STATE_READY_TO_COMPLETE); 1372 } else { 1373 SPDK_ERRLOG("Cannot perform data transfer, unknown state: %u\n", 1374 rdma_req->req.xfer); 1375 assert(0); 1376 } 1377 break; 1378 case RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER: 1379 spdk_trace_record(TRACE_RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER, 0, 0, 1380 (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id); 1381 /* Some external code must kick a request into RDMA_REQUEST_STATE_READY_TO_EXECUTE 1382 * to escape this state. */ 1383 break; 1384 case RDMA_REQUEST_STATE_READY_TO_EXECUTE: 1385 spdk_trace_record(TRACE_RDMA_REQUEST_STATE_READY_TO_EXECUTE, 0, 0, 1386 (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id); 1387 spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_EXECUTING); 1388 spdk_nvmf_request_exec(&rdma_req->req); 1389 break; 1390 case RDMA_REQUEST_STATE_EXECUTING: 1391 spdk_trace_record(TRACE_RDMA_REQUEST_STATE_EXECUTING, 0, 0, 1392 (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id); 1393 /* Some external code must kick a request into RDMA_REQUEST_STATE_EXECUTED 1394 * to escape this state. 
*/ 1395 break; 1396 case RDMA_REQUEST_STATE_EXECUTED: 1397 spdk_trace_record(TRACE_RDMA_REQUEST_STATE_EXECUTED, 0, 0, 1398 (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id); 1399 if (rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) { 1400 spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING); 1401 } else { 1402 spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_READY_TO_COMPLETE); 1403 } 1404 break; 1405 case RDMA_REQUEST_STATE_READY_TO_COMPLETE: 1406 spdk_trace_record(TRACE_RDMA_REQUEST_STATE_READY_TO_COMPLETE, 0, 0, 1407 (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id); 1408 rc = request_transfer_out(&rdma_req->req, &data_posted); 1409 assert(rc == 0); /* No good way to handle this currently */ 1410 if (rc) { 1411 spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED); 1412 } else { 1413 spdk_nvmf_rdma_request_set_state(rdma_req, 1414 data_posted ? 1415 RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST : 1416 RDMA_REQUEST_STATE_COMPLETING); 1417 } 1418 break; 1419 case RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST: 1420 spdk_trace_record(TRACE_RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST, 0, 0, 1421 (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id); 1422 /* Some external code must kick a request into RDMA_REQUEST_STATE_COMPLETED 1423 * to escape this state. */ 1424 break; 1425 case RDMA_REQUEST_STATE_COMPLETING: 1426 spdk_trace_record(TRACE_RDMA_REQUEST_STATE_COMPLETING, 0, 0, 1427 (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id); 1428 /* Some external code must kick a request into RDMA_REQUEST_STATE_COMPLETED 1429 * to escape this state. */ 1430 break; 1431 case RDMA_REQUEST_STATE_COMPLETED: 1432 spdk_trace_record(TRACE_RDMA_REQUEST_STATE_COMPLETED, 0, 0, 1433 (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id); 1434 1435 if (rdma_req->data_from_pool) { 1436 /* Put the buffer/s back in the pool */ 1437 for (uint32_t i = 0; i < rdma_req->req.iovcnt; i++) { 1438 spdk_mempool_put(rtransport->data_buf_pool, rdma_req->data.buffers[i]); 1439 rdma_req->req.iov[i].iov_base = NULL; 1440 rdma_req->data.buffers[i] = NULL; 1441 } 1442 rdma_req->data_from_pool = false; 1443 } 1444 rdma_req->req.length = 0; 1445 rdma_req->req.iovcnt = 0; 1446 rdma_req->req.data = NULL; 1447 spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_FREE); 1448 break; 1449 case RDMA_REQUEST_NUM_STATES: 1450 default: 1451 assert(0); 1452 break; 1453 } 1454 1455 if (rdma_req->state != prev_state) { 1456 progress = true; 1457 } 1458 } while (rdma_req->state != prev_state); 1459 1460 return progress; 1461 } 1462 1463 /* Public API callbacks begin here */ 1464 1465 #define SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH 128 1466 #define SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH 128 1467 #define SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR 64 1468 #define SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE 4096 1469 #define SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE 131072 1470 #define SPDK_NVMF_RDMA_DEFAULT_IO_BUFFER_SIZE 131072 1471 1472 static void 1473 spdk_nvmf_rdma_opts_init(struct spdk_nvmf_transport_opts *opts) 1474 { 1475 opts->max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH; 1476 opts->max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR; 1477 opts->in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE; 1478 opts->max_io_size = SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE; 1479 opts->io_unit_size = SPDK_NVMF_RDMA_DEFAULT_IO_BUFFER_SIZE; 1480 opts->max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH; 1481 } 1482 1483 static int spdk_nvmf_rdma_destroy(struct 
spdk_nvmf_transport *transport); 1484 1485 static struct spdk_nvmf_transport * 1486 spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts) 1487 { 1488 int rc; 1489 struct spdk_nvmf_rdma_transport *rtransport; 1490 struct spdk_nvmf_rdma_device *device, *tmp; 1491 struct ibv_context **contexts; 1492 uint32_t i; 1493 int flag; 1494 uint32_t sge_count; 1495 1496 const struct spdk_mem_map_ops nvmf_rdma_map_ops = { 1497 .notify_cb = spdk_nvmf_rdma_mem_notify, 1498 .are_contiguous = NULL 1499 }; 1500 1501 rtransport = calloc(1, sizeof(*rtransport)); 1502 if (!rtransport) { 1503 return NULL; 1504 } 1505 1506 if (pthread_mutex_init(&rtransport->lock, NULL)) { 1507 SPDK_ERRLOG("pthread_mutex_init() failed\n"); 1508 free(rtransport); 1509 return NULL; 1510 } 1511 1512 spdk_io_device_register(rtransport, spdk_nvmf_rdma_mgmt_channel_create, 1513 spdk_nvmf_rdma_mgmt_channel_destroy, 1514 sizeof(struct spdk_nvmf_rdma_mgmt_channel), 1515 "rdma_transport"); 1516 1517 TAILQ_INIT(&rtransport->devices); 1518 TAILQ_INIT(&rtransport->ports); 1519 1520 rtransport->transport.ops = &spdk_nvmf_transport_rdma; 1521 1522 SPDK_INFOLOG(SPDK_LOG_RDMA, "*** RDMA Transport Init ***\n" 1523 " Transport opts: max_ioq_depth=%d, max_io_size=%d,\n" 1524 " max_qpairs_per_ctrlr=%d, io_unit_size=%d,\n" 1525 " in_capsule_data_size=%d, max_aq_depth=%d\n", 1526 opts->max_queue_depth, 1527 opts->max_io_size, 1528 opts->max_qpairs_per_ctrlr, 1529 opts->io_unit_size, 1530 opts->in_capsule_data_size, 1531 opts->max_aq_depth); 1532 1533 /* I/O unit size cannot be larger than max I/O size */ 1534 if (opts->io_unit_size > opts->max_io_size) { 1535 opts->io_unit_size = opts->max_io_size; 1536 } 1537 1538 sge_count = opts->max_io_size / opts->io_unit_size; 1539 if (sge_count > SPDK_NVMF_MAX_SGL_ENTRIES) { 1540 SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size); 1541 spdk_nvmf_rdma_destroy(&rtransport->transport); 1542 return NULL; 1543 } 1544 1545 rtransport->event_channel = rdma_create_event_channel(); 1546 if (rtransport->event_channel == NULL) { 1547 SPDK_ERRLOG("rdma_create_event_channel() failed, %s\n", spdk_strerror(errno)); 1548 spdk_nvmf_rdma_destroy(&rtransport->transport); 1549 return NULL; 1550 } 1551 1552 flag = fcntl(rtransport->event_channel->fd, F_GETFL); 1553 if (fcntl(rtransport->event_channel->fd, F_SETFL, flag | O_NONBLOCK) < 0) { 1554 SPDK_ERRLOG("fcntl can't set nonblocking mode for socket, fd: %d (%s)\n", 1555 rtransport->event_channel->fd, spdk_strerror(errno)); 1556 spdk_nvmf_rdma_destroy(&rtransport->transport); 1557 return NULL; 1558 } 1559 1560 rtransport->data_buf_pool = spdk_mempool_create("spdk_nvmf_rdma", 1561 opts->max_queue_depth * 4, /* The 4 is arbitrarily chosen. Needs to be configurable. 
*/ 1562 opts->max_io_size + NVMF_DATA_BUFFER_ALIGNMENT, 1563 SPDK_MEMPOOL_DEFAULT_CACHE_SIZE, 1564 SPDK_ENV_SOCKET_ID_ANY); 1565 if (!rtransport->data_buf_pool) { 1566 SPDK_ERRLOG("Unable to allocate buffer pool for poll group\n"); 1567 spdk_nvmf_rdma_destroy(&rtransport->transport); 1568 return NULL; 1569 } 1570 1571 contexts = rdma_get_devices(NULL); 1572 if (contexts == NULL) { 1573 SPDK_ERRLOG("rdma_get_devices() failed: %s (%d)\n", spdk_strerror(errno), errno); 1574 spdk_nvmf_rdma_destroy(&rtransport->transport); 1575 return NULL; 1576 } 1577 1578 i = 0; 1579 rc = 0; 1580 while (contexts[i] != NULL) { 1581 device = calloc(1, sizeof(*device)); 1582 if (!device) { 1583 SPDK_ERRLOG("Unable to allocate memory for RDMA devices.\n"); 1584 rc = -ENOMEM; 1585 break; 1586 } 1587 device->context = contexts[i]; 1588 rc = ibv_query_device(device->context, &device->attr); 1589 if (rc < 0) { 1590 SPDK_ERRLOG("Failed to query RDMA device attributes.\n"); 1591 free(device); 1592 break; 1593 1594 } 1595 1596 #ifdef SPDK_CONFIG_RDMA_SEND_WITH_INVAL 1597 if ((device->attr.device_cap_flags & IBV_DEVICE_MEM_MGT_EXTENSIONS) == 0) { 1598 SPDK_WARNLOG("The libibverbs on this system supports SEND_WITH_INVALIDATE,"); 1599 SPDK_WARNLOG("but the device with vendor ID %u does not.\n", device->attr.vendor_id); 1600 } 1601 1602 /** 1603 * The vendor ID is assigned by the IEEE and an ID of 0 implies Soft-RoCE. 1604 * The Soft-RoCE RXE driver does not currently support send with invalidate, 1605 * but incorrectly reports that it does. There are changes making their way 1606 * through the kernel now that will enable this feature. When they are merged, 1607 * we can conditionally enable this feature. 1608 * 1609 * TODO: enable this for versions of the kernel rxe driver that support it. 
1610 */ 1611 if (device->attr.vendor_id == 0) { 1612 device->attr.device_cap_flags &= ~(IBV_DEVICE_MEM_MGT_EXTENSIONS); 1613 } 1614 #endif 1615 1616 /* set up device context async ev fd as NON_BLOCKING */ 1617 flag = fcntl(device->context->async_fd, F_GETFL); 1618 rc = fcntl(device->context->async_fd, F_SETFL, flag | O_NONBLOCK); 1619 if (rc < 0) { 1620 SPDK_ERRLOG("Failed to set context async fd to NONBLOCK.\n"); 1621 free(device); 1622 break; 1623 } 1624 1625 device->pd = ibv_alloc_pd(device->context); 1626 if (!device->pd) { 1627 SPDK_ERRLOG("Unable to allocate protection domain.\n"); 1628 free(device); 1629 rc = -1; 1630 break; 1631 } 1632 1633 device->map = spdk_mem_map_alloc(0, &nvmf_rdma_map_ops, device); 1634 if (!device->map) { 1635 SPDK_ERRLOG("Unable to allocate memory map for new poll group\n"); 1636 ibv_dealloc_pd(device->pd); 1637 free(device); 1638 rc = -1; 1639 break; 1640 } 1641 1642 TAILQ_INSERT_TAIL(&rtransport->devices, device, link); 1643 i++; 1644 } 1645 rdma_free_devices(contexts); 1646 1647 if (rc < 0) { 1648 spdk_nvmf_rdma_destroy(&rtransport->transport); 1649 return NULL; 1650 } 1651 1652 /* Set up poll descriptor array to monitor events from RDMA and IB 1653 * in a single poll syscall 1654 */ 1655 rtransport->npoll_fds = i + 1; 1656 i = 0; 1657 rtransport->poll_fds = calloc(rtransport->npoll_fds, sizeof(struct pollfd)); 1658 if (rtransport->poll_fds == NULL) { 1659 SPDK_ERRLOG("poll_fds allocation failed\n"); 1660 spdk_nvmf_rdma_destroy(&rtransport->transport); 1661 return NULL; 1662 } 1663 1664 rtransport->poll_fds[i].fd = rtransport->event_channel->fd; 1665 rtransport->poll_fds[i++].events = POLLIN; 1666 1667 TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, tmp) { 1668 rtransport->poll_fds[i].fd = device->context->async_fd; 1669 rtransport->poll_fds[i++].events = POLLIN; 1670 } 1671 1672 return &rtransport->transport; 1673 } 1674 1675 static int 1676 spdk_nvmf_rdma_destroy(struct spdk_nvmf_transport *transport) 1677 { 1678 struct spdk_nvmf_rdma_transport *rtransport; 1679 struct spdk_nvmf_rdma_port *port, *port_tmp; 1680 struct spdk_nvmf_rdma_device *device, *device_tmp; 1681 1682 rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport); 1683 1684 TAILQ_FOREACH_SAFE(port, &rtransport->ports, link, port_tmp) { 1685 TAILQ_REMOVE(&rtransport->ports, port, link); 1686 rdma_destroy_id(port->id); 1687 free(port); 1688 } 1689 1690 if (rtransport->poll_fds != NULL) { 1691 free(rtransport->poll_fds); 1692 } 1693 1694 if (rtransport->event_channel != NULL) { 1695 rdma_destroy_event_channel(rtransport->event_channel); 1696 } 1697 1698 TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, device_tmp) { 1699 TAILQ_REMOVE(&rtransport->devices, device, link); 1700 if (device->map) { 1701 spdk_mem_map_free(&device->map); 1702 } 1703 if (device->pd) { 1704 ibv_dealloc_pd(device->pd); 1705 } 1706 free(device); 1707 } 1708 1709 if (rtransport->data_buf_pool != NULL) { 1710 if (spdk_mempool_count(rtransport->data_buf_pool) != 1711 (transport->opts.max_queue_depth * 4)) { 1712 SPDK_ERRLOG("transport buffer pool count is %zu but should be %u\n", 1713 spdk_mempool_count(rtransport->data_buf_pool), 1714 transport->opts.max_queue_depth * 4); 1715 } 1716 } 1717 1718 spdk_mempool_free(rtransport->data_buf_pool); 1719 spdk_io_device_unregister(rtransport, NULL); 1720 pthread_mutex_destroy(&rtransport->lock); 1721 free(rtransport); 1722 1723 return 0; 1724 } 1725 1726 static int 1727 spdk_nvmf_rdma_listen(struct spdk_nvmf_transport *transport, 1728 const 
struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_rdma_transport *rtransport;
	struct spdk_nvmf_rdma_device *device;
	struct spdk_nvmf_rdma_port *port_tmp, *port;
	struct addrinfo *res;
	struct addrinfo hints;
	int family;
	int rc;

	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);

	port = calloc(1, sizeof(*port));
	if (!port) {
		return -ENOMEM;
	}

	/* Selectively copy the trid. Things like NQN don't matter here - that
	 * mapping is enforced elsewhere.
	 */
	port->trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
	port->trid.adrfam = trid->adrfam;
	snprintf(port->trid.traddr, sizeof(port->trid.traddr), "%s", trid->traddr);
	snprintf(port->trid.trsvcid, sizeof(port->trid.trsvcid), "%s", trid->trsvcid);

	pthread_mutex_lock(&rtransport->lock);
	assert(rtransport->event_channel != NULL);
	TAILQ_FOREACH(port_tmp, &rtransport->ports, link) {
		if (spdk_nvme_transport_id_compare(&port_tmp->trid, &port->trid) == 0) {
			port_tmp->ref++;
			free(port);
			/* Already listening at this address */
			pthread_mutex_unlock(&rtransport->lock);
			return 0;
		}
	}

	rc = rdma_create_id(rtransport->event_channel, &port->id, port, RDMA_PS_TCP);
	if (rc < 0) {
		SPDK_ERRLOG("rdma_create_id() failed\n");
		free(port);
		pthread_mutex_unlock(&rtransport->lock);
		return rc;
	}

	switch (port->trid.adrfam) {
	case SPDK_NVMF_ADRFAM_IPV4:
		family = AF_INET;
		break;
	case SPDK_NVMF_ADRFAM_IPV6:
		family = AF_INET6;
		break;
	default:
		SPDK_ERRLOG("Unhandled ADRFAM %d\n", port->trid.adrfam);
		rdma_destroy_id(port->id);
		free(port);
		pthread_mutex_unlock(&rtransport->lock);
		return -EINVAL;
	}

	memset(&hints, 0, sizeof(hints));
	hints.ai_family = family;
	hints.ai_flags = AI_NUMERICSERV;
	hints.ai_socktype = SOCK_STREAM;
	hints.ai_protocol = 0;

	rc = getaddrinfo(port->trid.traddr, port->trid.trsvcid, &hints, &res);
	if (rc) {
		SPDK_ERRLOG("getaddrinfo failed: %s (%d)\n", gai_strerror(rc), rc);
		rdma_destroy_id(port->id);
		free(port);
		pthread_mutex_unlock(&rtransport->lock);
		return -EINVAL;
	}

	rc = rdma_bind_addr(port->id, res->ai_addr);
	freeaddrinfo(res);

	if (rc < 0) {
		SPDK_ERRLOG("rdma_bind_addr() failed\n");
		rdma_destroy_id(port->id);
		free(port);
		pthread_mutex_unlock(&rtransport->lock);
		return rc;
	}

	if (!port->id->verbs) {
		SPDK_ERRLOG("ibv_context is null\n");
		rdma_destroy_id(port->id);
		free(port);
		pthread_mutex_unlock(&rtransport->lock);
		return -1;
	}

	rc = rdma_listen(port->id, 10); /* 10 = backlog */
	if (rc < 0) {
		SPDK_ERRLOG("rdma_listen() failed\n");
		rdma_destroy_id(port->id);
		free(port);
		pthread_mutex_unlock(&rtransport->lock);
		return rc;
	}

	TAILQ_FOREACH(device, &rtransport->devices, link) {
		if (device->context == port->id->verbs) {
			port->device = device;
			break;
		}
	}
	if (!port->device) {
		SPDK_ERRLOG("Accepted a connection with verbs %p, but unable to find a corresponding device.\n",
			    port->id->verbs);
		rdma_destroy_id(port->id);
		free(port);
		pthread_mutex_unlock(&rtransport->lock);
		return -EINVAL;
	}

	SPDK_INFOLOG(SPDK_LOG_RDMA, "*** NVMf Target Listening on %s port %d ***\n",
		     port->trid.traddr, ntohs(rdma_get_src_port(port->id)));

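	/* The port is reference counted: listening again on an identical trid only
	 * bumps the count (see the check above), and spdk_nvmf_rdma_stop_listen()
	 * below tears down the cm_id only once the count drops back to zero.
	 */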
	port->ref = 1;

	TAILQ_INSERT_TAIL(&rtransport->ports, port, link);
	pthread_mutex_unlock(&rtransport->lock);

	return 0;
}

static int
spdk_nvmf_rdma_stop_listen(struct spdk_nvmf_transport *transport,
			   const struct spdk_nvme_transport_id *_trid)
{
	struct spdk_nvmf_rdma_transport *rtransport;
	struct spdk_nvmf_rdma_port *port, *tmp;
	struct spdk_nvme_transport_id trid = {};

	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);

	/* Selectively copy the trid. Things like NQN don't matter here - that
	 * mapping is enforced elsewhere.
	 */
	trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
	trid.adrfam = _trid->adrfam;
	snprintf(trid.traddr, sizeof(trid.traddr), "%s", _trid->traddr);
	snprintf(trid.trsvcid, sizeof(trid.trsvcid), "%s", _trid->trsvcid);

	pthread_mutex_lock(&rtransport->lock);
	TAILQ_FOREACH_SAFE(port, &rtransport->ports, link, tmp) {
		if (spdk_nvme_transport_id_compare(&port->trid, &trid) == 0) {
			assert(port->ref > 0);
			port->ref--;
			if (port->ref == 0) {
				TAILQ_REMOVE(&rtransport->ports, port, link);
				rdma_destroy_id(port->id);
				free(port);
			}
			break;
		}
	}

	pthread_mutex_unlock(&rtransport->lock);
	return 0;
}

static bool
spdk_nvmf_rdma_qpair_is_idle(struct spdk_nvmf_qpair *qpair)
{
	int cur_queue_depth, cur_rdma_rw_depth;
	struct spdk_nvmf_rdma_qpair *rqpair;

	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
	cur_queue_depth = spdk_nvmf_rdma_cur_queue_depth(rqpair);
	cur_rdma_rw_depth = spdk_nvmf_rdma_cur_rw_depth(rqpair);

	return cur_queue_depth == 0 && cur_rdma_rw_depth == 0;
}
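/* Drain the qpair's internal queues in priority order: requests waiting on RDMA
 * read/write depth first, then requests waiting on data buffers, and finally newly
 * received commands. Each pass stops at the first request that cannot make
 * progress, so ordering within a queue is preserved.
 */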
static void
spdk_nvmf_rdma_qpair_process_pending(struct spdk_nvmf_rdma_transport *rtransport,
				     struct spdk_nvmf_rdma_qpair *rqpair)
{
	struct spdk_nvmf_rdma_recv *rdma_recv, *recv_tmp;
	struct spdk_nvmf_rdma_request *rdma_req, *req_tmp;

	/* We process I/O in the data transfer pending queue at the highest priority. */
	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->state_queue[RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING],
			   state_link, req_tmp) {
		if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false) {
			break;
		}
	}

	/* The second highest priority is I/O waiting on memory buffers. */
	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->ch->pending_data_buf_queue, link,
			   req_tmp) {
		if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false) {
			break;
		}
	}

	if (rqpair->qpair_disconnected) {
		spdk_nvmf_rdma_qpair_destroy(rqpair);
		return;
	}

	/* Do not process newly received commands if the qp is in the ERROR state;
	 * wait until recovery is complete.
	 */
	if (rqpair->ibv_attr.qp_state == IBV_QPS_ERR) {
		return;
	}

	/* The lowest priority is processing newly received commands. */
	TAILQ_FOREACH_SAFE(rdma_recv, &rqpair->incoming_queue, link, recv_tmp) {
		if (TAILQ_EMPTY(&rqpair->state_queue[RDMA_REQUEST_STATE_FREE])) {
			break;
		}

		rdma_req = TAILQ_FIRST(&rqpair->state_queue[RDMA_REQUEST_STATE_FREE]);
		rdma_req->recv = rdma_recv;
		spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_NEW);
		if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false) {
			break;
		}
	}
}

static void
_nvmf_rdma_disconnect(void *ctx)
{
	struct spdk_nvmf_qpair *qpair = ctx;
	struct spdk_nvmf_rdma_qpair *rqpair;

	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);

	spdk_nvmf_rdma_qpair_dec_refcnt(rqpair);

	spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
}

static void
_nvmf_rdma_disconnect_retry(void *ctx)
{
	struct spdk_nvmf_qpair *qpair = ctx;
	struct spdk_nvmf_poll_group *group;

	/* Read the group out of the qpair. This is normally set and accessed only from
	 * the thread that created the group. Here, we're not necessarily on that thread.
	 * The data member qpair->group begins its life as NULL, is assigned a pointer
	 * exactly once, and never changes afterward, so reading it and checking for
	 * non-NULL is thread safe in the x86_64 memory model. */
	group = qpair->group;

	if (group == NULL) {
		/* The qpair hasn't been assigned to a group yet, so we can't
		 * process a disconnect. Send a message to ourselves and try again. */
		spdk_thread_send_msg(spdk_get_thread(), _nvmf_rdma_disconnect_retry, qpair);
		return;
	}

	spdk_thread_send_msg(group->thread, _nvmf_rdma_disconnect, qpair);
}

static int
nvmf_rdma_disconnect(struct rdma_cm_event *evt)
{
	struct spdk_nvmf_qpair *qpair;
	struct spdk_nvmf_rdma_qpair *rqpair;

	if (evt->id == NULL) {
		SPDK_ERRLOG("disconnect request: missing cm_id\n");
		return -1;
	}

	qpair = evt->id->context;
	if (qpair == NULL) {
		SPDK_ERRLOG("disconnect request: no active connection\n");
		return -1;
	}

	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);

	spdk_trace_record(TRACE_RDMA_QP_DISCONNECT, 0, 0, (uintptr_t)rqpair->cm_id, 0);

	spdk_nvmf_rdma_update_ibv_state(rqpair);
	spdk_nvmf_rdma_qpair_inc_refcnt(rqpair);

	_nvmf_rdma_disconnect_retry(qpair);

	return 0;
}

#ifdef DEBUG
static const char *CM_EVENT_STR[] = {
	"RDMA_CM_EVENT_ADDR_RESOLVED",
	"RDMA_CM_EVENT_ADDR_ERROR",
	"RDMA_CM_EVENT_ROUTE_RESOLVED",
	"RDMA_CM_EVENT_ROUTE_ERROR",
	"RDMA_CM_EVENT_CONNECT_REQUEST",
	"RDMA_CM_EVENT_CONNECT_RESPONSE",
	"RDMA_CM_EVENT_CONNECT_ERROR",
	"RDMA_CM_EVENT_UNREACHABLE",
	"RDMA_CM_EVENT_REJECTED",
	"RDMA_CM_EVENT_ESTABLISHED",
	"RDMA_CM_EVENT_DISCONNECTED",
	"RDMA_CM_EVENT_DEVICE_REMOVAL",
	"RDMA_CM_EVENT_MULTICAST_JOIN",
	"RDMA_CM_EVENT_MULTICAST_ERROR",
	"RDMA_CM_EVENT_ADDR_CHANGE",
	"RDMA_CM_EVENT_TIMEWAIT_EXIT"
};
#endif /* DEBUG */

static void
spdk_nvmf_process_cm_event(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn)
{
	struct spdk_nvmf_rdma_transport *rtransport;
	struct rdma_cm_event *event;
	int rc;
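	/* The event channel fd was set to O_NONBLOCK at transport creation, so the
	 * loop below drains every queued RDMA CM event and exits once
	 * rdma_get_cm_event() fails with EAGAIN/EWOULDBLOCK.
	 */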
2049 2050 rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport); 2051 2052 if (rtransport->event_channel == NULL) { 2053 return; 2054 } 2055 2056 while (1) { 2057 rc = rdma_get_cm_event(rtransport->event_channel, &event); 2058 if (rc == 0) { 2059 SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Acceptor Event: %s\n", CM_EVENT_STR[event->event]); 2060 2061 spdk_trace_record(TRACE_RDMA_CM_ASYNC_EVENT, 0, 0, 0, event->event); 2062 2063 switch (event->event) { 2064 case RDMA_CM_EVENT_ADDR_RESOLVED: 2065 case RDMA_CM_EVENT_ADDR_ERROR: 2066 case RDMA_CM_EVENT_ROUTE_RESOLVED: 2067 case RDMA_CM_EVENT_ROUTE_ERROR: 2068 /* No action required. The target never attempts to resolve routes. */ 2069 break; 2070 case RDMA_CM_EVENT_CONNECT_REQUEST: 2071 rc = nvmf_rdma_connect(transport, event, cb_fn); 2072 if (rc < 0) { 2073 SPDK_ERRLOG("Unable to process connect event. rc: %d\n", rc); 2074 break; 2075 } 2076 break; 2077 case RDMA_CM_EVENT_CONNECT_RESPONSE: 2078 /* The target never initiates a new connection. So this will not occur. */ 2079 break; 2080 case RDMA_CM_EVENT_CONNECT_ERROR: 2081 /* Can this happen? The docs say it can, but not sure what causes it. */ 2082 break; 2083 case RDMA_CM_EVENT_UNREACHABLE: 2084 case RDMA_CM_EVENT_REJECTED: 2085 /* These only occur on the client side. */ 2086 break; 2087 case RDMA_CM_EVENT_ESTABLISHED: 2088 /* TODO: Should we be waiting for this event anywhere? */ 2089 break; 2090 case RDMA_CM_EVENT_DISCONNECTED: 2091 case RDMA_CM_EVENT_DEVICE_REMOVAL: 2092 rc = nvmf_rdma_disconnect(event); 2093 if (rc < 0) { 2094 SPDK_ERRLOG("Unable to process disconnect event. rc: %d\n", rc); 2095 break; 2096 } 2097 break; 2098 case RDMA_CM_EVENT_MULTICAST_JOIN: 2099 case RDMA_CM_EVENT_MULTICAST_ERROR: 2100 /* Multicast is not used */ 2101 break; 2102 case RDMA_CM_EVENT_ADDR_CHANGE: 2103 /* Not utilizing this event */ 2104 break; 2105 case RDMA_CM_EVENT_TIMEWAIT_EXIT: 2106 /* For now, do nothing. The target never re-uses queue pairs. */ 2107 break; 2108 default: 2109 SPDK_ERRLOG("Unexpected Acceptor Event [%d]\n", event->event); 2110 break; 2111 } 2112 2113 rdma_ack_cm_event(event); 2114 } else { 2115 if (errno != EAGAIN && errno != EWOULDBLOCK) { 2116 SPDK_ERRLOG("Acceptor Event Error: %s\n", spdk_strerror(errno)); 2117 } 2118 break; 2119 } 2120 } 2121 } 2122 2123 static void 2124 spdk_nvmf_rdma_drain_state_queue(struct spdk_nvmf_rdma_qpair *rqpair, 2125 enum spdk_nvmf_rdma_request_state state) 2126 { 2127 struct spdk_nvmf_rdma_request *rdma_req, *req_tmp; 2128 struct spdk_nvmf_rdma_transport *rtransport; 2129 2130 TAILQ_FOREACH_SAFE(rdma_req, &rqpair->state_queue[state], state_link, req_tmp) { 2131 rtransport = SPDK_CONTAINEROF(rdma_req->req.qpair->transport, 2132 struct spdk_nvmf_rdma_transport, transport); 2133 spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED); 2134 spdk_nvmf_rdma_request_process(rtransport, rdma_req); 2135 } 2136 } 2137 2138 static void 2139 spdk_nvmf_rdma_qpair_recover(struct spdk_nvmf_rdma_qpair *rqpair) 2140 { 2141 enum ibv_qp_state state, next_state; 2142 int recovered; 2143 struct spdk_nvmf_rdma_transport *rtransport; 2144 2145 if (!spdk_nvmf_rdma_qpair_is_idle(&rqpair->qpair)) { 2146 /* There must be outstanding requests down to media. 2147 * If so, wait till they're complete. 
2148 */ 2149 assert(!TAILQ_EMPTY(&rqpair->qpair.outstanding)); 2150 return; 2151 } 2152 2153 state = rqpair->ibv_attr.qp_state; 2154 next_state = state; 2155 2156 SPDK_NOTICELOG("RDMA qpair %u is in state: %s\n", 2157 rqpair->qpair.qid, 2158 str_ibv_qp_state[state]); 2159 2160 if (!(state == IBV_QPS_ERR || state == IBV_QPS_RESET)) { 2161 SPDK_ERRLOG("Can't recover RDMA qpair %u from the state: %s\n", 2162 rqpair->qpair.qid, 2163 str_ibv_qp_state[state]); 2164 spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL); 2165 return; 2166 } 2167 2168 recovered = 0; 2169 while (!recovered) { 2170 switch (state) { 2171 case IBV_QPS_ERR: 2172 next_state = IBV_QPS_RESET; 2173 break; 2174 case IBV_QPS_RESET: 2175 next_state = IBV_QPS_INIT; 2176 break; 2177 case IBV_QPS_INIT: 2178 next_state = IBV_QPS_RTR; 2179 break; 2180 case IBV_QPS_RTR: 2181 next_state = IBV_QPS_RTS; 2182 break; 2183 case IBV_QPS_RTS: 2184 recovered = 1; 2185 break; 2186 default: 2187 SPDK_ERRLOG("RDMA qpair %u unexpected state for recovery: %u\n", 2188 rqpair->qpair.qid, state); 2189 goto error; 2190 } 2191 /* Do not transition into same state */ 2192 if (next_state == state) { 2193 break; 2194 } 2195 2196 if (spdk_nvmf_rdma_set_ibv_state(rqpair, next_state)) { 2197 goto error; 2198 } 2199 2200 state = next_state; 2201 } 2202 2203 rtransport = SPDK_CONTAINEROF(rqpair->qpair.transport, 2204 struct spdk_nvmf_rdma_transport, 2205 transport); 2206 2207 spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair); 2208 2209 return; 2210 error: 2211 SPDK_NOTICELOG("RDMA qpair %u: recovery failed, disconnecting...\n", 2212 rqpair->qpair.qid); 2213 spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL); 2214 } 2215 2216 /* Clean up only the states that can be aborted at any time */ 2217 static void 2218 _spdk_nvmf_rdma_qp_cleanup_safe_states(struct spdk_nvmf_rdma_qpair *rqpair) 2219 { 2220 struct spdk_nvmf_rdma_request *rdma_req, *req_tmp; 2221 2222 spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_NEW); 2223 TAILQ_FOREACH_SAFE(rdma_req, &rqpair->state_queue[RDMA_REQUEST_STATE_NEED_BUFFER], link, req_tmp) { 2224 TAILQ_REMOVE(&rqpair->ch->pending_data_buf_queue, rdma_req, link); 2225 } 2226 spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_NEED_BUFFER); 2227 spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING); 2228 spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_READY_TO_EXECUTE); 2229 spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_EXECUTED); 2230 spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_READY_TO_COMPLETE); 2231 spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_COMPLETED); 2232 } 2233 2234 /* This cleans up all memory. 
It is only safe to use if the rest of the software stack 2235 * has been shut down */ 2236 static void 2237 _spdk_nvmf_rdma_qp_cleanup_all_states(struct spdk_nvmf_rdma_qpair *rqpair) 2238 { 2239 _spdk_nvmf_rdma_qp_cleanup_safe_states(rqpair); 2240 2241 spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_EXECUTING); 2242 spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER); 2243 spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST); 2244 spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_COMPLETING); 2245 } 2246 2247 static void 2248 _spdk_nvmf_rdma_qp_error(void *arg) 2249 { 2250 struct spdk_nvmf_rdma_qpair *rqpair = arg; 2251 enum ibv_qp_state state; 2252 2253 spdk_nvmf_rdma_qpair_dec_refcnt(rqpair); 2254 2255 state = rqpair->ibv_attr.qp_state; 2256 if (state != IBV_QPS_ERR) { 2257 /* Error was already recovered */ 2258 return; 2259 } 2260 2261 if (spdk_nvmf_qpair_is_admin_queue(&rqpair->qpair)) { 2262 spdk_nvmf_ctrlr_abort_aer(rqpair->qpair.ctrlr); 2263 } 2264 2265 _spdk_nvmf_rdma_qp_cleanup_safe_states(rqpair); 2266 2267 /* Attempt recovery. This will exit without recovering if I/O requests 2268 * are still outstanding */ 2269 spdk_nvmf_rdma_qpair_recover(rqpair); 2270 } 2271 2272 static void 2273 spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device) 2274 { 2275 int rc; 2276 struct spdk_nvmf_rdma_qpair *rqpair; 2277 struct ibv_async_event event; 2278 enum ibv_qp_state state; 2279 2280 rc = ibv_get_async_event(device->context, &event); 2281 2282 if (rc) { 2283 SPDK_ERRLOG("Failed to get async_event (%d): %s\n", 2284 errno, spdk_strerror(errno)); 2285 return; 2286 } 2287 2288 SPDK_NOTICELOG("Async event: %s\n", 2289 ibv_event_type_str(event.event_type)); 2290 2291 switch (event.event_type) { 2292 case IBV_EVENT_QP_FATAL: 2293 rqpair = event.element.qp->qp_context; 2294 spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0, 2295 (uintptr_t)rqpair->cm_id, event.event_type); 2296 spdk_nvmf_rdma_update_ibv_state(rqpair); 2297 spdk_nvmf_rdma_qpair_inc_refcnt(rqpair); 2298 spdk_thread_send_msg(rqpair->qpair.group->thread, _spdk_nvmf_rdma_qp_error, rqpair); 2299 break; 2300 case IBV_EVENT_QP_LAST_WQE_REACHED: 2301 /* This event only occurs for shared receive queues, which are not currently supported. */ 2302 break; 2303 case IBV_EVENT_SQ_DRAINED: 2304 /* This event occurs frequently in both error and non-error states. 2305 * Check if the qpair is in an error state before sending a message. 2306 * Note that we're not on the correct thread to access the qpair, but 2307 * the operations that the below calls make all happen to be thread 2308 * safe. 
*/ 2309 rqpair = event.element.qp->qp_context; 2310 spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0, 2311 (uintptr_t)rqpair->cm_id, event.event_type); 2312 state = spdk_nvmf_rdma_update_ibv_state(rqpair); 2313 if (state == IBV_QPS_ERR) { 2314 spdk_nvmf_rdma_qpair_inc_refcnt(rqpair); 2315 spdk_thread_send_msg(rqpair->qpair.group->thread, _spdk_nvmf_rdma_qp_error, rqpair); 2316 } 2317 break; 2318 case IBV_EVENT_QP_REQ_ERR: 2319 case IBV_EVENT_QP_ACCESS_ERR: 2320 case IBV_EVENT_COMM_EST: 2321 case IBV_EVENT_PATH_MIG: 2322 case IBV_EVENT_PATH_MIG_ERR: 2323 rqpair = event.element.qp->qp_context; 2324 spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0, 2325 (uintptr_t)rqpair->cm_id, event.event_type); 2326 spdk_nvmf_rdma_update_ibv_state(rqpair); 2327 break; 2328 case IBV_EVENT_CQ_ERR: 2329 case IBV_EVENT_DEVICE_FATAL: 2330 case IBV_EVENT_PORT_ACTIVE: 2331 case IBV_EVENT_PORT_ERR: 2332 case IBV_EVENT_LID_CHANGE: 2333 case IBV_EVENT_PKEY_CHANGE: 2334 case IBV_EVENT_SM_CHANGE: 2335 case IBV_EVENT_SRQ_ERR: 2336 case IBV_EVENT_SRQ_LIMIT_REACHED: 2337 case IBV_EVENT_CLIENT_REREGISTER: 2338 case IBV_EVENT_GID_CHANGE: 2339 default: 2340 spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0, 0, event.event_type); 2341 break; 2342 } 2343 ibv_ack_async_event(&event); 2344 } 2345 2346 static void 2347 spdk_nvmf_rdma_accept(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn) 2348 { 2349 int nfds, i = 0; 2350 struct spdk_nvmf_rdma_transport *rtransport; 2351 struct spdk_nvmf_rdma_device *device, *tmp; 2352 2353 rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport); 2354 nfds = poll(rtransport->poll_fds, rtransport->npoll_fds, 0); 2355 2356 if (nfds <= 0) { 2357 return; 2358 } 2359 2360 /* The first poll descriptor is RDMA CM event */ 2361 if (rtransport->poll_fds[i++].revents & POLLIN) { 2362 spdk_nvmf_process_cm_event(transport, cb_fn); 2363 nfds--; 2364 } 2365 2366 if (nfds == 0) { 2367 return; 2368 } 2369 2370 /* Second and subsequent poll descriptors are IB async events */ 2371 TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, tmp) { 2372 if (rtransport->poll_fds[i++].revents & POLLIN) { 2373 spdk_nvmf_process_ib_event(device); 2374 nfds--; 2375 } 2376 } 2377 /* check all flagged fd's have been served */ 2378 assert(nfds == 0); 2379 } 2380 2381 static void 2382 spdk_nvmf_rdma_discover(struct spdk_nvmf_transport *transport, 2383 struct spdk_nvme_transport_id *trid, 2384 struct spdk_nvmf_discovery_log_page_entry *entry) 2385 { 2386 entry->trtype = SPDK_NVMF_TRTYPE_RDMA; 2387 entry->adrfam = trid->adrfam; 2388 entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_SPECIFIED; 2389 2390 spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' '); 2391 spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' '); 2392 2393 entry->tsas.rdma.rdma_qptype = SPDK_NVMF_RDMA_QPTYPE_RELIABLE_CONNECTED; 2394 entry->tsas.rdma.rdma_prtype = SPDK_NVMF_RDMA_PRTYPE_NONE; 2395 entry->tsas.rdma.rdma_cms = SPDK_NVMF_RDMA_CMS_RDMA_CM; 2396 } 2397 2398 static struct spdk_nvmf_transport_poll_group * 2399 spdk_nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport) 2400 { 2401 struct spdk_nvmf_rdma_transport *rtransport; 2402 struct spdk_nvmf_rdma_poll_group *rgroup; 2403 struct spdk_nvmf_rdma_poller *poller; 2404 struct spdk_nvmf_rdma_device *device; 2405 2406 rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport); 2407 2408 rgroup = calloc(1, sizeof(*rgroup)); 2409 if (!rgroup) { 2410 return NULL; 2411 } 2412 
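	/* One poller, with its own completion queue of NVMF_RDMA_CQ_SIZE entries, is
	 * created per RDMA device; qpairs added to this poll group are later attached
	 * to the poller whose device matches their port.
	 */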
2413 TAILQ_INIT(&rgroup->pollers); 2414 2415 pthread_mutex_lock(&rtransport->lock); 2416 TAILQ_FOREACH(device, &rtransport->devices, link) { 2417 poller = calloc(1, sizeof(*poller)); 2418 if (!poller) { 2419 SPDK_ERRLOG("Unable to allocate memory for new RDMA poller\n"); 2420 free(rgroup); 2421 pthread_mutex_unlock(&rtransport->lock); 2422 return NULL; 2423 } 2424 2425 poller->device = device; 2426 poller->group = rgroup; 2427 2428 TAILQ_INIT(&poller->qpairs); 2429 2430 poller->cq = ibv_create_cq(device->context, NVMF_RDMA_CQ_SIZE, poller, NULL, 0); 2431 if (!poller->cq) { 2432 SPDK_ERRLOG("Unable to create completion queue\n"); 2433 free(poller); 2434 free(rgroup); 2435 pthread_mutex_unlock(&rtransport->lock); 2436 return NULL; 2437 } 2438 2439 TAILQ_INSERT_TAIL(&rgroup->pollers, poller, link); 2440 } 2441 2442 pthread_mutex_unlock(&rtransport->lock); 2443 return &rgroup->group; 2444 } 2445 2446 static void 2447 spdk_nvmf_rdma_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group) 2448 { 2449 struct spdk_nvmf_rdma_poll_group *rgroup; 2450 struct spdk_nvmf_rdma_poller *poller, *tmp; 2451 struct spdk_nvmf_rdma_qpair *qpair, *tmp_qpair; 2452 2453 rgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_rdma_poll_group, group); 2454 2455 if (!rgroup) { 2456 return; 2457 } 2458 2459 TAILQ_FOREACH_SAFE(poller, &rgroup->pollers, link, tmp) { 2460 TAILQ_REMOVE(&rgroup->pollers, poller, link); 2461 2462 if (poller->cq) { 2463 ibv_destroy_cq(poller->cq); 2464 } 2465 TAILQ_FOREACH_SAFE(qpair, &poller->qpairs, link, tmp_qpair) { 2466 _spdk_nvmf_rdma_qp_cleanup_all_states(qpair); 2467 spdk_nvmf_rdma_qpair_destroy(qpair); 2468 } 2469 2470 free(poller); 2471 } 2472 2473 free(rgroup); 2474 } 2475 2476 static int 2477 spdk_nvmf_rdma_poll_group_add(struct spdk_nvmf_transport_poll_group *group, 2478 struct spdk_nvmf_qpair *qpair) 2479 { 2480 struct spdk_nvmf_rdma_transport *rtransport; 2481 struct spdk_nvmf_rdma_poll_group *rgroup; 2482 struct spdk_nvmf_rdma_qpair *rqpair; 2483 struct spdk_nvmf_rdma_device *device; 2484 struct spdk_nvmf_rdma_poller *poller; 2485 int rc; 2486 2487 rtransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_rdma_transport, transport); 2488 rgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_rdma_poll_group, group); 2489 rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair); 2490 2491 device = rqpair->port->device; 2492 2493 TAILQ_FOREACH(poller, &rgroup->pollers, link) { 2494 if (poller->device == device) { 2495 break; 2496 } 2497 } 2498 2499 if (!poller) { 2500 SPDK_ERRLOG("No poller found for device.\n"); 2501 return -1; 2502 } 2503 2504 TAILQ_INSERT_TAIL(&poller->qpairs, rqpair, link); 2505 rqpair->poller = poller; 2506 2507 rc = spdk_nvmf_rdma_qpair_initialize(qpair); 2508 if (rc < 0) { 2509 SPDK_ERRLOG("Failed to initialize nvmf_rdma_qpair with qpair=%p\n", qpair); 2510 return -1; 2511 } 2512 2513 rqpair->mgmt_channel = spdk_get_io_channel(rtransport); 2514 if (!rqpair->mgmt_channel) { 2515 spdk_nvmf_rdma_event_reject(rqpair->cm_id, SPDK_NVMF_RDMA_ERROR_NO_RESOURCES); 2516 spdk_nvmf_rdma_qpair_destroy(rqpair); 2517 return -1; 2518 } 2519 2520 rqpair->ch = spdk_io_channel_get_ctx(rqpair->mgmt_channel); 2521 assert(rqpair->ch != NULL); 2522 2523 rc = spdk_nvmf_rdma_event_accept(rqpair->cm_id, rqpair); 2524 if (rc) { 2525 /* Try to reject, but we probably can't */ 2526 spdk_nvmf_rdma_event_reject(rqpair->cm_id, SPDK_NVMF_RDMA_ERROR_NO_RESOURCES); 2527 spdk_nvmf_rdma_qpair_destroy(rqpair); 2528 return -1; 2529 } 2530 2531 
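	/* Refresh the cached ibv attributes so that the request completion and
	 * recovery paths see the queue pair's current state right after the
	 * connection has been accepted.
	 */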
spdk_nvmf_rdma_update_ibv_state(rqpair); 2532 2533 return 0; 2534 } 2535 2536 static int 2537 spdk_nvmf_rdma_request_free(struct spdk_nvmf_request *req) 2538 { 2539 struct spdk_nvmf_rdma_request *rdma_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_rdma_request, req); 2540 struct spdk_nvmf_rdma_transport *rtransport = SPDK_CONTAINEROF(req->qpair->transport, 2541 struct spdk_nvmf_rdma_transport, transport); 2542 2543 if (rdma_req->data_from_pool) { 2544 /* Put the buffer/s back in the pool */ 2545 for (uint32_t i = 0; i < rdma_req->req.iovcnt; i++) { 2546 spdk_mempool_put(rtransport->data_buf_pool, rdma_req->data.buffers[i]); 2547 rdma_req->req.iov[i].iov_base = NULL; 2548 rdma_req->data.buffers[i] = NULL; 2549 } 2550 rdma_req->data_from_pool = false; 2551 } 2552 rdma_req->req.length = 0; 2553 rdma_req->req.iovcnt = 0; 2554 rdma_req->req.data = NULL; 2555 spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_FREE); 2556 return 0; 2557 } 2558 2559 static int 2560 spdk_nvmf_rdma_request_complete(struct spdk_nvmf_request *req) 2561 { 2562 struct spdk_nvmf_rdma_transport *rtransport = SPDK_CONTAINEROF(req->qpair->transport, 2563 struct spdk_nvmf_rdma_transport, transport); 2564 struct spdk_nvmf_rdma_request *rdma_req = SPDK_CONTAINEROF(req, 2565 struct spdk_nvmf_rdma_request, req); 2566 struct spdk_nvmf_rdma_qpair *rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, 2567 struct spdk_nvmf_rdma_qpair, qpair); 2568 2569 if (rqpair->ibv_attr.qp_state != IBV_QPS_ERR) { 2570 /* The connection is alive, so process the request as normal */ 2571 spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_EXECUTED); 2572 } else { 2573 /* The connection is dead. Move the request directly to the completed state. */ 2574 spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED); 2575 } 2576 2577 spdk_nvmf_rdma_request_process(rtransport, rdma_req); 2578 2579 if (rqpair->qpair.state == SPDK_NVMF_QPAIR_ACTIVE && rqpair->ibv_attr.qp_state == IBV_QPS_ERR) { 2580 /* If the NVMe-oF layer thinks the connection is active, but the RDMA layer thinks 2581 * the connection is dead, perform error recovery. 
*/ 2582 spdk_nvmf_rdma_qpair_recover(rqpair); 2583 } 2584 2585 return 0; 2586 } 2587 2588 static void 2589 spdk_nvmf_rdma_close_qpair(struct spdk_nvmf_qpair *qpair) 2590 { 2591 struct spdk_nvmf_rdma_qpair *rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair); 2592 2593 spdk_nvmf_rdma_qpair_destroy(rqpair); 2594 } 2595 2596 static struct spdk_nvmf_rdma_request * 2597 get_rdma_req_from_wc(struct ibv_wc *wc) 2598 { 2599 struct spdk_nvmf_rdma_request *rdma_req; 2600 2601 rdma_req = (struct spdk_nvmf_rdma_request *)wc->wr_id; 2602 assert(rdma_req != NULL); 2603 2604 #ifdef DEBUG 2605 struct spdk_nvmf_rdma_qpair *rqpair; 2606 rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair); 2607 2608 assert(rdma_req - rqpair->reqs >= 0); 2609 assert(rdma_req - rqpair->reqs < (ptrdiff_t)rqpair->max_queue_depth); 2610 #endif 2611 2612 return rdma_req; 2613 } 2614 2615 static struct spdk_nvmf_rdma_recv * 2616 get_rdma_recv_from_wc(struct ibv_wc *wc) 2617 { 2618 struct spdk_nvmf_rdma_recv *rdma_recv; 2619 2620 assert(wc->byte_len >= sizeof(struct spdk_nvmf_capsule_cmd)); 2621 2622 rdma_recv = (struct spdk_nvmf_rdma_recv *)wc->wr_id; 2623 assert(rdma_recv != NULL); 2624 2625 #ifdef DEBUG 2626 struct spdk_nvmf_rdma_qpair *rqpair = rdma_recv->qpair; 2627 2628 assert(rdma_recv - rqpair->recvs >= 0); 2629 assert(rdma_recv - rqpair->recvs < (ptrdiff_t)rqpair->max_queue_depth); 2630 #endif 2631 2632 return rdma_recv; 2633 } 2634 2635 #ifdef DEBUG 2636 static int 2637 spdk_nvmf_rdma_req_is_completing(struct spdk_nvmf_rdma_request *rdma_req) 2638 { 2639 return rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST || 2640 rdma_req->state == RDMA_REQUEST_STATE_COMPLETING; 2641 } 2642 #endif 2643 2644 static int 2645 spdk_nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport, 2646 struct spdk_nvmf_rdma_poller *rpoller) 2647 { 2648 struct ibv_wc wc[32]; 2649 struct spdk_nvmf_rdma_request *rdma_req; 2650 struct spdk_nvmf_rdma_recv *rdma_recv; 2651 struct spdk_nvmf_rdma_qpair *rqpair; 2652 int reaped, i; 2653 int count = 0; 2654 bool error = false; 2655 2656 /* Poll for completing operations. */ 2657 reaped = ibv_poll_cq(rpoller->cq, 32, wc); 2658 if (reaped < 0) { 2659 SPDK_ERRLOG("Error polling CQ! (%d): %s\n", 2660 errno, spdk_strerror(errno)); 2661 return -1; 2662 } 2663 2664 for (i = 0; i < reaped; i++) { 2665 /* Handle error conditions */ 2666 if (wc[i].status) { 2667 SPDK_WARNLOG("CQ error on CQ %p, Request 0x%lu (%d): %s\n", 2668 rpoller->cq, wc[i].wr_id, wc[i].status, ibv_wc_status_str(wc[i].status)); 2669 error = true; 2670 2671 switch (wc[i].opcode) { 2672 case IBV_WC_SEND: 2673 rdma_req = get_rdma_req_from_wc(&wc[i]); 2674 rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair); 2675 2676 /* We're going to attempt an error recovery, so force the request into 2677 * the completed state. */ 2678 spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED); 2679 spdk_nvmf_rdma_request_process(rtransport, rdma_req); 2680 break; 2681 case IBV_WC_RECV: 2682 rdma_recv = get_rdma_recv_from_wc(&wc[i]); 2683 rqpair = rdma_recv->qpair; 2684 2685 /* Dump this into the incoming queue. This gets cleaned up when 2686 * the queue pair disconnects or recovers. 
*/ 2687 TAILQ_INSERT_TAIL(&rqpair->incoming_queue, rdma_recv, link); 2688 break; 2689 case IBV_WC_RDMA_WRITE: 2690 case IBV_WC_RDMA_READ: 2691 /* If the data transfer fails still force the queue into the error state, 2692 * but the rdma_req objects should only be manipulated in response to 2693 * SEND and RECV operations. */ 2694 rdma_req = get_rdma_req_from_wc(&wc[i]); 2695 rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair); 2696 break; 2697 default: 2698 SPDK_ERRLOG("Received an unknown opcode on the CQ: %d\n", wc[i].opcode); 2699 continue; 2700 } 2701 2702 /* Set the qpair to the error state. This will initiate a recovery. */ 2703 spdk_nvmf_rdma_set_ibv_state(rqpair, IBV_QPS_ERR); 2704 continue; 2705 } 2706 2707 switch (wc[i].opcode) { 2708 case IBV_WC_SEND: 2709 rdma_req = get_rdma_req_from_wc(&wc[i]); 2710 rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair); 2711 2712 assert(spdk_nvmf_rdma_req_is_completing(rdma_req)); 2713 2714 spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED); 2715 spdk_nvmf_rdma_request_process(rtransport, rdma_req); 2716 2717 count++; 2718 2719 /* Try to process other queued requests */ 2720 spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair); 2721 break; 2722 2723 case IBV_WC_RDMA_WRITE: 2724 rdma_req = get_rdma_req_from_wc(&wc[i]); 2725 rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair); 2726 2727 /* Try to process other queued requests */ 2728 spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair); 2729 break; 2730 2731 case IBV_WC_RDMA_READ: 2732 rdma_req = get_rdma_req_from_wc(&wc[i]); 2733 rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair); 2734 2735 assert(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER); 2736 spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_READY_TO_EXECUTE); 2737 spdk_nvmf_rdma_request_process(rtransport, rdma_req); 2738 2739 /* Try to process other queued requests */ 2740 spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair); 2741 break; 2742 2743 case IBV_WC_RECV: 2744 rdma_recv = get_rdma_recv_from_wc(&wc[i]); 2745 rqpair = rdma_recv->qpair; 2746 2747 TAILQ_INSERT_TAIL(&rqpair->incoming_queue, rdma_recv, link); 2748 /* Try to process other queued requests */ 2749 spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair); 2750 break; 2751 2752 default: 2753 SPDK_ERRLOG("Received an unknown opcode on the CQ: %d\n", wc[i].opcode); 2754 continue; 2755 } 2756 } 2757 2758 if (error == true) { 2759 return -1; 2760 } 2761 2762 return count; 2763 } 2764 2765 static int 2766 spdk_nvmf_rdma_poll_group_poll(struct spdk_nvmf_transport_poll_group *group) 2767 { 2768 struct spdk_nvmf_rdma_transport *rtransport; 2769 struct spdk_nvmf_rdma_poll_group *rgroup; 2770 struct spdk_nvmf_rdma_poller *rpoller; 2771 int count, rc; 2772 2773 rtransport = SPDK_CONTAINEROF(group->transport, struct spdk_nvmf_rdma_transport, transport); 2774 rgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_rdma_poll_group, group); 2775 2776 count = 0; 2777 TAILQ_FOREACH(rpoller, &rgroup->pollers, link) { 2778 rc = spdk_nvmf_rdma_poller_poll(rtransport, rpoller); 2779 if (rc < 0) { 2780 return rc; 2781 } 2782 count += rc; 2783 } 2784 2785 return count; 2786 } 2787 2788 static int 2789 spdk_nvmf_rdma_trid_from_cm_id(struct rdma_cm_id *id, 2790 struct spdk_nvme_transport_id *trid, 2791 bool peer) 2792 { 2793 struct sockaddr *saddr; 2794 uint16_t port; 2795 2796 trid->trtype = 
SPDK_NVME_TRANSPORT_RDMA; 2797 2798 if (peer) { 2799 saddr = rdma_get_peer_addr(id); 2800 } else { 2801 saddr = rdma_get_local_addr(id); 2802 } 2803 switch (saddr->sa_family) { 2804 case AF_INET: { 2805 struct sockaddr_in *saddr_in = (struct sockaddr_in *)saddr; 2806 2807 trid->adrfam = SPDK_NVMF_ADRFAM_IPV4; 2808 inet_ntop(AF_INET, &saddr_in->sin_addr, 2809 trid->traddr, sizeof(trid->traddr)); 2810 if (peer) { 2811 port = ntohs(rdma_get_dst_port(id)); 2812 } else { 2813 port = ntohs(rdma_get_src_port(id)); 2814 } 2815 snprintf(trid->trsvcid, sizeof(trid->trsvcid), "%u", port); 2816 break; 2817 } 2818 case AF_INET6: { 2819 struct sockaddr_in6 *saddr_in = (struct sockaddr_in6 *)saddr; 2820 trid->adrfam = SPDK_NVMF_ADRFAM_IPV6; 2821 inet_ntop(AF_INET6, &saddr_in->sin6_addr, 2822 trid->traddr, sizeof(trid->traddr)); 2823 if (peer) { 2824 port = ntohs(rdma_get_dst_port(id)); 2825 } else { 2826 port = ntohs(rdma_get_src_port(id)); 2827 } 2828 snprintf(trid->trsvcid, sizeof(trid->trsvcid), "%u", port); 2829 break; 2830 } 2831 default: 2832 return -1; 2833 2834 } 2835 2836 return 0; 2837 } 2838 2839 static int 2840 spdk_nvmf_rdma_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair, 2841 struct spdk_nvme_transport_id *trid) 2842 { 2843 struct spdk_nvmf_rdma_qpair *rqpair; 2844 2845 rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair); 2846 2847 return spdk_nvmf_rdma_trid_from_cm_id(rqpair->cm_id, trid, true); 2848 } 2849 2850 static int 2851 spdk_nvmf_rdma_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair, 2852 struct spdk_nvme_transport_id *trid) 2853 { 2854 struct spdk_nvmf_rdma_qpair *rqpair; 2855 2856 rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair); 2857 2858 return spdk_nvmf_rdma_trid_from_cm_id(rqpair->cm_id, trid, false); 2859 } 2860 2861 static int 2862 spdk_nvmf_rdma_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair, 2863 struct spdk_nvme_transport_id *trid) 2864 { 2865 struct spdk_nvmf_rdma_qpair *rqpair; 2866 2867 rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair); 2868 2869 return spdk_nvmf_rdma_trid_from_cm_id(rqpair->listen_id, trid, false); 2870 } 2871 2872 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma = { 2873 .type = SPDK_NVME_TRANSPORT_RDMA, 2874 .opts_init = spdk_nvmf_rdma_opts_init, 2875 .create = spdk_nvmf_rdma_create, 2876 .destroy = spdk_nvmf_rdma_destroy, 2877 2878 .listen = spdk_nvmf_rdma_listen, 2879 .stop_listen = spdk_nvmf_rdma_stop_listen, 2880 .accept = spdk_nvmf_rdma_accept, 2881 2882 .listener_discover = spdk_nvmf_rdma_discover, 2883 2884 .poll_group_create = spdk_nvmf_rdma_poll_group_create, 2885 .poll_group_destroy = spdk_nvmf_rdma_poll_group_destroy, 2886 .poll_group_add = spdk_nvmf_rdma_poll_group_add, 2887 .poll_group_poll = spdk_nvmf_rdma_poll_group_poll, 2888 2889 .req_free = spdk_nvmf_rdma_request_free, 2890 .req_complete = spdk_nvmf_rdma_request_complete, 2891 2892 .qpair_fini = spdk_nvmf_rdma_close_qpair, 2893 .qpair_is_idle = spdk_nvmf_rdma_qpair_is_idle, 2894 .qpair_get_peer_trid = spdk_nvmf_rdma_qpair_get_peer_trid, 2895 .qpair_get_local_trid = spdk_nvmf_rdma_qpair_get_local_trid, 2896 .qpair_get_listen_trid = spdk_nvmf_rdma_qpair_get_listen_trid, 2897 2898 }; 2899 2900 SPDK_LOG_REGISTER_COMPONENT("rdma", SPDK_LOG_RDMA) 2901