/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Intel Corporation. All rights reserved.
 * Copyright (c) Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include <rdma/rdma_cma.h>

#include "spdk/stdinc.h"
#include "spdk/string.h"
#include "spdk/likely.h"

#include "spdk_internal/rdma_provider.h"
#include "spdk_internal/rdma_utils.h"
#include "spdk/log.h"

struct spdk_rdma_provider_qp *
spdk_rdma_provider_qp_create(struct rdma_cm_id *cm_id,
			     struct spdk_rdma_provider_qp_init_attr *qp_attr)
{
	struct spdk_rdma_provider_qp *spdk_rdma_qp;
	int rc;
	struct ibv_qp_init_attr attr = {
		.qp_context = qp_attr->qp_context,
		.send_cq = qp_attr->send_cq,
		.recv_cq = qp_attr->recv_cq,
		.srq = qp_attr->srq,
		.cap = qp_attr->cap,
		.qp_type = IBV_QPT_RC
	};

	if (qp_attr->domain_transfer) {
		SPDK_ERRLOG("verbs provider doesn't support memory domain transfer functionality\n");
		return NULL;
	}

	spdk_rdma_qp = calloc(1, sizeof(*spdk_rdma_qp));
	if (!spdk_rdma_qp) {
		SPDK_ERRLOG("qp memory allocation failed\n");
		return NULL;
	}

	if (qp_attr->stats) {
		spdk_rdma_qp->stats = qp_attr->stats;
		spdk_rdma_qp->shared_stats = true;
	} else {
		spdk_rdma_qp->stats = calloc(1, sizeof(*spdk_rdma_qp->stats));
		if (!spdk_rdma_qp->stats) {
			SPDK_ERRLOG("qp statistics memory allocation failed\n");
			free(spdk_rdma_qp);
			return NULL;
		}
	}

	rc = rdma_create_qp(cm_id, qp_attr->pd, &attr);
	if (rc) {
		SPDK_ERRLOG("Failed to create qp, rc %d, errno %s (%d)\n", rc, spdk_strerror(errno), errno);
		/* Only free the stats if they were allocated here, not shared by the caller. */
		if (!spdk_rdma_qp->shared_stats) {
			free(spdk_rdma_qp->stats);
		}
		free(spdk_rdma_qp);
		return NULL;
	}
	spdk_rdma_qp->qp = cm_id->qp;
	spdk_rdma_qp->cm_id = cm_id;
	spdk_rdma_qp->domain = spdk_rdma_utils_get_memory_domain(qp_attr->pd);
	if (!spdk_rdma_qp->domain) {
		spdk_rdma_provider_qp_destroy(spdk_rdma_qp);
		return NULL;
	}

	/* Report back the capabilities actually granted by the device. */
	qp_attr->cap = attr.cap;

	return spdk_rdma_qp;
}

int
spdk_rdma_provider_qp_accept(struct spdk_rdma_provider_qp *spdk_rdma_qp,
			     struct rdma_conn_param *conn_param)
{
	assert(spdk_rdma_qp != NULL);
	assert(spdk_rdma_qp->cm_id != NULL);

	return rdma_accept(spdk_rdma_qp->cm_id, conn_param);
}

int
spdk_rdma_provider_qp_complete_connect(struct spdk_rdma_provider_qp *spdk_rdma_qp)
{
	/* Nothing to be done for Verbs */
	return 0;
}

void
spdk_rdma_provider_qp_destroy(struct spdk_rdma_provider_qp *spdk_rdma_qp)
{
	assert(spdk_rdma_qp != NULL);

	if (spdk_rdma_qp->send_wrs.first != NULL) {
		SPDK_WARNLOG("Destroying qpair with queued Work Requests\n");
	}

	if (spdk_rdma_qp->qp) {
		rdma_destroy_qp(spdk_rdma_qp->cm_id);
	}

	if (!spdk_rdma_qp->shared_stats) {
		free(spdk_rdma_qp->stats);
	}
	if (spdk_rdma_qp->domain) {
		spdk_rdma_utils_put_memory_domain(spdk_rdma_qp->domain);
	}

	free(spdk_rdma_qp);
}
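/*
 * Usage sketch: a minimal lifecycle example for the create/destroy pair above.
 * It assumes a connected rdma_cm_id (cm_id), a protection domain (pd) and a
 * completion queue (cq) obtained elsewhere; these names are placeholders, not
 * symbols defined in this file.
 *
 *	struct spdk_rdma_provider_qp_init_attr init_attr = {
 *		.pd = pd,
 *		.send_cq = cq,
 *		.recv_cq = cq,
 *		.cap = {
 *			.max_send_wr = 32, .max_recv_wr = 32,
 *			.max_send_sge = 1, .max_recv_sge = 1,
 *		},
 *	};
 *	struct spdk_rdma_provider_qp *qp;
 *
 *	qp = spdk_rdma_provider_qp_create(cm_id, &init_attr);
 *	if (qp == NULL) {
 *		// handle error; on success init_attr.cap holds the granted caps
 *	}
 *	...
 *	spdk_rdma_provider_qp_destroy(qp);
 */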
int
spdk_rdma_provider_qp_disconnect(struct spdk_rdma_provider_qp *spdk_rdma_qp)
{
	int rc = 0;

	assert(spdk_rdma_qp != NULL);

	if (spdk_rdma_qp->cm_id) {
		rc = rdma_disconnect(spdk_rdma_qp->cm_id);
		if (rc) {
			if (errno == EINVAL && spdk_rdma_qp->qp->context->device->transport_type == IBV_TRANSPORT_IWARP) {
				/* rdma_disconnect may return an error and set errno to EINVAL for iWARP.
				 * This behaviour is expected since iWARP handles the disconnect event
				 * differently than IB, and the qpair is already in the error state
				 * when rdma_disconnect is called. */
				return 0;
			}
			SPDK_ERRLOG("rdma_disconnect failed, errno %s (%d)\n", spdk_strerror(errno), errno);
		}
	}

	return rc;
}

bool
spdk_rdma_provider_qp_queue_send_wrs(struct spdk_rdma_provider_qp *spdk_rdma_qp,
				     struct ibv_send_wr *first)
{
	struct ibv_send_wr *last;

	assert(spdk_rdma_qp);
	assert(first);

	/* Walk the WR chain to find its tail and count every WR being queued. */
	spdk_rdma_qp->stats->send.num_submitted_wrs++;
	last = first;
	while (last->next != NULL) {
		last = last->next;
		spdk_rdma_qp->stats->send.num_submitted_wrs++;
	}

	/* Returns true if this chain starts a new batch (the queue was empty),
	 * false if it was appended to an existing batch. */
	if (spdk_rdma_qp->send_wrs.first == NULL) {
		spdk_rdma_qp->send_wrs.first = first;
		spdk_rdma_qp->send_wrs.last = last;
		return true;
	} else {
		spdk_rdma_qp->send_wrs.last->next = first;
		spdk_rdma_qp->send_wrs.last = last;
		return false;
	}
}

int
spdk_rdma_provider_qp_flush_send_wrs(struct spdk_rdma_provider_qp *spdk_rdma_qp,
				     struct ibv_send_wr **bad_wr)
{
	int rc;

	assert(spdk_rdma_qp);
	assert(bad_wr);

	if (spdk_unlikely(!spdk_rdma_qp->send_wrs.first)) {
		return 0;
	}

	/* Post the whole batch with a single doorbell update. */
	rc = ibv_post_send(spdk_rdma_qp->qp, spdk_rdma_qp->send_wrs.first, bad_wr);

	spdk_rdma_qp->send_wrs.first = NULL;
	spdk_rdma_qp->stats->send.doorbell_updates++;

	return rc;
}

bool
spdk_rdma_provider_accel_sequence_supported(void)
{
	return false;
}
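/*
 * Send-path sketch: the intended batching pattern for the queue/flush pair
 * above is to queue any number of WR chains, then post them all with a single
 * ibv_post_send() doorbell via the flush call. qp, wr_a and wr_b are
 * placeholder names, assuming an established qpair and pre-built WRs.
 *
 *	struct ibv_send_wr *bad_wr = NULL;
 *
 *	spdk_rdma_provider_qp_queue_send_wrs(qp, &wr_a);  // true: starts a new batch
 *	spdk_rdma_provider_qp_queue_send_wrs(qp, &wr_b);  // false: appended to batch
 *	if (spdk_rdma_provider_qp_flush_send_wrs(qp, &bad_wr)) {
 *		// ibv_post_send() failed; bad_wr points at the first WR not posted
 *	}
 */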