/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stddef.h>
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_common.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"

/**
 * Allocate TX queue elements.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param elts_n
 *   Number of elements to allocate.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
txq_alloc_elts(struct txq *txq, unsigned int elts_n)
{
	unsigned int i;
	struct txq_elt (*elts)[elts_n] =
		rte_calloc_socket("TXQ", 1, sizeof(*elts), 0, txq->socket);
	linear_t (*elts_linear)[elts_n] =
		rte_calloc_socket("TXQ", 1, sizeof(*elts_linear), 0,
				  txq->socket);
	struct ibv_mr *mr_linear = NULL;
	int ret = 0;

	if ((elts == NULL) || (elts_linear == NULL)) {
		ERROR("%p: can't allocate packets array", (void *)txq);
		ret = ENOMEM;
		goto error;
	}
	mr_linear =
		ibv_reg_mr(txq->priv->pd, elts_linear, sizeof(*elts_linear),
			   (IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE));
	if (mr_linear == NULL) {
		ERROR("%p: unable to configure MR, ibv_reg_mr() failed",
		      (void *)txq);
		ret = EINVAL;
		goto error;
	}
	for (i = 0; (i != elts_n); ++i) {
		struct txq_elt *elt = &(*elts)[i];

		elt->buf = NULL;
	}
	DEBUG("%p: allocated and configured %u WRs", (void *)txq, elts_n);
	txq->elts_n = elts_n;
	txq->elts = elts;
	txq->elts_head = 0;
	txq->elts_tail = 0;
	txq->elts_comp = 0;
	/* Request send completion every MLX5_PMD_TX_PER_COMP_REQ packets or
	 * at least 4 times per ring. */
	txq->elts_comp_cd_init =
		((MLX5_PMD_TX_PER_COMP_REQ < (elts_n / 4)) ?
		 MLX5_PMD_TX_PER_COMP_REQ : (elts_n / 4));
	txq->elts_comp_cd = txq->elts_comp_cd_init;
	txq->elts_linear = elts_linear;
	txq->mr_linear = mr_linear;
	assert(ret == 0);
	return 0;
error:
	if (mr_linear != NULL)
		claim_zero(ibv_dereg_mr(mr_linear));

	rte_free(elts_linear);
	rte_free(elts);

	DEBUG("%p: failed, freed everything", (void *)txq);
	assert(ret > 0);
	return ret;
}
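
/*
 * Worked example for the completion countdown above, i.e.
 * min(MLX5_PMD_TX_PER_COMP_REQ, elts_n / 4). The values are illustrative
 * (the actual constant lives in mlx5_defs.h): with elts_n = 512 and
 * MLX5_PMD_TX_PER_COMP_REQ = 64, min(64, 128) = 64, so a completion is
 * requested every 64 packets; with a small ring of elts_n = 64,
 * min(64, 16) = 16 still guarantees at least four completions per ring
 * revolution, keeping mbuf recycling timely on small rings.
 */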

/**
 * Free TX queue elements.
 *
 * @param txq
 *   Pointer to TX queue structure.
 */
static void
txq_free_elts(struct txq *txq)
{
	unsigned int elts_n = txq->elts_n;
	unsigned int elts_head = txq->elts_head;
	unsigned int elts_tail = txq->elts_tail;
	struct txq_elt (*elts)[elts_n] = txq->elts;
	linear_t (*elts_linear)[elts_n] = txq->elts_linear;
	struct ibv_mr *mr_linear = txq->mr_linear;

	DEBUG("%p: freeing WRs", (void *)txq);
	txq->elts_n = 0;
	txq->elts_head = 0;
	txq->elts_tail = 0;
	txq->elts_comp = 0;
	txq->elts_comp_cd = 0;
	txq->elts_comp_cd_init = 0;
	txq->elts = NULL;
	txq->elts_linear = NULL;
	txq->mr_linear = NULL;
	if (mr_linear != NULL)
		claim_zero(ibv_dereg_mr(mr_linear));

	rte_free(elts_linear);
	if (elts == NULL)
		return;
	while (elts_tail != elts_head) {
		struct txq_elt *elt = &(*elts)[elts_tail];

		assert(elt->buf != NULL);
		rte_pktmbuf_free(elt->buf);
#ifndef NDEBUG
		/* Poisoning. */
		memset(elt, 0x77, sizeof(*elt));
#endif
		if (++elts_tail == elts_n)
			elts_tail = 0;
	}
	rte_free(elts);
}

/**
 * Clean up a TX queue.
 *
 * Destroy objects, free allocated memory and reset the structure for reuse.
 *
 * @param txq
 *   Pointer to TX queue structure.
 */
void
txq_cleanup(struct txq *txq)
{
	struct ibv_exp_release_intf_params params;
	size_t i;

	DEBUG("cleaning up %p", (void *)txq);
	txq_free_elts(txq);
	txq->poll_cnt = NULL;
#if MLX5_PMD_MAX_INLINE > 0
	txq->send_pending_inline = NULL;
#endif
	txq->send_flush = NULL;
	if (txq->if_qp != NULL) {
		assert(txq->priv != NULL);
		assert(txq->priv->ctx != NULL);
		assert(txq->qp != NULL);
		params = (struct ibv_exp_release_intf_params){
			.comp_mask = 0,
		};
		claim_zero(ibv_exp_release_intf(txq->priv->ctx,
						txq->if_qp,
						&params));
	}
	if (txq->if_cq != NULL) {
		assert(txq->priv != NULL);
		assert(txq->priv->ctx != NULL);
		assert(txq->cq != NULL);
		params = (struct ibv_exp_release_intf_params){
			.comp_mask = 0,
		};
		claim_zero(ibv_exp_release_intf(txq->priv->ctx,
						txq->if_cq,
						&params));
	}
	if (txq->qp != NULL)
		claim_zero(ibv_destroy_qp(txq->qp));
	if (txq->cq != NULL)
		claim_zero(ibv_destroy_cq(txq->cq));
	if (txq->rd != NULL) {
		struct ibv_exp_destroy_res_domain_attr attr = {
			.comp_mask = 0,
		};

		assert(txq->priv != NULL);
		assert(txq->priv->ctx != NULL);
		claim_zero(ibv_exp_destroy_res_domain(txq->priv->ctx,
						      txq->rd,
						      &attr));
	}
	for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
		if (txq->mp2mr[i].mp == NULL)
			break;
		assert(txq->mp2mr[i].mr != NULL);
		claim_zero(ibv_dereg_mr(txq->mp2mr[i].mr));
	}
	memset(txq, 0, sizeof(*txq));
}
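
/*
 * Note: txq_cleanup() ends with memset(txq, 0, sizeof(*txq)), which makes
 * it safe to call again on the same structure: every conditional above then
 * sees NULL pointers and becomes a no-op. claim_zero() (from mlx5_utils.h)
 * evaluates its argument and, in debug builds, asserts that the verbs
 * destroy/release call returned 0.
 */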

/**
 * Configure a TX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param txq
 *   Pointer to TX queue structure.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
	  unsigned int socket, const struct rte_eth_txconf *conf)
{
	struct priv *priv = mlx5_get_priv(dev);
	struct txq tmpl = {
		.priv = priv,
		.socket = socket
	};
	union {
		struct ibv_exp_query_intf_params params;
		struct ibv_exp_qp_init_attr init;
		struct ibv_exp_res_domain_init_attr rd;
		struct ibv_exp_cq_init_attr cq;
		struct ibv_exp_qp_attr mod;
	} attr;
	enum ibv_exp_query_intf_status status;
	int ret = 0;

	(void)conf; /* Thresholds configuration (ignored). */
	if ((desc == 0) || (desc % MLX5_PMD_SGE_WR_N)) {
		ERROR("%p: invalid number of TX descriptors (must be a"
		      " multiple of %d)", (void *)dev, MLX5_PMD_SGE_WR_N);
		return EINVAL;
	}
	desc /= MLX5_PMD_SGE_WR_N;
	/* MRs will be registered in mp2mr[] later. */
	attr.rd = (struct ibv_exp_res_domain_init_attr){
		.comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
			      IBV_EXP_RES_DOMAIN_MSG_MODEL),
		.thread_model = IBV_EXP_THREAD_SINGLE,
		.msg_model = IBV_EXP_MSG_HIGH_BW,
	};
	tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd);
	if (tmpl.rd == NULL) {
		ret = ENOMEM;
		ERROR("%p: RD creation failure: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	attr.cq = (struct ibv_exp_cq_init_attr){
		.comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
		.res_domain = tmpl.rd,
	};
	tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq);
	if (tmpl.cq == NULL) {
		ret = ENOMEM;
		ERROR("%p: CQ creation failure: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	DEBUG("priv->device_attr.max_qp_wr is %d",
	      priv->device_attr.max_qp_wr);
	DEBUG("priv->device_attr.max_sge is %d",
	      priv->device_attr.max_sge);
	attr.init = (struct ibv_exp_qp_init_attr){
		/* CQ to be associated with the send queue. */
		.send_cq = tmpl.cq,
		/* CQ to be associated with the receive queue. */
		.recv_cq = tmpl.cq,
		.cap = {
			/* Max number of outstanding WRs. */
			.max_send_wr = ((priv->device_attr.max_qp_wr < desc) ?
					priv->device_attr.max_qp_wr :
					desc),
			/* Max number of scatter/gather elements in a WR. */
			.max_send_sge = ((priv->device_attr.max_sge <
					  MLX5_PMD_SGE_WR_N) ?
					 priv->device_attr.max_sge :
					 MLX5_PMD_SGE_WR_N),
#if MLX5_PMD_MAX_INLINE > 0
			.max_inline_data = MLX5_PMD_MAX_INLINE,
#endif
		},
		.qp_type = IBV_QPT_RAW_PACKET,
		/* Do *NOT* enable this, completion events are managed per
		 * TX burst. */
		.sq_sig_all = 0,
		.pd = priv->pd,
		.res_domain = tmpl.rd,
		.comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
			      IBV_EXP_QP_INIT_ATTR_RES_DOMAIN),
	};
	tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
	if (tmpl.qp == NULL) {
		ret = (errno ? errno : EINVAL);
		ERROR("%p: QP creation failure: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
#if MLX5_PMD_MAX_INLINE > 0
	/* ibv_create_qp() updates this value. */
	tmpl.max_inline = attr.init.cap.max_inline_data;
#endif
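	/*
	 * A verbs QP starts in the RESET state and must be walked through
	 * INIT -> RTR -> RTS before it can post send work requests; the
	 * three ibv_exp_modify_qp() calls below perform exactly that
	 * sequence. Even though this queue only transmits, the RTR
	 * (ready-to-receive) step cannot be skipped, as the QP state
	 * machine is linear.
	 */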
	attr.mod = (struct ibv_exp_qp_attr){
		/* Move the QP to this state. */
		.qp_state = IBV_QPS_INIT,
		/* Primary port number. */
		.port_num = priv->port
	};
	ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod,
				(IBV_EXP_QP_STATE | IBV_EXP_QP_PORT));
	if (ret) {
		ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	ret = txq_alloc_elts(&tmpl, desc);
	if (ret) {
		ERROR("%p: TXQ allocation failed: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	attr.mod = (struct ibv_exp_qp_attr){
		.qp_state = IBV_QPS_RTR
	};
	ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE);
	if (ret) {
		ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	attr.mod.qp_state = IBV_QPS_RTS;
	ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE);
	if (ret) {
		ERROR("%p: QP state to IBV_QPS_RTS failed: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	attr.params = (struct ibv_exp_query_intf_params){
		.intf_scope = IBV_EXP_INTF_GLOBAL,
		.intf = IBV_EXP_INTF_CQ,
		.obj = tmpl.cq,
	};
	tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
	if (tmpl.if_cq == NULL) {
		ret = EINVAL;
		ERROR("%p: CQ interface family query failed with status %d",
		      (void *)dev, status);
		goto error;
	}
	attr.params = (struct ibv_exp_query_intf_params){
		.intf_scope = IBV_EXP_INTF_GLOBAL,
		.intf = IBV_EXP_INTF_QP_BURST,
		.obj = tmpl.qp,
#ifdef HAVE_VERBS_VLAN_INSERTION
		.intf_version = 1,
#endif
#ifdef HAVE_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR
		/* Enable multi-packet send if supported. */
		.family_flags =
			(priv->mps ?
			 IBV_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR :
			 0),
#endif
	};
	tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
	if (tmpl.if_qp == NULL) {
		ret = EINVAL;
		ERROR("%p: QP interface family query failed with status %d",
		      (void *)dev, status);
		goto error;
	}
	/* Clean up txq in case we're reinitializing it. */
	DEBUG("%p: cleaning-up old txq just in case", (void *)txq);
	txq_cleanup(txq);
	*txq = tmpl;
	txq->poll_cnt = txq->if_cq->poll_cnt;
#if MLX5_PMD_MAX_INLINE > 0
	txq->send_pending_inline = txq->if_qp->send_pending_inline;
#ifdef HAVE_VERBS_VLAN_INSERTION
	txq->send_pending_inline_vlan = txq->if_qp->send_pending_inline_vlan;
#endif
#endif
#if MLX5_PMD_SGE_WR_N > 1
	txq->send_pending_sg_list = txq->if_qp->send_pending_sg_list;
#ifdef HAVE_VERBS_VLAN_INSERTION
	txq->send_pending_sg_list_vlan = txq->if_qp->send_pending_sg_list_vlan;
#endif
#endif
	txq->send_pending = txq->if_qp->send_pending;
#ifdef HAVE_VERBS_VLAN_INSERTION
	txq->send_pending_vlan = txq->if_qp->send_pending_vlan;
#endif
	txq->send_flush = txq->if_qp->send_flush;
	DEBUG("%p: txq updated with %p", (void *)txq, (void *)&tmpl);
	/* Pre-register known mempools. */
	rte_mempool_walk(txq_mp2mr_iter, txq);
	assert(ret == 0);
	return 0;
error:
	txq_cleanup(&tmpl);
	assert(ret > 0);
	return ret;
}
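
/*
 * Note on the assignments near the end of txq_setup(): ibv_exp_query_intf()
 * returns a family of specialized function pointers (send_pending*,
 * send_flush, poll_cnt, ...). Caching them in struct txq lets the data path
 * call them directly on every TX burst instead of going through the generic
 * verbs entry points.
 */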

/**
 * DPDK callback to configure a TX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_txconf *conf)
{
	struct priv *priv = dev->data->dev_private;
	struct txq *txq = (*priv->txqs)[idx];
	int ret;

	if (mlx5_is_secondary())
		return -E_RTE_SECONDARY;

	priv_lock(priv);
	DEBUG("%p: configuring queue %u for %u descriptors",
	      (void *)dev, idx, desc);
	if (idx >= priv->txqs_n) {
		ERROR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, idx, priv->txqs_n);
		priv_unlock(priv);
		return -EOVERFLOW;
	}
	if (txq != NULL) {
		DEBUG("%p: reusing already allocated queue index %u (%p)",
		      (void *)dev, idx, (void *)txq);
		if (priv->started) {
			priv_unlock(priv);
			return -EEXIST;
		}
		(*priv->txqs)[idx] = NULL;
		txq_cleanup(txq);
	} else {
		txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0, socket);
		if (txq == NULL) {
			ERROR("%p: unable to allocate queue index %u",
			      (void *)dev, idx);
			priv_unlock(priv);
			return -ENOMEM;
		}
	}
	ret = txq_setup(dev, txq, desc, socket, conf);
	if (ret)
		rte_free(txq);
	else {
		txq->stats.idx = idx;
		DEBUG("%p: adding TX queue %p to list",
		      (void *)dev, (void *)txq);
		(*priv->txqs)[idx] = txq;
		/* Update send callback. */
		dev->tx_pkt_burst = mlx5_tx_burst;
	}
	priv_unlock(priv);
	return -ret;
}

/**
 * DPDK callback to release a TX queue.
 *
 * @param dpdk_txq
 *   Generic TX queue pointer.
 */
void
mlx5_tx_queue_release(void *dpdk_txq)
{
	struct txq *txq = (struct txq *)dpdk_txq;
	struct priv *priv;
	unsigned int i;

	if (mlx5_is_secondary())
		return;

	if (txq == NULL)
		return;
	priv = txq->priv;
	priv_lock(priv);
	for (i = 0; (i != priv->txqs_n); ++i)
		if ((*priv->txqs)[i] == txq) {
			DEBUG("%p: removing TX queue %p from list",
			      (void *)priv->dev, (void *)txq);
			(*priv->txqs)[i] = NULL;
			break;
		}
	txq_cleanup(txq);
	rte_free(txq);
	priv_unlock(priv);
}

/**
 * DPDK callback for TX in secondary processes.
 *
 * This function configures all queues from primary process information
 * if necessary before reverting to the normal TX burst callback.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx5_tx_burst_secondary_setup(void *dpdk_txq, struct rte_mbuf **pkts,
			      uint16_t pkts_n)
{
	struct txq *txq = dpdk_txq;
	struct priv *priv = mlx5_secondary_data_setup(txq->priv);
	struct priv *primary_priv;
	unsigned int index;

	if (priv == NULL)
		return 0;
	primary_priv =
		mlx5_secondary_data[priv->dev->data->port_id].primary_priv;
	/* Look for queue index in both private structures. */
	for (index = 0; index != priv->txqs_n; ++index)
		if (((*primary_priv->txqs)[index] == txq) ||
		    ((*priv->txqs)[index] == txq))
			break;
	if (index == priv->txqs_n)
		return 0;
	txq = (*priv->txqs)[index];
	return priv->dev->tx_pkt_burst(txq, pkts, pkts_n);
}
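
/**
 * Usage sketch (illustrative only, not part of the driver): applications
 * reach mlx5_tx_queue_setup() and mlx5_tx_burst() through the generic
 * ethdev API. The port identifier and sizes below are hypothetical.
 *
 * @code
 * uint16_t port_id = 0;       // hypothetical port
 * struct rte_mbuf *pkts[32];  // filled elsewhere
 * uint16_t sent;
 *
 * // Configure TX queue 0 with 512 descriptors on NUMA socket 0; a NULL
 * // txconf selects defaults. This dispatches to mlx5_tx_queue_setup().
 * if (rte_eth_tx_queue_setup(port_id, 0, 512, 0, NULL) != 0)
 *         rte_exit(EXIT_FAILURE, "cannot configure TX queue");
 *
 * // After rte_eth_dev_start(), transmit a burst; this dispatches to
 * // mlx5_tx_burst() (or, in a secondary process, goes through
 * // mlx5_tx_burst_secondary_setup() first).
 * sent = rte_eth_tx_burst(port_id, 0, pkts, 32);
 * @endcode
 */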