/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/queue.h>

#include "mlx5_autoconf.h"

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <ethdev_driver.h>
#include <rte_common.h>

#include <mlx5_glue.h>
#include <mlx5_common.h>
#include <mlx5_common_mr.h>
#include <mlx5_verbs.h>
#include <mlx5_rx.h>
#include <mlx5_tx.h>
#include <mlx5_utils.h>
#include <mlx5_malloc.h>

/**
 * Register mr. Given a protection domain pointer, a memory start address and
 * a length, register the memory region.
 *
 * @param[in] pd
 *   Pointer to protection domain context.
 * @param[in] addr
 *   Pointer to memory start address.
 * @param[in] length
 *   Length of the memory to register.
 * @param[out] pmd_mr
 *   pmd_mr struct set with lkey, address, length and pointer to mr object.
 *
 * @return
 *   0 on successful registration, -1 otherwise.
 */
static int
mlx5_reg_mr(void *pd, void *addr, size_t length,
	    struct mlx5_pmd_mr *pmd_mr)
{
	return mlx5_common_verbs_reg_mr(pd, addr, length, pmd_mr);
}

/**
 * Deregister mr. Given the mlx5 pmd MR - deregister the MR.
 *
 * @param[in] pmd_mr
 *   pmd_mr struct set with lkey, address, length and pointer to mr object.
 */
static void
mlx5_dereg_mr(struct mlx5_pmd_mr *pmd_mr)
{
	mlx5_common_verbs_dereg_mr(pmd_mr);
}

/* verbs operations. */
const struct mlx5_mr_ops mlx5_mr_verbs_ops = {
	.reg_mr = mlx5_reg_mr,
	.dereg_mr = mlx5_dereg_mr,
};

/**
 * Modify Rx WQ VLAN stripping offload.
 *
 * @param rxq_obj
 *   Rx queue object.
 * @param on
 *   Enable (1) or disable (0) VLAN stripping.
 *
 * @return 0 on success, non-0 otherwise
 */
static int
mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
{
	uint16_t vlan_offloads =
		(on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |
		0;
	struct ibv_wq_attr mod;
	mod = (struct ibv_wq_attr){
		.attr_mask = IBV_WQ_ATTR_FLAGS,
		.flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
		.flags = vlan_offloads,
	};

	return mlx5_glue->modify_wq(rxq_obj->wq, &mod);
}

/**
 * Modifies the attributes for the specified WQ.
 *
 * @param rxq_obj
 *   Verbs Rx queue object.
 * @param type
 *   Requested WQ state.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ibv_modify_wq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)
{
	struct ibv_wq_attr mod = {
		.attr_mask = IBV_WQ_ATTR_STATE,
		.wq_state = (enum ibv_wq_state)type,
	};

	return mlx5_glue->modify_wq(rxq_obj->wq, &mod);
}

/**
 * Modify QP using Verbs API.
 *
 * @param obj
 *   Verbs Tx queue object.
 * @param type
 *   Type of queue state change (MLX5_TXQ_MOD_*).
 * @param dev_port
 *   IB device port number.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
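 *
 * Queues being brought to the ready state are driven through the Verbs
 * INIT, RTR and RTS states; a ready-to-reset request stops after the
 * transition to RESET.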
 */
static int
mlx5_ibv_modify_qp(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
		   uint8_t dev_port)
{
	struct ibv_qp_attr mod = {
		.qp_state = IBV_QPS_RESET,
		.port_num = dev_port,
	};
	int attr_mask = (IBV_QP_STATE | IBV_QP_PORT);
	int ret;

	if (type != MLX5_TXQ_MOD_RST2RDY) {
		ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE);
		if (ret) {
			DRV_LOG(ERR, "Cannot change Tx QP state to RESET %s",
				strerror(errno));
			rte_errno = errno;
			return ret;
		}
		if (type == MLX5_TXQ_MOD_RDY2RST)
			return 0;
	}
	if (type == MLX5_TXQ_MOD_ERR2RDY)
		attr_mask = IBV_QP_STATE;
	mod.qp_state = IBV_QPS_INIT;
	ret = mlx5_glue->modify_qp(obj->qp, &mod, attr_mask);
	if (ret) {
		DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s",
			strerror(errno));
		rte_errno = errno;
		return ret;
	}
	mod.qp_state = IBV_QPS_RTR;
	ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE);
	if (ret) {
		DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s",
			strerror(errno));
		rte_errno = errno;
		return ret;
	}
	mod.qp_state = IBV_QPS_RTS;
	ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE);
	if (ret) {
		DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s",
			strerror(errno));
		rte_errno = errno;
		return ret;
	}
	return 0;
}

/**
 * Create a CQ Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The Verbs CQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct ibv_cq *
mlx5_rxq_ibv_cq_create(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
	struct {
		struct ibv_cq_init_attr_ex ibv;
		struct mlx5dv_cq_init_attr mlx5;
	} cq_attr;

	cq_attr.ibv = (struct ibv_cq_init_attr_ex){
		.cqe = cqe_n,
		.channel = rxq_obj->ibv_channel,
		.comp_mask = 0,
	};
	cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
		.comp_mask = 0,
	};
	if (priv->config.cqe_comp && !rxq_data->hw_timestamp) {
		cq_attr.mlx5.comp_mask |=
				MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
		rxq_data->byte_mask = UINT32_MAX;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
		if (mlx5_rxq_mprq_enabled(rxq_data)) {
			cq_attr.mlx5.cqe_comp_res_format =
					MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX;
			rxq_data->mcqe_format =
					MLX5_CQE_RESP_FORMAT_CSUM_STRIDX;
		} else {
			cq_attr.mlx5.cqe_comp_res_format =
					MLX5DV_CQE_RES_FORMAT_HASH;
			rxq_data->mcqe_format =
					MLX5_CQE_RESP_FORMAT_HASH;
		}
#else
		cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
		rxq_data->mcqe_format = MLX5_CQE_RESP_FORMAT_HASH;
#endif
		/*
		 * For vectorized Rx, it must not be doubled in order to
		 * make cq_ci and rq_ci aligned.
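		 * The CQE count is therefore doubled only when
		 * mlx5_rxq_check_vec_support() reports that the vectorized
		 * Rx path cannot be used for this queue.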
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cq_attr.ibv.cqe *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for HW"
			" timestamp.",
			dev->data->port_id);
	}
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
	if (RTE_CACHE_LINE_SIZE == 128) {
		cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
		cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
	}
#endif
	return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,
							      &cq_attr.ibv,
							      &cq_attr.mlx5));
}

/**
 * Create a WQ Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The Verbs WQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct ibv_wq *
mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
	unsigned int wqe_n = 1 << rxq_data->elts_n;
	struct {
		struct ibv_wq_init_attr ibv;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
		struct mlx5dv_wq_init_attr mlx5;
#endif
	} wq_attr;

	wq_attr.ibv = (struct ibv_wq_init_attr){
		.wq_context = NULL, /* Could be useful in the future. */
		.wq_type = IBV_WQT_RQ,
		/* Max number of outstanding WRs. */
		.max_wr = wqe_n >> rxq_data->sges_n,
		/* Max number of scatter/gather elements in a WR. */
		.max_sge = 1 << rxq_data->sges_n,
		.pd = priv->sh->pd,
		.cq = rxq_obj->ibv_cq,
		.comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
		.create_flags = (rxq_data->vlan_strip ?
				 IBV_WQ_FLAGS_CVLAN_STRIPPING : 0),
	};
	/* By default, FCS (CRC) is stripped by hardware. */
	if (rxq_data->crc_present) {
		wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
	}
	if (priv->config.hw_padding) {
#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
		wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
		wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
#endif
	}
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){
		.comp_mask = 0,
	};
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		struct mlx5dv_striding_rq_init_attr *mprq_attr =
			&wq_attr.mlx5.striding_rq_attrs;

		wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
		*mprq_attr = (struct mlx5dv_striding_rq_init_attr){
			.single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
			.single_wqe_log_num_of_strides = rxq_data->strd_num_n,
			.two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
		};
	}
	rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,
					      &wq_attr.mlx5);
#else
	rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);
#endif
	if (rxq_obj->wq) {
		/*
		 * Make sure number of WRs*SGEs match expectations since a
		 * queue cannot allocate more than "desc" buffers.
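		 * Each WR carries 1 << sges_n scatter entries (one per
		 * element), hence max_wr is the element count shifted right
		 * by sges_n.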
		 */
		if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
		    wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) {
			DRV_LOG(ERR,
				"Port %u Rx queue %u requested %u*%u but got"
				" %u*%u WRs*SGEs.",
				dev->data->port_id, idx,
				wqe_n >> rxq_data->sges_n,
				(1 << rxq_data->sges_n),
				wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
			claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
			rxq_obj->wq = NULL;
			rte_errno = EINVAL;
		}
	}
	return rxq_obj->wq;
}

/**
 * Create the Rx queue Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	struct mlx5dv_cq cq_info;
	struct mlx5dv_rwq rwq;
	int ret = 0;
	struct mlx5dv_obj obj;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	tmpl->rxq_ctrl = rxq_ctrl;
	if (rxq_ctrl->irq) {
		tmpl->ibv_channel =
			mlx5_glue->create_comp_channel(priv->sh->ctx);
		if (!tmpl->ibv_channel) {
			DRV_LOG(ERR, "Port %u: comp channel creation failure.",
				dev->data->port_id);
			rte_errno = ENOMEM;
			goto error;
		}
		tmpl->fd = ((struct ibv_comp_channel *)(tmpl->ibv_channel))->fd;
	}
	/* Create CQ using Verbs API. */
	tmpl->ibv_cq = mlx5_rxq_ibv_cq_create(dev, idx);
	if (!tmpl->ibv_cq) {
		DRV_LOG(ERR, "Port %u Rx queue %u CQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	obj.cq.in = tmpl->ibv_cq;
	obj.cq.out = &cq_info;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
	if (ret) {
		rte_errno = ret;
		goto error;
	}
	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
		DRV_LOG(ERR,
			"Port %u wrong MLX5_CQE_SIZE environment "
			"variable value: it should be set to %u.",
			dev->data->port_id, RTE_CACHE_LINE_SIZE);
		rte_errno = EINVAL;
		goto error;
	}
	/* Fill the rings. */
	rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
	rxq_data->cq_db = cq_info.dbrec;
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
	rxq_data->cq_uar = cq_info.cq_uar;
	rxq_data->cqn = cq_info.cqn;
	/* Create WQ (RQ) using Verbs API. */
	tmpl->wq = mlx5_rxq_ibv_wq_create(dev, idx);
	if (!tmpl->wq) {
		DRV_LOG(ERR, "Port %u Rx queue %u WQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready.
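	 * A Verbs WQ is created in the RESET state and must be switched to
	 * RDY before the hardware starts consuming posted Rx buffers.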
	 */
	ret = mlx5_ibv_modify_wq(tmpl, IBV_WQS_RDY);
	if (ret) {
		DRV_LOG(ERR,
			"Port %u Rx queue %u WQ state to IBV_WQS_RDY failed.",
			dev->data->port_id, idx);
		rte_errno = ret;
		goto error;
	}
	obj.rwq.in = tmpl->wq;
	obj.rwq.out = &rwq;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
	if (ret) {
		rte_errno = ret;
		goto error;
	}
	rxq_data->wqes = rwq.buf;
	rxq_data->rq_db = rwq.dbrec;
	rxq_data->cq_arm_sn = 0;
	mlx5_rxq_initialize(rxq_data);
	rxq_data->cq_ci = 0;
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	rxq_ctrl->wqn = ((struct ibv_wq *)(tmpl->wq))->wq_num;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (tmpl->wq)
		claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
	if (tmpl->ibv_cq)
		claim_zero(mlx5_glue->destroy_cq(tmpl->ibv_cq));
	if (tmpl->ibv_channel)
		claim_zero(mlx5_glue->destroy_comp_channel(tmpl->ibv_channel));
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Release an Rx verbs queue object.
 *
 * @param rxq_obj
 *   Verbs Rx queue object.
 */
static void
mlx5_rxq_ibv_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
	MLX5_ASSERT(rxq_obj);
	MLX5_ASSERT(rxq_obj->wq);
	MLX5_ASSERT(rxq_obj->ibv_cq);
	claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
	claim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));
	if (rxq_obj->ibv_channel)
		claim_zero(mlx5_glue->destroy_comp_channel
						(rxq_obj->ibv_channel));
}

/**
 * Get event for an Rx verbs queue object.
 *
 * @param rxq_obj
 *   Verbs Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_ibv_get_event(struct mlx5_rxq_obj *rxq_obj)
{
	struct ibv_cq *ev_cq;
	void *ev_ctx;
	int ret = mlx5_glue->get_cq_event(rxq_obj->ibv_channel,
					  &ev_cq, &ev_ctx);

	if (ret < 0 || ev_cq != rxq_obj->ibv_cq)
		goto exit;
	mlx5_glue->ack_cq_events(rxq_obj->ibv_cq, 1);
	return 0;
exit:
	if (ret < 0)
		rte_errno = errno;
	else
		rte_errno = EINVAL;
	return -rte_errno;
}

/**
 * Creates an indirection table from the receive work queues of the listed
 * Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param ind_tbl
 *   Verbs indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ibv_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
		       struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_wq *wq[1 << log_n];
	unsigned int i, j;

	MLX5_ASSERT(ind_tbl);
	for (i = 0; i != ind_tbl->queues_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[ind_tbl->queues[i]];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of(rxq, struct mlx5_rxq_ctrl, rxq);

		wq[i] = rxq_ctrl->obj->wq;
	}
	MLX5_ASSERT(i > 0);
	/* Finalise indirection table.
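	 * The table size must be a power of two, so the remaining entries
	 * are filled by wrapping around the WQs collected above.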
	 */
	for (j = 0; i != (unsigned int)(1 << log_n); ++j, ++i)
		wq[i] = wq[j];
	ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table(priv->sh->ctx,
					&(struct ibv_rwq_ind_table_init_attr){
						.log_ind_tbl_size = log_n,
						.ind_tbl = wq,
						.comp_mask = 0,
					});
	if (!ind_tbl->ind_table) {
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

/**
 * Destroys the specified Indirection Table.
 *
 * @param ind_table
 *   Indirection table to release.
 */
static void
mlx5_ibv_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
	claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
}

/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Pointer to Rx Hash queue.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ibv_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		  int tunnel __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_qp *qp = NULL;
	struct mlx5_ind_table_obj *ind_tbl = hrxq->ind_table;
	const uint8_t *rss_key = hrxq->rss_key;
	uint64_t hash_fields = hrxq->hash_fields;
	int err;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	struct mlx5dv_qp_init_attr qp_init_attr;

	memset(&qp_init_attr, 0, sizeof(qp_init_attr));
	if (tunnel) {
		qp_init_attr.comp_mask =
				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
		qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
	}
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	if (dev->data->dev_conf.lpbk_mode) {
		/* Allow packets sent from NIC loop back w/o source MAC check. */
		qp_init_attr.comp_mask |=
				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
		qp_init_attr.create_flags |=
				MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
	}
#endif
	qp = mlx5_glue->dv_create_qp
			(priv->sh->ctx,
			 &(struct ibv_qp_init_attr_ex){
				.qp_type = IBV_QPT_RAW_PACKET,
				.comp_mask =
					IBV_QP_INIT_ATTR_PD |
					IBV_QP_INIT_ATTR_IND_TABLE |
					IBV_QP_INIT_ATTR_RX_HASH,
				.rx_hash_conf = (struct ibv_rx_hash_conf){
					.rx_hash_function =
						IBV_RX_HASH_FUNC_TOEPLITZ,
					.rx_hash_key_len = hrxq->rss_key_len,
					.rx_hash_key =
						(void *)(uintptr_t)rss_key,
					.rx_hash_fields_mask = hash_fields,
				},
				.rwq_ind_tbl = ind_tbl->ind_table,
				.pd = priv->sh->pd,
			 },
			 &qp_init_attr);
#else
	qp = mlx5_glue->create_qp_ex
			(priv->sh->ctx,
			 &(struct ibv_qp_init_attr_ex){
				.qp_type = IBV_QPT_RAW_PACKET,
				.comp_mask =
					IBV_QP_INIT_ATTR_PD |
					IBV_QP_INIT_ATTR_IND_TABLE |
					IBV_QP_INIT_ATTR_RX_HASH,
				.rx_hash_conf = (struct ibv_rx_hash_conf){
					.rx_hash_function =
						IBV_RX_HASH_FUNC_TOEPLITZ,
					.rx_hash_key_len = hrxq->rss_key_len,
					.rx_hash_key =
						(void *)(uintptr_t)rss_key,
					.rx_hash_fields_mask = hash_fields,
				},
				.rwq_ind_tbl = ind_tbl->ind_table,
				.pd = priv->sh->pd,
			 });
#endif
	if (!qp) {
		rte_errno = errno;
		goto error;
	}
	hrxq->qp = qp;
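	/*
	 * With the DV flow engine a flow destination action pointing at
	 * this QP is also created, so that flow rules can steer traffic
	 * to the hash Rx queue.
	 */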
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
	if (!hrxq->action) {
		rte_errno = errno;
		goto error;
	}
#endif
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	if (qp)
		claim_zero(mlx5_glue->destroy_qp(qp));
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Destroy a Verbs queue pair.
 *
 * @param hrxq
 *   Hash Rx queue to release its qp.
 */
static void
mlx5_ibv_qp_destroy(struct mlx5_hrxq *hrxq)
{
	claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
}

/**
 * Release a drop Rx queue Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_rxq_ibv_obj_drop_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;

	if (rxq->wq)
		claim_zero(mlx5_glue->destroy_wq(rxq->wq));
	if (rxq->ibv_cq)
		claim_zero(mlx5_glue->destroy_cq(rxq->ibv_cq));
	mlx5_free(rxq);
	priv->drop_queue.rxq = NULL;
}

/**
 * Create a drop Rx queue Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_ibv_obj_drop_create(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;

	if (rxq)
		return 0;
	rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, SOCKET_ID_ANY);
	if (!rxq) {
		DRV_LOG(DEBUG, "Port %u cannot allocate drop Rx queue memory.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	priv->drop_queue.rxq = rxq;
	rxq->ibv_cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
	if (!rxq->ibv_cq) {
		DRV_LOG(DEBUG, "Port %u cannot allocate CQ for drop queue.",
			dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
	rxq->wq = mlx5_glue->create_wq(ctx, &(struct ibv_wq_init_attr){
						.wq_type = IBV_WQT_RQ,
						.max_wr = 1,
						.max_sge = 1,
						.pd = priv->sh->pd,
						.cq = rxq->ibv_cq,
					});
	if (!rxq->wq) {
		DRV_LOG(DEBUG, "Port %u cannot allocate WQ for drop queue.",
			dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
	priv->drop_queue.rxq = rxq;
	return 0;
error:
	mlx5_rxq_ibv_obj_drop_release(dev);
	return -rte_errno;
}

/**
 * Create a Verbs drop action for Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
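 *
 * The drop action is built from a dedicated drop Rx queue, a single-entry
 * indirection table pointing at it and a hash QP with an empty hash fields
 * mask, so matched traffic has nowhere to land and is dropped.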
 */
static int
mlx5_ibv_drop_action_create(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
	struct ibv_rwq_ind_table *ind_tbl = NULL;
	struct mlx5_rxq_obj *rxq;
	int ret;

	MLX5_ASSERT(hrxq && hrxq->ind_table);
	ret = mlx5_rxq_ibv_obj_drop_create(dev);
	if (ret < 0)
		goto error;
	rxq = priv->drop_queue.rxq;
	ind_tbl = mlx5_glue->create_rwq_ind_table
				(priv->sh->ctx,
				 &(struct ibv_rwq_ind_table_init_attr){
					.log_ind_tbl_size = 0,
					.ind_tbl = (struct ibv_wq **)&rxq->wq,
					.comp_mask = 0,
				 });
	if (!ind_tbl) {
		DRV_LOG(DEBUG, "Port %u"
			" cannot allocate indirection table for drop queue.",
			dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
	hrxq->qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
		 &(struct ibv_qp_init_attr_ex){
			.qp_type = IBV_QPT_RAW_PACKET,
			.comp_mask = IBV_QP_INIT_ATTR_PD |
				     IBV_QP_INIT_ATTR_IND_TABLE |
				     IBV_QP_INIT_ATTR_RX_HASH,
			.rx_hash_conf = (struct ibv_rx_hash_conf){
				.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
				.rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
				.rx_hash_key = rss_hash_default_key,
				.rx_hash_fields_mask = 0,
			},
			.rwq_ind_tbl = ind_tbl,
			.pd = priv->sh->pd
		 });
	if (!hrxq->qp) {
		DRV_LOG(DEBUG, "Port %u cannot allocate QP for drop queue.",
			dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
	if (!hrxq->action) {
		rte_errno = errno;
		goto error;
	}
#endif
	hrxq->ind_table->ind_table = ind_tbl;
	return 0;
error:
	if (hrxq->qp)
		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
	if (ind_tbl)
		claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl));
	if (priv->drop_queue.rxq)
		mlx5_rxq_ibv_obj_drop_release(dev);
	return -rte_errno;
}

/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_ibv_drop_action_destroy(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
	struct ibv_rwq_ind_table *ind_tbl = hrxq->ind_table->ind_table;

#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	claim_zero(mlx5_glue->destroy_flow_action(hrxq->action));
#endif
	claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
	claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl));
	mlx5_rxq_ibv_obj_drop_release(dev);
}

/**
 * Create a QP Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   The QP Verbs object, NULL otherwise and rte_errno is set.
 */
static struct ibv_qp *
mlx5_txq_ibv_qp_create(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct ibv_qp *qp_obj = NULL;
	struct ibv_qp_init_attr_ex qp_attr = { 0 };
	const int desc = 1 << txq_data->elts_n;

	MLX5_ASSERT(txq_ctrl->obj->cq);
	/* CQ to be associated with the send queue. */
	qp_attr.send_cq = txq_ctrl->obj->cq;
	/* CQ to be associated with the receive queue.
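	 * The same CQ is reused here: this raw packet QP is used only for
	 * sending, so no separate receive CQ is needed.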
	 */
	qp_attr.recv_cq = txq_ctrl->obj->cq;
	/* Max number of outstanding WRs. */
	qp_attr.cap.max_send_wr = ((priv->sh->device_attr.max_qp_wr < desc) ?
				   priv->sh->device_attr.max_qp_wr : desc);
	/*
	 * Max number of scatter/gather elements in a WR, must be 1 to prevent
	 * libmlx5 from trying to affect too much memory. TX gather is not
	 * impacted by the device_attr.max_sge limit and will still work
	 * properly.
	 */
	qp_attr.cap.max_send_sge = 1;
	qp_attr.qp_type = IBV_QPT_RAW_PACKET;
	/* Do *NOT* enable this; completion events are managed per Tx burst. */
	qp_attr.sq_sig_all = 0;
	qp_attr.pd = priv->sh->pd;
	qp_attr.comp_mask = IBV_QP_INIT_ATTR_PD;
	if (txq_data->inlen_send)
		qp_attr.cap.max_inline_data = txq_ctrl->max_inline_data;
	if (txq_data->tso_en) {
		qp_attr.max_tso_header = txq_ctrl->max_tso_header;
		qp_attr.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
	}
	qp_obj = mlx5_glue->create_qp_ex(priv->sh->ctx, &qp_attr);
	if (qp_obj == NULL) {
		DRV_LOG(ERR, "Port %u Tx queue %u QP creation failure.",
			dev->data->port_id, idx);
		rte_errno = errno;
	}
	return qp_obj;
}

/**
 * Create the Tx queue Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
	unsigned int cqe_n;
	struct mlx5dv_qp qp;
	struct mlx5dv_cq cq_info;
	struct mlx5dv_obj obj;
	const int desc = 1 << txq_data->elts_n;
	int ret = 0;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(txq_obj);
	txq_obj->txq_ctrl = txq_ctrl;
	if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
		DRV_LOG(ERR, "Port %u MLX5_ENABLE_CQE_COMPRESSION "
			"must never be set.", dev->data->port_id);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	cqe_n = desc / MLX5_TX_COMP_THRESH +
		1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
	txq_obj->cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);
	if (txq_obj->cq == NULL) {
		DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = errno;
		goto error;
	}
	txq_obj->qp = mlx5_txq_ibv_qp_create(dev, idx);
	if (txq_obj->qp == NULL) {
		rte_errno = errno;
		goto error;
	}
	ret = mlx5_ibv_modify_qp(txq_obj, MLX5_TXQ_MOD_RST2RDY,
				 (uint8_t)priv->dev_port);
	if (ret) {
		DRV_LOG(ERR, "Port %u Tx queue %u QP state modifying failed.",
			dev->data->port_id, idx);
		rte_errno = errno;
		goto error;
	}
	qp.comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	/* If using DevX, an additional mask is needed to read the tisn value.
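	 * The TIS number is used below to query the TIS transport domain,
	 * which the Rx side needs when creating the matching TIR.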
	 */
	if (priv->sh->devx && !priv->sh->tdn)
		qp.comp_mask |= MLX5DV_QP_MASK_RAW_QP_HANDLES;
#endif
	obj.cq.in = txq_obj->cq;
	obj.cq.out = &cq_info;
	obj.qp.in = txq_obj->qp;
	obj.qp.out = &qp;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
	if (ret != 0) {
		rte_errno = errno;
		goto error;
	}
	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
		DRV_LOG(ERR,
			"Port %u wrong MLX5_CQE_SIZE environment variable"
			" value: it should be set to %u.",
			dev->data->port_id, RTE_CACHE_LINE_SIZE);
		rte_errno = EINVAL;
		goto error;
	}
	txq_data->cqe_n = log2above(cq_info.cqe_cnt);
	txq_data->cqe_s = 1 << txq_data->cqe_n;
	txq_data->cqe_m = txq_data->cqe_s - 1;
	txq_data->qp_num_8s = ((struct ibv_qp *)txq_obj->qp)->qp_num << 8;
	txq_data->wqes = qp.sq.buf;
	txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
	txq_data->wqe_s = 1 << txq_data->wqe_n;
	txq_data->wqe_m = txq_data->wqe_s - 1;
	txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
	txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
	txq_data->cq_db = cq_info.dbrec;
	txq_data->cqes = (volatile struct mlx5_cqe *)cq_info.buf;
	txq_data->cq_ci = 0;
	txq_data->cq_pi = 0;
	txq_data->wqe_ci = 0;
	txq_data->wqe_pi = 0;
	txq_data->wqe_comp = 0;
	txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	/*
	 * If using DevX, the TIS transport domain value needs to be queried
	 * and stored. This is done once per port.
	 * Will use this value on Rx, when creating matching TIR.
	 */
	if (priv->sh->devx && !priv->sh->tdn) {
		ret = mlx5_devx_cmd_qp_query_tis_td(txq_obj->qp, qp.tisn,
						    &priv->sh->tdn);
		if (ret) {
			DRV_LOG(ERR, "Failed to query port %u Tx queue %u QP "
				"TIS transport domain.", dev->data->port_id,
				idx);
			rte_errno = EINVAL;
			goto error;
		} else {
			DRV_LOG(DEBUG, "Port %u Tx queue %u TIS number %d "
				"transport domain %d.", dev->data->port_id,
				idx, qp.tisn, priv->sh->tdn);
		}
	}
#endif
	txq_ctrl->bf_reg = qp.bf.reg;
	if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
		txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
		DRV_LOG(DEBUG, "Port %u: uar_mmap_offset 0x%" PRIx64 ".",
			dev->data->port_id, txq_ctrl->uar_mmap_offset);
	} else {
		DRV_LOG(ERR,
			"Port %u failed to retrieve UAR info, invalid"
			" libmlx5.so",
			dev->data->port_id);
		rte_errno = EINVAL;
		goto error;
	}
	txq_uar_init(txq_ctrl);
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (txq_obj->cq)
		claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
	if (txq_obj->qp)
		claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Create the dummy QP with minimal resources for loopback.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
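 *
 * The QP is created only when loopback mode is requested and is shared by
 * all ports using the same shared device context; a reference counter
 * tracks how many ports rely on it.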
 */
int
mlx5_rxq_ibv_obj_dummy_lb_create(struct rte_eth_dev *dev)
{
#if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct ibv_context *ctx = sh->ctx;
	struct mlx5dv_qp_init_attr qp_init_attr = {0};
	struct {
		struct ibv_cq_init_attr_ex ibv;
		struct mlx5dv_cq_init_attr mlx5;
	} cq_attr = {{0}};

	if (dev->data->dev_conf.lpbk_mode) {
		/* Allow packets sent from NIC loop back w/o source MAC check. */
		qp_init_attr.comp_mask |=
				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
		qp_init_attr.create_flags |=
				MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
	} else {
		return 0;
	}
	/* Only need to check refcnt, 0 after "sh" is allocated. */
	if (!!(__atomic_fetch_add(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
		MLX5_ASSERT(sh->self_lb.ibv_cq && sh->self_lb.qp);
		priv->lb_used = 1;
		return 0;
	}
	cq_attr.ibv = (struct ibv_cq_init_attr_ex){
		.cqe = 1,
		.channel = NULL,
		.comp_mask = 0,
	};
	cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
		.comp_mask = 0,
	};
	/* Only CQ is needed, no WQ(RQ) is required in this case. */
	sh->self_lb.ibv_cq = mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(ctx,
								&cq_attr.ibv,
								&cq_attr.mlx5));
	if (!sh->self_lb.ibv_cq) {
		DRV_LOG(ERR, "Port %u cannot allocate CQ for loopback.",
			dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
	sh->self_lb.qp = mlx5_glue->dv_create_qp(ctx,
				&(struct ibv_qp_init_attr_ex){
					.qp_type = IBV_QPT_RAW_PACKET,
					.comp_mask = IBV_QP_INIT_ATTR_PD,
					.pd = sh->pd,
					.send_cq = sh->self_lb.ibv_cq,
					.recv_cq = sh->self_lb.ibv_cq,
					.cap.max_recv_wr = 1,
				},
				&qp_init_attr);
	if (!sh->self_lb.qp) {
		DRV_LOG(DEBUG, "Port %u cannot allocate QP for loopback.",
			dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
	priv->lb_used = 1;
	return 0;
error:
	if (sh->self_lb.ibv_cq) {
		claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
		sh->self_lb.ibv_cq = NULL;
	}
	(void)__atomic_sub_fetch(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED);
	return -rte_errno;
#else
	RTE_SET_USED(dev);
	return 0;
#endif
}

/**
 * Release the dummy queue resources for loopback.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_rxq_ibv_obj_dummy_lb_release(struct rte_eth_dev *dev)
{
#if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;

	if (!priv->lb_used)
		return;
	MLX5_ASSERT(__atomic_load_n(&sh->self_lb.refcnt, __ATOMIC_RELAXED));
	if (!(__atomic_sub_fetch(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
		if (sh->self_lb.qp) {
			claim_zero(mlx5_glue->destroy_qp(sh->self_lb.qp));
			sh->self_lb.qp = NULL;
		}
		if (sh->self_lb.ibv_cq) {
			claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
			sh->self_lb.ibv_cq = NULL;
		}
	}
	priv->lb_used = 0;
#else
	RTE_SET_USED(dev);
	return;
#endif
}

/**
 * Release a Tx verbs queue object.
 *
 * @param txq_obj
 *   Verbs Tx queue object.
 */
void
mlx5_txq_ibv_obj_release(struct mlx5_txq_obj *txq_obj)
{
	MLX5_ASSERT(txq_obj);
	claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
	claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
}

struct mlx5_obj_ops ibv_obj_ops = {
	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_wq_vlan_strip,
	.rxq_obj_new = mlx5_rxq_ibv_obj_new,
	.rxq_event_get = mlx5_rx_ibv_get_event,
	.rxq_obj_modify = mlx5_ibv_modify_wq,
	.rxq_obj_release = mlx5_rxq_ibv_obj_release,
	.ind_table_new = mlx5_ibv_ind_table_new,
	.ind_table_destroy = mlx5_ibv_ind_table_destroy,
	.hrxq_new = mlx5_ibv_hrxq_new,
	.hrxq_destroy = mlx5_ibv_qp_destroy,
	.drop_action_create = mlx5_ibv_drop_action_create,
	.drop_action_destroy = mlx5_ibv_drop_action_destroy,
	.txq_obj_new = mlx5_txq_ibv_obj_new,
	.txq_obj_modify = mlx5_ibv_modify_qp,
	.txq_obj_release = mlx5_txq_ibv_obj_release,
	.lb_dummy_queue_create = NULL,
	.lb_dummy_queue_release = NULL,
};