/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common_devx.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_tx.h"
#include "mlx5_rx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"

/**
 * Modify RQ VLAN stripping offload.
 *
 * @param rxq_obj
 *   Rx queue object.
 * @param on
 *   Enable/disable VLAN stripping.
 *
 * @return
 *   0 on success, non-0 otherwise.
 */
static int
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	rq_attr.state = MLX5_RQC_STATE_RDY;
	rq_attr.vsd = (on ? 0 : 1);
	rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq_obj.rq, &rq_attr);
}

/**
 * Modify RQ using DevX API.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 * @param type
 *   Type of queue state change.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	switch (type) {
	case MLX5_RXQ_MOD_ERR2RST:
		rq_attr.rq_state = MLX5_RQC_STATE_ERR;
		rq_attr.state = MLX5_RQC_STATE_RST;
		break;
	case MLX5_RXQ_MOD_RST2RDY:
		rq_attr.rq_state = MLX5_RQC_STATE_RST;
		rq_attr.state = MLX5_RQC_STATE_RDY;
		break;
	case MLX5_RXQ_MOD_RDY2ERR:
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_ERR;
		break;
	case MLX5_RXQ_MOD_RDY2RST:
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_RST;
		break;
	default:
		break;
	}
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq_obj.rq, &rq_attr);
}

/**
 * Modify SQ using DevX API.
 *
 * @param obj
 *   DevX Tx queue object.
 * @param type
 *   Type of queue state change.
 * @param dev_port
 *   Unused; kept so the prototype matches the Verbs implementation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_modify_sq(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
		    uint8_t dev_port)
{
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	int ret;

	if (type != MLX5_TXQ_MOD_RST2RDY) {
		/* Change queue state to reset. */
		if (type == MLX5_TXQ_MOD_ERR2RDY)
			msq_attr.sq_state = MLX5_SQC_STATE_ERR;
		else
			msq_attr.sq_state = MLX5_SQC_STATE_RDY;
		msq_attr.state = MLX5_SQC_STATE_RST;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_obj.sq, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to RESET"
				" %s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	if (type != MLX5_TXQ_MOD_RDY2RST) {
		/* Change queue state to ready. */
		msq_attr.sq_state = MLX5_SQC_STATE_RST;
		msq_attr.state = MLX5_SQC_STATE_RDY;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_obj.sq, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to READY"
				" %s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	/*
	 * The dev_port argument is used only by the Verbs implementation.
	 * Both implementations are reached through the same txq_obj_modify
	 * callback pointer, so they must share the same prototype.
	 */
	(void)dev_port;
	return 0;
}
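
/*
 * Note on mlx5_devx_modify_sq(): for MLX5_TXQ_MOD_ERR2RDY both branches are
 * taken, so recovering an SQ from the error state is performed as two DevX
 * commands, ERR -> RST followed by RST -> RDY.
 */
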
/**
 * Destroy the Rx queue DevX object.
 *
 * @param rxq_obj
 *   Rxq object to destroy.
 */
static void
mlx5_rxq_release_devx_resources(struct mlx5_rxq_obj *rxq_obj)
{
	mlx5_devx_rq_destroy(&rxq_obj->rq_obj);
	memset(&rxq_obj->rq_obj, 0, sizeof(rxq_obj->rq_obj));
	mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
	memset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));
}

/**
 * Release an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
	MLX5_ASSERT(rxq_obj);
	if (rxq_obj->rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
		MLX5_ASSERT(rxq_obj->rq);
		mlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_RDY2RST);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
	} else {
		MLX5_ASSERT(rxq_obj->cq_obj.cq);
		MLX5_ASSERT(rxq_obj->rq_obj.rq);
		mlx5_rxq_release_devx_resources(rxq_obj);
		if (rxq_obj->devx_channel)
			mlx5_os_devx_destroy_event_channel
							(rxq_obj->devx_channel);
	}
}

/**
 * Get event for an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
{
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
					    &out.event_resp,
					    sizeof(out.buf));

	if (ret < 0) {
		rte_errno = errno;
		return -rte_errno;
	}
	if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->cq_obj.cq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
#else
	(void)rxq_obj;
	rte_errno = ENOTSUP;
	return -rte_errno;
#endif /* HAVE_IBV_DEVX_EVENT */
}
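
/*
 * Note on mlx5_rx_devx_get_event(): the cookie carried by the event is the
 * CQ pointer that was registered with mlx5_os_devx_subscribe_devx_event()
 * when the Rx queue object was created, so a cookie mismatch means the event
 * does not belong to this queue.
 */
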
/**
 * Create an RQ object using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
	uint16_t log_desc_n = rxq_data->elts_n - rxq_data->sges_n;
	uint32_t wqe_size, log_wqe_size;

	/* Fill RQ attributes. */
	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
	rq_attr.flush_in_error_en = 1;
	rq_attr.vsd = (rxq_data->vlan_strip) ? 0 : 1;
	rq_attr.cqn = rxq_ctrl->obj->cq_obj.cq->id;
	rq_attr.scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
	/* Fill WQ attributes for this RQ. */
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
		/*
		 * Number of strides in each WQE:
		 * 512*2^single_wqe_log_num_of_strides.
		 */
		rq_attr.wq_attr.single_wqe_log_num_of_strides =
				rxq_data->strd_num_n -
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
		rq_attr.wq_attr.single_stride_log_num_of_bytes =
				rxq_data->strd_sz_n -
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
		wqe_size = sizeof(struct mlx5_wqe_mprq);
	} else {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
		wqe_size = sizeof(struct mlx5_wqe_data_seg);
	}
	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
	wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
	rq_attr.wq_attr.log_wq_sz = log_desc_n;
	rq_attr.wq_attr.end_padding_mode = priv->config.hw_padding ?
						MLX5_WQ_END_PAD_MODE_ALIGN :
						MLX5_WQ_END_PAD_MODE_NONE;
	rq_attr.wq_attr.pd = priv->sh->pdn;
	rq_attr.counter_set_id = priv->counter_set_id;
	/* Create RQ using DevX API. */
	return mlx5_devx_rq_create(priv->sh->ctx, &rxq_ctrl->obj->rq_obj,
				   wqe_size, log_desc_n, &rq_attr,
				   rxq_ctrl->socket);
}
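
/*
 * Sizing sketch for mlx5_rxq_create_devx_rq_resources() (illustrative values):
 * in the non-MPRQ case each descriptor holds 2^sges_n data segments, so
 * sges_n == 2 gives a WQE stride of 4 segments (64 bytes, assuming the usual
 * 16-byte mlx5_wqe_data_seg), while log_desc_n = elts_n - sges_n shrinks the
 * descriptor count so the overall queue capacity stays the same.
 */
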
/**
 * Create a DevX CQ object for an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_devx_cq *cq_obj = NULL;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
	uint32_t log_cqe_n;
	uint16_t event_nums[1] = { 0 };
	int ret = 0;

	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
	    !rxq_data->lro) {
		cq_attr.cqe_comp_en = 1u;
		rxq_data->mcqe_format = priv->config.cqe_comp_fmt;
		rxq_data->byte_mask = UINT32_MAX;
		switch (priv->config.cqe_comp_fmt) {
		case MLX5_CQE_RESP_FORMAT_HASH:
			/* fallthrough */
		case MLX5_CQE_RESP_FORMAT_CSUM:
			/*
			 * Select CSUM miniCQE format only for non-vectorized
			 * MPRQ Rx burst, use HASH miniCQE format for others.
			 */
			if (mlx5_rxq_check_vec_support(rxq_data) < 0 &&
			    mlx5_rxq_mprq_enabled(rxq_data))
				cq_attr.mini_cqe_res_format =
					MLX5_CQE_RESP_FORMAT_CSUM_STRIDX;
			else
				cq_attr.mini_cqe_res_format =
					MLX5_CQE_RESP_FORMAT_HASH;
			rxq_data->mcqe_format = cq_attr.mini_cqe_res_format;
			break;
		case MLX5_CQE_RESP_FORMAT_FTAG_STRIDX:
			rxq_data->byte_mask = MLX5_LEN_WITH_MARK_MASK;
			/* fallthrough */
		case MLX5_CQE_RESP_FORMAT_CSUM_STRIDX:
			cq_attr.mini_cqe_res_format = priv->config.cqe_comp_fmt;
			break;
		case MLX5_CQE_RESP_FORMAT_L34H_STRIDX:
			cq_attr.mini_cqe_res_format = 0;
			cq_attr.mini_cqe_res_format_ext = 1;
			break;
		}
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is enabled, format %d.",
			dev->data->port_id, priv->config.cqe_comp_fmt);
		/*
		 * Double the CQE count only for non-vectorized Rx. For
		 * vectorized Rx it must not be doubled so that cq_ci and
		 * rq_ci stay aligned.
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cqe_n *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for HW"
			" timestamp.",
			dev->data->port_id);
	} else if (priv->config.cqe_comp && rxq_data->lro) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for LRO.",
			dev->data->port_id);
	}
	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->devx_rx_uar);
	log_cqe_n = log2above(cqe_n);
	/* Create CQ using DevX API. */
	ret = mlx5_devx_cq_create(sh->ctx, &rxq_ctrl->obj->cq_obj, log_cqe_n,
				  &cq_attr, sh->numa_node);
	if (ret)
		return ret;
	cq_obj = &rxq_ctrl->obj->cq_obj;
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])
							(uintptr_t)cq_obj->cqes;
	rxq_data->cq_db = cq_obj->db_rec;
	rxq_data->cq_uar = mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar);
	rxq_data->cqe_n = log_cqe_n;
	rxq_data->cqn = cq_obj->cq->id;
	if (rxq_ctrl->obj->devx_channel) {
		ret = mlx5_os_devx_subscribe_devx_event
						(rxq_ctrl->obj->devx_channel,
						 cq_obj->cq->obj,
						 sizeof(event_nums),
						 event_nums,
						 (uint64_t)(uintptr_t)cq_obj->cq);
		if (ret) {
			DRV_LOG(ERR, "Failed to subscribe CQ to event channel.");
			ret = errno;
			mlx5_devx_cq_destroy(cq_obj);
			memset(cq_obj, 0, sizeof(*cq_obj));
			rte_errno = ret;
			return -ret;
		}
	}
	return 0;
}
/**
 * Create the Rx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr attr = { 0 };
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	tmpl->rxq_ctrl = rxq_ctrl;
	attr.hairpin = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9 KB should be supported, as should more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the number of packets to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	attr.counter_set_id = priv->counter_set_id;
	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
					   rxq_ctrl->socket);
	if (!tmpl->rq) {
		DRV_LOG(ERR,
			"Port %u Rx hairpin queue %u can't create rq object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return 0;
}

/**
 * Create the Rx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	int ret = 0;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		return mlx5_rxq_obj_hairpin_new(dev, idx);
	tmpl->rxq_ctrl = rxq_ctrl;
	if (rxq_ctrl->irq) {
		int devx_ev_flag =
			  MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

		tmpl->devx_channel = mlx5_os_devx_create_event_channel
								(priv->sh->ctx,
								 devx_ev_flag);
		if (!tmpl->devx_channel) {
			rte_errno = errno;
			DRV_LOG(ERR, "Failed to create event channel %d.",
				rte_errno);
			goto error;
		}
		tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
	}
	/* Create CQ using DevX API. */
	ret = mlx5_rxq_create_devx_cq_resources(dev, idx);
	if (ret) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto error;
	}
	/* Create RQ using DevX API. */
	ret = mlx5_rxq_create_devx_rq_resources(dev, idx);
	if (ret) {
		DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_devx_modify_rq(tmpl, MLX5_RXQ_MOD_RST2RDY);
	if (ret)
		goto error;
	rxq_data->wqes = (void *)(uintptr_t)tmpl->rq_obj.umem_buf;
	rxq_data->rq_db = (uint32_t *)(uintptr_t)tmpl->rq_obj.db_rec;
	rxq_data->cq_arm_sn = 0;
	rxq_data->cq_ci = 0;
	mlx5_rxq_initialize(rxq_data);
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	rxq_ctrl->wqn = tmpl->rq_obj.rq->id;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_rxq_devx_obj_release(tmpl);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}
/**
 * Prepare RQT attribute structure for DevX RQT API.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param queues
 *   List of Rx queue indices.
 * @param queues_n
 *   Size of the @p queues array.
 *
 * @return
 *   The RQT attr object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_rqt_attr *
mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
				    const unsigned int log_n,
				    const uint16_t *queues,
				    const uint32_t queues_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
	const unsigned int rqt_n = 1 << log_n;
	unsigned int i, j;

	rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
			       rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
	if (!rqt_attr) {
		DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		return NULL;
	}
	rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
	rqt_attr->rqt_actual_size = rqt_n;
	for (i = 0; i != queues_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[queues[i]];
		struct mlx5_rxq_ctrl *rxq_ctrl =
				container_of(rxq, struct mlx5_rxq_ctrl, rxq);

		rqt_attr->rq_list[i] = rxq_ctrl->obj->rq_obj.rq->id;
	}
	MLX5_ASSERT(i > 0);
	/* Pad the RQ list up to rqt_n entries by wrapping around the queues. */
	for (j = 0; i != rqt_n; ++j, ++i)
		rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
	return rqt_attr;
}
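
/*
 * Illustration of the RQT padding above: with queues_n == 3 and log_n == 2
 * (rqt_n == 4) the resulting rq_list is {q0, q1, q2, q0}, so RSS spreading
 * over a non-power-of-two queue count still fills a power-of-two table.
 */
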
/**
 * Create RQT using DevX API as a field of indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
			struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
						       ind_tbl->queues,
						       ind_tbl->queues_n);
	if (!rqt_attr)
		return -rte_errno;
	ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);
	mlx5_free(rqt_attr);
	if (!ind_tbl->rqt) {
		DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
			dev->data->port_id);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

/**
 * Modify RQT using DevX API as a field of indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param queues
 *   List of Rx queue indices.
 * @param queues_n
 *   Size of the @p queues array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_modify(struct rte_eth_dev *dev, const unsigned int log_n,
			   const uint16_t *queues, const uint32_t queues_n,
			   struct mlx5_ind_table_obj *ind_tbl)
{
	int ret = 0;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
						       queues,
						       queues_n);
	if (!rqt_attr)
		return -rte_errno;
	ret = mlx5_devx_cmd_modify_rqt(ind_tbl->rqt, rqt_attr);
	mlx5_free(rqt_attr);
	if (ret)
		DRV_LOG(ERR, "Port %u cannot modify DevX RQT.",
			dev->data->port_id);
	return ret;
}

/**
 * Destroy the DevX RQT object.
 *
 * @param ind_tbl
 *   Indirection table to release.
 */
static void
mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
	claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
}

/**
 * Set TIR attribute struct with relevant input values.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] rss_key
 *   RSS key for the Rx hash queue.
 * @param[in] hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param[in] ind_tbl
 *   Indirection table for TIR.
 * @param[in] tunnel
 *   Tunnel type.
 * @param[out] tir_attr
 *   Parameters structure for TIR creation/modification.
 */
static void
mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
		       uint64_t hash_fields,
		       const struct mlx5_ind_table_obj *ind_tbl,
		       int tunnel, struct mlx5_devx_tir_attr *tir_attr)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	enum mlx5_rxq_type rxq_obj_type = rxq_ctrl->type;
	bool lro = true;
	uint32_t i;

	/* Enable TIR LRO only if all the queues were configured for it. */
	for (i = 0; i < ind_tbl->queues_n; ++i) {
		if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
			lro = false;
			break;
		}
	}
	memset(tir_attr, 0, sizeof(*tir_attr));
	tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
	tir_attr->rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
	tir_attr->tunneled_offload_en = !!tunnel;
	/* If needed, translate hash_fields bitmap to PRM format. */
	if (hash_fields) {
		struct mlx5_rx_hash_field_select *rx_hash_field_select =
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
			hash_fields & IBV_RX_HASH_INNER ?
				&tir_attr->rx_hash_field_selector_inner :
#endif
				&tir_attr->rx_hash_field_selector_outer;
		/* 1 bit: 0: IPv4, 1: IPv6. */
		rx_hash_field_select->l3_prot_type =
					!!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
		/* 1 bit: 0: TCP, 1: UDP. */
		rx_hash_field_select->l4_prot_type =
					!!(hash_fields & MLX5_UDP_IBV_RX_HASH);
		/* Bitmask which sets which fields to use in RX Hash. */
		rx_hash_field_select->selected_fields =
			((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
			(!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
			(!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
			(!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
	}
	if (rxq_obj_type == MLX5_RXQ_TYPE_HAIRPIN)
		tir_attr->transport_domain = priv->sh->td->id;
	else
		tir_attr->transport_domain = priv->sh->tdn;
	memcpy(tir_attr->rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	tir_attr->indirect_table = ind_tbl->rqt->id;
	if (dev->data->dev_conf.lpbk_mode)
		tir_attr->self_lb_block =
					MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
	if (lro) {
		tir_attr->lro_timeout_period_usecs = priv->config.lro.timeout;
		tir_attr->lro_max_msg_sz = priv->max_lro_msg_size;
		tir_attr->lro_enable_mask =
				MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
				MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
	}
}
/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Pointer to Rx Hash queue.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		   int tunnel __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_tir_attr tir_attr = {0};
	int err;

	mlx5_devx_tir_attr_set(dev, hrxq->rss_key, hrxq->hash_fields,
			       hrxq->ind_table, tunnel, &tir_attr);
	hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
	if (!hrxq->tir) {
		DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
			dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	if (mlx5_flow_os_create_flow_action_dest_devx_tir(hrxq->tir,
							  &hrxq->action)) {
		rte_errno = errno;
		goto error;
	}
#endif
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	if (hrxq->tir)
		claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Destroy a DevX TIR object.
 *
 * @param hrxq
 *   Hash Rx queue to release its TIR.
 */
static void
mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
{
	claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
}

/**
 * Modify an Rx Hash queue configuration.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Hash Rx queue to modify.
 * @param rss_key
 *   RSS key for the Rx hash queue.
 * @param hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param[in] ind_tbl
 *   Indirection table for TIR.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_hrxq_modify(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		      const uint8_t *rss_key,
		      uint64_t hash_fields,
		      const struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_devx_modify_tir_attr modify_tir = {0};

	/*
	 * Untested modification fields:
	 * - rx_hash_symmetric is not set in hrxq_new(),
	 * - rx_hash_fn is hard-coded in hrxq_new(),
	 * - lro_xxx is not set after Rx queue setup.
	 */
	if (ind_tbl != hrxq->ind_table)
		modify_tir.modify_bitmask |=
			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_INDIRECT_TABLE;
	if (hash_fields != hrxq->hash_fields ||
	    memcmp(hrxq->rss_key, rss_key, MLX5_RSS_HASH_KEY_LEN))
		modify_tir.modify_bitmask |=
			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_HASH;
	mlx5_devx_tir_attr_set(dev, rss_key, hash_fields, ind_tbl,
			       0, /* N/A - tunnel modification unsupported */
			       &modify_tir.tir);
	modify_tir.tirn = hrxq->tir->id;
	if (mlx5_devx_cmd_modify_tir(hrxq->tir, &modify_tir)) {
		DRV_LOG(ERR, "Port %u cannot modify DevX TIR.",
			dev->data->port_id);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

/**
 * Create a DevX drop action for Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
{
	(void)dev;
	DRV_LOG(ERR, "DevX drop action is not supported yet.");
	rte_errno = ENOTSUP;
	return -rte_errno;
}
/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
{
	(void)dev;
	DRV_LOG(ERR, "DevX drop action is not supported yet.");
	rte_errno = ENOTSUP;
}

/**
 * Create the Tx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_devx_create_sq_attr attr = { 0 };
	struct mlx5_txq_obj *tmpl = txq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(tmpl);
	tmpl->txq_ctrl = txq_ctrl;
	attr.hairpin = 1;
	attr.tis_lst_sz = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9 KB should be supported, as should more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the number of packets to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	attr.tis_num = priv->sh->tis->id;
	tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
	if (!tmpl->sq) {
		DRV_LOG(ERR,
			"Port %u Tx hairpin queue %u can't create SQ object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
/**
 * Destroy the Tx queue DevX object.
 *
 * @param txq_obj
 *   Txq object to destroy.
 */
static void
mlx5_txq_release_devx_resources(struct mlx5_txq_obj *txq_obj)
{
	mlx5_devx_sq_destroy(&txq_obj->sq_obj);
	memset(&txq_obj->sq_obj, 0, sizeof(txq_obj->sq_obj));
	mlx5_devx_cq_destroy(&txq_obj->cq_obj);
	memset(&txq_obj->cq_obj, 0, sizeof(txq_obj->cq_obj));
}
/**
 * Create an SQ object and its resources using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
				  uint16_t log_desc_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
	struct mlx5_devx_create_sq_attr sq_attr = {
		.flush_in_error_en = 1,
		.allow_multi_pkt_send_wqe = !!priv->config.mps,
		.min_wqe_inline_mode = priv->config.hca_attr.vport_inline_mode,
		.allow_swp = !!priv->config.swp,
		.cqn = txq_obj->cq_obj.cq->id,
		.tis_lst_sz = 1,
		.tis_num = priv->sh->tis->id,
		.wq_attr = (struct mlx5_devx_wq_attr){
			.pd = priv->sh->pdn,
			.uar_page =
				 mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar),
		},
		.ts_format = mlx5_ts_format_conv(priv->sh->sq_ts_format),
	};

	/* Create Send Queue object with DevX. */
	return mlx5_devx_sq_create(priv->sh->ctx, &txq_obj->sq_obj, log_desc_n,
				   &sq_attr, priv->sh->numa_node);
}
#endif

/**
 * Create the Tx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq_data, struct mlx5_txq_ctrl, txq);

	if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
		return mlx5_txq_obj_hairpin_new(dev, idx);
#if !defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) && defined(HAVE_INFINIBAND_VERBS_H)
	DRV_LOG(ERR, "Port %u Tx queue %u cannot create with DevX, no UAR.",
		dev->data->port_id, idx);
	rte_errno = ENOMEM;
	return -rte_errno;
#else
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
	struct mlx5_devx_cq_attr cq_attr = {
		.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
	};
	void *reg_addr;
	uint32_t cqe_n, log_desc_n;
	uint32_t wqe_n, wqe_size;
	int ret = 0;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(txq_obj);
	txq_obj->txq_ctrl = txq_ctrl;
	txq_obj->dev = dev;
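	/*
	 * CQ sizing sketch (illustrative, assuming the default completion
	 * threshold MLX5_TX_COMP_THRESH of 32): a ring of 1024 elements
	 * requests 1024 / 32 + 1 + MLX5_TX_COMP_THRESH_INLINE_DIV CQEs,
	 * which log2above() below rounds up to the next power of two.
	 */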
	cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
		1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
	log_desc_n = log2above(cqe_n);
	cqe_n = 1UL << log_desc_n;
	if (cqe_n > UINT16_MAX) {
		DRV_LOG(ERR, "Port %u Tx queue %u requests too many CQEs %u.",
			dev->data->port_id, txq_data->idx, cqe_n);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	/* Create completion queue object with DevX. */
	ret = mlx5_devx_cq_create(sh->ctx, &txq_obj->cq_obj, log_desc_n,
				  &cq_attr, priv->sh->numa_node);
	if (ret) {
		DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
			dev->data->port_id, idx);
		goto error;
	}
	txq_data->cqe_n = log_desc_n;
	txq_data->cqe_s = cqe_n;
	txq_data->cqe_m = txq_data->cqe_s - 1;
	txq_data->cqes = txq_obj->cq_obj.cqes;
	txq_data->cq_ci = 0;
	txq_data->cq_pi = 0;
	txq_data->cq_db = txq_obj->cq_obj.db_rec;
	*txq_data->cq_db = 0;
	/*
	 * Adjust the number of WQEs depending on inline settings.
	 * The number of descriptors should be enough to handle
	 * the specified number of packets. If the queue is created
	 * with Verbs, rdma-core does this queue size adjustment
	 * internally in mlx5_calc_sq_size(); do the same here
	 * for a queue created with DevX.
	 */
	wqe_size = txq_data->tso_en ?
		   RTE_ALIGN(txq_ctrl->max_tso_header, MLX5_WSEG_SIZE) : 0;
	wqe_size += sizeof(struct mlx5_wqe_cseg) +
		    sizeof(struct mlx5_wqe_eseg) +
		    sizeof(struct mlx5_wqe_dseg);
	if (txq_data->inlen_send)
		wqe_size = RTE_MAX(wqe_size, sizeof(struct mlx5_wqe_cseg) +
					     sizeof(struct mlx5_wqe_eseg) +
					     RTE_ALIGN(txq_data->inlen_send +
						       sizeof(uint32_t),
						       MLX5_WSEG_SIZE));
	wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
	/* Create Send Queue object with DevX. */
	wqe_n = RTE_MIN((1UL << txq_data->elts_n) * wqe_size,
			(uint32_t)priv->sh->device_attr.max_qp_wr);
	log_desc_n = log2above(wqe_n);
	ret = mlx5_txq_create_devx_sq_resources(dev, idx, log_desc_n);
	if (ret) {
		DRV_LOG(ERR, "Port %u Tx queue %u SQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = errno;
		goto error;
	}
	/* Create the Work Queue. */
	txq_data->wqe_n = log_desc_n;
	txq_data->wqe_s = 1 << txq_data->wqe_n;
	txq_data->wqe_m = txq_data->wqe_s - 1;
	txq_data->wqes = (struct mlx5_wqe *)(uintptr_t)txq_obj->sq_obj.wqes;
	txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
	txq_data->wqe_ci = 0;
	txq_data->wqe_pi = 0;
	txq_data->wqe_comp = 0;
	txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
	txq_data->qp_db = &txq_obj->sq_obj.db_rec[MLX5_SND_DBR];
	*txq_data->qp_db = 0;
	txq_data->qp_num_8s = txq_obj->sq_obj.sq->id << 8;
	/* Change Send Queue state to Ready-to-Send. */
	ret = mlx5_devx_modify_sq(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0);
	if (ret) {
		rte_errno = errno;
		DRV_LOG(ERR,
			"Port %u Tx queue %u SQ state to SQC_STATE_RDY failed.",
			dev->data->port_id, idx);
		goto error;
	}
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	/*
	 * If using DevX, the TIS transport domain value must be queried and
	 * stored. This is done once per port.
	 * The value is used on Rx when creating the matching TIR.
	 */
	if (!priv->sh->tdn)
		priv->sh->tdn = priv->sh->td->id;
#endif
	MLX5_ASSERT(sh->tx_uar);
	reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
	MLX5_ASSERT(reg_addr);
	txq_ctrl->bf_reg = reg_addr;
	txq_ctrl->uar_mmap_offset =
				mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
	txq_uar_init(txq_ctrl);
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_txq_release_devx_resources(txq_obj);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
#endif
}
/**
 * Release a Tx DevX queue object.
 *
 * @param txq_obj
 *   DevX Tx queue object.
 */
void
mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj)
{
	MLX5_ASSERT(txq_obj);
	if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
		if (txq_obj->tis)
			claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
	} else {
		mlx5_txq_release_devx_resources(txq_obj);
#endif
	}
}

struct mlx5_obj_ops devx_obj_ops = {
	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
	.rxq_obj_new = mlx5_rxq_devx_obj_new,
	.rxq_event_get = mlx5_rx_devx_get_event,
	.rxq_obj_modify = mlx5_devx_modify_rq,
	.rxq_obj_release = mlx5_rxq_devx_obj_release,
	.ind_table_new = mlx5_devx_ind_table_new,
	.ind_table_modify = mlx5_devx_ind_table_modify,
	.ind_table_destroy = mlx5_devx_ind_table_destroy,
	.hrxq_new = mlx5_devx_hrxq_new,
	.hrxq_destroy = mlx5_devx_tir_destroy,
	.hrxq_modify = mlx5_devx_hrxq_modify,
	.drop_action_create = mlx5_devx_drop_action_create,
	.drop_action_destroy = mlx5_devx_drop_action_destroy,
	.txq_obj_new = mlx5_txq_devx_obj_new,
	.txq_obj_modify = mlx5_devx_modify_sq,
	.txq_obj_release = mlx5_txq_devx_obj_release,
	.lb_dummy_queue_create = NULL,
	.lb_dummy_queue_release = NULL,
};
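
/*
 * Note: this table is the DevX implementation of mlx5_obj_ops; the PMD is
 * expected to select it at device spawn time (when priv->obj_ops is assigned)
 * for setups where queue objects are created through DevX rather than Verbs.
 */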