/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"
#include "mlx5_flow.h"


/**
 * Modify RQ VLAN stripping offload.
 *
 * @param rxq_obj
 *   Rx queue object.
 * @param on
 *   Enable/disable VLAN stripping.
 *
 * @return
 *   0 on success, non-0 otherwise.
 */
static int
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	rq_attr.state = MLX5_RQC_STATE_RDY;
	rq_attr.vsd = (on ? 0 : 1);
	rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}

/**
 * Modify RQ using DevX API.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 * @param is_start
 *   True to move the RQ from RESET to READY state, false to move it back
 *   from READY to RESET.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, bool is_start)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	if (is_start) {
		rq_attr.rq_state = MLX5_RQC_STATE_RST;
		rq_attr.state = MLX5_RQC_STATE_RDY;
	} else {
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_RST;
	}
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}

/**
 * Release the resources allocated for an RQ DevX object.
 *
 * @param rxq_ctrl
 *   DevX Rx queue control structure.
 */
static void
rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->rq_dbrec_page;

	if (rxq_ctrl->rxq.wqes) {
		mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
		rxq_ctrl->rxq.wqes = NULL;
	}
	if (rxq_ctrl->wq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
		rxq_ctrl->wq_umem = NULL;
	}
	if (dbr_page) {
		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id(dbr_page->umem),
					    rxq_ctrl->rq_dbr_offset));
		rxq_ctrl->rq_dbrec_page = NULL;
	}
}

/**
 * Release the resources allocated for the Rx CQ DevX object.
 *
 * @param rxq_ctrl
 *   DevX Rx queue control structure.
 */
static void
rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->cq_dbrec_page;

	if (rxq_ctrl->rxq.cqes) {
		rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes);
		rxq_ctrl->rxq.cqes = NULL;
	}
	if (rxq_ctrl->cq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->cq_umem);
		rxq_ctrl->cq_umem = NULL;
	}
	if (dbr_page) {
		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id(dbr_page->umem),
					    rxq_ctrl->cq_dbr_offset));
		rxq_ctrl->cq_dbrec_page = NULL;
	}
}

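/*
 * Note: the two release helpers above free only host-side resources (WQ/CQ
 * buffers, umem registrations and door-bell records); the DevX RQ and CQ
 * objects themselves are destroyed by mlx5_rxq_devx_obj_release() below.
 */
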
/**
 * Release an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
	MLX5_ASSERT(rxq_obj);
	MLX5_ASSERT(rxq_obj->rq);
	if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN) {
		mlx5_devx_modify_rq(rxq_obj, false);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
	} else {
		MLX5_ASSERT(rxq_obj->devx_cq);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
		if (rxq_obj->devx_channel)
			mlx5_glue->devx_destroy_event_channel
							(rxq_obj->devx_channel);
		rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl);
		rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl);
	}
}

/**
 * Get event for an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
{
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
					    &out.event_resp,
					    sizeof(out.buf));

	if (ret < 0) {
		rte_errno = errno;
		return -rte_errno;
	}
	if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->devx_cq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
#else
	(void)rxq_obj;
	rte_errno = ENOTSUP;
	return -rte_errno;
#endif /* HAVE_IBV_DEVX_EVENT */
}

/**
 * Fill common fields of create RQ attributes structure.
 *
 * @param rxq_data
 *   Pointer to Rx queue data.
 * @param cqn
 *   CQ number to use with this RQ.
 * @param rq_attr
 *   RQ attributes structure to fill.
 */
static void
mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
			      struct mlx5_devx_create_rq_attr *rq_attr)
{
	rq_attr->state = MLX5_RQC_STATE_RST;
	rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
	rq_attr->cqn = cqn;
	rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
}

/**
 * Fill common fields of DevX WQ attributes structure.
 *
 * @param priv
 *   Pointer to device private data.
 * @param rxq_ctrl
 *   Pointer to Rx queue control structure.
 * @param wq_attr
 *   WQ attributes structure to fill.
 */
static void
mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
		       struct mlx5_devx_wq_attr *wq_attr)
{
	wq_attr->end_padding_mode = priv->config.cqe_pad ?
					MLX5_WQ_END_PAD_MODE_ALIGN :
					MLX5_WQ_END_PAD_MODE_NONE;
	wq_attr->pd = priv->sh->pdn;
	wq_attr->dbr_addr = rxq_ctrl->rq_dbr_offset;
	wq_attr->dbr_umem_id =
			mlx5_os_get_umem_id(rxq_ctrl->rq_dbrec_page->umem);
	wq_attr->dbr_umem_valid = 1;
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(rxq_ctrl->wq_umem);
	wq_attr->wq_umem_valid = 1;
}

/**
 * Create a RQ object using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX RQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
	uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
	uint32_t cqn = rxq_ctrl->obj->devx_cq->id;
	struct mlx5_devx_dbr_page *dbr_page;
	int64_t dbr_offset;
	uint32_t wq_size = 0;
	uint32_t wqe_size = 0;
	uint32_t log_wqe_size = 0;
	void *buf = NULL;
	struct mlx5_devx_obj *rq;

	/* Fill RQ attributes. */
	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
	rq_attr.flush_in_error_en = 1;
	mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
	/* Fill WQ attributes for this RQ. */
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
		/*
		 * Number of strides in each WQE:
		 * 512*2^single_wqe_log_num_of_strides.
		 */
		rq_attr.wq_attr.single_wqe_log_num_of_strides =
				rxq_data->strd_num_n -
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
		rq_attr.wq_attr.single_stride_log_num_of_bytes =
				rxq_data->strd_sz_n -
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
		wqe_size = sizeof(struct mlx5_wqe_mprq);
	} else {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
		wqe_size = sizeof(struct mlx5_wqe_data_seg);
	}
	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
	rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
	/* Calculate and allocate WQ memory space. */
	wqe_size = 1 << log_wqe_size; /* round up power of two. */
	wq_size = wqe_n * wqe_size;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
		return NULL;
	}
	buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
			  alignment, rxq_ctrl->socket);
	if (!buf)
		return NULL;
	rxq_data->wqes = buf;
	rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
						     buf, wq_size, 0);
	if (!rxq_ctrl->wq_umem)
		goto error;
	/* Allocate RQ door-bell. */
	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
	if (dbr_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate RQ door-bell.");
		goto error;
	}
	rxq_ctrl->rq_dbr_offset = dbr_offset;
	rxq_ctrl->rq_dbrec_page = dbr_page;
	rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
				       (uintptr_t)rxq_ctrl->rq_dbr_offset);
	/* Create RQ using DevX API. */
	mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
	rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
	if (!rq)
		goto error;
	return rq;
error:
	rxq_release_devx_rq_resources(rxq_ctrl);
	return NULL;
}

/**
 * Create a DevX CQ object for an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX CQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_devx_obj *cq_obj = 0;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	size_t page_size = rte_mem_page_size();
	uint32_t lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
	struct mlx5_devx_dbr_page *dbr_page;
	int64_t dbr_offset;
	uint32_t eqn = 0;
	void *buf = NULL;
	uint16_t event_nums[1] = {0};
	uint32_t log_cqe_n;
	uint32_t cq_size;
	int ret = 0;

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get page_size.");
		goto error;
	}
	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
	    !rxq_data->lro) {
		cq_attr.cqe_comp_en = 1u;
		cq_attr.mini_cqe_res_format =
				mlx5_rxq_mprq_enabled(rxq_data) ?
					MLX5_CQE_RESP_FORMAT_CSUM_STRIDX :
					MLX5_CQE_RESP_FORMAT_HASH;
		/*
		 * For vectorized Rx, it must not be doubled in order to
		 * make cq_ci and rq_ci aligned.
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cqe_n *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for HW"
			" timestamp.",
			dev->data->port_id);
	} else if (priv->config.cqe_comp && rxq_data->lro) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for LRO.",
			dev->data->port_id);
	}
	if (priv->config.cqe_pad)
		cq_attr.cqe_size = MLX5_CQE_SIZE_128B;
	log_cqe_n = log2above(cqe_n);
	cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
	/* Query the EQN for this core. */
	if (mlx5_glue->devx_query_eqn(priv->sh->ctx, lcore, &eqn)) {
		DRV_LOG(ERR, "Failed to query EQN for CQ.");
		goto error;
	}
	cq_attr.eqn = eqn;
	buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
				rxq_ctrl->socket);
	if (!buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		goto error;
	}
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
	rxq_ctrl->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, buf,
						     cq_size,
						     IBV_ACCESS_LOCAL_WRITE);
	if (!rxq_ctrl->cq_umem) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		goto error;
	}
	/* Allocate CQ door-bell. */
	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
	if (dbr_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
		goto error;
	}
	rxq_ctrl->cq_dbr_offset = dbr_offset;
	rxq_ctrl->cq_dbrec_page = dbr_page;
	rxq_data->cq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
				       (uintptr_t)rxq_ctrl->cq_dbr_offset);
	rxq_data->cq_uar =
			mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar);
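	/*
	 * The CQE buffer and door-bell record registered above are referenced
	 * by umem id and offset in the CQ attributes filled in below.
	 */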
	/* Create CQ using DevX API. */
	cq_attr.uar_page_id =
			mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar);
	cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem);
	cq_attr.q_umem_valid = 1;
	cq_attr.log_cq_size = log_cqe_n;
	cq_attr.log_page_size = rte_log2_u32(page_size);
	cq_attr.db_umem_offset = rxq_ctrl->cq_dbr_offset;
	cq_attr.db_umem_id = mlx5_os_get_umem_id(dbr_page->umem);
	cq_attr.db_umem_valid = 1;
	cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
	if (!cq_obj)
		goto error;
	rxq_data->cqe_n = log_cqe_n;
	rxq_data->cqn = cq_obj->id;
	if (rxq_ctrl->obj->devx_channel) {
		ret = mlx5_glue->devx_subscribe_devx_event
						(rxq_ctrl->obj->devx_channel,
						 cq_obj->obj,
						 sizeof(event_nums),
						 event_nums,
						 (uint64_t)(uintptr_t)cq_obj);
		if (ret) {
			DRV_LOG(ERR, "Failed to subscribe CQ to event channel.");
			rte_errno = errno;
			goto error;
		}
	}
	/* Initialise CQ to 1's to mark HW ownership for all CQEs. */
	memset((void *)(uintptr_t)rxq_data->cqes, 0xFF, cq_size);
	return cq_obj;
error:
	if (cq_obj)
		mlx5_devx_cmd_destroy(cq_obj);
	rxq_release_devx_cq_resources(rxq_ctrl);
	return NULL;
}

/**
 * Create the Rx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr attr = { 0 };
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
	tmpl->rxq_ctrl = rxq_ctrl;
	attr.hairpin = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9 KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
					   rxq_ctrl->socket);
	if (!tmpl->rq) {
		DRV_LOG(ERR,
			"Port %u Rx hairpin queue %u can't create RQ object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return 0;
}

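/*
 * Note: a hairpin Rx queue allocates no CQ, WQ buffer or door-bell record;
 * only the DevX RQ above is created, so mlx5_rxq_devx_obj_release() takes
 * the hairpin branch and destroys that RQ alone.
 */
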
/**
 * Create the Rx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	int ret = 0;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		return mlx5_rxq_obj_hairpin_new(dev, idx);
	tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
	tmpl->rxq_ctrl = rxq_ctrl;
	if (rxq_ctrl->irq) {
		int devx_ev_flag =
			MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

		tmpl->devx_channel = mlx5_glue->devx_create_event_channel
								(priv->sh->ctx,
								 devx_ev_flag);
		if (!tmpl->devx_channel) {
			rte_errno = errno;
			DRV_LOG(ERR, "Failed to create event channel %d.",
				rte_errno);
			goto error;
		}
		tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
	}
	/* Create CQ using DevX API. */
	tmpl->devx_cq = rxq_create_devx_cq_resources(dev, idx);
	if (!tmpl->devx_cq) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto error;
	}
	/* Create RQ using DevX API. */
	tmpl->rq = rxq_create_devx_rq_resources(dev, idx);
	if (!tmpl->rq) {
		DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_devx_modify_rq(tmpl, true);
	if (ret)
		goto error;
	rxq_data->cq_arm_sn = 0;
	mlx5_rxq_initialize(rxq_data);
	rxq_data->cq_ci = 0;
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	rxq_ctrl->wqn = tmpl->rq->id;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (tmpl->rq)
		claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
	if (tmpl->devx_cq)
		claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq));
	if (tmpl->devx_channel)
		mlx5_glue->devx_destroy_event_channel(tmpl->devx_channel);
	rxq_release_devx_rq_resources(rxq_ctrl);
	rxq_release_devx_cq_resources(rxq_ctrl);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Create RQT using DevX API as a field of an indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
			struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
	const unsigned int rqt_n = 1 << log_n;
	unsigned int i, j;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
			       rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
	if (!rqt_attr) {
		DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
	rqt_attr->rqt_actual_size = rqt_n;
	for (i = 0; i != ind_tbl->queues_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[ind_tbl->queues[i]];
		struct mlx5_rxq_ctrl *rxq_ctrl =
				container_of(rxq, struct mlx5_rxq_ctrl, rxq);

		rqt_attr->rq_list[i] = rxq_ctrl->obj->rq->id;
	}
	MLX5_ASSERT(i > 0);
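	/*
	 * The RQT size is a power of two; if fewer queues are configured,
	 * wrap around and repeat the RQ list to fill the remaining entries.
	 */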
	for (j = 0; i != rqt_n; ++j, ++i)
		rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
	ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);
	mlx5_free(rqt_attr);
	if (!ind_tbl->rqt) {
		DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
			dev->data->port_id);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

/**
 * Destroy the DevX RQT object.
 *
 * @param ind_tbl
 *   Indirection table to release.
 */
static void
mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
	claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
}

/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Pointer to Rx Hash queue.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		   int tunnel __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ind_table_obj *ind_tbl = hrxq->ind_table;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_tir_attr tir_attr;
	const uint8_t *rss_key = hrxq->rss_key;
	uint64_t hash_fields = hrxq->hash_fields;
	bool lro = true;
	uint32_t i;
	int err;

	/* Enable TIR LRO only if all the queues were configured for it. */
	for (i = 0; i < ind_tbl->queues_n; ++i) {
		if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
			lro = false;
			break;
		}
	}
	memset(&tir_attr, 0, sizeof(tir_attr));
	tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
	tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
	tir_attr.tunneled_offload_en = !!tunnel;
	/* If needed, translate hash_fields bitmap to PRM format. */
	if (hash_fields) {
		struct mlx5_rx_hash_field_select *rx_hash_field_select = NULL;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
		rx_hash_field_select = hash_fields & IBV_RX_HASH_INNER ?
				       &tir_attr.rx_hash_field_selector_inner :
				       &tir_attr.rx_hash_field_selector_outer;
#else
		rx_hash_field_select = &tir_attr.rx_hash_field_selector_outer;
#endif
		/* 1 bit: 0: IPv4, 1: IPv6. */
		rx_hash_field_select->l3_prot_type =
			!!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
		/* 1 bit: 0: TCP, 1: UDP. */
		rx_hash_field_select->l4_prot_type =
			!!(hash_fields & MLX5_UDP_IBV_RX_HASH);
		/* Bitmask which sets which fields to use in RX Hash. */
		rx_hash_field_select->selected_fields =
			((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
			((!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP) |
			((!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT) |
			((!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT);
	}
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		tir_attr.transport_domain = priv->sh->td->id;
	else
		tir_attr.transport_domain = priv->sh->tdn;
	memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	tir_attr.indirect_table = ind_tbl->rqt->id;
	if (dev->data->dev_conf.lpbk_mode)
		tir_attr.self_lb_block = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
	if (lro) {
		tir_attr.lro_timeout_period_usecs = priv->config.lro.timeout;
		tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
		tir_attr.lro_enable_mask = MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
					   MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
	}
	hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
	if (!hrxq->tir) {
		DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
			dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
							(hrxq->tir->obj);
	if (!hrxq->action) {
		rte_errno = errno;
		goto error;
	}
#endif
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	if (hrxq->tir)
		claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Destroy a DevX TIR object.
 *
 * @param hrxq
 *   Hash Rx queue to release its TIR.
 */
static void
mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
{
	claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
}

/**
 * Create a DevX drop action for an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
{
	(void)dev;
	DRV_LOG(ERR, "DevX drop action is not supported yet");
	rte_errno = ENOTSUP;
	return -rte_errno;
}

/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
{
	(void)dev;
	DRV_LOG(ERR, "DevX drop action is not supported yet");
	rte_errno = ENOTSUP;
}

struct mlx5_obj_ops devx_obj_ops = {
	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
	.rxq_obj_new = mlx5_rxq_devx_obj_new,
	.rxq_event_get = mlx5_rx_devx_get_event,
	.rxq_obj_modify = mlx5_devx_modify_rq,
	.rxq_obj_release = mlx5_rxq_devx_obj_release,
	.ind_table_new = mlx5_devx_ind_table_new,
	.ind_table_destroy = mlx5_devx_ind_table_destroy,
	.hrxq_new = mlx5_devx_hrxq_new,
	.hrxq_destroy = mlx5_devx_tir_destroy,
	.drop_action_create = mlx5_devx_drop_action_create,
	.drop_action_destroy = mlx5_devx_drop_action_destroy,
};