/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"
#include "mlx5_flow.h"

/**
 * Modify RQ VLAN stripping offload.
 *
 * @param rxq_obj
 *   Rx queue object.
 * @param on
 *   Enable VLAN stripping if nonzero, disable it otherwise.
 *
 * @return
 *   0 on success, non-0 otherwise.
 */
static int
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	rq_attr.state = MLX5_RQC_STATE_RDY;
	rq_attr.vsd = (on ? 0 : 1);
	rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}

/**
 * Modify RQ state using DevX API.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 * @param is_start
 *   True to move the RQ from reset to ready, false to move it back to reset.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, bool is_start)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	if (is_start) {
		rq_attr.rq_state = MLX5_RQC_STATE_RST;
		rq_attr.state = MLX5_RQC_STATE_RDY;
	} else {
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_RST;
	}
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}

/**
 * Release the resources allocated for an RQ DevX object.
 *
 * @param rxq_ctrl
 *   Rx queue control structure.
 */
static void
rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->rq_dbrec_page;

	if (rxq_ctrl->rxq.wqes) {
		mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
		rxq_ctrl->rxq.wqes = NULL;
	}
	if (rxq_ctrl->wq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
		rxq_ctrl->wq_umem = NULL;
	}
	if (dbr_page) {
		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id(dbr_page->umem),
					    rxq_ctrl->rq_dbr_offset));
		rxq_ctrl->rq_dbrec_page = NULL;
	}
}

/**
 * Release the resources allocated for the Rx CQ DevX object.
 *
 * @param rxq_ctrl
 *   Rx queue control structure.
 */
static void
rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->cq_dbrec_page;

	if (rxq_ctrl->rxq.cqes) {
		rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes);
		rxq_ctrl->rxq.cqes = NULL;
	}
	if (rxq_ctrl->cq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->cq_umem);
		rxq_ctrl->cq_umem = NULL;
	}
	if (dbr_page) {
		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id(dbr_page->umem),
					    rxq_ctrl->cq_dbr_offset));
		rxq_ctrl->cq_dbrec_page = NULL;
	}
}

/**
 * Release an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
	MLX5_ASSERT(rxq_obj);
	MLX5_ASSERT(rxq_obj->rq);
	if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN) {
		mlx5_devx_modify_rq(rxq_obj, false);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
	} else {
		MLX5_ASSERT(rxq_obj->devx_cq);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
		if (rxq_obj->devx_channel)
			mlx5_glue->devx_destroy_event_channel
							(rxq_obj->devx_channel);
		rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl);
		rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl);
	}
}

/**
 * Get event for an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
{
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
					    &out.event_resp,
					    sizeof(out.buf));

	if (ret < 0) {
		rte_errno = errno;
		return -rte_errno;
	}
	if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->devx_cq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
#else
	(void)rxq_obj;
	rte_errno = ENOTSUP;
	return -rte_errno;
#endif /* HAVE_IBV_DEVX_EVENT */
}

/**
 * Fill common fields of create RQ attributes structure.
 *
 * @param rxq_data
 *   Pointer to Rx queue data.
 * @param cqn
 *   CQ number to use with this RQ.
 * @param rq_attr
 *   RQ attributes structure to fill.
 */
static void
mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
			      struct mlx5_devx_create_rq_attr *rq_attr)
{
	rq_attr->state = MLX5_RQC_STATE_RST;
	rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
	rq_attr->cqn = cqn;
	rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
}

/**
 * Fill common fields of DevX WQ attributes structure.
 *
 * @param priv
 *   Pointer to device private data.
 * @param rxq_ctrl
 *   Pointer to Rx queue control structure.
 * @param wq_attr
 *   WQ attributes structure to fill.
 */
static void
mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
		       struct mlx5_devx_wq_attr *wq_attr)
{
	wq_attr->end_padding_mode = priv->config.cqe_pad ?
					MLX5_WQ_END_PAD_MODE_ALIGN :
					MLX5_WQ_END_PAD_MODE_NONE;
	wq_attr->pd = priv->sh->pdn;
	wq_attr->dbr_addr = rxq_ctrl->rq_dbr_offset;
	wq_attr->dbr_umem_id =
			mlx5_os_get_umem_id(rxq_ctrl->rq_dbrec_page->umem);
	wq_attr->dbr_umem_valid = 1;
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(rxq_ctrl->wq_umem);
	wq_attr->wq_umem_valid = 1;
}

/**
 * Create an RQ object using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX RQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
	uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
	uint32_t cqn = rxq_ctrl->obj->devx_cq->id;
	struct mlx5_devx_dbr_page *dbr_page;
	int64_t dbr_offset;
	uint32_t wq_size = 0;
	uint32_t wqe_size = 0;
	uint32_t log_wqe_size = 0;
	void *buf = NULL;
	struct mlx5_devx_obj *rq;

	/* Fill RQ attributes. */
	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
	rq_attr.flush_in_error_en = 1;
	mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
	/* Fill WQ attributes for this RQ. */
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
		/*
		 * Number of strides in each WQE:
		 * 512*2^single_wqe_log_num_of_strides.
		 */
		rq_attr.wq_attr.single_wqe_log_num_of_strides =
				rxq_data->strd_num_n -
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
		rq_attr.wq_attr.single_stride_log_num_of_bytes =
				rxq_data->strd_sz_n -
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
		wqe_size = sizeof(struct mlx5_wqe_mprq);
	} else {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
		wqe_size = sizeof(struct mlx5_wqe_data_seg);
	}
	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
	rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
	/* Calculate and allocate WQ memory space. */
	wqe_size = 1 << log_wqe_size; /* Round up to power of two. */
	wq_size = wqe_n * wqe_size;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
		return NULL;
	}
	buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
			  alignment, rxq_ctrl->socket);
	if (!buf)
		return NULL;
	rxq_data->wqes = buf;
	rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
						     buf, wq_size, 0);
	if (!rxq_ctrl->wq_umem)
		goto error;
	/* Allocate RQ door-bell. */
	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
	if (dbr_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate RQ door-bell.");
		goto error;
	}
	rxq_ctrl->rq_dbr_offset = dbr_offset;
	rxq_ctrl->rq_dbrec_page = dbr_page;
	rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
				       (uintptr_t)rxq_ctrl->rq_dbr_offset);
	/* Create RQ using DevX API. */
	mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
	rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
	if (!rq)
		goto error;
	return rq;
error:
	rxq_release_devx_rq_resources(rxq_ctrl);
	return NULL;
}

/**
 * Create a DevX CQ object for an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX CQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_devx_obj *cq_obj = 0;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	size_t page_size = rte_mem_page_size();
	uint32_t lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
	struct mlx5_devx_dbr_page *dbr_page;
	int64_t dbr_offset;
	uint32_t eqn = 0;
	void *buf = NULL;
	uint16_t event_nums[1] = {0};
	uint32_t log_cqe_n;
	uint32_t cq_size;
	int ret = 0;

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get page_size.");
		goto error;
	}
	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
	    !rxq_data->lro) {
		cq_attr.cqe_comp_en = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
		cq_attr.mini_cqe_res_format =
				mlx5_rxq_mprq_enabled(rxq_data) ?
					MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
					MLX5DV_CQE_RES_FORMAT_HASH;
#else
		cq_attr.mini_cqe_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
#endif
		/*
		 * For vectorized Rx, it must not be doubled in order to
		 * make cq_ci and rq_ci aligned.
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cqe_n *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for HW"
			" timestamp.",
			dev->data->port_id);
	} else if (priv->config.cqe_comp && rxq_data->lro) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for LRO.",
			dev->data->port_id);
	}
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
	if (priv->config.cqe_pad)
		cq_attr.cqe_size = MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
#endif
	log_cqe_n = log2above(cqe_n);
	cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
	/* Query the EQN for this core. */
	if (mlx5_glue->devx_query_eqn(priv->sh->ctx, lcore, &eqn)) {
		DRV_LOG(ERR, "Failed to query EQN for CQ.");
		goto error;
	}
	cq_attr.eqn = eqn;
	buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
				rxq_ctrl->socket);
	if (!buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		goto error;
	}
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
	rxq_ctrl->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, buf,
						     cq_size,
						     IBV_ACCESS_LOCAL_WRITE);
	if (!rxq_ctrl->cq_umem) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		goto error;
	}
	/* Allocate CQ door-bell. */
	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
	if (dbr_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
		goto error;
	}
	rxq_ctrl->cq_dbr_offset = dbr_offset;
	rxq_ctrl->cq_dbrec_page = dbr_page;
	rxq_data->cq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
				       (uintptr_t)rxq_ctrl->cq_dbr_offset);
	rxq_data->cq_uar =
			mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar);
	/* Create CQ using DevX API. */
	cq_attr.uar_page_id =
			mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar);
	cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem);
	cq_attr.q_umem_valid = 1;
	cq_attr.log_cq_size = log_cqe_n;
	cq_attr.log_page_size = rte_log2_u32(page_size);
	cq_attr.db_umem_offset = rxq_ctrl->cq_dbr_offset;
	cq_attr.db_umem_id = mlx5_os_get_umem_id(dbr_page->umem);
	cq_attr.db_umem_valid = 1;
	cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
	if (!cq_obj)
		goto error;
	rxq_data->cqe_n = log_cqe_n;
	rxq_data->cqn = cq_obj->id;
	if (rxq_ctrl->obj->devx_channel) {
		ret = mlx5_glue->devx_subscribe_devx_event
						(rxq_ctrl->obj->devx_channel,
						 cq_obj->obj,
						 sizeof(event_nums),
						 event_nums,
						 (uint64_t)(uintptr_t)cq_obj);
		if (ret) {
			DRV_LOG(ERR, "Failed to subscribe CQ to event channel.");
			rte_errno = errno;
			goto error;
		}
	}
	/* Initialise CQ to 1's to mark HW ownership for all CQEs. */
	memset((void *)(uintptr_t)rxq_data->cqes, 0xFF, cq_size);
	return cq_obj;
error:
	if (cq_obj)
		mlx5_devx_cmd_destroy(cq_obj);
	rxq_release_devx_cq_resources(rxq_ctrl);
	return NULL;
}

/**
 * Create the Rx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr attr = { 0 };
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
	tmpl->rxq_ctrl = rxq_ctrl;
	attr.hairpin = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
					   rxq_ctrl->socket);
	if (!tmpl->rq) {
		DRV_LOG(ERR,
			"Port %u Rx hairpin queue %u can't create rq object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return 0;
}

/**
 * Create the Rx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	int ret = 0;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		return mlx5_rxq_obj_hairpin_new(dev, idx);
	tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
	tmpl->rxq_ctrl = rxq_ctrl;
	if (rxq_ctrl->irq) {
		int devx_ev_flag =
			  MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

		tmpl->devx_channel = mlx5_glue->devx_create_event_channel
								(priv->sh->ctx,
								 devx_ev_flag);
		if (!tmpl->devx_channel) {
			rte_errno = errno;
			DRV_LOG(ERR, "Failed to create event channel %d.",
				rte_errno);
			goto error;
		}
		tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
	}
	/* Create CQ using DevX API. */
	tmpl->devx_cq = rxq_create_devx_cq_resources(dev, idx);
	if (!tmpl->devx_cq) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto error;
	}
	/* Create RQ using DevX API. */
	tmpl->rq = rxq_create_devx_rq_resources(dev, idx);
	if (!tmpl->rq) {
		DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_devx_modify_rq(tmpl, true);
	if (ret)
		goto error;
	rxq_data->cq_arm_sn = 0;
	mlx5_rxq_initialize(rxq_data);
	rxq_data->cq_ci = 0;
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	rxq_ctrl->wqn = tmpl->rq->id;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (tmpl->rq)
		claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
	if (tmpl->devx_cq)
		claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq));
	if (tmpl->devx_channel)
		mlx5_glue->devx_destroy_event_channel(tmpl->devx_channel);
	rxq_release_devx_rq_resources(rxq_ctrl);
	rxq_release_devx_cq_resources(rxq_ctrl);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Create an RQT using the DevX API as a field of the indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of the number of queues in the array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
			struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
	const unsigned int rqt_n = 1 << log_n;
	unsigned int i, j;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
			       rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
	if (!rqt_attr) {
		DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
	rqt_attr->rqt_actual_size = rqt_n;
	for (i = 0; i != ind_tbl->queues_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[ind_tbl->queues[i]];
		struct mlx5_rxq_ctrl *rxq_ctrl =
				container_of(rxq, struct mlx5_rxq_ctrl, rxq);

		rqt_attr->rq_list[i] = rxq_ctrl->obj->rq->id;
	}
	MLX5_ASSERT(i > 0);
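	/* Pad the RQT to its power-of-two size by repeating the queues. */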
	for (j = 0; i != rqt_n; ++j, ++i)
		rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
	ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);
	mlx5_free(rqt_attr);
	if (!ind_tbl->rqt) {
		DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
			dev->data->port_id);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

/**
 * Destroy the DevX RQT object.
 *
 * @param ind_tbl
 *   Indirection table to release.
 */
static void
mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
	claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
}

/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Pointer to Rx Hash queue.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		   int tunnel __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ind_table_obj *ind_tbl = hrxq->ind_table;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_tir_attr tir_attr;
	const uint8_t *rss_key = hrxq->rss_key;
	uint64_t hash_fields = hrxq->hash_fields;
	bool lro = true;
	uint32_t i;
	int err;

	/* Enable TIR LRO only if all the queues were configured for it. */
	for (i = 0; i < ind_tbl->queues_n; ++i) {
		if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
			lro = false;
			break;
		}
	}
	memset(&tir_attr, 0, sizeof(tir_attr));
	tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
	tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
	tir_attr.tunneled_offload_en = !!tunnel;
	/* If needed, translate hash_fields bitmap to PRM format. */
	if (hash_fields) {
		struct mlx5_rx_hash_field_select *rx_hash_field_select = NULL;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
		rx_hash_field_select = hash_fields & IBV_RX_HASH_INNER ?
				       &tir_attr.rx_hash_field_selector_inner :
				       &tir_attr.rx_hash_field_selector_outer;
#else
		rx_hash_field_select = &tir_attr.rx_hash_field_selector_outer;
#endif
		/* 1 bit: 0: IPv4, 1: IPv6. */
		rx_hash_field_select->l3_prot_type =
			!!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
		/* 1 bit: 0: TCP, 1: UDP. */
		rx_hash_field_select->l4_prot_type =
			!!(hash_fields & MLX5_UDP_IBV_RX_HASH);
		/* Bitmask which sets which fields to use in RX Hash. */
		rx_hash_field_select->selected_fields =
			((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
			(!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
			(!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
			(!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
	}
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		tir_attr.transport_domain = priv->sh->td->id;
	else
		tir_attr.transport_domain = priv->sh->tdn;
	memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	tir_attr.indirect_table = ind_tbl->rqt->id;
	if (dev->data->dev_conf.lpbk_mode)
		tir_attr.self_lb_block = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
	if (lro) {
		tir_attr.lro_timeout_period_usecs = priv->config.lro.timeout;
		tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
		tir_attr.lro_enable_mask = MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
					   MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
	}
	hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
	if (!hrxq->tir) {
		DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
			dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
							(hrxq->tir->obj);
	if (!hrxq->action) {
		rte_errno = errno;
		goto error;
	}
#endif
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	if (hrxq->tir)
		claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Destroy a DevX TIR object.
 *
 * @param hrxq
 *   Hash Rx queue to release its TIR.
 */
static void
mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
{
	claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
}

/**
 * Create a drop Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The DevX object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_hrxq *
mlx5_devx_hrxq_drop_new(struct rte_eth_dev *dev)
{
	(void)dev;
	DRV_LOG(ERR, "DevX drop action is not supported yet");
	rte_errno = ENOTSUP;
	return NULL;
}

/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_devx_hrxq_drop_release(struct rte_eth_dev *dev)
{
	(void)dev;
	DRV_LOG(ERR, "DevX drop action is not supported yet");
	rte_errno = ENOTSUP;
}

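/*
 * Rx object operations (queue, indirection table and hash Rx queue)
 * implemented on top of DevX objects.
 */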
struct mlx5_obj_ops devx_obj_ops = {
	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
	.rxq_obj_new = mlx5_rxq_devx_obj_new,
	.rxq_event_get = mlx5_rx_devx_get_event,
	.rxq_obj_modify = mlx5_devx_modify_rq,
	.rxq_obj_release = mlx5_rxq_devx_obj_release,
	.ind_table_new = mlx5_devx_ind_table_new,
	.ind_table_destroy = mlx5_devx_ind_table_destroy,
	.hrxq_new = mlx5_devx_hrxq_new,
	.hrxq_destroy = mlx5_devx_tir_destroy,
	.hrxq_drop_new = mlx5_devx_hrxq_drop_new,
	.hrxq_drop_release = mlx5_devx_hrxq_drop_release,
};