/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"
#include "mlx5_flow.h"


/**
 * Modify RQ VLAN stripping offload.
 *
 * @param rxq_obj
 *   Rx queue object.
 * @param on
 *   Enable/disable VLAN stripping.
 *
 * @return
 *   0 on success, non-0 otherwise.
 */
static int
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	rq_attr.state = MLX5_RQC_STATE_RDY;
	rq_attr.vsd = (on ? 0 : 1);
	rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}

/**
 * Modify RQ state using DevX API.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 * @param is_start
 *   True to move the RQ from the RST state to RDY, false for RDY to RST.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, bool is_start)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	if (is_start) {
		rq_attr.rq_state = MLX5_RQC_STATE_RST;
		rq_attr.state = MLX5_RQC_STATE_RDY;
	} else {
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_RST;
	}
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}
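
/*
 * A DevX RQ is created in the RST state; mlx5_devx_modify_rq(rxq_obj, true)
 * moves it to RDY before traffic can flow, and the reverse RDY -> RST
 * transition is used on stop and before destruction (see the hairpin branch
 * of mlx5_rxq_devx_obj_release() below).
 */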

/**
 * Release the resources allocated for an RQ DevX object.
 *
 * @param rxq_ctrl
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->rq_dbrec_page;

	if (rxq_ctrl->rxq.wqes) {
		mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
		rxq_ctrl->rxq.wqes = NULL;
	}
	if (rxq_ctrl->wq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
		rxq_ctrl->wq_umem = NULL;
	}
	if (dbr_page) {
		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id(dbr_page->umem),
					    rxq_ctrl->rq_dbr_offset));
		rxq_ctrl->rq_dbrec_page = NULL;
	}
}

/**
 * Release the resources allocated for the Rx CQ DevX object.
 *
 * @param rxq_ctrl
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->cq_dbrec_page;

	if (rxq_ctrl->rxq.cqes) {
		rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes);
		rxq_ctrl->rxq.cqes = NULL;
	}
	if (rxq_ctrl->cq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->cq_umem);
		rxq_ctrl->cq_umem = NULL;
	}
	if (dbr_page) {
		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id(dbr_page->umem),
					    rxq_ctrl->cq_dbr_offset));
		rxq_ctrl->cq_dbrec_page = NULL;
	}
}
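
/*
 * The two release helpers above undo the allocation steps of the matching
 * create functions (ring buffer, registered umem, doorbell record) with a
 * per-resource NULL check, so they are also safe to call on partially
 * constructed queues from error paths.
 */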

/**
 * Release an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
	MLX5_ASSERT(rxq_obj);
	MLX5_ASSERT(rxq_obj->rq);
	if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN) {
		mlx5_devx_modify_rq(rxq_obj, false);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
	} else {
		MLX5_ASSERT(rxq_obj->devx_cq);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
		if (rxq_obj->devx_channel)
			mlx5_glue->devx_destroy_event_channel
							(rxq_obj->devx_channel);
		mlx5_rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl);
		mlx5_rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl);
	}
}

/**
 * Get event for an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
{
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
					    &out.event_resp,
					    sizeof(out.buf));

	if (ret < 0) {
		rte_errno = errno;
		return -rte_errno;
	}
	if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->devx_cq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
#else
	(void)rxq_obj;
	rte_errno = ENOTSUP;
	return -rte_errno;
#endif /* HAVE_IBV_DEVX_EVENT */
}
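
/*
 * The cookie checked above is the value registered with
 * mlx5_glue->devx_subscribe_devx_event() at CQ creation time (the CQ object
 * pointer), so an event carrying any other cookie is rejected with EINVAL.
 */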

/**
 * Fill common fields of create RQ attributes structure.
 *
 * @param rxq_data
 *   Pointer to Rx queue data.
 * @param cqn
 *   CQ number to use with this RQ.
 * @param rq_attr
 *   RQ attributes structure to fill.
 */
static void
mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
			      struct mlx5_devx_create_rq_attr *rq_attr)
{
	rq_attr->state = MLX5_RQC_STATE_RST;
	rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
	rq_attr->cqn = cqn;
	rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
}

/**
 * Fill common fields of DevX WQ attributes structure.
 *
 * @param priv
 *   Pointer to device private data.
 * @param rxq_ctrl
 *   Pointer to Rx queue control structure.
 * @param wq_attr
 *   WQ attributes structure to fill.
 */
static void
mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
		       struct mlx5_devx_wq_attr *wq_attr)
{
	wq_attr->end_padding_mode = priv->config.cqe_pad ?
					MLX5_WQ_END_PAD_MODE_ALIGN :
					MLX5_WQ_END_PAD_MODE_NONE;
	wq_attr->pd = priv->sh->pdn;
	wq_attr->dbr_addr = rxq_ctrl->rq_dbr_offset;
	wq_attr->dbr_umem_id =
			mlx5_os_get_umem_id(rxq_ctrl->rq_dbrec_page->umem);
	wq_attr->dbr_umem_valid = 1;
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(rxq_ctrl->wq_umem);
	wq_attr->wq_umem_valid = 1;
}
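
/*
 * Note: mlx5_devx_wq_attr_fill() only references resources that must already
 * exist - the WQ umem and the doorbell record of rxq_ctrl are registered and
 * allocated by mlx5_rxq_create_devx_rq_resources() before the attributes are
 * filled in.
 */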

/**
 * Create an RQ object using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX RQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
	uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
	uint32_t cqn = rxq_ctrl->obj->devx_cq->id;
	struct mlx5_devx_dbr_page *dbr_page;
	int64_t dbr_offset;
	uint32_t wq_size = 0;
	uint32_t wqe_size = 0;
	uint32_t log_wqe_size = 0;
	void *buf = NULL;
	struct mlx5_devx_obj *rq;

	/* Fill RQ attributes. */
	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
	rq_attr.flush_in_error_en = 1;
	mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
	/* Fill WQ attributes for this RQ. */
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
		/*
		 * Number of strides in each WQE:
		 * 512*2^single_wqe_log_num_of_strides.
		 */
		rq_attr.wq_attr.single_wqe_log_num_of_strides =
				rxq_data->strd_num_n -
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
		rq_attr.wq_attr.single_stride_log_num_of_bytes =
				rxq_data->strd_sz_n -
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
		wqe_size = sizeof(struct mlx5_wqe_mprq);
	} else {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
		wqe_size = sizeof(struct mlx5_wqe_data_seg);
	}
	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
	rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
	/* Calculate and allocate WQ memory space. */
	wqe_size = 1 << log_wqe_size; /* Round up power of two. */
	wq_size = wqe_n * wqe_size;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
		return NULL;
	}
	buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
			  alignment, rxq_ctrl->socket);
	if (!buf)
		return NULL;
	rxq_data->wqes = buf;
	rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
						     buf, wq_size, 0);
	if (!rxq_ctrl->wq_umem)
		goto error;
	/* Allocate RQ door-bell. */
	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
	if (dbr_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate RQ door-bell.");
		goto error;
	}
	rxq_ctrl->rq_dbr_offset = dbr_offset;
	rxq_ctrl->rq_dbrec_page = dbr_page;
	rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
			  (uintptr_t)rxq_ctrl->rq_dbr_offset);
	/* Create RQ using DevX API. */
	mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
	rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
	if (!rq)
		goto error;
	return rq;
error:
	mlx5_rxq_release_devx_rq_resources(rxq_ctrl);
	return NULL;
}
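
/*
 * RQ creation above follows a fixed sequence: derive the WQE layout from
 * elts_n/sges_n (or the MPRQ stride parameters), allocate and register the
 * WQ buffer as a umem, allocate the doorbell record, then issue the DevX
 * create command; any failure funnels into a common error path that
 * releases whatever was already allocated.
 */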

/**
 * Create a DevX CQ object for an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX CQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_devx_obj *cq_obj = 0;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	size_t page_size = rte_mem_page_size();
	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
	struct mlx5_devx_dbr_page *dbr_page;
	int64_t dbr_offset;
	void *buf = NULL;
	uint16_t event_nums[1] = {0};
	uint32_t log_cqe_n;
	uint32_t cq_size;
	int ret = 0;

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get page_size.");
		goto error;
	}
	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
	    !rxq_data->lro) {
		cq_attr.cqe_comp_en = 1u;
		cq_attr.mini_cqe_res_format =
				mlx5_rxq_mprq_enabled(rxq_data) ?
					MLX5_CQE_RESP_FORMAT_CSUM_STRIDX :
					MLX5_CQE_RESP_FORMAT_HASH;
		/*
		 * For vectorized Rx, it must not be doubled in order to
		 * make cq_ci and rq_ci aligned.
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cqe_n *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for HW"
			" timestamp.",
			dev->data->port_id);
	} else if (priv->config.cqe_comp && rxq_data->lro) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for LRO.",
			dev->data->port_id);
	}
	if (priv->config.cqe_pad)
		cq_attr.cqe_size = MLX5_CQE_SIZE_128B;
	log_cqe_n = log2above(cqe_n);
	cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
	buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
				rxq_ctrl->socket);
	if (!buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		goto error;
	}
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
	rxq_ctrl->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, buf,
						     cq_size,
						     IBV_ACCESS_LOCAL_WRITE);
	if (!rxq_ctrl->cq_umem) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		goto error;
	}
	/* Allocate CQ door-bell. */
	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
	if (dbr_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
		goto error;
	}
	rxq_ctrl->cq_dbr_offset = dbr_offset;
	rxq_ctrl->cq_dbrec_page = dbr_page;
	rxq_data->cq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
			  (uintptr_t)rxq_ctrl->cq_dbr_offset);
	rxq_data->cq_uar =
			mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar);
	/* Create CQ using DevX API. */
	cq_attr.eqn = priv->sh->eqn;
	cq_attr.uar_page_id =
			mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar);
	cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem);
	cq_attr.q_umem_valid = 1;
	cq_attr.log_cq_size = log_cqe_n;
	cq_attr.log_page_size = rte_log2_u32(page_size);
	cq_attr.db_umem_offset = rxq_ctrl->cq_dbr_offset;
	cq_attr.db_umem_id = mlx5_os_get_umem_id(dbr_page->umem);
	cq_attr.db_umem_valid = 1;
	cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
	if (!cq_obj)
		goto error;
	rxq_data->cqe_n = log_cqe_n;
	rxq_data->cqn = cq_obj->id;
	if (rxq_ctrl->obj->devx_channel) {
		ret = mlx5_glue->devx_subscribe_devx_event
						(rxq_ctrl->obj->devx_channel,
						 cq_obj->obj,
						 sizeof(event_nums),
						 event_nums,
						 (uint64_t)(uintptr_t)cq_obj);
		if (ret) {
			DRV_LOG(ERR,
				"Failed to subscribe CQ to event channel.");
			rte_errno = errno;
			goto error;
		}
	}
	/* Initialise CQ to 1's to mark HW ownership for all CQEs. */
	memset((void *)(uintptr_t)rxq_data->cqes, 0xFF, cq_size);
	return cq_obj;
error:
	if (cq_obj)
		mlx5_devx_cmd_destroy(cq_obj);
	mlx5_rxq_release_devx_cq_resources(rxq_ctrl);
	return NULL;
}
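
/*
 * Writing 0xFF over the fresh CQ buffer sets the owner bit and an invalid
 * opcode in every CQE, so the first poll cannot mistake stale memory for a
 * valid completion before HW has written anything.
 */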

/**
 * Create the Rx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr attr = { 0 };
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
	tmpl->rxq_ctrl = rxq_ctrl;
	attr.hairpin = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
					   rxq_ctrl->socket);
	if (!tmpl->rq) {
		DRV_LOG(ERR,
			"Port %u Rx hairpin queue %u can't create rq object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return 0;
}
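
/*
 * Hairpin sizing note: since both values are base-2 logarithms, subtracting
 * MLX5_HAIRPIN_QUEUE_STRIDE from log_hairpin_data_sz above makes the packet
 * count the total data size divided by the queue stride.
 */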

/**
 * Create the Rx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	int ret = 0;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		return mlx5_rxq_obj_hairpin_new(dev, idx);
	tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
	tmpl->rxq_ctrl = rxq_ctrl;
	if (rxq_ctrl->irq) {
		int devx_ev_flag =
			  MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

		tmpl->devx_channel = mlx5_glue->devx_create_event_channel
								(priv->sh->ctx,
								 devx_ev_flag);
		if (!tmpl->devx_channel) {
			rte_errno = errno;
			DRV_LOG(ERR, "Failed to create event channel %d.",
				rte_errno);
			goto error;
		}
		tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
	}
	/* Create CQ using DevX API. */
	tmpl->devx_cq = mlx5_rxq_create_devx_cq_resources(dev, idx);
	if (!tmpl->devx_cq) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto error;
	}
	/* Create RQ using DevX API. */
	tmpl->rq = mlx5_rxq_create_devx_rq_resources(dev, idx);
	if (!tmpl->rq) {
		DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_devx_modify_rq(tmpl, true);
	if (ret)
		goto error;
	rxq_data->cq_arm_sn = 0;
	mlx5_rxq_initialize(rxq_data);
	rxq_data->cq_ci = 0;
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	rxq_ctrl->wqn = tmpl->rq->id;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (tmpl->rq)
		claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
	if (tmpl->devx_cq)
		claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq));
	if (tmpl->devx_channel)
		mlx5_glue->devx_destroy_event_channel(tmpl->devx_channel);
	mlx5_rxq_release_devx_rq_resources(rxq_ctrl);
	mlx5_rxq_release_devx_cq_resources(rxq_ctrl);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Create RQT using DevX API as a field of indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
			struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
	const unsigned int rqt_n = 1 << log_n;
	unsigned int i, j;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
			      rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
	if (!rqt_attr) {
		DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
	rqt_attr->rqt_actual_size = rqt_n;
	for (i = 0; i != ind_tbl->queues_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[ind_tbl->queues[i]];
		struct mlx5_rxq_ctrl *rxq_ctrl =
				container_of(rxq, struct mlx5_rxq_ctrl, rxq);

		rqt_attr->rq_list[i] = rxq_ctrl->obj->rq->id;
	}
	MLX5_ASSERT(i > 0);
	for (j = 0; i != rqt_n; ++j, ++i)
		rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
	ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);
	mlx5_free(rqt_attr);
	if (!ind_tbl->rqt) {
		DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
			dev->data->port_id);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}
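
/*
 * The RQT size is always a power of two (1 << log_n), so when fewer queues
 * than RQT entries are supplied the second loop above fills the tail of
 * rq_list by wrapping around the configured queues.
 */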

/**
 * Destroy the DevX RQT object.
 *
 * @param ind_tbl
 *   Indirection table to release.
 */
static void
mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
	claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
}

/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Pointer to Rx Hash queue.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		   int tunnel __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ind_table_obj *ind_tbl = hrxq->ind_table;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_tir_attr tir_attr;
	const uint8_t *rss_key = hrxq->rss_key;
	uint64_t hash_fields = hrxq->hash_fields;
	bool lro = true;
	uint32_t i;
	int err;

	/* Enable TIR LRO only if all the queues were configured for it. */
	for (i = 0; i < ind_tbl->queues_n; ++i) {
		if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
			lro = false;
			break;
		}
	}
	memset(&tir_attr, 0, sizeof(tir_attr));
	tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
	tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
	tir_attr.tunneled_offload_en = !!tunnel;
	/* If needed, translate hash_fields bitmap to PRM format. */
	if (hash_fields) {
		struct mlx5_rx_hash_field_select *rx_hash_field_select = NULL;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
		rx_hash_field_select = hash_fields & IBV_RX_HASH_INNER ?
				       &tir_attr.rx_hash_field_selector_inner :
				       &tir_attr.rx_hash_field_selector_outer;
#else
		rx_hash_field_select = &tir_attr.rx_hash_field_selector_outer;
#endif
		/* 1 bit: 0: IPv4, 1: IPv6. */
		rx_hash_field_select->l3_prot_type =
			!!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
		/* 1 bit: 0: TCP, 1: UDP. */
		rx_hash_field_select->l4_prot_type =
			!!(hash_fields & MLX5_UDP_IBV_RX_HASH);
		/* Bitmask which sets which fields to use in RX Hash. */
		rx_hash_field_select->selected_fields =
			((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
			(!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
			(!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
			(!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
	}
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		tir_attr.transport_domain = priv->sh->td->id;
	else
		tir_attr.transport_domain = priv->sh->tdn;
	memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	tir_attr.indirect_table = ind_tbl->rqt->id;
	if (dev->data->dev_conf.lpbk_mode)
		tir_attr.self_lb_block = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
	if (lro) {
		tir_attr.lro_timeout_period_usecs = priv->config.lro.timeout;
		tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
		tir_attr.lro_enable_mask = MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
					   MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
	}
	hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
	if (!hrxq->tir) {
		DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
			dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
							(hrxq->tir->obj);
	if (!hrxq->action) {
		rte_errno = errno;
		goto error;
	}
#endif
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	if (hrxq->tir)
		claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
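
/*
 * LRO is enabled on the TIR only when every queue of the indirection table
 * was configured with LRO: the lro_* attributes are per-TIR, so they cannot
 * apply to a subset of its queues.
 */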

/**
 * Destroy a DevX TIR object.
 *
 * @param hrxq
 *   Hash Rx queue to release its tir.
 */
static void
mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
{
	claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
}

/**
 * Create a DevX drop action for Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
{
	(void)dev;
	DRV_LOG(ERR, "DevX drop action is not supported yet.");
	rte_errno = ENOTSUP;
	return -rte_errno;
}

/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
{
	(void)dev;
	DRV_LOG(ERR, "DevX drop action is not supported yet.");
	rte_errno = ENOTSUP;
}

/**
 * Create the Tx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_devx_create_sq_attr attr = { 0 };
	struct mlx5_txq_obj *tmpl = txq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(tmpl);
	tmpl->type = MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN;
	tmpl->txq_ctrl = txq_ctrl;
	attr.hairpin = 1;
	attr.tis_lst_sz = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	attr.tis_num = priv->sh->tis->id;
	tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
	if (!tmpl->sq) {
		DRV_LOG(ERR,
			"Port %u tx hairpin queue %u can't create SQ object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
/**
 * Release DevX SQ resources.
 *
 * @param txq_obj
 *   DevX Tx queue object.
 */
static void
mlx5_txq_release_devx_sq_resources(struct mlx5_txq_obj *txq_obj)
{
	if (txq_obj->sq_devx)
		claim_zero(mlx5_devx_cmd_destroy(txq_obj->sq_devx));
	if (txq_obj->sq_umem)
		claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->sq_umem));
	if (txq_obj->sq_buf)
		mlx5_free(txq_obj->sq_buf);
	if (txq_obj->sq_dbrec_page)
		claim_zero(mlx5_release_dbr(&txq_obj->txq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id
						(txq_obj->sq_dbrec_page->umem),
					    txq_obj->sq_dbrec_offset));
}
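
/*
 * The DevX Tx queue helpers in this #ifdef block are compiled only when
 * HAVE_MLX5DV_DEVX_UAR_OFFSET is defined; otherwise mlx5_txq_devx_obj_new()
 * below rejects non-hairpin queues with "cannot create with DevX, no UAR".
 */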

/**
 * Release DevX Tx CQ resources.
 *
 * @param txq_obj
 *   DevX Tx queue object.
 */
static void
mlx5_txq_release_devx_cq_resources(struct mlx5_txq_obj *txq_obj)
{
	if (txq_obj->cq_devx)
		claim_zero(mlx5_devx_cmd_destroy(txq_obj->cq_devx));
	if (txq_obj->cq_umem)
		claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->cq_umem));
	if (txq_obj->cq_buf)
		mlx5_free(txq_obj->cq_buf);
	if (txq_obj->cq_dbrec_page)
		claim_zero(mlx5_release_dbr(&txq_obj->txq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id
						(txq_obj->cq_dbrec_page->umem),
					    txq_obj->cq_dbrec_offset));
}

/**
 * Destroy the Tx queue DevX object.
 *
 * @param txq_obj
 *   Txq object to destroy.
 */
static void
mlx5_txq_release_devx_resources(struct mlx5_txq_obj *txq_obj)
{
	MLX5_ASSERT(txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_SQ);

	mlx5_txq_release_devx_cq_resources(txq_obj);
	mlx5_txq_release_devx_sq_resources(txq_obj);
}

/**
 * Create a DevX CQ object and its resources for a Tx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   Number of CQEs in CQ, 0 otherwise and rte_errno is set.
 */
static uint32_t
mlx5_txq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_cqe *cqe;
	size_t page_size;
	size_t alignment;
	uint32_t cqe_n;
	uint32_t i;
	int ret;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(txq_obj);
	page_size = rte_mem_page_size();
	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size.");
		rte_errno = ENOMEM;
		return 0;
	}
	/* Allocate memory buffer for CQEs. */
	alignment = MLX5_CQE_BUF_ALIGNMENT;
	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get CQE buf alignment.");
		rte_errno = ENOMEM;
		return 0;
	}
	/* Create the Completion Queue. */
	cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
		1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
	cqe_n = 1UL << log2above(cqe_n);
	if (cqe_n > UINT16_MAX) {
		DRV_LOG(ERR,
			"Port %u Tx queue %u requests too many CQEs %u.",
			dev->data->port_id, txq_data->idx, cqe_n);
		rte_errno = EINVAL;
		return 0;
	}
	txq_obj->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				      cqe_n * sizeof(struct mlx5_cqe),
				      alignment,
				      priv->sh->numa_node);
	if (!txq_obj->cq_buf) {
		DRV_LOG(ERR,
			"Port %u Tx queue %u cannot allocate memory (CQ).",
			dev->data->port_id, txq_data->idx);
		rte_errno = ENOMEM;
		return 0;
	}
	/* Register allocated buffer in user space with DevX. */
	txq_obj->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
						(void *)txq_obj->cq_buf,
						cqe_n * sizeof(struct mlx5_cqe),
						IBV_ACCESS_LOCAL_WRITE);
	if (!txq_obj->cq_umem) {
		rte_errno = errno;
		DRV_LOG(ERR,
			"Port %u Tx queue %u cannot register memory (CQ).",
			dev->data->port_id, txq_data->idx);
		goto error;
	}
	/* Allocate doorbell record for completion queue. */
	txq_obj->cq_dbrec_offset = mlx5_get_dbr(priv->sh->ctx,
						&priv->dbrpgs,
						&txq_obj->cq_dbrec_page);
	if (txq_obj->cq_dbrec_offset < 0) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
		goto error;
	}
	cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
			   MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar);
	cq_attr.eqn = priv->sh->eqn;
	cq_attr.q_umem_valid = 1;
	cq_attr.q_umem_offset = (uintptr_t)txq_obj->cq_buf % page_size;
	cq_attr.q_umem_id = mlx5_os_get_umem_id(txq_obj->cq_umem);
	cq_attr.db_umem_valid = 1;
	cq_attr.db_umem_offset = txq_obj->cq_dbrec_offset;
	cq_attr.db_umem_id = mlx5_os_get_umem_id(txq_obj->cq_dbrec_page->umem);
	cq_attr.log_cq_size = rte_log2_u32(cqe_n);
	cq_attr.log_page_size = rte_log2_u32(page_size);
	/* Create completion queue object with DevX. */
	txq_obj->cq_devx = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
	if (!txq_obj->cq_devx) {
		rte_errno = errno;
		DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
			dev->data->port_id, idx);
		goto error;
	}
	/* Initially fill CQ buffer with the invalid CQE opcode. */
	cqe = (struct mlx5_cqe *)txq_obj->cq_buf;
	for (i = 0; i < cqe_n; i++) {
		cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
		++cqe;
	}
	return cqe_n;
error:
	ret = rte_errno;
	mlx5_txq_release_devx_cq_resources(txq_obj);
	rte_errno = ret;
	return 0;
}
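
/*
 * Tx CQ sizing above expects one completion per MLX5_TX_COMP_THRESH
 * descriptors plus 1 + MLX5_TX_COMP_THRESH_INLINE_DIV of headroom, rounded
 * up to a power of two and bounded by UINT16_MAX.
 */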

/**
 * Create an SQ object and its resources using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   Number of WQEs in SQ, 0 otherwise and rte_errno is set.
 */
static uint32_t
mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
	struct mlx5_devx_create_sq_attr sq_attr = { 0 };
	size_t page_size;
	uint32_t wqe_n;
	int ret;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(txq_obj);
	page_size = rte_mem_page_size();
	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size.");
		rte_errno = ENOMEM;
		return 0;
	}
	wqe_n = RTE_MIN(1UL << txq_data->elts_n,
			(uint32_t)priv->sh->device_attr.max_qp_wr);
	txq_obj->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				      wqe_n * sizeof(struct mlx5_wqe),
				      page_size, priv->sh->numa_node);
	if (!txq_obj->sq_buf) {
		DRV_LOG(ERR,
			"Port %u Tx queue %u cannot allocate memory (SQ).",
			dev->data->port_id, txq_data->idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Register allocated buffer in user space with DevX. */
	txq_obj->sq_umem = mlx5_glue->devx_umem_reg
					(priv->sh->ctx,
					 (void *)txq_obj->sq_buf,
					 wqe_n * sizeof(struct mlx5_wqe),
					 IBV_ACCESS_LOCAL_WRITE);
	if (!txq_obj->sq_umem) {
		rte_errno = errno;
		DRV_LOG(ERR,
			"Port %u Tx queue %u cannot register memory (SQ).",
			dev->data->port_id, txq_data->idx);
		goto error;
	}
	/* Allocate doorbell record for send queue. */
111086d259ceSMichael Baum 	txq_obj->sq_dbrec_offset = mlx5_get_dbr(priv->sh->ctx,
111186d259ceSMichael Baum 						&priv->dbrpgs,
111286d259ceSMichael Baum 						&txq_obj->sq_dbrec_page);
111386d259ceSMichael Baum 	if (txq_obj->sq_dbrec_offset < 0) {
111486d259ceSMichael Baum 		rte_errno = errno;
111586d259ceSMichael Baum 		DRV_LOG(ERR, "Failed to allocate SQ doorbell.");
111686d259ceSMichael Baum 		goto error;
111786d259ceSMichael Baum 	}
111886d259ceSMichael Baum 	sq_attr.tis_lst_sz = 1;
111986d259ceSMichael Baum 	sq_attr.tis_num = priv->sh->tis->id;
112086d259ceSMichael Baum 	sq_attr.state = MLX5_SQC_STATE_RST;
112186d259ceSMichael Baum 	sq_attr.cqn = txq_obj->cq_devx->id;
112286d259ceSMichael Baum 	sq_attr.flush_in_error_en = 1;
112386d259ceSMichael Baum 	sq_attr.allow_multi_pkt_send_wqe = !!priv->config.mps;
112486d259ceSMichael Baum 	sq_attr.allow_swp = !!priv->config.swp;
112586d259ceSMichael Baum 	sq_attr.min_wqe_inline_mode = priv->config.hca_attr.vport_inline_mode;
112686d259ceSMichael Baum 	sq_attr.wq_attr.uar_page =
112786d259ceSMichael Baum 				mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar);
112886d259ceSMichael Baum 	sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
112986d259ceSMichael Baum 	sq_attr.wq_attr.pd = priv->sh->pdn;
113086d259ceSMichael Baum 	sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
1131*88f2e3f1SMichael Baum 	sq_attr.wq_attr.log_wq_sz = log2above(wqe_n);
113286d259ceSMichael Baum 	sq_attr.wq_attr.dbr_umem_valid = 1;
113386d259ceSMichael Baum 	sq_attr.wq_attr.dbr_addr = txq_obj->sq_dbrec_offset;
113486d259ceSMichael Baum 	sq_attr.wq_attr.dbr_umem_id =
113586d259ceSMichael Baum 			mlx5_os_get_umem_id(txq_obj->sq_dbrec_page->umem);
113686d259ceSMichael Baum 	sq_attr.wq_attr.wq_umem_valid = 1;
113786d259ceSMichael Baum 	sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(txq_obj->sq_umem);
113886d259ceSMichael Baum 	sq_attr.wq_attr.wq_umem_offset = (uintptr_t)txq_obj->sq_buf % page_size;
113986d259ceSMichael Baum 	/* Create Send Queue object with DevX. */
1140*88f2e3f1SMichael Baum 	txq_obj->sq_devx = mlx5_devx_cmd_create_sq(priv->sh->ctx, &sq_attr);
1141*88f2e3f1SMichael Baum 	if (!txq_obj->sq_devx) {
114286d259ceSMichael Baum 		rte_errno = errno;
114386d259ceSMichael Baum 		DRV_LOG(ERR, "Port %u Tx queue %u SQ creation failure.",
114486d259ceSMichael Baum 			dev->data->port_id, idx);
114586d259ceSMichael Baum 		goto error;
114686d259ceSMichael Baum 	}
1147*88f2e3f1SMichael Baum 	return wqe_n;
114886d259ceSMichael Baum error:
114986d259ceSMichael Baum 	ret = rte_errno;
1150*88f2e3f1SMichael Baum 	mlx5_txq_release_devx_sq_resources(txq_obj);
115186d259ceSMichael Baum 	rte_errno = ret;
1152*88f2e3f1SMichael Baum 	return 0;
115386d259ceSMichael Baum }
115486d259ceSMichael Baum #endif
115586d259ceSMichael Baum 
115686d259ceSMichael Baum /**
115786d259ceSMichael Baum  * Create the Tx queue DevX object.
115886d259ceSMichael Baum  *
115986d259ceSMichael Baum  * @param dev
116086d259ceSMichael Baum  *   Pointer to Ethernet device.
116186d259ceSMichael Baum  * @param idx
116286d259ceSMichael Baum  *   Queue index in DPDK Tx queue array.
116386d259ceSMichael Baum  *
116486d259ceSMichael Baum  * @return
1165f49f4483SMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
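 *
 * This entry point is normally reached through the DevX ops table
 * (devx_obj_ops.txq_obj_new, defined at the end of this file) rather
 * than called directly; a minimal sketch of the expected call site,
 * assuming the selected ops table is stored in priv->obj_ops:
 *
 * @code
 *	if (priv->obj_ops.txq_obj_new(dev, idx))
 *		return -rte_errno;
 * @endcode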
116686d259ceSMichael Baum  */
1167f49f4483SMichael Baum int
116886d259ceSMichael Baum mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
116986d259ceSMichael Baum {
117086d259ceSMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
117186d259ceSMichael Baum 	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
117286d259ceSMichael Baum 	struct mlx5_txq_ctrl *txq_ctrl =
117386d259ceSMichael Baum 			container_of(txq_data, struct mlx5_txq_ctrl, txq);
117486d259ceSMichael Baum 
117586d259ceSMichael Baum 	if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
117686d259ceSMichael Baum 		return mlx5_txq_obj_hairpin_new(dev, idx);
117786d259ceSMichael Baum #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
117886d259ceSMichael Baum 	DRV_LOG(ERR, "Port %u Tx queue %u cannot create with DevX, no UAR.",
117986d259ceSMichael Baum 		dev->data->port_id, idx);
118086d259ceSMichael Baum 	rte_errno = ENOMEM;
1181f49f4483SMichael Baum 	return -rte_errno;
118286d259ceSMichael Baum #else
118386d259ceSMichael Baum 	struct mlx5_dev_ctx_shared *sh = priv->sh;
118486d259ceSMichael Baum 	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
1185f49f4483SMichael Baum 	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
118686d259ceSMichael Baum 	void *reg_addr;
118786d259ceSMichael Baum 	uint32_t cqe_n;
1188*88f2e3f1SMichael Baum 	uint32_t wqe_n;
118986d259ceSMichael Baum 	int ret = 0;
119086d259ceSMichael Baum 
119186d259ceSMichael Baum 	MLX5_ASSERT(txq_data);
1192f49f4483SMichael Baum 	MLX5_ASSERT(txq_obj);
119386d259ceSMichael Baum 	txq_obj->type = MLX5_TXQ_OBJ_TYPE_DEVX_SQ;
119486d259ceSMichael Baum 	txq_obj->txq_ctrl = txq_ctrl;
119586d259ceSMichael Baum 	txq_obj->dev = dev;
1196*88f2e3f1SMichael Baum 	cqe_n = mlx5_txq_create_devx_cq_resources(dev, idx);
1197*88f2e3f1SMichael Baum 	if (!cqe_n) {
119886d259ceSMichael Baum 		rte_errno = errno;
119986d259ceSMichael Baum 		goto error;
120086d259ceSMichael Baum 	}
1201*88f2e3f1SMichael Baum 	txq_data->cqe_n = log2above(cqe_n);
1202*88f2e3f1SMichael Baum 	txq_data->cqe_s = 1 << txq_data->cqe_n;
120386d259ceSMichael Baum 	txq_data->cqe_m = txq_data->cqe_s - 1;
120486d259ceSMichael Baum 	txq_data->cqes = (volatile struct mlx5_cqe *)txq_obj->cq_buf;
120586d259ceSMichael Baum 	txq_data->cq_ci = 0;
120686d259ceSMichael Baum 	txq_data->cq_pi = 0;
120786d259ceSMichael Baum 	txq_data->cq_db = (volatile uint32_t *)(txq_obj->cq_dbrec_page->dbrs +
120886d259ceSMichael Baum 						txq_obj->cq_dbrec_offset);
120986d259ceSMichael Baum 	*txq_data->cq_db = 0;
121086d259ceSMichael Baum 	/* Create Send Queue object with DevX. */
1211*88f2e3f1SMichael Baum 	wqe_n = mlx5_txq_create_devx_sq_resources(dev, idx);
1212*88f2e3f1SMichael Baum 	if (!wqe_n) {
121386d259ceSMichael Baum 		rte_errno = errno;
121486d259ceSMichael Baum 		goto error;
121586d259ceSMichael Baum 	}
121686d259ceSMichael Baum 	/* Create the Work Queue. */
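	/*
	 * All ring bookkeeping below is power-of-two based: wqe_s is the
	 * ring size (1 << wqe_n) and wqe_m is the index mask, so a ring
	 * slot is reached with a cheap AND instead of a modulo, e.g.:
	 *
	 *	wqe = &txq_data->wqes[txq_data->wqe_ci & txq_data->wqe_m];
	 */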
1217*88f2e3f1SMichael Baum 	txq_data->wqe_n = log2above(wqe_n);
121886d259ceSMichael Baum 	txq_data->wqe_s = 1 << txq_data->wqe_n;
121986d259ceSMichael Baum 	txq_data->wqe_m = txq_data->wqe_s - 1;
122086d259ceSMichael Baum 	txq_data->wqes = (struct mlx5_wqe *)txq_obj->sq_buf;
122186d259ceSMichael Baum 	txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
122286d259ceSMichael Baum 	txq_data->wqe_ci = 0;
122386d259ceSMichael Baum 	txq_data->wqe_pi = 0;
122486d259ceSMichael Baum 	txq_data->wqe_comp = 0;
122586d259ceSMichael Baum 	txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
122686d259ceSMichael Baum 	txq_data->qp_db = (volatile uint32_t *)
122786d259ceSMichael Baum 				(txq_obj->sq_dbrec_page->dbrs +
122886d259ceSMichael Baum 				 txq_obj->sq_dbrec_offset +
122986d259ceSMichael Baum 				 MLX5_SND_DBR * sizeof(uint32_t));
123086d259ceSMichael Baum 	*txq_data->qp_db = 0;
123186d259ceSMichael Baum 	txq_data->qp_num_8s = txq_obj->sq_devx->id << 8;
123286d259ceSMichael Baum 	/* Change Send Queue state to Ready-to-Send. */
123386d259ceSMichael Baum 	msq_attr.sq_state = MLX5_SQC_STATE_RST;
123486d259ceSMichael Baum 	msq_attr.state = MLX5_SQC_STATE_RDY;
123586d259ceSMichael Baum 	ret = mlx5_devx_cmd_modify_sq(txq_obj->sq_devx, &msq_attr);
123686d259ceSMichael Baum 	if (ret) {
123786d259ceSMichael Baum 		rte_errno = errno;
123886d259ceSMichael Baum 		DRV_LOG(ERR,
123986d259ceSMichael Baum 			"Port %u Tx queue %u SQ state to SQC_STATE_RDY failed.",
124086d259ceSMichael Baum 			dev->data->port_id, idx);
124186d259ceSMichael Baum 		goto error;
124286d259ceSMichael Baum 	}
124386d259ceSMichael Baum #ifdef HAVE_IBV_FLOW_DV_SUPPORT
124486d259ceSMichael Baum 	/*
124586d259ceSMichael Baum 	 * When using DevX, the TIS transport domain value must be queried
124686d259ceSMichael Baum 	 * and stored. This is done once per port.
124786d259ceSMichael Baum 	 * The value is used on Rx, when creating the matching TIR.
124886d259ceSMichael Baum 	 */
124986d259ceSMichael Baum 	if (!priv->sh->tdn)
125086d259ceSMichael Baum 		priv->sh->tdn = priv->sh->td->id;
125186d259ceSMichael Baum #endif
125286d259ceSMichael Baum 	MLX5_ASSERT(sh->tx_uar);
125386d259ceSMichael Baum 	reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
125486d259ceSMichael Baum 	MLX5_ASSERT(reg_addr);
125586d259ceSMichael Baum 	txq_ctrl->bf_reg = reg_addr;
125686d259ceSMichael Baum 	txq_ctrl->uar_mmap_offset =
125786d259ceSMichael Baum 				mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
125886d259ceSMichael Baum 	txq_uar_init(txq_ctrl);
1259f49f4483SMichael Baum 	return 0;
126086d259ceSMichael Baum error:
126186d259ceSMichael Baum 	ret = rte_errno; /* Save rte_errno before cleanup. */
1262*88f2e3f1SMichael Baum 	mlx5_txq_release_devx_resources(txq_obj);
126386d259ceSMichael Baum 	rte_errno = ret; /* Restore rte_errno. */
1264f49f4483SMichael Baum 	return -rte_errno;
126586d259ceSMichael Baum #endif
126686d259ceSMichael Baum }
126786d259ceSMichael Baum 
126886d259ceSMichael Baum /**
126986d259ceSMichael Baum  * Release a Tx DevX queue object.
127086d259ceSMichael Baum  *
127186d259ceSMichael Baum  * @param txq_obj
127286d259ceSMichael Baum  *   DevX Tx queue object.
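 *
 * Handles both creation flavors: hairpin queues only own a TIS object,
 * while regular DevX queues release the whole SQ/CQ resource set,
 * mirroring mlx5_txq_devx_obj_new() above.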
127386d259ceSMichael Baum  */
127486d259ceSMichael Baum void
127586d259ceSMichael Baum mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj)
127686d259ceSMichael Baum {
127786d259ceSMichael Baum 	MLX5_ASSERT(txq_obj);
127886d259ceSMichael Baum 	if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN) {
127986d259ceSMichael Baum 		if (txq_obj->tis)
128086d259ceSMichael Baum 			claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
128186d259ceSMichael Baum #ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
128286d259ceSMichael Baum 	} else {
1283*88f2e3f1SMichael Baum 		mlx5_txq_release_devx_resources(txq_obj);
128486d259ceSMichael Baum #endif
128586d259ceSMichael Baum 	}
128686d259ceSMichael Baum }
128786d259ceSMichael Baum 
12888bb2410eSOphir Munk struct mlx5_obj_ops devx_obj_ops = {
12898bb2410eSOphir Munk 	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
12906deb19e1SMichael Baum 	.rxq_obj_new = mlx5_rxq_devx_obj_new,
129132287079SMichael Baum 	.rxq_event_get = mlx5_rx_devx_get_event,
1292c279f187SMichael Baum 	.rxq_obj_modify = mlx5_devx_modify_rq,
12936deb19e1SMichael Baum 	.rxq_obj_release = mlx5_rxq_devx_obj_release,
129425ae7f1aSMichael Baum 	.ind_table_new = mlx5_devx_ind_table_new,
129525ae7f1aSMichael Baum 	.ind_table_destroy = mlx5_devx_ind_table_destroy,
129685552726SMichael Baum 	.hrxq_new = mlx5_devx_hrxq_new,
129785552726SMichael Baum 	.hrxq_destroy = mlx5_devx_tir_destroy,
12980c762e81SMichael Baum 	.drop_action_create = mlx5_devx_drop_action_create,
12990c762e81SMichael Baum 	.drop_action_destroy = mlx5_devx_drop_action_destroy,
130086d259ceSMichael Baum 	.txq_obj_new = mlx5_txq_devx_obj_new,
130186d259ceSMichael Baum 	.txq_obj_release = mlx5_txq_devx_obj_release,
13028bb2410eSOphir Munk };
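
/*
 * devx_obj_ops is consumed at device probe time, where the driver picks
 * an object-ops backend per device. A minimal, hedged sketch of that
 * selection (the exact condition lives in the OS-specific probe code and
 * may differ; ibv_obj_ops is the Verbs counterpart):
 *
 *	if (config->devx)
 *		priv->obj_ops = devx_obj_ops;
 *	else
 *		priv->obj_ops = ibv_obj_ops;
 */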