/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common_devx.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_tx.h"
#include "mlx5_rx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"

/**
 * Modify RQ VLAN stripping offload.
 *
 * @param rxq_obj
 *   Rx queue object.
 * @param on
 *   Enable/disable VLAN stripping.
 *
 * @return
 *   0 on success, non-zero otherwise.
 */
static int
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	rq_attr.state = MLX5_RQC_STATE_RDY;
	rq_attr.vsd = (on ? 0 : 1);
	rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq_obj.rq, &rq_attr);
}

/**
 * Modify RQ using DevX API.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 * @param type
 *   Type of queue state change.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
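 *
 * @note
 *   The type argument is expected to be one of the MLX5_RXQ_MOD_* values;
 *   any other value falls through to the default case below and the modify
 *   command is issued with zeroed (reset) state fields.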
 */
static int
mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	switch (type) {
	case MLX5_RXQ_MOD_ERR2RST:
		rq_attr.rq_state = MLX5_RQC_STATE_ERR;
		rq_attr.state = MLX5_RQC_STATE_RST;
		break;
	case MLX5_RXQ_MOD_RST2RDY:
		rq_attr.rq_state = MLX5_RQC_STATE_RST;
		rq_attr.state = MLX5_RQC_STATE_RDY;
		break;
	case MLX5_RXQ_MOD_RDY2ERR:
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_ERR;
		break;
	case MLX5_RXQ_MOD_RDY2RST:
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_RST;
		break;
	default:
		break;
	}
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq_obj.rq, &rq_attr);
}

/**
 * Modify SQ using DevX API.
 *
 * @param obj
 *   DevX Tx queue object.
 * @param type
 *   Type of queue state change.
 * @param dev_port
 *   Unused; kept to match the Verbs callback prototype.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_modify_sq(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
		    uint8_t dev_port)
{
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	int ret;

	if (type != MLX5_TXQ_MOD_RST2RDY) {
		/* Change queue state to reset. */
		if (type == MLX5_TXQ_MOD_ERR2RDY)
			msq_attr.sq_state = MLX5_SQC_STATE_ERR;
		else
			msq_attr.sq_state = MLX5_SQC_STATE_RDY;
		msq_attr.state = MLX5_SQC_STATE_RST;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_obj.sq, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to RESET"
				" %s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	if (type != MLX5_TXQ_MOD_RDY2RST) {
		/*
		 * Change queue state to ready.
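		 * A queue recovering from error has already been moved to
		 * reset by the step above, so this transition always starts
		 * from the reset state.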
		 */
		msq_attr.sq_state = MLX5_SQC_STATE_RST;
		msq_attr.state = MLX5_SQC_STATE_RDY;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_obj.sq, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to READY"
				" %s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	/*
	 * The dev_port argument is meaningful only in the Verbs API.
	 * This function and its Verbs counterpart are installed behind the
	 * same callback pointer, so they must share one prototype.
	 */
	(void)dev_port;
	return 0;
}

/**
 * Destroy the Rx queue DevX object.
 *
 * @param rxq_obj
 *   Rxq object to destroy.
 */
static void
mlx5_rxq_release_devx_resources(struct mlx5_rxq_obj *rxq_obj)
{
	mlx5_devx_rq_destroy(&rxq_obj->rq_obj);
	memset(&rxq_obj->rq_obj, 0, sizeof(rxq_obj->rq_obj));
	mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
	memset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));
}

/**
 * Release an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
	MLX5_ASSERT(rxq_obj);
	if (rxq_obj->rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
		MLX5_ASSERT(rxq_obj->rq);
		mlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_RDY2RST);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
	} else {
		MLX5_ASSERT(rxq_obj->cq_obj.cq);
		MLX5_ASSERT(rxq_obj->rq_obj.rq);
		mlx5_rxq_release_devx_resources(rxq_obj);
		if (rxq_obj->devx_channel)
			mlx5_os_devx_destroy_event_channel
							(rxq_obj->devx_channel);
	}
}

/**
 * Get event for an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
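 *
 * @note
 *   Events arrive on the DevX event channel created in
 *   mlx5_rxq_devx_obj_new(); the CQ pointer serves as the subscription
 *   cookie and is verified on reception.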
 */
static int
mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
{
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
					    &out.event_resp,
					    sizeof(out.buf));

	if (ret < 0) {
		rte_errno = errno;
		return -rte_errno;
	}
	if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->cq_obj.cq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
#else
	(void)rxq_obj;
	rte_errno = ENOTSUP;
	return -rte_errno;
#endif /* HAVE_IBV_DEVX_EVENT */
}

/**
 * Create an RQ object using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
	uint16_t log_desc_n = rxq_data->elts_n - rxq_data->sges_n;
	uint32_t wqe_size, log_wqe_size;

	/* Fill RQ attributes. */
	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
	rq_attr.flush_in_error_en = 1;
	rq_attr.vsd = (rxq_data->vlan_strip) ? 0 : 1;
	rq_attr.cqn = rxq_ctrl->obj->cq_obj.cq->id;
	rq_attr.scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
	/* Fill WQ attributes for this RQ. */
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
		/*
		 * Number of strides in each WQE:
		 * 512*2^single_wqe_log_num_of_strides.
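		 * The PRM field is an offset from the hardware minimum,
		 * hence the subtraction of
		 * MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES (log2 of the
		 * 512-stride minimum) below.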
		 */
		rq_attr.wq_attr.single_wqe_log_num_of_strides =
				rxq_data->strd_num_n -
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
		rq_attr.wq_attr.single_stride_log_num_of_bytes =
				rxq_data->strd_sz_n -
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
		wqe_size = sizeof(struct mlx5_wqe_mprq);
	} else {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
		wqe_size = sizeof(struct mlx5_wqe_data_seg);
	}
	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
	wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
	rq_attr.wq_attr.log_wq_sz = log_desc_n;
	rq_attr.wq_attr.end_padding_mode = priv->config.hw_padding ?
						MLX5_WQ_END_PAD_MODE_ALIGN :
						MLX5_WQ_END_PAD_MODE_NONE;
	rq_attr.wq_attr.pd = priv->sh->pdn;
	rq_attr.counter_set_id = priv->counter_set_id;
	/* Create RQ using DevX API. */
	return mlx5_devx_rq_create(priv->sh->ctx, &rxq_ctrl->obj->rq_obj,
				   wqe_size, log_desc_n, &rq_attr,
				   rxq_ctrl->socket);
}

/**
 * Create a DevX CQ object for an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
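 *
 * @note
 *   CQE compression is programmed here when enabled in the device
 *   configuration; it is skipped when HW timestamping or LRO is active
 *   on the queue.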
 */
static int
mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_devx_cq *cq_obj = NULL;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
	uint32_t log_cqe_n;
	uint16_t event_nums[1] = { 0 };
	int ret = 0;

	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
	    !rxq_data->lro) {
		cq_attr.cqe_comp_en = 1u;
		rxq_data->mcqe_format = priv->config.cqe_comp_fmt;
		rxq_data->byte_mask = UINT32_MAX;
		switch (priv->config.cqe_comp_fmt) {
		case MLX5_CQE_RESP_FORMAT_HASH:
			/* fallthrough */
		case MLX5_CQE_RESP_FORMAT_CSUM:
			/*
			 * Select CSUM miniCQE format only for non-vectorized
			 * MPRQ Rx burst, use HASH miniCQE format for others.
			 */
			if (mlx5_rxq_check_vec_support(rxq_data) < 0 &&
			    mlx5_rxq_mprq_enabled(rxq_data))
				cq_attr.mini_cqe_res_format =
					MLX5_CQE_RESP_FORMAT_CSUM_STRIDX;
			else
				cq_attr.mini_cqe_res_format =
					MLX5_CQE_RESP_FORMAT_HASH;
			rxq_data->mcqe_format = cq_attr.mini_cqe_res_format;
			break;
		case MLX5_CQE_RESP_FORMAT_FTAG_STRIDX:
			rxq_data->byte_mask = MLX5_LEN_WITH_MARK_MASK;
			/* fallthrough */
		case MLX5_CQE_RESP_FORMAT_CSUM_STRIDX:
			cq_attr.mini_cqe_res_format = priv->config.cqe_comp_fmt;
			break;
		case MLX5_CQE_RESP_FORMAT_L34H_STRIDX:
			cq_attr.mini_cqe_res_format = 0;
			cq_attr.mini_cqe_res_format_ext = 1;
			break;
		}
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is enabled, format %d.",
			dev->data->port_id, priv->config.cqe_comp_fmt);
		/*
		 * For vectorized Rx, it must not be doubled in order to
		 * make cq_ci and rq_ci aligned.
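		 * The doubling below therefore applies only when the
		 * vectorized Rx burst is not selected for this queue.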
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cqe_n *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for HW"
			" timestamp.",
			dev->data->port_id);
	} else if (priv->config.cqe_comp && rxq_data->lro) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for LRO.",
			dev->data->port_id);
	}
	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->devx_rx_uar);
	log_cqe_n = log2above(cqe_n);
	/* Create CQ using DevX API. */
	ret = mlx5_devx_cq_create(sh->ctx, &rxq_ctrl->obj->cq_obj, log_cqe_n,
				  &cq_attr, sh->numa_node);
	if (ret)
		return ret;
	cq_obj = &rxq_ctrl->obj->cq_obj;
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])
							(uintptr_t)cq_obj->cqes;
	rxq_data->cq_db = cq_obj->db_rec;
	rxq_data->cq_uar = mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar);
	rxq_data->cqe_n = log_cqe_n;
	rxq_data->cqn = cq_obj->cq->id;
	if (rxq_ctrl->obj->devx_channel) {
		ret = mlx5_os_devx_subscribe_devx_event
						(rxq_ctrl->obj->devx_channel,
						 cq_obj->cq->obj,
						 sizeof(event_nums),
						 event_nums,
						 (uint64_t)(uintptr_t)cq_obj->cq);
		if (ret) {
			DRV_LOG(ERR, "Failed to subscribe CQ to event channel.");
			ret = errno;
			mlx5_devx_cq_destroy(cq_obj);
			memset(cq_obj, 0, sizeof(*cq_obj));
			rte_errno = ret;
			return -ret;
		}
	}
	return 0;
}

/**
 * Create the Rx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
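 *
 * @note
 *   Only an RQ object is created for hairpin queues; the queue is then
 *   marked RTE_ETH_QUEUE_STATE_HAIRPIN instead of being started.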
 */
static int
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr attr = { 0 };
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	tmpl->rxq_ctrl = rxq_ctrl;
	attr.hairpin = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	attr.counter_set_id = priv->counter_set_id;
	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
					   rxq_ctrl->socket);
	if (!tmpl->rq) {
		DRV_LOG(ERR,
			"Port %u Rx hairpin queue %u can't create RQ object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return 0;
}

/**
 * Create the Rx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
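 *
 * @note
 *   Resources are created in order: event channel (only for IRQ mode),
 *   CQ, then RQ; on failure, whatever was already created is released
 *   through mlx5_rxq_devx_obj_release().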
 */
static int
mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	int ret = 0;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		return mlx5_rxq_obj_hairpin_new(dev, idx);
	tmpl->rxq_ctrl = rxq_ctrl;
	if (rxq_ctrl->irq) {
		int devx_ev_flag =
			  MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

		tmpl->devx_channel = mlx5_os_devx_create_event_channel
								(priv->sh->ctx,
								 devx_ev_flag);
		if (!tmpl->devx_channel) {
			rte_errno = errno;
			DRV_LOG(ERR, "Failed to create event channel %d.",
				rte_errno);
			goto error;
		}
		tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
	}
	/* Create CQ using DevX API. */
	ret = mlx5_rxq_create_devx_cq_resources(dev, idx);
	if (ret) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto error;
	}
	/* Create RQ using DevX API. */
	ret = mlx5_rxq_create_devx_rq_resources(dev, idx);
	if (ret) {
		DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_devx_modify_rq(tmpl, MLX5_RXQ_MOD_RST2RDY);
	if (ret)
		goto error;
	rxq_data->wqes = (void *)(uintptr_t)tmpl->rq_obj.umem_buf;
	rxq_data->rq_db = (uint32_t *)(uintptr_t)tmpl->rq_obj.db_rec;
	rxq_data->cq_arm_sn = 0;
	rxq_data->cq_ci = 0;
	mlx5_rxq_initialize(rxq_data);
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	rxq_ctrl->wqn = tmpl->rq_obj.rq->id;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_rxq_devx_obj_release(tmpl);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Prepare RQT attribute structure for DevX RQT API.
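 * The queue list is repeated cyclically to pad the attribute array up
 * to the power-of-two RQT size.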
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param queues
 *   List of Rx queue indices.
 * @param queues_n
 *   Size of the queues array.
 *
 * @return
 *   The RQT attr object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_rqt_attr *
mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
				    const unsigned int log_n,
				    const uint16_t *queues,
				    const uint32_t queues_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
	const unsigned int rqt_n = 1 << log_n;
	unsigned int i, j;

	rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
			       rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
	if (!rqt_attr) {
		DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		return NULL;
	}
	rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
	rqt_attr->rqt_actual_size = rqt_n;
	for (i = 0; i != queues_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[queues[i]];
		struct mlx5_rxq_ctrl *rxq_ctrl =
				container_of(rxq, struct mlx5_rxq_ctrl, rxq);

		rqt_attr->rq_list[i] = rxq_ctrl->obj->rq_obj.rq->id;
	}
	MLX5_ASSERT(i > 0);
	for (j = 0; i != rqt_n; ++j, ++i)
		rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
	return rqt_attr;
}

/**
 * Create RQT using DevX API as a field of indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
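 *
 * @note
 *   The RQT attributes are only needed for the duration of the create
 *   command and are freed before returning.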
 */
static int
mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
			struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
						       ind_tbl->queues,
						       ind_tbl->queues_n);
	if (!rqt_attr)
		return -rte_errno;
	ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);
	mlx5_free(rqt_attr);
	if (!ind_tbl->rqt) {
		DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
			dev->data->port_id);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

/**
 * Modify RQT using DevX API as a field of indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param queues
 *   List of Rx queue indices.
 * @param queues_n
 *   Size of the queues array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_modify(struct rte_eth_dev *dev, const unsigned int log_n,
			   const uint16_t *queues, const uint32_t queues_n,
			   struct mlx5_ind_table_obj *ind_tbl)
{
	int ret = 0;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
						       queues,
						       queues_n);
	if (!rqt_attr)
		return -rte_errno;
	ret = mlx5_devx_cmd_modify_rqt(ind_tbl->rqt, rqt_attr);
	mlx5_free(rqt_attr);
	if (ret)
		DRV_LOG(ERR, "Port %u cannot modify DevX RQT.",
			dev->data->port_id);
	return ret;
}

/**
 * Destroy the DevX RQT object.
 *
 * @param ind_tbl
 *   Indirection table to release.
 */
static void
mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
	claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
}

/**
 * Set TIR attribute struct with relevant input values.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] rss_key
 *   RSS key for the Rx hash queue.
 * @param[in] hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param[in] ind_tbl
 *   Indirection table for TIR.
 * @param[in] tunnel
 *   Tunnel type.
 * @param[out] tir_attr
 *   Parameters structure for TIR creation/modification.
 */
static void
mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
		       uint64_t hash_fields,
		       const struct mlx5_ind_table_obj *ind_tbl,
		       int tunnel, struct mlx5_devx_tir_attr *tir_attr)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	enum mlx5_rxq_type rxq_obj_type = rxq_ctrl->type;
	bool lro = true;
	uint32_t i;

	/* Enable TIR LRO only if all the queues were configured for it. */
	for (i = 0; i < ind_tbl->queues_n; ++i) {
		if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
			lro = false;
			break;
		}
	}
	memset(tir_attr, 0, sizeof(*tir_attr));
	tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
	tir_attr->rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
	tir_attr->tunneled_offload_en = !!tunnel;
	/* If needed, translate hash_fields bitmap to PRM format. */
	if (hash_fields) {
		struct mlx5_rx_hash_field_select *rx_hash_field_select =
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
			hash_fields & IBV_RX_HASH_INNER ?
				&tir_attr->rx_hash_field_selector_inner :
#endif
				&tir_attr->rx_hash_field_selector_outer;
		/*
		 * 1 bit: 0: IPv4, 1: IPv6.
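		 * The selector can name only one L3 and one L4 protocol,
		 * so IPv6 and UDP are encoded as single-bit flags against
		 * the IPv4/TCP defaults.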
		 */
		rx_hash_field_select->l3_prot_type =
			!!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
		/* 1 bit: 0: TCP, 1: UDP. */
		rx_hash_field_select->l4_prot_type =
			!!(hash_fields & MLX5_UDP_IBV_RX_HASH);
		/* Bitmask which sets which fields to use in RX Hash. */
		rx_hash_field_select->selected_fields =
			((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
			(!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
			(!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
			(!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
	}
	if (rxq_obj_type == MLX5_RXQ_TYPE_HAIRPIN)
		tir_attr->transport_domain = priv->sh->td->id;
	else
		tir_attr->transport_domain = priv->sh->tdn;
	memcpy(tir_attr->rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	tir_attr->indirect_table = ind_tbl->rqt->id;
	if (dev->data->dev_conf.lpbk_mode)
		tir_attr->self_lb_block =
					MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
	if (lro) {
		tir_attr->lro_timeout_period_usecs = priv->config.lro.timeout;
		tir_attr->lro_max_msg_sz = priv->max_lro_msg_size;
		tir_attr->lro_enable_mask =
				MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
				MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
	}
}

/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Pointer to Rx Hash queue.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
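 *
 * @note
 *   When flow DV support is compiled in, a destination TIR flow action
 *   is created together with the TIR itself.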
 */
static int
mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		   int tunnel __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_tir_attr tir_attr = {0};
	int err;

	mlx5_devx_tir_attr_set(dev, hrxq->rss_key, hrxq->hash_fields,
			       hrxq->ind_table, tunnel, &tir_attr);
	hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
	if (!hrxq->tir) {
		DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
			dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	if (mlx5_flow_os_create_flow_action_dest_devx_tir(hrxq->tir,
							  &hrxq->action)) {
		rte_errno = errno;
		goto error;
	}
#endif
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	if (hrxq->tir)
		claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Destroy a DevX TIR object.
 *
 * @param hrxq
 *   Hash Rx queue to release its TIR.
 */
static void
mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
{
	claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
}

/**
 * Modify an Rx Hash queue configuration.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Hash Rx queue to modify.
 * @param rss_key
 *   RSS key for the Rx hash queue.
 * @param hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param[in] ind_tbl
 *   Indirection table for TIR.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
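 *
 * @note
 *   Only the hash and indirection-table bits of the modify bitmask can
 *   be set here; the tunnel type cannot be modified and is passed as 0.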
 */
static int
mlx5_devx_hrxq_modify(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		      const uint8_t *rss_key,
		      uint64_t hash_fields,
		      const struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_devx_modify_tir_attr modify_tir = {0};

	/*
	 * Fields not tested for modification:
	 * - rx_hash_symmetric, not set in hrxq_new(),
	 * - rx_hash_fn, hard-coded in hrxq_new(),
	 * - lro_xxx, not set after rxq setup.
	 */
	if (ind_tbl != hrxq->ind_table)
		modify_tir.modify_bitmask |=
			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_INDIRECT_TABLE;
	if (hash_fields != hrxq->hash_fields ||
	    memcmp(hrxq->rss_key, rss_key, MLX5_RSS_HASH_KEY_LEN))
		modify_tir.modify_bitmask |=
			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_HASH;
	mlx5_devx_tir_attr_set(dev, rss_key, hash_fields, ind_tbl,
			       0, /* N/A - tunnel modification unsupported */
			       &modify_tir.tir);
	modify_tir.tirn = hrxq->tir->id;
	if (mlx5_devx_cmd_modify_tir(hrxq->tir, &modify_tir)) {
		DRV_LOG(ERR, "Port %u cannot modify DevX TIR.",
			dev->data->port_id);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

/**
 * Create a DevX drop action for Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
{
	(void)dev;
	DRV_LOG(ERR, "DevX drop action is not supported yet.");
	rte_errno = ENOTSUP;
	return -rte_errno;
}

/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
{
	(void)dev;
	DRV_LOG(ERR, "DevX drop action is not supported yet.");
	rte_errno = ENOTSUP;
}

/**
 * Create the Tx hairpin queue object.
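 * Only an SQ object bound to the shared TIS is created; the data and
 * packet sizes are derived from the log_hp_size device argument when
 * it is set.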
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_devx_create_sq_attr attr = { 0 };
	struct mlx5_txq_obj *tmpl = txq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(tmpl);
	tmpl->txq_ctrl = txq_ctrl;
	attr.hairpin = 1;
	attr.tis_lst_sz = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	attr.tis_num = priv->sh->tis->id;
	tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
	if (!tmpl->sq) {
		DRV_LOG(ERR,
			"Port %u Tx hairpin queue %u can't create SQ object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
/**
 * Destroy the Tx queue DevX object.
 *
 * @param txq_obj
 *   Txq object to destroy.
 */
static void
mlx5_txq_release_devx_resources(struct mlx5_txq_obj *txq_obj)
{
	mlx5_devx_sq_destroy(&txq_obj->sq_obj);
	memset(&txq_obj->sq_obj, 0, sizeof(txq_obj->sq_obj));
	mlx5_devx_cq_destroy(&txq_obj->cq_obj);
	memset(&txq_obj->cq_obj, 0, sizeof(txq_obj->cq_obj));
}

/**
 * Create an SQ object and its resources using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
				  uint16_t log_desc_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
	struct mlx5_devx_create_sq_attr sq_attr = {
		.flush_in_error_en = 1,
		.allow_multi_pkt_send_wqe = !!priv->config.mps,
		.min_wqe_inline_mode = priv->config.hca_attr.vport_inline_mode,
		.allow_swp = !!priv->config.swp,
		.cqn = txq_obj->cq_obj.cq->id,
		.tis_lst_sz = 1,
		.tis_num = priv->sh->tis->id,
		.wq_attr = (struct mlx5_devx_wq_attr){
			.pd = priv->sh->pdn,
			.uar_page =
				 mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar),
		},
		.ts_format = mlx5_ts_format_conv(priv->sh->sq_ts_format),
	};
	/* Create Send Queue object with DevX. */
	return mlx5_devx_sq_create(priv->sh->ctx, &txq_obj->sq_obj, log_desc_n,
				   &sq_attr, priv->sh->numa_node);
}
#endif

/**
 * Create the Tx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
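 *
 * @note
 *   Requires DevX UAR support (HAVE_MLX5DV_DEVX_UAR_OFFSET) or a build
 *   without Verbs; otherwise creation fails with ENOMEM.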
 */
int
mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq_data, struct mlx5_txq_ctrl, txq);

	if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
		return mlx5_txq_obj_hairpin_new(dev, idx);
#if !defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) && defined(HAVE_INFINIBAND_VERBS_H)
	DRV_LOG(ERR, "Port %u Tx queue %u cannot create with DevX, no UAR.",
		     dev->data->port_id, idx);
	rte_errno = ENOMEM;
	return -rte_errno;
#else
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
	struct mlx5_devx_cq_attr cq_attr = {
		.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
	};
	void *reg_addr;
	uint32_t cqe_n, log_desc_n;
	uint32_t wqe_n, wqe_size;
	int ret = 0;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(txq_obj);
	txq_obj->txq_ctrl = txq_ctrl;
	txq_obj->dev = dev;
	cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
		1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
	log_desc_n = log2above(cqe_n);
	cqe_n = 1UL << log_desc_n;
	if (cqe_n > UINT16_MAX) {
		DRV_LOG(ERR, "Port %u Tx queue %u requests too many CQEs %u.",
			dev->data->port_id, txq_data->idx, cqe_n);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	/* Create completion queue object with DevX. */
	ret = mlx5_devx_cq_create(sh->ctx, &txq_obj->cq_obj, log_desc_n,
				  &cq_attr, priv->sh->numa_node);
	if (ret) {
		DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
			dev->data->port_id, idx);
		goto error;
	}
	txq_data->cqe_n = log_desc_n;
	txq_data->cqe_s = cqe_n;
	txq_data->cqe_m = txq_data->cqe_s - 1;
	txq_data->cqes = txq_obj->cq_obj.cqes;
	txq_data->cq_ci = 0;
	txq_data->cq_pi = 0;
	txq_data->cq_db = txq_obj->cq_obj.db_rec;
	*txq_data->cq_db = 0;
	/*
	 * Adjust the amount of WQEs depending on inline settings.
	 * The number of descriptors should be enough to handle
	 * the specified number of packets.
	 * If the queue is created with Verbs, rdma-core adjusts the
	 * queue size internally in mlx5_calc_sq_size(); do the same
	 * here for the queue created with DevX.
	 */
	wqe_size = txq_data->tso_en ?
		   RTE_ALIGN(txq_ctrl->max_tso_header, MLX5_WSEG_SIZE) : 0;
	wqe_size += sizeof(struct mlx5_wqe_cseg) +
		    sizeof(struct mlx5_wqe_eseg) +
		    sizeof(struct mlx5_wqe_dseg);
	if (txq_data->inlen_send)
		wqe_size = RTE_MAX(wqe_size, sizeof(struct mlx5_wqe_cseg) +
					     sizeof(struct mlx5_wqe_eseg) +
					     RTE_ALIGN(txq_data->inlen_send +
						       sizeof(uint32_t),
						       MLX5_WSEG_SIZE));
	wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
	/* Create Send Queue object with DevX. */
	wqe_n = RTE_MIN((1UL << txq_data->elts_n) * wqe_size,
			(uint32_t)priv->sh->device_attr.max_qp_wr);
	log_desc_n = log2above(wqe_n);
	ret = mlx5_txq_create_devx_sq_resources(dev, idx, log_desc_n);
	if (ret) {
		DRV_LOG(ERR, "Port %u Tx queue %u SQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = errno;
		goto error;
	}
	/* Create the Work Queue. */
	txq_data->wqe_n = log_desc_n;
	txq_data->wqe_s = 1 << txq_data->wqe_n;
	txq_data->wqe_m = txq_data->wqe_s - 1;
	txq_data->wqes = (struct mlx5_wqe *)(uintptr_t)txq_obj->sq_obj.wqes;
	txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
	txq_data->wqe_ci = 0;
	txq_data->wqe_pi = 0;
	txq_data->wqe_comp = 0;
	txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
	txq_data->qp_db = &txq_obj->sq_obj.db_rec[MLX5_SND_DBR];
	*txq_data->qp_db = 0;
	txq_data->qp_num_8s = txq_obj->sq_obj.sq->id << 8;
	/* Change Send Queue state to Ready-to-Send. */
	ret = mlx5_devx_modify_sq(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0);
	if (ret) {
		rte_errno = errno;
		DRV_LOG(ERR,
			"Port %u Tx queue %u SQ state to SQC_STATE_RDY failed.",
			dev->data->port_id, idx);
		goto error;
	}
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	/*
	 * If using DevX need to query and store TIS transport domain value.
	 * This is done once per port.
	 * Will use this value on Rx, when creating matching TIR.
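	 * (mlx5_devx_tir_attr_set() uses it as the TIR transport domain
	 * for non-hairpin queues.)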
	 */
	if (!priv->sh->tdn)
		priv->sh->tdn = priv->sh->td->id;
#endif
	MLX5_ASSERT(sh->tx_uar);
	reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
	MLX5_ASSERT(reg_addr);
	txq_ctrl->bf_reg = reg_addr;
	txq_ctrl->uar_mmap_offset =
				mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
	txq_uar_init(txq_ctrl);
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_txq_release_devx_resources(txq_obj);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
#endif
}

/**
 * Release a Tx DevX queue object.
 *
 * @param txq_obj
 *   DevX Tx queue object.
 */
void
mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj)
{
	MLX5_ASSERT(txq_obj);
	if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
		if (txq_obj->tis)
			claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
	} else {
		mlx5_txq_release_devx_resources(txq_obj);
#endif
	}
}

struct mlx5_obj_ops devx_obj_ops = {
	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
	.rxq_obj_new = mlx5_rxq_devx_obj_new,
	.rxq_event_get = mlx5_rx_devx_get_event,
	.rxq_obj_modify = mlx5_devx_modify_rq,
	.rxq_obj_release = mlx5_rxq_devx_obj_release,
	.ind_table_new = mlx5_devx_ind_table_new,
	.ind_table_modify = mlx5_devx_ind_table_modify,
	.ind_table_destroy = mlx5_devx_ind_table_destroy,
	.hrxq_new = mlx5_devx_hrxq_new,
	.hrxq_destroy = mlx5_devx_tir_destroy,
	.hrxq_modify = mlx5_devx_hrxq_modify,
	.drop_action_create = mlx5_devx_drop_action_create,
	.drop_action_destroy = mlx5_devx_drop_action_destroy,
	.txq_obj_new = mlx5_txq_devx_obj_new,
	.txq_obj_modify = mlx5_devx_modify_sq,
	.txq_obj_release = mlx5_txq_devx_obj_release,
	.lb_dummy_queue_create = NULL,
	.lb_dummy_queue_release = NULL,
};