/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common_devx.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_tx.h"
#include "mlx5_rx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"

/**
 * Modify RQ VLAN stripping offload.
 *
 * @param rxq_obj
 *   Rx queue object.
 * @param on
 *   Enable/disable VLAN stripping.
 *
 * @return
 *   0 on success, non-0 otherwise.
 */
static int
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	rq_attr.state = MLX5_RQC_STATE_RDY;
	rq_attr.vsd = (on ? 0 : 1);
	rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq_obj.rq, &rq_attr);
}

/**
 * Modify RQ using DevX API.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 * @param type
 *   Type of queue state change.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
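	/*
	 * Each modification type maps to one legal RQ state transition:
	 * ERR2RST is used for queue recovery, RST2RDY when starting the
	 * queue, RDY2ERR to force an error state and RDY2RST when
	 * stopping it. Any other type leaves rq_attr zeroed.
	 */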
	switch (type) {
	case MLX5_RXQ_MOD_ERR2RST:
		rq_attr.rq_state = MLX5_RQC_STATE_ERR;
		rq_attr.state = MLX5_RQC_STATE_RST;
		break;
	case MLX5_RXQ_MOD_RST2RDY:
		rq_attr.rq_state = MLX5_RQC_STATE_RST;
		rq_attr.state = MLX5_RQC_STATE_RDY;
		break;
	case MLX5_RXQ_MOD_RDY2ERR:
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_ERR;
		break;
	case MLX5_RXQ_MOD_RDY2RST:
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_RST;
		break;
	default:
		break;
	}
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq_obj.rq, &rq_attr);
}

/**
 * Modify SQ using DevX API.
 *
 * @param obj
 *   DevX Tx queue object.
 * @param type
 *   Type of queue state change.
 * @param dev_port
 *   Unused, present only to match the Verbs counterpart's prototype.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_txq_devx_modify(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
		     uint8_t dev_port)
{
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	int ret;

	if (type != MLX5_TXQ_MOD_RST2RDY) {
		/* Change queue state to reset. */
		if (type == MLX5_TXQ_MOD_ERR2RDY)
			msq_attr.sq_state = MLX5_SQC_STATE_ERR;
		else
			msq_attr.sq_state = MLX5_SQC_STATE_RDY;
		msq_attr.state = MLX5_SQC_STATE_RST;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_obj.sq, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to RESET"
				" %s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
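	/*
	 * Unless the target state is RST, finish with a RST -> RDY
	 * command: for RST2RDY the queue is already in RST, and for
	 * ERR2RDY it was just moved to RST above.
	 */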
	if (type != MLX5_TXQ_MOD_RDY2RST) {
		/* Change queue state to ready. */
		msq_attr.sq_state = MLX5_SQC_STATE_RST;
		msq_attr.state = MLX5_SQC_STATE_RDY;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_obj.sq, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to READY"
				" %s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	/*
	 * The dev_port variable is relevant only in the Verbs API; this
	 * function and its Verbs counterpart are assigned to the same
	 * function pointer, so they must share the same prototype.
	 */
	(void)dev_port;
	return 0;
}

/**
 * Destroy the Rx queue DevX object.
 *
 * @param rxq_obj
 *   Rxq object to destroy.
 */
static void
mlx5_rxq_release_devx_resources(struct mlx5_rxq_obj *rxq_obj)
{
	mlx5_devx_rq_destroy(&rxq_obj->rq_obj);
	memset(&rxq_obj->rq_obj, 0, sizeof(rxq_obj->rq_obj));
	mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
	memset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));
}

/**
 * Release an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
	MLX5_ASSERT(rxq_obj);
	if (rxq_obj->rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
		MLX5_ASSERT(rxq_obj->rq);
		mlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_RDY2RST);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
	} else {
		MLX5_ASSERT(rxq_obj->cq_obj.cq);
		MLX5_ASSERT(rxq_obj->rq_obj.rq);
		mlx5_rxq_release_devx_resources(rxq_obj);
		if (rxq_obj->devx_channel)
			mlx5_os_devx_destroy_event_channel
							(rxq_obj->devx_channel);
	}
}

/**
 * Get event for an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
{
#ifdef HAVE_IBV_DEVX_EVENT
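	/*
	 * The response buffer is sized past the bare event header in case
	 * the event carries additional payload; only the cookie in the
	 * header is inspected below.
	 */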
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
					    &out.event_resp,
					    sizeof(out.buf));

	if (ret < 0) {
		rte_errno = errno;
		return -rte_errno;
	}
	if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->cq_obj.cq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
#else
	(void)rxq_obj;
	rte_errno = ENOTSUP;
	return -rte_errno;
#endif /* HAVE_IBV_DEVX_EVENT */
}

/**
 * Create an RQ object using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_common_device *cdev = priv->sh->cdev;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
	uint16_t log_desc_n = rxq_data->elts_n - rxq_data->sges_n;
	uint32_t wqe_size, log_wqe_size;

	/* Fill RQ attributes. */
	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
	rq_attr.flush_in_error_en = 1;
	rq_attr.vsd = (rxq_data->vlan_strip) ? 0 : 1;
	rq_attr.cqn = rxq_ctrl->obj->cq_obj.cq->id;
	rq_attr.scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
	rq_attr.ts_format =
		mlx5_ts_format_conv(cdev->config.hca_attr.rq_ts_format);
	/* Fill WQ attributes for this RQ. */
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
		/*
		 * Number of strides in each WQE:
		 * 512*2^single_wqe_log_num_of_strides.
		 */
		rq_attr.wq_attr.single_wqe_log_num_of_strides =
				rxq_data->strd_num_n -
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
		rq_attr.wq_attr.single_stride_log_num_of_bytes =
				rxq_data->strd_sz_n -
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
		wqe_size = sizeof(struct mlx5_wqe_mprq);
	} else {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
		wqe_size = sizeof(struct mlx5_wqe_data_seg);
	}
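	/*
	 * Scale for multi-segment scattering: each descriptor owns
	 * 2^sges_n WQE units, so the WQE stride grows by sges_n in log
	 * scale while the WQE count (log_desc_n above) was reduced by
	 * the same amount.
	 */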
	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
	wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
	rq_attr.wq_attr.log_wq_sz = log_desc_n;
	rq_attr.wq_attr.end_padding_mode = priv->config.hw_padding ?
					   MLX5_WQ_END_PAD_MODE_ALIGN :
					   MLX5_WQ_END_PAD_MODE_NONE;
	rq_attr.wq_attr.pd = cdev->pdn;
	rq_attr.counter_set_id = priv->counter_set_id;
	/* Create RQ using DevX API. */
	return mlx5_devx_rq_create(cdev->ctx, &rxq_ctrl->obj->rq_obj, wqe_size,
				   log_desc_n, &rq_attr, rxq_ctrl->socket);
}

/**
 * Create a DevX CQ object for an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_devx_cq *cq_obj = NULL;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
	uint32_t log_cqe_n;
	uint16_t event_nums[1] = { 0 };
	int ret = 0;

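	/*
	 * CQE compression is mutually exclusive with HW timestamping and
	 * LRO; a conflicting configuration is only logged below and the
	 * queue is created without compression.
	 */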
	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
	    !rxq_data->lro) {
		cq_attr.cqe_comp_en = 1u;
		rxq_data->mcqe_format = priv->config.cqe_comp_fmt;
		rxq_data->byte_mask = UINT32_MAX;
		switch (priv->config.cqe_comp_fmt) {
		case MLX5_CQE_RESP_FORMAT_HASH:
			/* fallthrough */
		case MLX5_CQE_RESP_FORMAT_CSUM:
			/*
			 * Select CSUM miniCQE format only for non-vectorized
			 * MPRQ Rx burst, use HASH miniCQE format for others.
			 */
			if (mlx5_rxq_check_vec_support(rxq_data) < 0 &&
			    mlx5_rxq_mprq_enabled(rxq_data))
				cq_attr.mini_cqe_res_format =
					MLX5_CQE_RESP_FORMAT_CSUM_STRIDX;
			else
				cq_attr.mini_cqe_res_format =
					MLX5_CQE_RESP_FORMAT_HASH;
			rxq_data->mcqe_format = cq_attr.mini_cqe_res_format;
			break;
		case MLX5_CQE_RESP_FORMAT_FTAG_STRIDX:
			rxq_data->byte_mask = MLX5_LEN_WITH_MARK_MASK;
			/* fallthrough */
		case MLX5_CQE_RESP_FORMAT_CSUM_STRIDX:
			cq_attr.mini_cqe_res_format = priv->config.cqe_comp_fmt;
			break;
		case MLX5_CQE_RESP_FORMAT_L34H_STRIDX:
			cq_attr.mini_cqe_res_format = 0;
			cq_attr.mini_cqe_res_format_ext = 1;
			break;
		}
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is enabled, format %d.",
			dev->data->port_id, priv->config.cqe_comp_fmt);
		/*
		 * For non-vectorized Rx the CQ size is doubled; for
		 * vectorized Rx it must not be, to keep cq_ci and rq_ci
		 * aligned.
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cqe_n *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for HW"
			" timestamp.",
			dev->data->port_id);
	} else if (priv->config.cqe_comp && rxq_data->lro) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for LRO.",
			dev->data->port_id);
	}
	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->devx_rx_uar);
	log_cqe_n = log2above(cqe_n);
	/* Create CQ using DevX API. */
	ret = mlx5_devx_cq_create(sh->cdev->ctx, &rxq_ctrl->obj->cq_obj,
				  log_cqe_n, &cq_attr, sh->numa_node);
	if (ret)
		return ret;
	cq_obj = &rxq_ctrl->obj->cq_obj;
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])
							(uintptr_t)cq_obj->cqes;
	rxq_data->cq_db = cq_obj->db_rec;
	rxq_data->cq_uar = mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar);
	rxq_data->cqe_n = log_cqe_n;
	rxq_data->cqn = cq_obj->cq->id;
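	/*
	 * If the queue was created with an IRQ, subscribe the new CQ to
	 * the event channel; the CQ pointer is registered as the cookie
	 * that mlx5_rx_devx_get_event() checks against incoming events.
	 */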
	if (rxq_ctrl->obj->devx_channel) {
		ret = mlx5_os_devx_subscribe_devx_event
					      (rxq_ctrl->obj->devx_channel,
					       cq_obj->cq->obj,
					       sizeof(event_nums),
					       event_nums,
					       (uint64_t)(uintptr_t)cq_obj->cq);
		if (ret) {
			DRV_LOG(ERR, "Failed to subscribe CQ to event channel.");
			ret = errno;
			mlx5_devx_cq_destroy(cq_obj);
			memset(cq_obj, 0, sizeof(*cq_obj));
			rte_errno = ret;
			return -ret;
		}
	}
	return 0;
}

/**
 * Create the Rx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr attr = { 0 };
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	tmpl->rxq_ctrl = rxq_ctrl;
	attr.hairpin = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
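	/*
	 * In log scale the packet count is the data size divided by the
	 * stride size, i.e. one packet is assumed to occupy one stride
	 * of 2^MLX5_HAIRPIN_QUEUE_STRIDE bytes of the hairpin buffer.
	 */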
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	attr.counter_set_id = priv->counter_set_id;
	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->cdev->ctx, &attr,
					   rxq_ctrl->socket);
	if (!tmpl->rq) {
		DRV_LOG(ERR,
			"Port %u Rx hairpin queue %u can't create rq object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return 0;
}

/**
 * Create the Rx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	int ret = 0;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		return mlx5_rxq_obj_hairpin_new(dev, idx);
	tmpl->rxq_ctrl = rxq_ctrl;
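	/*
	 * An event channel is created only when Rx interrupts are
	 * requested; its file descriptor is what the Rx interrupt
	 * handling later polls for completion events.
	 */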
	if (rxq_ctrl->irq) {
		int devx_ev_flag =
			  MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

		tmpl->devx_channel = mlx5_os_devx_create_event_channel
							  (priv->sh->cdev->ctx,
							   devx_ev_flag);
		if (!tmpl->devx_channel) {
			rte_errno = errno;
			DRV_LOG(ERR, "Failed to create event channel %d.",
				rte_errno);
			goto error;
		}
		tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
	}
	/* Create CQ using DevX API. */
	ret = mlx5_rxq_create_devx_cq_resources(dev, idx);
	if (ret) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto error;
	}
	/* Create RQ using DevX API. */
	ret = mlx5_rxq_create_devx_rq_resources(dev, idx);
	if (ret) {
		DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_devx_modify_rq(tmpl, MLX5_RXQ_MOD_RST2RDY);
	if (ret)
		goto error;
	rxq_data->wqes = (void *)(uintptr_t)tmpl->rq_obj.umem_buf;
	rxq_data->rq_db = (uint32_t *)(uintptr_t)tmpl->rq_obj.db_rec;
	rxq_data->cq_arm_sn = 0;
	rxq_data->cq_ci = 0;
	mlx5_rxq_initialize(rxq_data);
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	rxq_ctrl->wqn = tmpl->rq_obj.rq->id;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_rxq_devx_obj_release(tmpl);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Prepare RQT attribute structure for DevX RQT API.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param queues
 *   List of Rx queue indices.
 * @param queues_n
 *   Number of queues in the list.
 *
 * @return
 *   The RQT attr object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_rqt_attr *
mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
				    const unsigned int log_n,
				    const uint16_t *queues,
				    const uint32_t queues_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
	const unsigned int rqt_n = 1 << log_n;
	unsigned int i, j;

	rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
			       rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
	if (!rqt_attr) {
		DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		return NULL;
	}
	rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
	rqt_attr->rqt_actual_size = rqt_n;
	for (i = 0; i != queues_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[queues[i]];
		struct mlx5_rxq_ctrl *rxq_ctrl =
				container_of(rxq, struct mlx5_rxq_ctrl, rxq);

		rqt_attr->rq_list[i] = rxq_ctrl->obj->rq_obj.rq->id;
	}
	MLX5_ASSERT(i > 0);
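	/*
	 * The RQT must be populated up to its power-of-two actual size,
	 * so when fewer queues are supplied the list is repeated
	 * cyclically to pad the remaining entries.
	 */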
	for (j = 0; i != rqt_n; ++j, ++i)
		rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
	return rqt_attr;
}

/**
 * Create RQT using DevX API as a field of indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
			struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
						       ind_tbl->queues,
						       ind_tbl->queues_n);
	if (!rqt_attr)
		return -rte_errno;
	ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->cdev->ctx, rqt_attr);
	mlx5_free(rqt_attr);
	if (!ind_tbl->rqt) {
		DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
			dev->data->port_id);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

/**
 * Modify RQT using DevX API as a field of indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param queues
 *   List of Rx queue indices.
 * @param queues_n
 *   Number of queues in the list.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_modify(struct rte_eth_dev *dev, const unsigned int log_n,
			   const uint16_t *queues, const uint32_t queues_n,
			   struct mlx5_ind_table_obj *ind_tbl)
{
	int ret = 0;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
						       queues,
						       queues_n);
	if (!rqt_attr)
		return -rte_errno;
	ret = mlx5_devx_cmd_modify_rqt(ind_tbl->rqt, rqt_attr);
	mlx5_free(rqt_attr);
	if (ret)
		DRV_LOG(ERR, "Port %u cannot modify DevX RQT.",
			dev->data->port_id);
	return ret;
}

/**
 * Destroy the DevX RQT object.
 *
 * @param ind_tbl
 *   Indirection table to release.
 */
static void
mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
	claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
}

/**
 * Set TIR attribute struct with relevant input values.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] rss_key
 *   RSS key for the Rx hash queue.
 * @param[in] hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param[in] ind_tbl
 *   Indirection table for TIR.
 * @param[in] tunnel
 *   Tunnel type.
 * @param[out] tir_attr
 *   Parameters structure for TIR creation/modification.
 */
static void
mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
		       uint64_t hash_fields,
		       const struct mlx5_ind_table_obj *ind_tbl,
		       int tunnel, struct mlx5_devx_tir_attr *tir_attr)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	enum mlx5_rxq_type rxq_obj_type = rxq_ctrl->type;
	bool lro = true;
	uint32_t i;

	/* Enable TIR LRO only if all underlying queues were configured for LRO. */
	for (i = 0; i < ind_tbl->queues_n; ++i) {
		if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
			lro = false;
			break;
		}
	}
	memset(tir_attr, 0, sizeof(*tir_attr));
	tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
	tir_attr->rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
	tir_attr->tunneled_offload_en = !!tunnel;
	/* If needed, translate hash_fields bitmap to PRM format. */
	if (hash_fields) {
		struct mlx5_rx_hash_field_select *rx_hash_field_select =
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
			hash_fields & IBV_RX_HASH_INNER ?
				&tir_attr->rx_hash_field_selector_inner :
#endif
				&tir_attr->rx_hash_field_selector_outer;
		/* 1 bit: 0: IPv4, 1: IPv6. */
		rx_hash_field_select->l3_prot_type =
			!!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
		/* 1 bit: 0: TCP, 1: UDP. */
		rx_hash_field_select->l4_prot_type =
			!!(hash_fields & MLX5_UDP_IBV_RX_HASH);
		/* Bitmask which sets which fields to use in RX Hash. */
		rx_hash_field_select->selected_fields =
			((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
			(!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
			(!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
			(!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
	}
	if (rxq_obj_type == MLX5_RXQ_TYPE_HAIRPIN)
		tir_attr->transport_domain = priv->sh->td->id;
	else
		tir_attr->transport_domain = priv->sh->tdn;
	memcpy(tir_attr->rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	tir_attr->indirect_table = ind_tbl->rqt->id;
	if (dev->data->dev_conf.lpbk_mode)
		tir_attr->self_lb_block =
					MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
	if (lro) {
		tir_attr->lro_timeout_period_usecs = priv->config.lro.timeout;
		tir_attr->lro_max_msg_sz = priv->max_lro_msg_size;
		tir_attr->lro_enable_mask =
				MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
				MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
	}
}

/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Pointer to Rx Hash queue.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		   int tunnel __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_tir_attr tir_attr = {0};
	int err;

	mlx5_devx_tir_attr_set(dev, hrxq->rss_key, hrxq->hash_fields,
			       hrxq->ind_table, tunnel, &tir_attr);
	hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->cdev->ctx, &tir_attr);
	if (!hrxq->tir) {
		DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
			dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	if (mlx5_flow_os_create_flow_action_dest_devx_tir(hrxq->tir,
							  &hrxq->action)) {
		rte_errno = errno;
		goto error;
	}
#endif
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	if (hrxq->tir)
		claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Destroy a DevX TIR object.
 *
 * @param hrxq
 *   Hash Rx queue to release its tir.
 */
static void
mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
{
	claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
}

/**
 * Modify an Rx Hash queue configuration.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Hash Rx queue to modify.
 * @param rss_key
 *   RSS key for the Rx hash queue.
 * @param hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param[in] ind_tbl
 *   Indirection table for TIR.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_hrxq_modify(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		      const uint8_t *rss_key,
		      uint64_t hash_fields,
		      const struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_devx_modify_tir_attr modify_tir = {0};

	/*
	 * Untested modification fields:
	 * - rx_hash_symmetric, not set in hrxq_new(),
	 * - rx_hash_fn, hard-coded in hrxq_new(),
	 * - lro_xxx, not set after rxq setup.
	 */
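	/*
	 * Flag only what actually changed: the indirection table when a
	 * different table is supplied, and the hash configuration when
	 * the key or the hash fields differ.
	 */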
	if (ind_tbl != hrxq->ind_table)
		modify_tir.modify_bitmask |=
			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_INDIRECT_TABLE;
	if (hash_fields != hrxq->hash_fields ||
	    memcmp(hrxq->rss_key, rss_key, MLX5_RSS_HASH_KEY_LEN))
		modify_tir.modify_bitmask |=
			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_HASH;
	mlx5_devx_tir_attr_set(dev, rss_key, hash_fields, ind_tbl,
			       0, /* N/A - tunnel modification unsupported */
			       &modify_tir.tir);
	modify_tir.tirn = hrxq->tir->id;
	if (mlx5_devx_cmd_modify_tir(hrxq->tir, &modify_tir)) {
		DRV_LOG(ERR, "Port %u cannot modify DevX TIR.",
			dev->data->port_id);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

/**
 * Create a DevX drop action for Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
{
	(void)dev;
	DRV_LOG(ERR, "DevX drop action is not supported yet.");
	rte_errno = ENOTSUP;
	return -rte_errno;
}

/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
{
	(void)dev;
	DRV_LOG(ERR, "DevX drop action is not supported yet.");
	rte_errno = ENOTSUP;
}

/**
 * Select TXQ TIS number.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param queue_idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   TIS number to use for the Tx queue.
 */
static uint32_t
mlx5_get_txq_tis_num(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int tis_idx;

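	/*
	 * In TIS lag affinity mode, spread the Tx queues round-robin
	 * over the TISes of the bonded ports, starting from the port
	 * selected by lag_affinity_idx; otherwise use the default TIS 0.
	 */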
	if (priv->sh->bond.n_port && priv->sh->lag.affinity_mode ==
			MLX5_LAG_MODE_TIS) {
		tis_idx = (priv->lag_affinity_idx + queue_idx) %
			priv->sh->bond.n_port;
		DRV_LOG(INFO, "port %d txq %d gets affinity %d and maps to PF %d.",
			dev->data->port_id, queue_idx, tis_idx + 1,
			priv->sh->lag.tx_remap_affinity[tis_idx]);
	} else {
		tis_idx = 0;
	}
	MLX5_ASSERT(priv->sh->tis[tis_idx]);
	return priv->sh->tis[tis_idx]->id;
}

/**
 * Create the Tx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_devx_create_sq_attr attr = { 0 };
	struct mlx5_txq_obj *tmpl = txq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(tmpl);
	tmpl->txq_ctrl = txq_ctrl;
	attr.hairpin = 1;
	attr.tis_lst_sz = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;

	attr.tis_num = mlx5_get_txq_tis_num(dev, idx);
	tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->cdev->ctx, &attr);
	if (!tmpl->sq) {
		DRV_LOG(ERR,
			"Port %u Tx hairpin queue %u can't create SQ object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
/**
 * Destroy the Tx queue DevX object.
 *
 * @param txq_obj
 *   Txq object to destroy.
 */
static void
mlx5_txq_release_devx_resources(struct mlx5_txq_obj *txq_obj)
{
	mlx5_devx_sq_destroy(&txq_obj->sq_obj);
	memset(&txq_obj->sq_obj, 0, sizeof(txq_obj->sq_obj));
	mlx5_devx_cq_destroy(&txq_obj->cq_obj);
	memset(&txq_obj->cq_obj, 0, sizeof(txq_obj->cq_obj));
}

/**
 * Create an SQ object and its resources using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
				  uint16_t log_desc_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_common_device *cdev = priv->sh->cdev;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
	struct mlx5_devx_create_sq_attr sq_attr = {
		.flush_in_error_en = 1,
		.allow_multi_pkt_send_wqe = !!priv->config.mps,
		.min_wqe_inline_mode = priv->config.hca_attr.vport_inline_mode,
		.allow_swp = !!priv->config.swp,
		.cqn = txq_obj->cq_obj.cq->id,
		.tis_lst_sz = 1,
		.wq_attr = (struct mlx5_devx_wq_attr){
			.pd = cdev->pdn,
			.uar_page =
				mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar),
		},
		.ts_format =
			mlx5_ts_format_conv(cdev->config.hca_attr.sq_ts_format),
		.tis_num = mlx5_get_txq_tis_num(dev, idx),
	};

	/* Create Send Queue object with DevX. */
	return mlx5_devx_sq_create(cdev->ctx, &txq_obj->sq_obj,
				   log_desc_n, &sq_attr, priv->sh->numa_node);
}
#endif

/**
 * Create the Tx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq_data, struct mlx5_txq_ctrl, txq);

	if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
		return mlx5_txq_obj_hairpin_new(dev, idx);
#if !defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) && defined(HAVE_INFINIBAND_VERBS_H)
	DRV_LOG(ERR, "Port %u Tx queue %u cannot create with DevX, no UAR.",
		dev->data->port_id, idx);
	rte_errno = ENOMEM;
	return -rte_errno;
#else
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
	struct mlx5_devx_cq_attr cq_attr = {
		.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
	};
	void *reg_addr;
	uint32_t cqe_n, log_desc_n;
	uint32_t wqe_n, wqe_size;
	int ret = 0;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(txq_obj);
	txq_obj->txq_ctrl = txq_ctrl;
	txq_obj->dev = dev;
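	/*
	 * A completion is requested roughly once per MLX5_TX_COMP_THRESH
	 * descriptors, so the CQ can be much smaller than the SQ; extra
	 * room is reserved for the inline completion threshold.
	 */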
	cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
		1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
	log_desc_n = log2above(cqe_n);
	cqe_n = 1UL << log_desc_n;
	if (cqe_n > UINT16_MAX) {
		DRV_LOG(ERR, "Port %u Tx queue %u requests too many CQEs %u.",
			dev->data->port_id, txq_data->idx, cqe_n);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	/* Create completion queue object with DevX. */
	ret = mlx5_devx_cq_create(sh->cdev->ctx, &txq_obj->cq_obj, log_desc_n,
				  &cq_attr, priv->sh->numa_node);
	if (ret) {
		DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
			dev->data->port_id, idx);
		goto error;
	}
	txq_data->cqe_n = log_desc_n;
	txq_data->cqe_s = cqe_n;
	txq_data->cqe_m = txq_data->cqe_s - 1;
	txq_data->cqes = txq_obj->cq_obj.cqes;
	txq_data->cq_ci = 0;
	txq_data->cq_pi = 0;
	txq_data->cq_db = txq_obj->cq_obj.db_rec;
	*txq_data->cq_db = 0;
	/*
	 * Adjust the amount of WQEs depending on inline settings.
	 * The number of descriptors should be enough to handle
	 * the specified number of packets. If the queue is created
	 * with Verbs, rdma-core does this queue size adjustment
	 * internally in mlx5_calc_sq_size(); do the same here for
	 * a queue created with DevX.
	 */
	wqe_size = txq_data->tso_en ?
		   RTE_ALIGN(txq_ctrl->max_tso_header, MLX5_WSEG_SIZE) : 0;
	wqe_size += sizeof(struct mlx5_wqe_cseg) +
		    sizeof(struct mlx5_wqe_eseg) +
		    sizeof(struct mlx5_wqe_dseg);
	if (txq_data->inlen_send)
		wqe_size = RTE_MAX(wqe_size, sizeof(struct mlx5_wqe_cseg) +
					     sizeof(struct mlx5_wqe_eseg) +
					     RTE_ALIGN(txq_data->inlen_send +
						       sizeof(uint32_t),
						       MLX5_WSEG_SIZE));
	wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
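	/*
	 * wqe_size is now expressed in MLX5_WQE_SIZE units: one
	 * descriptor may occupy several basic WQEs, so the ring size is
	 * scaled accordingly and capped by the device max_qp_wr limit
	 * below.
	 */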
	/*
	 * Adjust the number of WQEs depending on inline settings.
	 * The number of descriptors should be enough to handle the
	 * specified number of packets. If the queue is being created
	 * with Verbs, rdma-core adjusts the queue size internally in
	 * mlx5_calc_sq_size(); do the same here for a queue being
	 * created with DevX.
	 */
	wqe_size = txq_data->tso_en ?
		   RTE_ALIGN(txq_ctrl->max_tso_header, MLX5_WSEG_SIZE) : 0;
	wqe_size += sizeof(struct mlx5_wqe_cseg) +
		    sizeof(struct mlx5_wqe_eseg) +
		    sizeof(struct mlx5_wqe_dseg);
	if (txq_data->inlen_send)
		wqe_size = RTE_MAX(wqe_size, sizeof(struct mlx5_wqe_cseg) +
					     sizeof(struct mlx5_wqe_eseg) +
					     RTE_ALIGN(txq_data->inlen_send +
						       sizeof(uint32_t),
						       MLX5_WSEG_SIZE));
	wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
	/* Create Send Queue object with DevX. */
	wqe_n = RTE_MIN((1UL << txq_data->elts_n) * wqe_size,
			(uint32_t)priv->sh->device_attr.max_qp_wr);
	log_desc_n = log2above(wqe_n);
	ret = mlx5_txq_create_devx_sq_resources(dev, idx, log_desc_n);
	if (ret) {
		DRV_LOG(ERR, "Port %u Tx queue %u SQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = errno;
		goto error;
	}
	/* Initialize the Work Queue data. */
	txq_data->wqe_n = log_desc_n;
	txq_data->wqe_s = 1 << txq_data->wqe_n;
	txq_data->wqe_m = txq_data->wqe_s - 1;
	txq_data->wqes = (struct mlx5_wqe *)(uintptr_t)txq_obj->sq_obj.wqes;
	txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
	txq_data->wqe_ci = 0;
	txq_data->wqe_pi = 0;
	txq_data->wqe_comp = 0;
	txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
	txq_data->qp_db = &txq_obj->sq_obj.db_rec[MLX5_SND_DBR];
	*txq_data->qp_db = 0;
	txq_data->qp_num_8s = txq_obj->sq_obj.sq->id << 8;
	/* Change Send Queue state to Ready-to-Send. */
	ret = mlx5_txq_devx_modify(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0);
	if (ret) {
		rte_errno = errno;
		DRV_LOG(ERR,
			"Port %u Tx queue %u SQ state to SQC_STATE_RDY failed.",
			dev->data->port_id, idx);
		goto error;
	}
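	/*
	 * Worked example of the WQE sizing above (illustrative only,
	 * assuming 16-byte cseg/eseg/dseg segments and a 64-byte
	 * MLX5_WQE_SIZE): with TSO and inlining disabled, wqe_size is
	 * 16 + 16 + 16 = 48 bytes, rounded up to one 64B WQE basic
	 * block per packet. With inlen_send = 128 it grows to
	 * 16 + 16 + RTE_ALIGN(128 + 4, 16) = 176 bytes, i.e. three
	 * basic blocks, so wqe_n and the ring footprint triple for the
	 * same elts_n, capped by the device's max_qp_wr.
	 */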
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	/*
	 * If using DevX, query and store the TIS transport domain value.
	 * This is done once per port; the value is used on Rx when
	 * creating the matching TIR.
	 */
	if (!priv->sh->tdn)
		priv->sh->tdn = priv->sh->td->id;
#endif
	MLX5_ASSERT(sh->tx_uar);
	reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
	MLX5_ASSERT(reg_addr);
	txq_ctrl->bf_reg = reg_addr;
	txq_ctrl->uar_mmap_offset =
		mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
	txq_uar_init(txq_ctrl);
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_txq_release_devx_resources(txq_obj);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
#endif
}

/**
 * Release a Tx DevX queue object.
 *
 * @param txq_obj
 *   DevX Tx queue object.
 */
void
mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj)
{
	MLX5_ASSERT(txq_obj);
	if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
		if (txq_obj->tis)
			claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
	} else {
		mlx5_txq_release_devx_resources(txq_obj);
#endif
	}
}

struct mlx5_obj_ops devx_obj_ops = {
	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
	.rxq_obj_new = mlx5_rxq_devx_obj_new,
	.rxq_event_get = mlx5_rx_devx_get_event,
	.rxq_obj_modify = mlx5_devx_modify_rq,
	.rxq_obj_release = mlx5_rxq_devx_obj_release,
	.ind_table_new = mlx5_devx_ind_table_new,
	.ind_table_modify = mlx5_devx_ind_table_modify,
	.ind_table_destroy = mlx5_devx_ind_table_destroy,
	.hrxq_new = mlx5_devx_hrxq_new,
	.hrxq_destroy = mlx5_devx_tir_destroy,
	.hrxq_modify = mlx5_devx_hrxq_modify,
	.drop_action_create = mlx5_devx_drop_action_create,
	.drop_action_destroy = mlx5_devx_drop_action_destroy,
	.txq_obj_new = mlx5_txq_devx_obj_new,
	.txq_obj_modify = mlx5_txq_devx_modify,
	.txq_obj_release = mlx5_txq_devx_obj_release,
	.lb_dummy_queue_create = NULL,
	.lb_dummy_queue_release = NULL,
};
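
/*
 * A minimal sketch of how this table is consumed (assumption: backend
 * selection happens once in the per-OS device spawn code; this is not
 * a verbatim copy of that code). After probe, the rest of the PMD calls
 * through priv->obj_ops without knowing which backend is active:
 *
 *	if (sh->devx)
 *		priv->obj_ops = devx_obj_ops;
 *	else
 *		priv->obj_ops = ibv_obj_ops;
 */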