/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common_devx.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_tx.h"
#include "mlx5_rx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"

/**
 * Modify RQ VLAN stripping offload.
 *
 * @param rxq_obj
 *   Rx queue object.
 * @param on
 *   Enable/disable VLAN stripping.
 *
 * @return
 *   0 on success, non-0 otherwise.
 */
static int
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	rq_attr.state = MLX5_RQC_STATE_RDY;
	rq_attr.vsd = (on ? 0 : 1);
	rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq_obj.rq, &rq_attr);
}

/**
 * Modify RQ using DevX API.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 * @param type
 *   Type of queue state change.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	switch (type) {
	case MLX5_RXQ_MOD_ERR2RST:
		rq_attr.rq_state = MLX5_RQC_STATE_ERR;
		rq_attr.state = MLX5_RQC_STATE_RST;
		break;
	case MLX5_RXQ_MOD_RST2RDY:
		rq_attr.rq_state = MLX5_RQC_STATE_RST;
		rq_attr.state = MLX5_RQC_STATE_RDY;
		break;
	case MLX5_RXQ_MOD_RDY2ERR:
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_ERR;
		break;
	case MLX5_RXQ_MOD_RDY2RST:
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_RST;
		break;
	default:
		break;
	}
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq_obj.rq, &rq_attr);
}
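/*
 * Usage sketch (illustrative only, not compiled into the driver): a caller
 * recovering an errored RQ typically walks it through ERR -> RST -> RDY
 * with the helper above. The guard macro is hypothetical.
 */
#ifdef MLX5_DEVX_USAGE_SKETCHES
static int
example_rq_error_recovery(struct mlx5_rxq_obj *rxq_obj)
{
	/* Move the queue out of the error state first. */
	if (mlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_ERR2RST))
		return -rte_errno;
	/* Then make it ready to receive again. */
	return mlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_RST2RDY);
}
#endif /* MLX5_DEVX_USAGE_SKETCHES */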
/**
 * Modify SQ using DevX API.
 *
 * @param obj
 *   DevX Tx queue object.
 * @param type
 *   Type of queue state change.
 * @param dev_port
 *   Unused; kept so the DevX and Verbs implementations share one prototype.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_txq_devx_modify(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
		     uint8_t dev_port)
{
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	int ret;

	if (type != MLX5_TXQ_MOD_RST2RDY) {
		/* Change queue state to reset. */
		if (type == MLX5_TXQ_MOD_ERR2RDY)
			msq_attr.sq_state = MLX5_SQC_STATE_ERR;
		else
			msq_attr.sq_state = MLX5_SQC_STATE_RDY;
		msq_attr.state = MLX5_SQC_STATE_RST;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_obj.sq, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to RESET"
				" %s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	if (type != MLX5_TXQ_MOD_RDY2RST) {
		/* Change queue state to ready. */
		msq_attr.sq_state = MLX5_SQC_STATE_RST;
		msq_attr.state = MLX5_SQC_STATE_RDY;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_obj.sq, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to READY"
				" %s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	/*
	 * The dev_port variable is relevant only in the Verbs API; this
	 * function and its Verbs counterpart are reached through the same
	 * function pointer, so they must share the same prototype.
	 */
	(void)dev_port;
	return 0;
}
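/*
 * Usage sketch (illustrative only, not compiled into the driver): restarting
 * an errored SQ is a single MLX5_TXQ_MOD_ERR2RDY call, because the helper
 * above internally performs both the ERR -> RST and RST -> RDY transitions.
 * The guard macro is hypothetical.
 */
#ifdef MLX5_DEVX_USAGE_SKETCHES
static int
example_sq_error_recovery(struct mlx5_txq_obj *txq_obj)
{
	/* dev_port is ignored by the DevX implementation. */
	return mlx5_txq_devx_modify(txq_obj, MLX5_TXQ_MOD_ERR2RDY, 0);
}
#endif /* MLX5_DEVX_USAGE_SKETCHES */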
/**
 * Destroy the Rx queue DevX object.
 *
 * @param rxq_obj
 *   Rxq object to destroy.
 */
static void
mlx5_rxq_release_devx_resources(struct mlx5_rxq_obj *rxq_obj)
{
	mlx5_devx_rq_destroy(&rxq_obj->rq_obj);
	memset(&rxq_obj->rq_obj, 0, sizeof(rxq_obj->rq_obj));
	mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
	memset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));
}

/**
 * Release an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
	MLX5_ASSERT(rxq_obj);
	if (rxq_obj->rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
		MLX5_ASSERT(rxq_obj->rq);
		mlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_RDY2RST);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
	} else {
		MLX5_ASSERT(rxq_obj->cq_obj.cq);
		MLX5_ASSERT(rxq_obj->rq_obj.rq);
		mlx5_rxq_release_devx_resources(rxq_obj);
		if (rxq_obj->devx_channel)
			mlx5_os_devx_destroy_event_channel
							(rxq_obj->devx_channel);
	}
}

/**
 * Get event for an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
{
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
					    &out.event_resp,
					    sizeof(out.buf));

	if (ret < 0) {
		rte_errno = errno;
		return -rte_errno;
	}
	if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->cq_obj.cq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
#else
	(void)rxq_obj;
	rte_errno = ENOTSUP;
	return -rte_errno;
#endif /* HAVE_IBV_DEVX_EVENT */
}
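/*
 * Usage sketch (illustrative only, not compiled into the driver): the event
 * channel fd stored in rxq_obj->fd can be polled, and once it signals,
 * mlx5_rx_devx_get_event() consumes the event and checks that its cookie
 * really belongs to this queue's CQ. The guard macro is hypothetical.
 */
#ifdef MLX5_DEVX_USAGE_SKETCHES
#include <poll.h>

static int
example_wait_rx_event(struct mlx5_rxq_obj *rxq_obj, int timeout_ms)
{
	struct pollfd pfd = { .fd = rxq_obj->fd, .events = POLLIN };

	if (poll(&pfd, 1, timeout_ms) <= 0)
		return -1; /* Timeout or polling error. */
	/* Consume and validate the CQ event. */
	return mlx5_rx_devx_get_event(rxq_obj);
}
#endif /* MLX5_DEVX_USAGE_SKETCHES */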
/**
 * Create a RQ object using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param rxq_data
 *   RX queue data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev,
				  struct mlx5_rxq_data *rxq_data)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_common_device *cdev = priv->sh->cdev;
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
	uint16_t log_desc_n = rxq_data->elts_n - rxq_data->sges_n;
	uint32_t wqe_size, log_wqe_size;

	/* Fill RQ attributes. */
	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
	rq_attr.flush_in_error_en = 1;
	rq_attr.vsd = (rxq_data->vlan_strip) ? 0 : 1;
	rq_attr.cqn = rxq_ctrl->obj->cq_obj.cq->id;
	rq_attr.scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
	rq_attr.ts_format =
		mlx5_ts_format_conv(cdev->config.hca_attr.rq_ts_format);
	/* Fill WQ attributes for this RQ. */
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
		/*
		 * Number of strides in each WQE:
		 * 512*2^single_wqe_log_num_of_strides.
		 */
		rq_attr.wq_attr.single_wqe_log_num_of_strides =
				rxq_data->strd_num_n -
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
		rq_attr.wq_attr.single_stride_log_num_of_bytes =
				rxq_data->strd_sz_n -
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
		wqe_size = sizeof(struct mlx5_wqe_mprq);
	} else {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
		wqe_size = sizeof(struct mlx5_wqe_data_seg);
	}
	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
	wqe_size = 1 << log_wqe_size; /* Round up to power of two. */
	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
	rq_attr.wq_attr.log_wq_sz = log_desc_n;
	rq_attr.wq_attr.end_padding_mode = priv->config.hw_padding ?
						MLX5_WQ_END_PAD_MODE_ALIGN :
						MLX5_WQ_END_PAD_MODE_NONE;
	rq_attr.wq_attr.pd = cdev->pdn;
	rq_attr.counter_set_id = priv->counter_set_id;
	/* Create RQ using DevX API. */
	return mlx5_devx_rq_create(cdev->ctx, &rxq_ctrl->obj->rq_obj, wqe_size,
				   log_desc_n, &rq_attr, rxq_ctrl->socket);
}
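/*
 * Worked example for the sizing math above (values are illustrative):
 * with elts_n = 10 (1024 elements) and sges_n = 2 (4 segments per packet),
 * log_desc_n = 10 - 2 = 8, i.e. a 256-entry WQ. In the non-MPRQ case,
 * assuming sizeof(struct mlx5_wqe_data_seg) is 16 bytes,
 * log_wqe_size = 4 + 2 = 6, so each WQE stride is 64 bytes and holds the
 * four scatter segments of one packet.
 */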
/**
 * Create a DevX CQ object for an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param rxq_data
 *   RX queue data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev,
				  struct mlx5_rxq_data *rxq_data)
{
	struct mlx5_devx_cq *cq_obj = NULL;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
	uint32_t log_cqe_n;
	uint16_t event_nums[1] = { 0 };
	int ret = 0;

	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
	    !rxq_data->lro) {
		cq_attr.cqe_comp_en = 1u;
		rxq_data->mcqe_format = priv->config.cqe_comp_fmt;
		rxq_data->byte_mask = UINT32_MAX;
		switch (priv->config.cqe_comp_fmt) {
		case MLX5_CQE_RESP_FORMAT_HASH:
			/* fallthrough */
		case MLX5_CQE_RESP_FORMAT_CSUM:
			/*
			 * Select CSUM miniCQE format only for non-vectorized
			 * MPRQ Rx burst, use HASH miniCQE format for others.
			 */
			if (mlx5_rxq_check_vec_support(rxq_data) < 0 &&
			    mlx5_rxq_mprq_enabled(rxq_data))
				cq_attr.mini_cqe_res_format =
					MLX5_CQE_RESP_FORMAT_CSUM_STRIDX;
			else
				cq_attr.mini_cqe_res_format =
					MLX5_CQE_RESP_FORMAT_HASH;
			rxq_data->mcqe_format = cq_attr.mini_cqe_res_format;
			break;
		case MLX5_CQE_RESP_FORMAT_FTAG_STRIDX:
			rxq_data->byte_mask = MLX5_LEN_WITH_MARK_MASK;
			/* fallthrough */
		case MLX5_CQE_RESP_FORMAT_CSUM_STRIDX:
			cq_attr.mini_cqe_res_format = priv->config.cqe_comp_fmt;
			break;
		case MLX5_CQE_RESP_FORMAT_L34H_STRIDX:
			cq_attr.mini_cqe_res_format = 0;
			cq_attr.mini_cqe_res_format_ext = 1;
			break;
		}
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is enabled, format %d.",
			dev->data->port_id, priv->config.cqe_comp_fmt);
		/*
		 * For vectorized Rx, it must not be doubled in order to
		 * make cq_ci and rq_ci aligned.
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cqe_n *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for HW"
			" timestamp.",
			dev->data->port_id);
	} else if (priv->config.cqe_comp && rxq_data->lro) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for LRO.",
			dev->data->port_id);
	}
	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->devx_rx_uar);
	log_cqe_n = log2above(cqe_n);
	/* Create CQ using DevX API. */
	ret = mlx5_devx_cq_create(sh->cdev->ctx, &rxq_ctrl->obj->cq_obj,
				  log_cqe_n, &cq_attr, sh->numa_node);
	if (ret)
		return ret;
	cq_obj = &rxq_ctrl->obj->cq_obj;
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])
							(uintptr_t)cq_obj->cqes;
	rxq_data->cq_db = cq_obj->db_rec;
	rxq_data->cq_uar = mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar);
	rxq_data->cqe_n = log_cqe_n;
	rxq_data->cqn = cq_obj->cq->id;
	if (rxq_ctrl->obj->devx_channel) {
		ret = mlx5_os_devx_subscribe_devx_event
						(rxq_ctrl->obj->devx_channel,
						 cq_obj->cq->obj,
						 sizeof(event_nums),
						 event_nums,
						 (uint64_t)(uintptr_t)cq_obj->cq);
		if (ret) {
			DRV_LOG(ERR, "Failed to subscribe CQ to event channel.");
			ret = errno;
			mlx5_devx_cq_destroy(cq_obj);
			memset(cq_obj, 0, sizeof(*cq_obj));
			rte_errno = ret;
			return -ret;
		}
	}
	return 0;
}
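/*
 * Worked example for the CQ sizing above (values are illustrative): if
 * mlx5_rxq_cqe_num() yields 512 entries, CQE compression is enabled and the
 * burst is non-vectorized, cqe_n is doubled to 1024 so that cq_ci and rq_ci
 * stay aligned, and log_cqe_n = log2above(1024) = 10 is what is actually
 * passed to mlx5_devx_cq_create().
 */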
/**
 * Create the Rx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr attr = { 0 };
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	tmpl->rxq_ctrl = rxq_ctrl;
	attr.hairpin = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	attr.counter_set_id = priv->counter_set_id;
	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->cdev->ctx, &attr,
					   rxq_ctrl->socket);
	if (!tmpl->rq) {
		DRV_LOG(ERR,
			"Port %u Rx hairpin queue %u can't create RQ object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return 0;
}
/**
 * Create the Rx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	int ret = 0;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		return mlx5_rxq_obj_hairpin_new(dev, idx);
	tmpl->rxq_ctrl = rxq_ctrl;
	if (rxq_ctrl->irq) {
		int devx_ev_flag =
			MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

		tmpl->devx_channel = mlx5_os_devx_create_event_channel
							(priv->sh->cdev->ctx,
							 devx_ev_flag);
		if (!tmpl->devx_channel) {
			rte_errno = errno;
			DRV_LOG(ERR, "Failed to create event channel %d.",
				rte_errno);
			goto error;
		}
		tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
	}
	/* Create CQ using DevX API. */
	ret = mlx5_rxq_create_devx_cq_resources(dev, rxq_data);
	if (ret) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto error;
	}
	/* Create RQ using DevX API. */
	ret = mlx5_rxq_create_devx_rq_resources(dev, rxq_data);
	if (ret) {
		DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_devx_modify_rq(tmpl, MLX5_RXQ_MOD_RST2RDY);
	if (ret)
		goto error;
	rxq_data->wqes = (void *)(uintptr_t)tmpl->rq_obj.umem_buf;
	rxq_data->rq_db = (uint32_t *)(uintptr_t)tmpl->rq_obj.db_rec;
	rxq_data->cq_arm_sn = 0;
	rxq_data->cq_ci = 0;
	mlx5_rxq_initialize(rxq_data);
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	rxq_ctrl->wqn = tmpl->rq_obj.rq->id;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_rxq_devx_obj_release(tmpl);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}
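/*
 * Lifecycle sketch (illustrative only, not compiled into the driver):
 * mlx5_rxq_devx_obj_new() builds the event channel, CQ and RQ in that order,
 * and mlx5_rxq_devx_obj_release() tears them down; on failure the creation
 * routine's error path already performs the release. The guard macro is
 * hypothetical.
 */
#ifdef MLX5_DEVX_USAGE_SKETCHES
static int
example_rxq_obj_cycle(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);

	if (mlx5_rxq_devx_obj_new(dev, idx))
		return -rte_errno; /* Resources already released on error. */
	/* ... use the queue ... */
	mlx5_rxq_devx_obj_release(rxq_ctrl->obj);
	return 0;
}
#endif /* MLX5_DEVX_USAGE_SKETCHES */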
/**
 * Prepare RQT attribute structure for DevX RQT API.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param queues
 *   List of RX queue indices or NULL, in which case
 *   the attribute is filled with the drop queue ID.
 * @param queues_n
 *   Size of @p queues array or 0 if it is NULL.
 *
 * @return
 *   The RQT attr object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_rqt_attr *
mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
				    const unsigned int log_n,
				    const uint16_t *queues,
				    const uint32_t queues_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
	const unsigned int rqt_n = 1 << log_n;
	unsigned int i, j;

	rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
			       rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
	if (!rqt_attr) {
		DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		return NULL;
	}
	rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
	rqt_attr->rqt_actual_size = rqt_n;
	if (queues == NULL) {
		for (i = 0; i < rqt_n; i++)
			rqt_attr->rq_list[i] = priv->drop_queue.rxq->rq->id;
		return rqt_attr;
	}
	for (i = 0; i != queues_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[queues[i]];
		struct mlx5_rxq_ctrl *rxq_ctrl =
				container_of(rxq, struct mlx5_rxq_ctrl, rxq);

		rqt_attr->rq_list[i] = rxq_ctrl->obj->rq_obj.rq->id;
	}
	MLX5_ASSERT(i > 0);
	for (j = 0; i != rqt_n; ++j, ++i)
		rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
	return rqt_attr;
}
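/*
 * Worked example for the wrap-around fill above (values are illustrative):
 * log_n = 3 gives rqt_n = 8 entries; with queues_n = 5 the RQ list becomes
 * [q0, q1, q2, q3, q4, q0, q1, q2], i.e. the configured queues are repeated
 * cyclically until the power-of-two table is full.
 */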
/**
 * Create RQT using DevX API as a field of indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
			struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
	const uint16_t *queues = dev->data->dev_started ? ind_tbl->queues :
							  NULL;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n, queues,
						       ind_tbl->queues_n);
	if (!rqt_attr)
		return -rte_errno;
	ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->cdev->ctx, rqt_attr);
	mlx5_free(rqt_attr);
	if (!ind_tbl->rqt) {
		DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
			dev->data->port_id);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

/**
 * Modify RQT using DevX API as a field of indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param queues
 *   List of RX queue indices.
 * @param queues_n
 *   Size of @p queues array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_modify(struct rte_eth_dev *dev, const unsigned int log_n,
			   const uint16_t *queues, const uint32_t queues_n,
			   struct mlx5_ind_table_obj *ind_tbl)
{
	int ret = 0;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
						       queues,
						       queues_n);
	if (!rqt_attr)
		return -rte_errno;
	ret = mlx5_devx_cmd_modify_rqt(ind_tbl->rqt, rqt_attr);
	mlx5_free(rqt_attr);
	if (ret)
		DRV_LOG(ERR, "Port %u cannot modify DevX RQT.",
			dev->data->port_id);
	return ret;
}
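/*
 * Usage sketch (illustrative only, not compiled into the driver): updating
 * the queue set of an existing indirection table goes through
 * mlx5_devx_ind_table_modify() without recreating the RQT object. The queue
 * array and table size here are hypothetical, as is the guard macro.
 */
#ifdef MLX5_DEVX_USAGE_SKETCHES
static int
example_retarget_rss(struct rte_eth_dev *dev,
		     struct mlx5_ind_table_obj *ind_tbl)
{
	static const uint16_t new_queues[] = { 0, 1 };

	/* log_n = 1 -> a 2-entry RQT covering the two queues. */
	return mlx5_devx_ind_table_modify(dev, 1, new_queues,
					  RTE_DIM(new_queues), ind_tbl);
}
#endif /* MLX5_DEVX_USAGE_SKETCHES */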
/**
 * Destroy the DevX RQT object.
 *
 * @param ind_tbl
 *   Indirection table to release.
 */
static void
mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
	claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
}

/**
 * Set TIR attribute struct with relevant input values.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] rss_key
 *   RSS key for the Rx hash queue.
 * @param[in] hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param[in] ind_tbl
 *   Indirection table for TIR. If table queues array is NULL,
 *   a TIR for drop queue is assumed.
 * @param[in] tunnel
 *   Tunnel type.
 * @param[out] tir_attr
 *   Parameters structure for TIR creation/modification.
 */
static void
mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
		       uint64_t hash_fields,
		       const struct mlx5_ind_table_obj *ind_tbl,
		       int tunnel, struct mlx5_devx_tir_attr *tir_attr)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	enum mlx5_rxq_type rxq_obj_type;
	bool lro = true;
	uint32_t i;

	/* NULL queues designate drop queue. */
	if (ind_tbl->queues != NULL) {
		struct mlx5_rxq_data *rxq_data =
					(*priv->rxqs)[ind_tbl->queues[0]];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
		rxq_obj_type = rxq_ctrl->type;

		/* Enable TIR LRO only if all the queues were configured for it. */
		for (i = 0; i < ind_tbl->queues_n; ++i) {
			if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
				lro = false;
				break;
			}
		}
	} else {
		rxq_obj_type = priv->drop_queue.rxq->rxq_ctrl->type;
	}
	memset(tir_attr, 0, sizeof(*tir_attr));
	tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
	tir_attr->rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
	tir_attr->tunneled_offload_en = !!tunnel;
	/* If needed, translate hash_fields bitmap to PRM format. */
	if (hash_fields) {
		struct mlx5_rx_hash_field_select *rx_hash_field_select =
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
			hash_fields & IBV_RX_HASH_INNER ?
				&tir_attr->rx_hash_field_selector_inner :
#endif
				&tir_attr->rx_hash_field_selector_outer;
		/* 1 bit: 0: IPv4, 1: IPv6. */
		rx_hash_field_select->l3_prot_type =
			!!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
		/* 1 bit: 0: TCP, 1: UDP. */
		rx_hash_field_select->l4_prot_type =
			!!(hash_fields & MLX5_UDP_IBV_RX_HASH);
		/* Bitmask which sets which fields to use in RX Hash. */
		rx_hash_field_select->selected_fields =
			((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
			(!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
			(!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
			(!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
	}
	if (rxq_obj_type == MLX5_RXQ_TYPE_HAIRPIN)
		tir_attr->transport_domain = priv->sh->td->id;
	else
		tir_attr->transport_domain = priv->sh->tdn;
	memcpy(tir_attr->rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	tir_attr->indirect_table = ind_tbl->rqt->id;
	if (dev->data->dev_conf.lpbk_mode)
		tir_attr->self_lb_block =
					MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
	if (lro) {
		tir_attr->lro_timeout_period_usecs = priv->config.lro.timeout;
		tir_attr->lro_max_msg_sz = priv->max_lro_msg_size;
		tir_attr->lro_enable_mask =
				MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
				MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
	}
}
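/*
 * Worked example for the PRM translation above (values are illustrative):
 * for a TCPv4 hash the Verbs hash fields select the IPv4 source and
 * destination addresses plus the TCP source and destination ports, so
 * l3_prot_type = 0 (IPv4), l4_prot_type = 0 (TCP), and selected_fields
 * carries the SRC_IP, DST_IP, L4_SPORT and L4_DPORT bits.
 */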
/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Pointer to Rx Hash queue.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		   int tunnel __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_tir_attr tir_attr = {0};
	int err;

	mlx5_devx_tir_attr_set(dev, hrxq->rss_key, hrxq->hash_fields,
			       hrxq->ind_table, tunnel, &tir_attr);
	hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->cdev->ctx, &tir_attr);
	if (!hrxq->tir) {
		DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
			dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	if (mlx5_flow_os_create_flow_action_dest_devx_tir(hrxq->tir,
							  &hrxq->action)) {
		rte_errno = errno;
		goto error;
	}
#endif
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	if (hrxq->tir)
		claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Destroy a DevX TIR object.
 *
 * @param hrxq
 *   Hash Rx queue to release its tir.
 */
static void
mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
{
	claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
}
/**
 * Modify an Rx Hash queue configuration.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Hash Rx queue to modify.
 * @param rss_key
 *   RSS key for the Rx hash queue.
 * @param hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param[in] ind_tbl
 *   Indirection table for TIR.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_hrxq_modify(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		      const uint8_t *rss_key,
		      uint64_t hash_fields,
		      const struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_devx_modify_tir_attr modify_tir = {0};

	/*
	 * untested for modification fields:
	 * - rx_hash_symmetric not set in hrxq_new(),
	 * - rx_hash_fn set hard-coded in hrxq_new(),
	 * - lro_xxx not set after rxq setup
	 */
	if (ind_tbl != hrxq->ind_table)
		modify_tir.modify_bitmask |=
			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_INDIRECT_TABLE;
	if (hash_fields != hrxq->hash_fields ||
	    memcmp(hrxq->rss_key, rss_key, MLX5_RSS_HASH_KEY_LEN))
		modify_tir.modify_bitmask |=
			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_HASH;
	mlx5_devx_tir_attr_set(dev, rss_key, hash_fields, ind_tbl,
			       0, /* N/A - tunnel modification unsupported */
			       &modify_tir.tir);
	modify_tir.tirn = hrxq->tir->id;
	if (mlx5_devx_cmd_modify_tir(hrxq->tir, &modify_tir)) {
		DRV_LOG(ERR, "port %u cannot modify DevX TIR",
			dev->data->port_id);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}
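/*
 * Usage sketch (illustrative only, not compiled into the driver): changing
 * only the RSS key of an existing hash queue sets the HASH modify bitmask in
 * mlx5_devx_hrxq_modify() while keeping the indirection table untouched.
 * The key contents are hypothetical, as is the guard macro.
 */
#ifdef MLX5_DEVX_USAGE_SKETCHES
static int
example_rekey_hrxq(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
{
	uint8_t new_key[MLX5_RSS_HASH_KEY_LEN] = { 0x6d };

	return mlx5_devx_hrxq_modify(dev, hrxq, new_key, hrxq->hash_fields,
				     hrxq->ind_table);
}
#endif /* MLX5_DEVX_USAGE_SKETCHES */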
/**
 * Create a DevX drop Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int socket_id = dev->device->numa_node;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct mlx5_rxq_data *rxq_data;
	struct mlx5_rxq_obj *rxq = NULL;
	int ret;

	/*
	 * Initialize dummy control structures.
	 * They are required to hold pointers for cleanup
	 * and are only accessible via drop queue DevX objects.
	 */
	rxq_ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_ctrl),
			       0, socket_id);
	if (rxq_ctrl == NULL) {
		DRV_LOG(ERR, "Port %u could not allocate drop queue control",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, socket_id);
	if (rxq == NULL) {
		DRV_LOG(ERR, "Port %u could not allocate drop queue object",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	rxq->rxq_ctrl = rxq_ctrl;
	rxq_ctrl->type = MLX5_RXQ_TYPE_STANDARD;
	rxq_ctrl->priv = priv;
	rxq_ctrl->obj = rxq;
	rxq_data = &rxq_ctrl->rxq;
	/* Create CQ using DevX API. */
	ret = mlx5_rxq_create_devx_cq_resources(dev, rxq_data);
	if (ret != 0) {
		DRV_LOG(ERR, "Port %u drop queue CQ creation failed.",
			dev->data->port_id);
		goto error;
	}
	/* Create RQ using DevX API. */
	ret = mlx5_rxq_create_devx_rq_resources(dev, rxq_data);
	if (ret != 0) {
		DRV_LOG(ERR, "Port %u drop queue RQ creation failed.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY);
	if (ret != 0)
		goto error;
	/* Initialize drop queue. */
	priv->drop_queue.rxq = rxq;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (rxq != NULL) {
		if (rxq->rq_obj.rq != NULL)
			mlx5_devx_rq_destroy(&rxq->rq_obj);
		if (rxq->cq_obj.cq != NULL)
			mlx5_devx_cq_destroy(&rxq->cq_obj);
		if (rxq->devx_channel)
			mlx5_os_devx_destroy_event_channel
							(rxq->devx_channel);
		mlx5_free(rxq);
	}
	if (rxq_ctrl != NULL)
		mlx5_free(rxq_ctrl);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}
/**
 * Release drop Rx queue resources.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_rxq_devx_obj_drop_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->rxq_ctrl;

	mlx5_rxq_devx_obj_release(rxq);
	mlx5_free(rxq);
	mlx5_free(rxq_ctrl);
	priv->drop_queue.rxq = NULL;
}

/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;

	if (hrxq->tir != NULL)
		mlx5_devx_tir_destroy(hrxq);
	if (hrxq->ind_table->ind_table != NULL)
		mlx5_devx_ind_table_destroy(hrxq->ind_table);
	if (priv->drop_queue.rxq->rq != NULL)
		mlx5_rxq_devx_obj_drop_release(dev);
}
/**
 * Create a DevX drop action for Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
	int ret;

	ret = mlx5_rxq_devx_obj_drop_create(dev);
	if (ret != 0) {
		DRV_LOG(ERR, "Cannot create drop RX queue");
		return ret;
	}
	/* hrxq->ind_table queues are NULL, drop RX queue ID will be used */
	ret = mlx5_devx_ind_table_new(dev, 0, hrxq->ind_table);
	if (ret != 0) {
		DRV_LOG(ERR, "Cannot create drop hash RX queue indirection table");
		goto error;
	}
	ret = mlx5_devx_hrxq_new(dev, hrxq, /* tunnel */ false);
	if (ret != 0) {
		DRV_LOG(ERR, "Cannot create drop hash RX queue");
		goto error;
	}
	return 0;
error:
	mlx5_devx_drop_action_destroy(dev);
	return ret;
}

/**
 * Select TXQ TIS number.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param queue_idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   > 0 on success, a negative errno value otherwise.
 */
static uint32_t
mlx5_get_txq_tis_num(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int tis_idx;

	if (priv->sh->bond.n_port && priv->sh->lag.affinity_mode ==
	    MLX5_LAG_MODE_TIS) {
		tis_idx = (priv->lag_affinity_idx + queue_idx) %
			  priv->sh->bond.n_port;
		DRV_LOG(INFO, "port %d txq %d gets affinity %d and maps to PF %d.",
			dev->data->port_id, queue_idx, tis_idx + 1,
			priv->sh->lag.tx_remap_affinity[tis_idx]);
	} else {
		tis_idx = 0;
	}
	MLX5_ASSERT(priv->sh->tis[tis_idx]);
	return priv->sh->tis[tis_idx]->id;
}
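/*
 * Worked example for the TIS selection above (values are illustrative):
 * with a 2-port bond in TIS LAG mode (n_port = 2), lag_affinity_idx = 1 and
 * queue_idx = 3, tis_idx = (1 + 3) % 2 = 0, so the queue is pinned to the
 * physical port stored in tx_remap_affinity[0]. Without bonding, the
 * default tis[0] is always used.
 */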
/**
 * Create the Tx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_devx_create_sq_attr attr = { 0 };
	struct mlx5_txq_obj *tmpl = txq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(tmpl);
	tmpl->txq_ctrl = txq_ctrl;
	attr.hairpin = 1;
	attr.tis_lst_sz = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;

	attr.tis_num = mlx5_get_txq_tis_num(dev, idx);
	tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->cdev->ctx, &attr);
	if (!tmpl->sq) {
		DRV_LOG(ERR,
			"Port %u Tx hairpin queue %u can't create SQ object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
/**
 * Destroy the Tx queue DevX object.
 *
 * @param txq_obj
 *   Txq object to destroy.
 */
1132f1ae0b35SOphir Munk #if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
113386d259ceSMichael Baum /**
113486d259ceSMichael Baum * Destroy the Tx queue DevX object.
113586d259ceSMichael Baum *
113686d259ceSMichael Baum * @param txq_obj
113786d259ceSMichael Baum * Txq object to destroy.
113886d259ceSMichael Baum */
113986d259ceSMichael Baum static void
114088f2e3f1SMichael Baum mlx5_txq_release_devx_resources(struct mlx5_txq_obj *txq_obj)
114186d259ceSMichael Baum {
114274e91860SMichael Baum mlx5_devx_sq_destroy(&txq_obj->sq_obj);
114374e91860SMichael Baum memset(&txq_obj->sq_obj, 0, sizeof(txq_obj->sq_obj));
11445f04f70cSMichael Baum mlx5_devx_cq_destroy(&txq_obj->cq_obj);
11455f04f70cSMichael Baum memset(&txq_obj->cq_obj, 0, sizeof(txq_obj->cq_obj));
114686d259ceSMichael Baum }
114786d259ceSMichael Baum
114886d259ceSMichael Baum /**
114988f2e3f1SMichael Baum * Create a SQ object and its resources using DevX.
115086d259ceSMichael Baum *
115186d259ceSMichael Baum * @param dev
115286d259ceSMichael Baum * Pointer to Ethernet device.
115386d259ceSMichael Baum * @param idx
115486d259ceSMichael Baum * Queue index in DPDK Tx queue array.
115574e91860SMichael Baum * @param[in] log_desc_n
115674e91860SMichael Baum * Log of number of descriptors in queue.
115786d259ceSMichael Baum *
115886d259ceSMichael Baum * @return
115974e91860SMichael Baum * 0 on success, a negative errno value otherwise and rte_errno is set.
116086d259ceSMichael Baum */
116174e91860SMichael Baum static int
116274e91860SMichael Baum mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
116374e91860SMichael Baum uint16_t log_desc_n)
116486d259ceSMichael Baum {
116586d259ceSMichael Baum struct mlx5_priv *priv = dev->data->dev_private;
1166fe46b20cSMichael Baum struct mlx5_common_device *cdev = priv->sh->cdev;
116786d259ceSMichael Baum struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
116888f2e3f1SMichael Baum struct mlx5_txq_ctrl *txq_ctrl =
116988f2e3f1SMichael Baum container_of(txq_data, struct mlx5_txq_ctrl, txq);
117088f2e3f1SMichael Baum struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
117174e91860SMichael Baum struct mlx5_devx_create_sq_attr sq_attr = {
117274e91860SMichael Baum .flush_in_error_en = 1,
117374e91860SMichael Baum .allow_multi_pkt_send_wqe = !!priv->config.mps,
117474e91860SMichael Baum .min_wqe_inline_mode = priv->config.hca_attr.vport_inline_mode,
117574e91860SMichael Baum .allow_swp = !!priv->config.swp,
117674e91860SMichael Baum .cqn = txq_obj->cq_obj.cq->id,
117774e91860SMichael Baum .tis_lst_sz = 1,
117874e91860SMichael Baum .wq_attr = (struct mlx5_devx_wq_attr){
1179fe46b20cSMichael Baum .pd = cdev->pdn,
118074e91860SMichael Baum .uar_page =
118174e91860SMichael Baum mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar),
118274e91860SMichael Baum },
1183fe46b20cSMichael Baum .ts_format =
1184fe46b20cSMichael Baum mlx5_ts_format_conv(cdev->config.hca_attr.sq_ts_format),
1185a89f6433SRongwei Liu .tis_num = mlx5_get_txq_tis_num(dev, idx),
118674e91860SMichael Baum };
1187a89f6433SRongwei Liu
118886d259ceSMichael Baum /* Create Send Queue object with DevX. */
1189fe46b20cSMichael Baum return mlx5_devx_sq_create(cdev->ctx, &txq_obj->sq_obj,
1190ca1418ceSMichael Baum log_desc_n, &sq_attr, priv->sh->numa_node);
119186d259ceSMichael Baum }
119286d259ceSMichael Baum #endif
119386d259ceSMichael Baum
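/*
 * Note on mlx5_txq_create_devx_sq_resources() above: the attributes
 * tie the new SQ to its completion queue (.cqn) and to the TIS chosen
 * by mlx5_get_txq_tis_num(), while .wq_attr supplies the protection
 * domain and UAR page for the work queue memory. The common helper
 * mlx5_devx_sq_create() is assumed to allocate the WQE buffer and
 * doorbell record on the given NUMA node; the caller below relies on
 * sq_obj.wqes and sq_obj.db_rec being populated on success.
 */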
119486d259ceSMichael Baum /**
119586d259ceSMichael Baum * Create the Tx queue DevX object.
119686d259ceSMichael Baum *
119786d259ceSMichael Baum * @param dev
119886d259ceSMichael Baum * Pointer to Ethernet device.
119986d259ceSMichael Baum * @param idx
120086d259ceSMichael Baum * Queue index in DPDK Tx queue array.
120186d259ceSMichael Baum *
120286d259ceSMichael Baum * @return
1203f49f4483SMichael Baum * 0 on success, a negative errno value otherwise and rte_errno is set.
120486d259ceSMichael Baum */
1205f49f4483SMichael Baum int
120686d259ceSMichael Baum mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
120786d259ceSMichael Baum {
120886d259ceSMichael Baum struct mlx5_priv *priv = dev->data->dev_private;
120986d259ceSMichael Baum struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
121086d259ceSMichael Baum struct mlx5_txq_ctrl *txq_ctrl =
121186d259ceSMichael Baum container_of(txq_data, struct mlx5_txq_ctrl, txq);
121286d259ceSMichael Baum
121386d259ceSMichael Baum if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
121486d259ceSMichael Baum return mlx5_txq_obj_hairpin_new(dev, idx);
1215f1ae0b35SOphir Munk #if !defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) && defined(HAVE_INFINIBAND_VERBS_H)
121686d259ceSMichael Baum DRV_LOG(ERR, "Port %u Tx queue %u cannot create with DevX, no UAR.",
121786d259ceSMichael Baum dev->data->port_id, idx);
121886d259ceSMichael Baum rte_errno = ENOMEM;
1219f49f4483SMichael Baum return -rte_errno;
122086d259ceSMichael Baum #else
122186d259ceSMichael Baum struct mlx5_dev_ctx_shared *sh = priv->sh;
1222f49f4483SMichael Baum struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
12235f04f70cSMichael Baum struct mlx5_devx_cq_attr cq_attr = {
12245f04f70cSMichael Baum .uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
12255f04f70cSMichael Baum };
122686d259ceSMichael Baum void *reg_addr;
12275f04f70cSMichael Baum uint32_t cqe_n, log_desc_n;
122800984de5SViacheslav Ovsiienko uint32_t wqe_n, wqe_size;
122986d259ceSMichael Baum int ret = 0;
123086d259ceSMichael Baum
123186d259ceSMichael Baum MLX5_ASSERT(txq_data);
1232f49f4483SMichael Baum MLX5_ASSERT(txq_obj);
123386d259ceSMichael Baum txq_obj->txq_ctrl = txq_ctrl;
123486d259ceSMichael Baum txq_obj->dev = dev;
12355f04f70cSMichael Baum cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
12365f04f70cSMichael Baum 1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
12375f04f70cSMichael Baum log_desc_n = log2above(cqe_n);
12385f04f70cSMichael Baum cqe_n = 1UL << log_desc_n;
12395f04f70cSMichael Baum if (cqe_n > UINT16_MAX) {
12405f04f70cSMichael Baum DRV_LOG(ERR, "Port %u Tx queue %u requests too many CQEs %u.",
12415f04f70cSMichael Baum dev->data->port_id, txq_data->idx, cqe_n);
12425f04f70cSMichael Baum rte_errno = EINVAL;
12435f04f70cSMichael Baum return -rte_errno;
12445f04f70cSMichael Baum }
12455f04f70cSMichael Baum /* Create completion queue object with DevX. */
1246ca1418ceSMichael Baum ret = mlx5_devx_cq_create(sh->cdev->ctx, &txq_obj->cq_obj, log_desc_n,
12475f04f70cSMichael Baum &cq_attr, priv->sh->numa_node);
12485f04f70cSMichael Baum if (ret) {
12495f04f70cSMichael Baum DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
12505f04f70cSMichael Baum dev->data->port_id, idx);
125186d259ceSMichael Baum goto error;
125286d259ceSMichael Baum }
12535f04f70cSMichael Baum txq_data->cqe_n = log_desc_n;
12545f04f70cSMichael Baum txq_data->cqe_s = cqe_n;
125586d259ceSMichael Baum txq_data->cqe_m = txq_data->cqe_s - 1;
12565f04f70cSMichael Baum txq_data->cqes = txq_obj->cq_obj.cqes;
125786d259ceSMichael Baum txq_data->cq_ci = 0;
125886d259ceSMichael Baum txq_data->cq_pi = 0;
12595f04f70cSMichael Baum txq_data->cq_db = txq_obj->cq_obj.db_rec;
126086d259ceSMichael Baum *txq_data->cq_db = 0;
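/*
 * Worked example of the CQ sizing above, with illustrative values
 * only: for 2^elts_n = 1024 Tx descriptors, a completion threshold
 * MLX5_TX_COMP_THRESH of 32 and MLX5_TX_COMP_THRESH_INLINE_DIV of 8,
 * cqe_n = 1024 / 32 + 1 + 8 = 41, which log2above() rounds up to
 * log_desc_n = 6, i.e. a 64-entry CQ.
 */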
126100984de5SViacheslav Ovsiienko /*
126200984de5SViacheslav Ovsiienko * Adjust the number of WQEs depending on the inline settings.
126300984de5SViacheslav Ovsiienko * The number of descriptors should be enough to handle the
126400984de5SViacheslav Ovsiienko * specified number of packets. If the queue is created with
126500984de5SViacheslav Ovsiienko * Verbs, rdma-core adjusts the queue size internally in
126600984de5SViacheslav Ovsiienko * mlx5_calc_sq_size(); do the same adjustment here for queues
126700984de5SViacheslav Ovsiienko * created with DevX.
126800984de5SViacheslav Ovsiienko */
126900984de5SViacheslav Ovsiienko wqe_size = txq_data->tso_en ?
127000984de5SViacheslav Ovsiienko RTE_ALIGN(txq_ctrl->max_tso_header, MLX5_WSEG_SIZE) : 0;
127100984de5SViacheslav Ovsiienko wqe_size += sizeof(struct mlx5_wqe_cseg) +
127200984de5SViacheslav Ovsiienko sizeof(struct mlx5_wqe_eseg) +
127300984de5SViacheslav Ovsiienko sizeof(struct mlx5_wqe_dseg);
127400984de5SViacheslav Ovsiienko if (txq_data->inlen_send)
127500984de5SViacheslav Ovsiienko wqe_size = RTE_MAX(wqe_size, sizeof(struct mlx5_wqe_cseg) +
127600984de5SViacheslav Ovsiienko sizeof(struct mlx5_wqe_eseg) +
127700984de5SViacheslav Ovsiienko RTE_ALIGN(txq_data->inlen_send +
127800984de5SViacheslav Ovsiienko sizeof(uint32_t),
127900984de5SViacheslav Ovsiienko MLX5_WSEG_SIZE));
128000984de5SViacheslav Ovsiienko wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
128186d259ceSMichael Baum /* Create Send Queue object with DevX. */
128200984de5SViacheslav Ovsiienko wqe_n = RTE_MIN((1UL << txq_data->elts_n) * wqe_size,
128374e91860SMichael Baum (uint32_t)priv->sh->device_attr.max_qp_wr);
128474e91860SMichael Baum log_desc_n = log2above(wqe_n);
128574e91860SMichael Baum ret = mlx5_txq_create_devx_sq_resources(dev, idx, log_desc_n);
128674e91860SMichael Baum if (ret) {
128774e91860SMichael Baum DRV_LOG(ERR, "Port %u Tx queue %u SQ creation failure.",
128874e91860SMichael Baum dev->data->port_id, idx);
128986d259ceSMichael Baum rte_errno = errno;
129086d259ceSMichael Baum goto error;
129186d259ceSMichael Baum }
129286d259ceSMichael Baum /* Set up the Work Queue fields. */
129374e91860SMichael Baum txq_data->wqe_n = log_desc_n;
129486d259ceSMichael Baum txq_data->wqe_s = 1 << txq_data->wqe_n;
129586d259ceSMichael Baum txq_data->wqe_m = txq_data->wqe_s - 1;
129674e91860SMichael Baum txq_data->wqes = (struct mlx5_wqe *)(uintptr_t)txq_obj->sq_obj.wqes;
129786d259ceSMichael Baum txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
129886d259ceSMichael Baum txq_data->wqe_ci = 0;
129986d259ceSMichael Baum txq_data->wqe_pi = 0;
130086d259ceSMichael Baum txq_data->wqe_comp = 0;
130186d259ceSMichael Baum txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
130231625e62SViacheslav Ovsiienko txq_data->qp_db = &txq_obj->sq_obj.db_rec[MLX5_SND_DBR];
130386d259ceSMichael Baum *txq_data->qp_db = 0;
130474e91860SMichael Baum txq_data->qp_num_8s = txq_obj->sq_obj.sq->id << 8;
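/*
 * The SQ number is kept pre-shifted by 8 bits in qp_num_8s so the Tx
 * burst path can OR the per-WQE descriptor count into the low byte
 * when building control segments, avoiding a shift per packet (an
 * observation based on the shift above; the consumers of this field
 * live in mlx5_tx.h).
 */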
130586d259ceSMichael Baum /* Change Send Queue state to Ready-to-Send. */
1306686d05b6SXueming Li ret = mlx5_txq_devx_modify(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0);
130786d259ceSMichael Baum if (ret) {
130886d259ceSMichael Baum rte_errno = errno;
130986d259ceSMichael Baum DRV_LOG(ERR,
1310a9c79306SMichael Baum "Port %u Tx queue %u SQ state to SQC_STATE_RDY failed.",
131186d259ceSMichael Baum dev->data->port_id, idx);
131286d259ceSMichael Baum goto error;
131386d259ceSMichael Baum }
131486d259ceSMichael Baum #ifdef HAVE_IBV_FLOW_DV_SUPPORT
131586d259ceSMichael Baum /*
131686d259ceSMichael Baum * When using DevX, the TIS transport domain value must be queried
131786d259ceSMichael Baum * and stored. This is done once per port; the value is reused on
131886d259ceSMichael Baum * Rx when creating the matching TIR.
131986d259ceSMichael Baum */
132086d259ceSMichael Baum if (!priv->sh->tdn)
132186d259ceSMichael Baum priv->sh->tdn = priv->sh->td->id;
132286d259ceSMichael Baum #endif
132386d259ceSMichael Baum MLX5_ASSERT(sh->tx_uar);
132486d259ceSMichael Baum reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
132586d259ceSMichael Baum MLX5_ASSERT(reg_addr);
132686d259ceSMichael Baum txq_ctrl->bf_reg = reg_addr;
132786d259ceSMichael Baum txq_ctrl->uar_mmap_offset =
132886d259ceSMichael Baum mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
132986d259ceSMichael Baum txq_uar_init(txq_ctrl);
1330876b5d52SMatan Azrad dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
1331f49f4483SMichael Baum return 0;
133286d259ceSMichael Baum error:
133386d259ceSMichael Baum ret = rte_errno; /* Save rte_errno before cleanup. */
133488f2e3f1SMichael Baum mlx5_txq_release_devx_resources(txq_obj);
133586d259ceSMichael Baum rte_errno = ret; /* Restore rte_errno. */
1336f49f4483SMichael Baum return -rte_errno;
133786d259ceSMichael Baum #endif
133886d259ceSMichael Baum }
133986d259ceSMichael Baum
134086d259ceSMichael Baum /**
134186d259ceSMichael Baum * Release a Tx DevX queue object.
134286d259ceSMichael Baum *
134386d259ceSMichael Baum * @param txq_obj
134486d259ceSMichael Baum * DevX Tx queue object.
134586d259ceSMichael Baum */
134686d259ceSMichael Baum void
134786d259ceSMichael Baum mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj)
134886d259ceSMichael Baum {
134986d259ceSMichael Baum MLX5_ASSERT(txq_obj);
1350354cc08aSMichael Baum if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
135186d259ceSMichael Baum if (txq_obj->tis)
135286d259ceSMichael Baum claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
1353f1ae0b35SOphir Munk #if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
135486d259ceSMichael Baum } else {
135588f2e3f1SMichael Baum mlx5_txq_release_devx_resources(txq_obj);
135686d259ceSMichael Baum #endif
135786d259ceSMichael Baum }
135886d259ceSMichael Baum }
135986d259ceSMichael Baum
13608bb2410eSOphir Munk struct mlx5_obj_ops devx_obj_ops = {
13618bb2410eSOphir Munk .rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
13626deb19e1SMichael Baum .rxq_obj_new = mlx5_rxq_devx_obj_new,
136332287079SMichael Baum .rxq_event_get = mlx5_rx_devx_get_event,
1364c279f187SMichael Baum .rxq_obj_modify = mlx5_devx_modify_rq,
13656deb19e1SMichael Baum .rxq_obj_release = mlx5_rxq_devx_obj_release,
136625ae7f1aSMichael Baum .ind_table_new = mlx5_devx_ind_table_new,
1367fa7ad49eSAndrey Vesnovaty .ind_table_modify = mlx5_devx_ind_table_modify,
136825ae7f1aSMichael Baum .ind_table_destroy = mlx5_devx_ind_table_destroy,
136985552726SMichael Baum .hrxq_new = mlx5_devx_hrxq_new,
137085552726SMichael Baum .hrxq_destroy = mlx5_devx_tir_destroy,
1371b8cc58c1SAndrey Vesnovaty .hrxq_modify = mlx5_devx_hrxq_modify,
13720c762e81SMichael Baum .drop_action_create = mlx5_devx_drop_action_create,
13730c762e81SMichael Baum .drop_action_destroy = mlx5_devx_drop_action_destroy,
137486d259ceSMichael Baum .txq_obj_new = mlx5_txq_devx_obj_new,
1375686d05b6SXueming Li .txq_obj_modify = mlx5_txq_devx_modify,
137686d259ceSMichael Baum .txq_obj_release = mlx5_txq_devx_obj_release,
137723233fd6SBing Zhao .lb_dummy_queue_create = NULL,
137823233fd6SBing Zhao .lb_dummy_queue_release = NULL,
13798bb2410eSOphir Munk };
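/*
 * This table provides the DevX flavor of the generic queue object
 * operations. Selection between this table and the Verbs-based one is
 * assumed to happen once at device spawn, outside this file; the
 * lb_dummy_queue_* hooks are left NULL because the DevX flow defined
 * here supplies no loopback dummy queue implementation.
 */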