xref: /dpdk/drivers/net/mlx5/linux/mlx5_verbs.c (revision e12a0166c80f65e35408f4715b2f3a60763c3741)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/queue.h>

#include "mlx5_autoconf.h"

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <ethdev_driver.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_common.h>
#include <mlx5_common_mr.h>
#include <mlx5_verbs.h>
#include <mlx5_rx.h>
#include <mlx5_tx.h>
#include <mlx5_utils.h>
#include <mlx5_malloc.h>

/**
 * Modify Rx WQ VLAN stripping offload.
 *
 * @param rxq
 *   Rx queue.
 * @param on
 *   1 to enable VLAN stripping, 0 to disable it.
 *
 * @return 0 on success, non-zero otherwise
 */
static int
mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_priv *rxq, int on)
{
	uint16_t vlan_offloads =
		(on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |
		0;
	struct ibv_wq_attr mod;
	mod = (struct ibv_wq_attr){
		.attr_mask = IBV_WQ_ATTR_FLAGS,
		.flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
		.flags = vlan_offloads,
	};

	return mlx5_glue->modify_wq(rxq->ctrl->obj->wq, &mod);
}

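/*
 * Illustrative sketch (not compiled driver code): toggling VLAN stripping
 * on a started queue boils down to this single WQ attribute change. A
 * caller such as mlx5_vlan_strip_queue_set(), assuming the Verbs ops
 * table wires rxq_obj_modify_vlan_strip to the helper above, would do:
 *
 *	struct mlx5_priv *priv = dev->data->dev_private;
 *	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, queue_idx);
 *
 *	rxq->ctrl->rxq.vlan_strip = !!on;
 *	priv->obj_ops.rxq_obj_modify_vlan_strip(rxq, on);
 */
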
/**
 * Modifies the attributes for the specified WQ.
 *
 * @param rxq
 *   Verbs Rx queue.
 * @param type
 *   Type of queue state change.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ibv_modify_wq(struct mlx5_rxq_priv *rxq, uint8_t type)
{
	struct ibv_wq_attr mod = {
		.attr_mask = IBV_WQ_ATTR_STATE,
		.wq_state = (enum ibv_wq_state)type,
	};

	return mlx5_glue->modify_wq(rxq->ctrl->obj->wq, &mod);
}

/**
 * Modify QP using Verbs API.
 *
 * @param txq_obj
 *   Verbs Tx queue object.
 * @param type
 *   Type of queue state change.
 * @param dev_port
 *   IB device port number.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ibv_modify_qp(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
		   uint8_t dev_port)
{
	struct ibv_qp_attr mod = {
		.qp_state = IBV_QPS_RESET,
		.port_num = dev_port,
	};
	int ret;

	if (type != MLX5_TXQ_MOD_RST2RDY) {
		ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE);
		if (ret) {
			DRV_LOG(ERR, "Cannot change Tx QP state to RESET %s",
				strerror(errno));
			rte_errno = errno;
			return ret;
		}
		if (type == MLX5_TXQ_MOD_RDY2RST)
			return 0;
	}
	mod.qp_state = IBV_QPS_INIT;
	ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE | IBV_QP_PORT);
	if (ret) {
		DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s",
			strerror(errno));
		rte_errno = errno;
		return ret;
	}
	mod.qp_state = IBV_QPS_RTR;
	ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE);
	if (ret) {
		DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s",
			strerror(errno));
		rte_errno = errno;
		return ret;
	}
	mod.qp_state = IBV_QPS_RTS;
	ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE);
	if (ret) {
		DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s",
			strerror(errno));
		rte_errno = errno;
		return ret;
	}
	return 0;
}

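/*
 * Note on the helper above: a Verbs QP reaches the working state only by
 * walking the full RESET -> INIT -> RTR -> RTS ladder, one modify_qp()
 * call per hop. MLX5_TXQ_MOD_RDY2RST therefore stops after the first
 * transition, while MLX5_TXQ_MOD_RST2RDY skips the initial reset and
 * performs the three remaining hops. Illustrative trace for a queue
 * restart (RDY2RST followed by RST2RDY):
 *
 *	RTS --RESET--> RESET --INIT--> INIT --RTR--> RTR --RTS--> RTS
 */
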
/**
 * Create a CQ Verbs object.
 *
 * @param rxq
 *   Pointer to Rx queue.
 *
 * @return
 *   The Verbs CQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct ibv_cq *
mlx5_rxq_ibv_cq_create(struct mlx5_rxq_priv *rxq)
{
	struct mlx5_priv *priv = rxq->priv;
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
	struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
	struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
	struct {
		struct ibv_cq_init_attr_ex ibv;
		struct mlx5dv_cq_init_attr mlx5;
	} cq_attr;

	cq_attr.ibv = (struct ibv_cq_init_attr_ex){
		.cqe = cqe_n,
		.channel = rxq_obj->ibv_channel,
		.comp_mask = 0,
	};
	cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
		.comp_mask = 0,
	};
	if (priv->config.cqe_comp && !rxq_data->hw_timestamp) {
		cq_attr.mlx5.comp_mask |=
				MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
		rxq_data->byte_mask = UINT32_MAX;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
		if (mlx5_rxq_mprq_enabled(rxq_data)) {
			cq_attr.mlx5.cqe_comp_res_format =
					MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX;
			rxq_data->mcqe_format =
					MLX5_CQE_RESP_FORMAT_CSUM_STRIDX;
		} else {
			cq_attr.mlx5.cqe_comp_res_format =
					MLX5DV_CQE_RES_FORMAT_HASH;
			rxq_data->mcqe_format =
					MLX5_CQE_RESP_FORMAT_HASH;
		}
#else
		cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
		rxq_data->mcqe_format = MLX5_CQE_RESP_FORMAT_HASH;
#endif
		/*
		 * For vectorized Rx, it must not be doubled in order to
		 * make cq_ci and rq_ci aligned.
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cq_attr.ibv.cqe *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for HW"
			" timestamp.",
			priv->dev_data->port_id);
	}
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
	if (RTE_CACHE_LINE_SIZE == 128) {
		cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
		cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
	}
#endif
	return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq
							   (priv->sh->cdev->ctx,
							    &cq_attr.ibv,
							    &cq_attr.mlx5));
}

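/*
 * Sizing note for the CQ created above (illustrative numbers): when CQE
 * compression is on and the vectorized Rx path is not usable,
 * cq_attr.ibv.cqe is doubled, so a queue whose mlx5_rxq_cqe_num() comes
 * out at 512 asks the device for a 1024-entry CQ. The vectorized path
 * must keep the 1:1 ratio so that cq_ci and rq_ci advance in lockstep.
 */
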
/**
 * Create a WQ Verbs object.
 *
 * @param rxq
 *   Pointer to Rx queue.
 *
 * @return
 *   The Verbs WQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct ibv_wq *
mlx5_rxq_ibv_wq_create(struct mlx5_rxq_priv *rxq)
{
	struct mlx5_priv *priv = rxq->priv;
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
	struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
	struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
	unsigned int wqe_n = 1 << rxq_data->elts_n;
	struct {
		struct ibv_wq_init_attr ibv;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
		struct mlx5dv_wq_init_attr mlx5;
#endif
	} wq_attr;

	wq_attr.ibv = (struct ibv_wq_init_attr){
		.wq_context = NULL, /* Could be useful in the future. */
		.wq_type = IBV_WQT_RQ,
		/* Max number of outstanding WRs. */
		.max_wr = wqe_n >> rxq_data->sges_n,
		/* Max number of scatter/gather elements in a WR. */
		.max_sge = 1 << rxq_data->sges_n,
		.pd = priv->sh->cdev->pd,
		.cq = rxq_obj->ibv_cq,
		.comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
		.create_flags = (rxq_data->vlan_strip ?
				 IBV_WQ_FLAGS_CVLAN_STRIPPING : 0),
	};
	/* By default, FCS (CRC) is stripped by hardware. */
	if (rxq_data->crc_present) {
		wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
	}
	if (priv->config.hw_padding) {
#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
		wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
		wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
#endif
	}
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){
		.comp_mask = 0,
	};
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		struct mlx5dv_striding_rq_init_attr *mprq_attr =
						&wq_attr.mlx5.striding_rq_attrs;

		wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
		*mprq_attr = (struct mlx5dv_striding_rq_init_attr){
			.single_stride_log_num_of_bytes = rxq_data->log_strd_sz,
			.single_wqe_log_num_of_strides = rxq_data->log_strd_num,
			.two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
		};
	}
	rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->cdev->ctx, &wq_attr.ibv,
					      &wq_attr.mlx5);
#else
	rxq_obj->wq = mlx5_glue->create_wq(priv->sh->cdev->ctx, &wq_attr.ibv);
#endif
	if (rxq_obj->wq) {
		/*
		 * Make sure the number of WRs*SGEs matches expectations since
		 * a queue cannot allocate more than "desc" buffers.
		 */
		if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
		    wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) {
			DRV_LOG(ERR,
				"Port %u Rx queue %u requested %u*%u but got"
				" %u*%u WRs*SGEs.",
				priv->dev_data->port_id, rxq->idx,
				wqe_n >> rxq_data->sges_n,
				(1 << rxq_data->sges_n),
				wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
			claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
			rxq_obj->wq = NULL;
			rte_errno = EINVAL;
		}
	}
	return rxq_obj->wq;
}

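/*
 * Worked example for the WR/SGE split above (illustrative): with
 * elts_n = 9 (512 elements) and sges_n = 2, the WQ is requested with
 * max_wr = 512 >> 2 = 128 work requests of max_sge = 1 << 2 = 4 scatter
 * entries each, so WRs * SGEs still covers exactly the 512 "desc"
 * buffers; the post-create check rejects any WQ where the provider
 * adjusted these numbers.
 */
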
/**
 * Create the Rx queue Verbs object.
 *
 * @param rxq
 *   Pointer to Rx queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_ibv_obj_new(struct mlx5_rxq_priv *rxq)
{
	uint16_t idx = rxq->idx;
	struct mlx5_priv *priv = rxq->priv;
	uint16_t port_id = priv->dev_data->port_id;
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
	struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	struct mlx5dv_cq cq_info;
	struct mlx5dv_rwq rwq;
	int ret = 0;
	struct mlx5dv_obj obj;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	tmpl->rxq_ctrl = rxq_ctrl;
	if (rxq_ctrl->irq) {
		tmpl->ibv_channel =
			mlx5_glue->create_comp_channel(priv->sh->cdev->ctx);
		if (!tmpl->ibv_channel) {
			DRV_LOG(ERR, "Port %u: comp channel creation failure.",
				port_id);
			rte_errno = ENOMEM;
			goto error;
		}
		tmpl->fd = ((struct ibv_comp_channel *)(tmpl->ibv_channel))->fd;
	}
	/* Create CQ using Verbs API. */
	tmpl->ibv_cq = mlx5_rxq_ibv_cq_create(rxq);
	if (!tmpl->ibv_cq) {
		DRV_LOG(ERR, "Port %u Rx queue %u CQ creation failure.",
			port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	obj.cq.in = tmpl->ibv_cq;
	obj.cq.out = &cq_info;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
	if (ret) {
		rte_errno = ret;
		goto error;
	}
	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
		DRV_LOG(ERR,
			"Port %u wrong MLX5_CQE_SIZE environment "
			"variable value: it should be set to %u.",
			port_id, RTE_CACHE_LINE_SIZE);
		rte_errno = EINVAL;
		goto error;
	}
	/* Fill the rings. */
	rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
	rxq_data->cq_db = cq_info.dbrec;
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
	rxq_data->uar_data.db = RTE_PTR_ADD(cq_info.cq_uar, MLX5_CQ_DOORBELL);
#ifndef RTE_ARCH_64
	rxq_data->uar_data.sl_p = &priv->sh->uar_lock_cq;
#endif
	rxq_data->cqn = cq_info.cqn;
	/* Create WQ (RQ) using Verbs API. */
	tmpl->wq = mlx5_rxq_ibv_wq_create(rxq);
	if (!tmpl->wq) {
		DRV_LOG(ERR, "Port %u Rx queue %u WQ creation failure.",
			port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_ibv_modify_wq(rxq, IBV_WQS_RDY);
	if (ret) {
		DRV_LOG(ERR,
			"Port %u Rx queue %u WQ state to IBV_WQS_RDY failed.",
			port_id, idx);
		rte_errno = ret;
		goto error;
	}
	obj.rwq.in = tmpl->wq;
	obj.rwq.out = &rwq;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
	if (ret) {
		rte_errno = ret;
		goto error;
	}
	rxq_data->wqes = rwq.buf;
	rxq_data->rq_db = rwq.dbrec;
	rxq_data->cq_arm_sn = 0;
	mlx5_rxq_initialize(rxq_data);
	rxq_data->cq_ci = 0;
	priv->dev_data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	rxq_ctrl->wqn = ((struct ibv_wq *)(tmpl->wq))->wq_num;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (tmpl->wq)
		claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
	if (tmpl->ibv_cq)
		claim_zero(mlx5_glue->destroy_cq(tmpl->ibv_cq));
	if (tmpl->ibv_channel)
		claim_zero(mlx5_glue->destroy_comp_channel(tmpl->ibv_channel));
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

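/*
 * The dv_init_obj() calls above follow the standard rdma-core pattern for
 * exposing the raw rings behind Verbs objects. Minimal sketch of the same
 * pattern in plain mlx5dv (illustrative, bypassing the glue layer):
 *
 *	struct mlx5dv_cq cq_info;
 *	struct mlx5dv_obj obj = { .cq = { .in = ibv_cq, .out = &cq_info } };
 *	int ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ);
 *
 * On success, cq_info.buf and cq_info.dbrec address the CQE ring and its
 * doorbell record, which the data path then polls directly instead of
 * calling ibv_poll_cq().
 */
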
/**
 * Release an Rx verbs queue object.
 *
 * @param rxq
 *   Pointer to Rx queue.
 */
static void
mlx5_rxq_ibv_obj_release(struct mlx5_rxq_priv *rxq)
{
	struct mlx5_rxq_obj *rxq_obj = rxq->ctrl->obj;

	if (rxq_obj == NULL || rxq_obj->wq == NULL)
		return;
	claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
	rxq_obj->wq = NULL;
	MLX5_ASSERT(rxq_obj->ibv_cq);
	claim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));
	if (rxq_obj->ibv_channel)
		claim_zero(mlx5_glue->destroy_comp_channel
							(rxq_obj->ibv_channel));
	rxq->ctrl->started = false;
}

/**
 * Get event for an Rx verbs queue object.
 *
 * @param rxq_obj
 *   Verbs Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_ibv_get_event(struct mlx5_rxq_obj *rxq_obj)
{
	struct ibv_cq *ev_cq;
	void *ev_ctx;
	int ret = mlx5_glue->get_cq_event(rxq_obj->ibv_channel,
					  &ev_cq, &ev_ctx);

	if (ret < 0 || ev_cq != rxq_obj->ibv_cq)
		goto exit;
	mlx5_glue->ack_cq_events(rxq_obj->ibv_cq, 1);
	return 0;
exit:
	if (ret < 0)
		rte_errno = errno;
	else
		rte_errno = EINVAL;
	return -rte_errno;
}

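/*
 * Interrupt-mode usage of the helper above (a sketch): the Rx interrupt
 * handler waits on the completion channel fd stored in rxq_obj->fd, then
 * calls mlx5_rx_ibv_get_event() to consume and acknowledge the CQ event
 * before re-arming the CQ. Acking each event immediately, as done here,
 * matters because destroying a CQ with unacknowledged events hangs in
 * ibv_destroy_cq() per the ibv_get_cq_event() documentation.
 */
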
/**
 * Creates an indirection table referencing the specified receive work
 * queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param ind_tbl
 *   Verbs indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ibv_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
		       struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_wq *wq[1 << log_n];
	unsigned int i, j;

	MLX5_ASSERT(ind_tbl);
	for (i = 0; i != ind_tbl->queues_n; ++i) {
		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev,
							 ind_tbl->queues[i]);

		wq[i] = rxq->ctrl->obj->wq;
	}
	MLX5_ASSERT(i > 0);
	/* Finalise indirection table. */
	for (j = 0; i != (unsigned int)(1 << log_n); ++j, ++i)
		wq[i] = wq[j];
	ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
					(priv->sh->cdev->ctx,
					 &(struct ibv_rwq_ind_table_init_attr){
						 .log_ind_tbl_size = log_n,
						 .ind_tbl = wq,
						 .comp_mask = 0,
					 });
	if (!ind_tbl->ind_table) {
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

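/*
 * Worked example for the table padding above: the Verbs indirection table
 * must have a power-of-two size, so with queues_n = 6 and log_n = 3 the
 * 8-entry table ends with wq[6] = wq[0] and wq[7] = wq[1]. RSS spreading
 * over a non-power-of-two queue set therefore still fills every slot, at
 * the cost of queues 0 and 1 appearing twice.
 */
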
/**
 * Destroys the specified Indirection Table.
 *
 * @param ind_table
 *   Indirection table to release.
 */
static void
mlx5_ibv_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
	claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
}

/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Pointer to Rx Hash queue.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ibv_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		  int tunnel __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_qp *qp = NULL;
	struct mlx5_ind_table_obj *ind_tbl = hrxq->ind_table;
	const uint8_t *rss_key = hrxq->rss_key;
	uint64_t hash_fields = hrxq->hash_fields;
	int err;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	struct mlx5dv_qp_init_attr qp_init_attr;

	memset(&qp_init_attr, 0, sizeof(qp_init_attr));
	if (tunnel) {
		qp_init_attr.comp_mask =
				       MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
		qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
	}
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	if (dev->data->dev_conf.lpbk_mode) {
		/* Allow packet sent from NIC loop back w/o source MAC check. */
		qp_init_attr.comp_mask |=
				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
		qp_init_attr.create_flags |=
				MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
	}
#endif
	qp = mlx5_glue->dv_create_qp
			(priv->sh->cdev->ctx,
			 &(struct ibv_qp_init_attr_ex){
				.qp_type = IBV_QPT_RAW_PACKET,
				.comp_mask =
					IBV_QP_INIT_ATTR_PD |
					IBV_QP_INIT_ATTR_IND_TABLE |
					IBV_QP_INIT_ATTR_RX_HASH,
				.rx_hash_conf = (struct ibv_rx_hash_conf){
					.rx_hash_function =
						IBV_RX_HASH_FUNC_TOEPLITZ,
					.rx_hash_key_len = hrxq->rss_key_len,
					.rx_hash_key =
						(void *)(uintptr_t)rss_key,
					.rx_hash_fields_mask = hash_fields,
				},
				.rwq_ind_tbl = ind_tbl->ind_table,
				.pd = priv->sh->cdev->pd,
			  },
			  &qp_init_attr);
#else
	qp = mlx5_glue->create_qp_ex
			(priv->sh->cdev->ctx,
			 &(struct ibv_qp_init_attr_ex){
				.qp_type = IBV_QPT_RAW_PACKET,
				.comp_mask =
					IBV_QP_INIT_ATTR_PD |
					IBV_QP_INIT_ATTR_IND_TABLE |
					IBV_QP_INIT_ATTR_RX_HASH,
				.rx_hash_conf = (struct ibv_rx_hash_conf){
					.rx_hash_function =
						IBV_RX_HASH_FUNC_TOEPLITZ,
					.rx_hash_key_len = hrxq->rss_key_len,
					.rx_hash_key =
						(void *)(uintptr_t)rss_key,
					.rx_hash_fields_mask = hash_fields,
				},
				.rwq_ind_tbl = ind_tbl->ind_table,
				.pd = priv->sh->cdev->pd,
			 });
#endif
	if (!qp) {
		rte_errno = errno;
		goto error;
	}
	hrxq->qp = qp;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
	if (!hrxq->action) {
		rte_errno = errno;
		goto error;
	}
#endif
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	if (qp)
		claim_zero(mlx5_glue->destroy_qp(qp));
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

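/*
 * Sketch of the inputs the hash QP above receives (illustrative; the
 * actual values come from the RSS/flow layer): for an IPv4/TCP RSS rule,
 * hash_fields would carry IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
 * IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP, and rss_key is the
 * application-supplied Toeplitz key (rss_hash_default_key of
 * MLX5_RSS_HASH_KEY_LEN bytes when none is given).
 */
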
/**
 * Destroy a Verbs queue pair.
 *
 * @param hrxq
 *   Hash Rx queue to release its qp.
 */
static void
mlx5_ibv_qp_destroy(struct mlx5_hrxq *hrxq)
{
	claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
}

/**
 * Release a drop Rx queue Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_rxq_ibv_obj_drop_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;
	struct mlx5_rxq_obj *rxq_obj;

	if (rxq == NULL)
		return;
	if (rxq->ctrl == NULL)
		goto free_priv;
	rxq_obj = rxq->ctrl->obj;
	if (rxq_obj == NULL)
		goto free_ctrl;
	if (rxq_obj->wq)
		claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
	if (rxq_obj->ibv_cq)
		claim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));
	mlx5_free(rxq_obj);
free_ctrl:
	mlx5_free(rxq->ctrl);
free_priv:
	mlx5_free(rxq);
	priv->drop_queue.rxq = NULL;
}

/**
 * Create a drop Rx queue Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_ibv_obj_drop_create(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->cdev->ctx;
	struct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;
	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
	struct mlx5_rxq_obj *rxq_obj = NULL;

	if (rxq != NULL)
		return 0;
	rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, SOCKET_ID_ANY);
	if (rxq == NULL) {
		DRV_LOG(DEBUG, "Port %u cannot allocate drop Rx queue memory.",
		      dev->data->port_id);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	priv->drop_queue.rxq = rxq;
	rxq_ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_ctrl), 0,
			       SOCKET_ID_ANY);
	if (rxq_ctrl == NULL) {
		DRV_LOG(DEBUG, "Port %u cannot allocate drop Rx queue control memory.",
		      dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	rxq->ctrl = rxq_ctrl;
	rxq_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_obj), 0,
			      SOCKET_ID_ANY);
	if (rxq_obj == NULL) {
		DRV_LOG(DEBUG, "Port %u cannot allocate drop Rx queue memory.",
		      dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	rxq_ctrl->obj = rxq_obj;
	rxq_obj->ibv_cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
	if (!rxq_obj->ibv_cq) {
		DRV_LOG(DEBUG, "Port %u cannot allocate CQ for drop queue.",
		      dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
	rxq_obj->wq = mlx5_glue->create_wq(ctx, &(struct ibv_wq_init_attr){
						    .wq_type = IBV_WQT_RQ,
						    .max_wr = 1,
						    .max_sge = 1,
						    .pd = priv->sh->cdev->pd,
						    .cq = rxq_obj->ibv_cq,
					      });
	if (!rxq_obj->wq) {
		DRV_LOG(DEBUG, "Port %u cannot allocate WQ for drop queue.",
		      dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
	return 0;
error:
	mlx5_rxq_ibv_obj_drop_release(dev);
	return -rte_errno;
}

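/*
 * The drop queue created above is intentionally minimal: a single-CQE CQ
 * and a WQ of one WR with one SGE, never posted to and never polled. It
 * exists only so that the drop hash QP built below has a valid WQ for its
 * single-entry indirection table; packets steered to it are discarded by
 * the device since no receive buffers are ever posted.
 */
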
/**
 * Create a Verbs drop action for Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ibv_drop_action_create(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
	struct ibv_rwq_ind_table *ind_tbl = NULL;
	struct mlx5_rxq_obj *rxq;
	int ret;

	MLX5_ASSERT(hrxq && hrxq->ind_table);
	ret = mlx5_rxq_ibv_obj_drop_create(dev);
	if (ret < 0)
		goto error;
	rxq = priv->drop_queue.rxq->ctrl->obj;
	ind_tbl = mlx5_glue->create_rwq_ind_table
				(priv->sh->cdev->ctx,
				 &(struct ibv_rwq_ind_table_init_attr){
					.log_ind_tbl_size = 0,
					.ind_tbl = (struct ibv_wq **)&rxq->wq,
					.comp_mask = 0,
				 });
	if (!ind_tbl) {
		DRV_LOG(DEBUG, "Port %u"
			" cannot allocate indirection table for drop queue.",
			dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
	hrxq->qp = mlx5_glue->create_qp_ex(priv->sh->cdev->ctx,
		 &(struct ibv_qp_init_attr_ex){
			.qp_type = IBV_QPT_RAW_PACKET,
			.comp_mask = IBV_QP_INIT_ATTR_PD |
				     IBV_QP_INIT_ATTR_IND_TABLE |
				     IBV_QP_INIT_ATTR_RX_HASH,
			.rx_hash_conf = (struct ibv_rx_hash_conf){
				.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
				.rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
				.rx_hash_key = rss_hash_default_key,
				.rx_hash_fields_mask = 0,
				},
			.rwq_ind_tbl = ind_tbl,
			.pd = priv->sh->cdev->pd
		 });
	if (!hrxq->qp) {
		DRV_LOG(DEBUG, "Port %u cannot allocate QP for drop queue.",
		      dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
	if (!hrxq->action) {
		rte_errno = errno;
		goto error;
	}
#endif
	hrxq->ind_table->ind_table = ind_tbl;
	return 0;
error:
	if (hrxq->qp)
		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
	if (ind_tbl)
		claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl));
	if (priv->drop_queue.rxq)
		mlx5_rxq_ibv_obj_drop_release(dev);
	return -rte_errno;
}

/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_ibv_drop_action_destroy(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
	struct ibv_rwq_ind_table *ind_tbl = hrxq->ind_table->ind_table;

#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	claim_zero(mlx5_glue->destroy_flow_action(hrxq->action));
#endif
	claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
	claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl));
	mlx5_rxq_ibv_obj_drop_release(dev);
}

/**
 * Create a QP Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   The QP Verbs object, NULL otherwise and rte_errno is set.
 */
static struct ibv_qp *
mlx5_txq_ibv_qp_create(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct ibv_qp *qp_obj = NULL;
	struct ibv_qp_init_attr_ex qp_attr = { 0 };
	const int desc = 1 << txq_data->elts_n;

	MLX5_ASSERT(txq_ctrl->obj->cq);
	/* CQ to be associated with the send queue. */
	qp_attr.send_cq = txq_ctrl->obj->cq;
	/* CQ to be associated with the receive queue. */
	qp_attr.recv_cq = txq_ctrl->obj->cq;
	/* Max number of outstanding WRs. */
	qp_attr.cap.max_send_wr = RTE_MIN(priv->sh->dev_cap.max_qp_wr, desc);
	/*
	 * Max number of scatter/gather elements in a WR, must be 1 to prevent
	 * libmlx5 from trying to affect too much memory. TX gather is not
	 * impacted by the dev_cap.max_sge limit and will still work properly.
	 */
	qp_attr.cap.max_send_sge = 1;
	qp_attr.qp_type = IBV_QPT_RAW_PACKET;
	/* Do *NOT* enable this, completion events are managed per Tx burst. */
	qp_attr.sq_sig_all = 0;
	qp_attr.pd = priv->sh->cdev->pd;
	qp_attr.comp_mask = IBV_QP_INIT_ATTR_PD;
	if (txq_data->inlen_send)
		qp_attr.cap.max_inline_data = txq_ctrl->max_inline_data;
	if (txq_data->tso_en) {
		qp_attr.max_tso_header = txq_ctrl->max_tso_header;
		qp_attr.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
	}
	qp_obj = mlx5_glue->create_qp_ex(priv->sh->cdev->ctx, &qp_attr);
	if (qp_obj == NULL) {
		DRV_LOG(ERR, "Port %u Tx queue %u QP creation failure.",
			dev->data->port_id, idx);
		rte_errno = errno;
	}
	return qp_obj;
}

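/*
 * Sizing note for the Tx QP above (illustrative numbers): with
 * elts_n = 10 the queue requests 1024 send WRs, clamped by RTE_MIN() to
 * dev_cap.max_qp_wr when the device advertises less. max_send_sge stays
 * at 1 because the PMD composes multi-segment WQEs itself on the data
 * path instead of going through ibv_post_send(), so the Verbs SGE limit
 * never applies to Tx gather.
 */
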
/**
 * Initialize Tx UAR registers for primary process.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 * @param bf_reg
 *   BlueFlame register from Verbs UAR.
 */
static void
mlx5_txq_ibv_uar_init(struct mlx5_txq_ctrl *txq_ctrl, void *bf_reg)
{
	struct mlx5_priv *priv = txq_ctrl->priv;
	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
	const size_t page_size = rte_mem_page_size();
	struct mlx5_txq_data *txq = &txq_ctrl->txq;
	off_t uar_mmap_offset = txq_ctrl->uar_mmap_offset;
#ifndef RTE_ARCH_64
	unsigned int lock_idx;
#endif

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	MLX5_ASSERT(ppriv);
	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
	}
	txq->db_heu = priv->sh->cdev->config.dbnc == MLX5_SQ_DB_HEURISTIC;
	txq->db_nc = mlx5_db_map_type_get(uar_mmap_offset, page_size);
	ppriv->uar_table[txq->idx].db = bf_reg;
#ifndef RTE_ARCH_64
	/* Assign an UAR lock according to UAR page number. */
	lock_idx = (uar_mmap_offset / page_size) & MLX5_UAR_PAGE_NUM_MASK;
	ppriv->uar_table[txq->idx].sl_p = &priv->sh->uar_lock[lock_idx];
#endif
}

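/*
 * Example of the 32-bit lock assignment above (a sketch, assuming 4 KiB
 * pages): a queue with uar_mmap_offset = 0x6000 lands on UAR page 6, so
 * lock_idx = 6 & MLX5_UAR_PAGE_NUM_MASK. Every queue mapped to that UAR
 * page shares the same sh->uar_lock entry, serializing the 64-bit
 * doorbell writes that 32-bit targets cannot issue as one atomic store.
 */
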
9365dfa003dSMichael Baum /**
93786d259ceSMichael Baum  * Create the Tx queue Verbs object.
93886d259ceSMichael Baum  *
93986d259ceSMichael Baum  * @param dev
94086d259ceSMichael Baum  *   Pointer to Ethernet device.
94186d259ceSMichael Baum  * @param idx
94286d259ceSMichael Baum  *   Queue index in DPDK Tx queue array.
94386d259ceSMichael Baum  *
94486d259ceSMichael Baum  * @return
945f49f4483SMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
94686d259ceSMichael Baum  */
947f49f4483SMichael Baum int
mlx5_txq_ibv_obj_new(struct rte_eth_dev * dev,uint16_t idx)94886d259ceSMichael Baum mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
94986d259ceSMichael Baum {
95086d259ceSMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
95186d259ceSMichael Baum 	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
95286d259ceSMichael Baum 	struct mlx5_txq_ctrl *txq_ctrl =
95386d259ceSMichael Baum 		container_of(txq_data, struct mlx5_txq_ctrl, txq);
954f49f4483SMichael Baum 	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
95586d259ceSMichael Baum 	unsigned int cqe_n;
95686d259ceSMichael Baum 	struct mlx5dv_qp qp;
95786d259ceSMichael Baum 	struct mlx5dv_cq cq_info;
95886d259ceSMichael Baum 	struct mlx5dv_obj obj;
95986d259ceSMichael Baum 	const int desc = 1 << txq_data->elts_n;
96086d259ceSMichael Baum 	int ret = 0;
96186d259ceSMichael Baum 
96286d259ceSMichael Baum 	MLX5_ASSERT(txq_data);
963f49f4483SMichael Baum 	MLX5_ASSERT(txq_obj);
964f49f4483SMichael Baum 	txq_obj->txq_ctrl = txq_ctrl;
96586d259ceSMichael Baum 	if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
96686d259ceSMichael Baum 		DRV_LOG(ERR, "Port %u MLX5_ENABLE_CQE_COMPRESSION "
96786d259ceSMichael Baum 			"must never be set.", dev->data->port_id);
96886d259ceSMichael Baum 		rte_errno = EINVAL;
969f49f4483SMichael Baum 		return -rte_errno;
97086d259ceSMichael Baum 	}
9718fa8d147SViacheslav Ovsiienko 	if (__rte_trace_point_fp_is_enabled() &&
9728fa8d147SViacheslav Ovsiienko 	    txq_data->offloads & RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP)
9738fa8d147SViacheslav Ovsiienko 		cqe_n = UINT16_MAX / 2 - 1;
9748fa8d147SViacheslav Ovsiienko 	else
97586d259ceSMichael Baum 		cqe_n = desc / MLX5_TX_COMP_THRESH +
97686d259ceSMichael Baum 			1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
977ca1418ceSMichael Baum 	txq_obj->cq = mlx5_glue->create_cq(priv->sh->cdev->ctx, cqe_n,
978ca1418ceSMichael Baum 					   NULL, NULL, 0);
979f49f4483SMichael Baum 	if (txq_obj->cq == NULL) {
98086d259ceSMichael Baum 		DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
98186d259ceSMichael Baum 			dev->data->port_id, idx);
98286d259ceSMichael Baum 		rte_errno = errno;
98386d259ceSMichael Baum 		goto error;
98486d259ceSMichael Baum 	}
985e8390b3dSMichael Baum 	txq_obj->qp = mlx5_txq_ibv_qp_create(dev, idx);
986f49f4483SMichael Baum 	if (txq_obj->qp == NULL) {
98786d259ceSMichael Baum 		rte_errno = errno;
98886d259ceSMichael Baum 		goto error;
98986d259ceSMichael Baum 	}
990a9c79306SMichael Baum 	ret = mlx5_ibv_modify_qp(txq_obj, MLX5_TXQ_MOD_RST2RDY,
991a9c79306SMichael Baum 				 (uint8_t)priv->dev_port);
99286d259ceSMichael Baum 	if (ret) {
993a9c79306SMichael Baum 		DRV_LOG(ERR, "Port %u Tx queue %u QP state modifying failed.",
99486d259ceSMichael Baum 			dev->data->port_id, idx);
99586d259ceSMichael Baum 		rte_errno = errno;
99686d259ceSMichael Baum 		goto error;
99786d259ceSMichael Baum 	}
99886d259ceSMichael Baum 	qp.comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET;
99986d259ceSMichael Baum #ifdef HAVE_IBV_FLOW_DV_SUPPORT
100086d259ceSMichael Baum 	/* If using DevX, need additional mask to read tisn value. */
10016dc0cbc6SMichael Baum 	if (priv->sh->cdev->config.devx && !priv->sh->tdn)
100286d259ceSMichael Baum 		qp.comp_mask |= MLX5DV_QP_MASK_RAW_QP_HANDLES;
100386d259ceSMichael Baum #endif
1004f49f4483SMichael Baum 	obj.cq.in = txq_obj->cq;
100586d259ceSMichael Baum 	obj.cq.out = &cq_info;
1006f49f4483SMichael Baum 	obj.qp.in = txq_obj->qp;
100786d259ceSMichael Baum 	obj.qp.out = &qp;
100886d259ceSMichael Baum 	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
100986d259ceSMichael Baum 	if (ret != 0) {
101086d259ceSMichael Baum 		rte_errno = errno;
101186d259ceSMichael Baum 		goto error;
101286d259ceSMichael Baum 	}
101386d259ceSMichael Baum 	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
101486d259ceSMichael Baum 		DRV_LOG(ERR,
101586d259ceSMichael Baum 			"Port %u wrong MLX5_CQE_SIZE environment variable"
101686d259ceSMichael Baum 			" value: it should be set to %u.",
101786d259ceSMichael Baum 			dev->data->port_id, RTE_CACHE_LINE_SIZE);
101886d259ceSMichael Baum 		rte_errno = EINVAL;
101986d259ceSMichael Baum 		goto error;
102086d259ceSMichael Baum 	}
102186d259ceSMichael Baum 	txq_data->cqe_n = log2above(cq_info.cqe_cnt);
102286d259ceSMichael Baum 	txq_data->cqe_s = 1 << txq_data->cqe_n;
102386d259ceSMichael Baum 	txq_data->cqe_m = txq_data->cqe_s - 1;
1024f49f4483SMichael Baum 	txq_data->qp_num_8s = ((struct ibv_qp *)txq_obj->qp)->qp_num << 8;
102586d259ceSMichael Baum 	txq_data->wqes = qp.sq.buf;
102686d259ceSMichael Baum 	txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
102786d259ceSMichael Baum 	txq_data->wqe_s = 1 << txq_data->wqe_n;
102886d259ceSMichael Baum 	txq_data->wqe_m = txq_data->wqe_s - 1;
102986d259ceSMichael Baum 	txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
103086d259ceSMichael Baum 	txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
103186d259ceSMichael Baum 	txq_data->cq_db = cq_info.dbrec;
103286d259ceSMichael Baum 	txq_data->cqes = (volatile struct mlx5_cqe *)cq_info.buf;
103386d259ceSMichael Baum 	txq_data->cq_ci = 0;
103486d259ceSMichael Baum 	txq_data->cq_pi = 0;
103586d259ceSMichael Baum 	txq_data->wqe_ci = 0;
103686d259ceSMichael Baum 	txq_data->wqe_pi = 0;
103786d259ceSMichael Baum 	txq_data->wqe_comp = 0;
103886d259ceSMichael Baum 	txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
10392f5122dfSViacheslav Ovsiienko 	txq_data->wait_on_time = !!(!priv->sh->config.tx_pp &&
10402f5122dfSViacheslav Ovsiienko 				 priv->sh->cdev->config.hca_attr.wait_on_time &&
10412f5122dfSViacheslav Ovsiienko 				 txq_data->offloads &
10422f5122dfSViacheslav Ovsiienko 				 RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP);
104386d259ceSMichael Baum #ifdef HAVE_IBV_FLOW_DV_SUPPORT
104486d259ceSMichael Baum 	/*
104586d259ceSMichael Baum 	 * If using DevX need to query and store TIS transport domain value.
104686d259ceSMichael Baum 	 * This is done once per port.
104786d259ceSMichael Baum 	 * Will use this value on Rx, when creating matching TIR.
104886d259ceSMichael Baum 	 */
10496dc0cbc6SMichael Baum 	if (priv->sh->cdev->config.devx && !priv->sh->tdn) {
1050f49f4483SMichael Baum 		ret = mlx5_devx_cmd_qp_query_tis_td(txq_obj->qp, qp.tisn,
105186d259ceSMichael Baum 						    &priv->sh->tdn);
105286d259ceSMichael Baum 		if (ret) {
105386d259ceSMichael Baum 			DRV_LOG(ERR, "Fail to query port %u Tx queue %u QP TIS "
105486d259ceSMichael Baum 				"transport domain.", dev->data->port_id, idx);
105586d259ceSMichael Baum 			rte_errno = EINVAL;
105686d259ceSMichael Baum 			goto error;
105786d259ceSMichael Baum 		} else {
105886d259ceSMichael Baum 			DRV_LOG(DEBUG, "Port %u Tx queue %u TIS number %d "
105986d259ceSMichael Baum 				"transport domain %d.", dev->data->port_id,
106086d259ceSMichael Baum 				idx, qp.tisn, priv->sh->tdn);
106186d259ceSMichael Baum 		}
106286d259ceSMichael Baum 	}
106386d259ceSMichael Baum #endif
106486d259ceSMichael Baum 	if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
106586d259ceSMichael Baum 		txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
106686d259ceSMichael Baum 		DRV_LOG(DEBUG, "Port %u: uar_mmap_offset 0x%" PRIx64 ".",
106786d259ceSMichael Baum 			dev->data->port_id, txq_ctrl->uar_mmap_offset);
106886d259ceSMichael Baum 	} else {
106986d259ceSMichael Baum 		DRV_LOG(ERR,
1070b6e9c33cSMichael Baum 			"Port %u failed to retrieve UAR info, invalid libmlx5.so",
107186d259ceSMichael Baum 			dev->data->port_id);
107286d259ceSMichael Baum 		rte_errno = EINVAL;
107386d259ceSMichael Baum 		goto error;
107486d259ceSMichael Baum 	}
10755dfa003dSMichael Baum 	mlx5_txq_ibv_uar_init(txq_ctrl, qp.bf.reg);
1076876b5d52SMatan Azrad 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
1077f49f4483SMichael Baum 	return 0;
107886d259ceSMichael Baum error:
107986d259ceSMichael Baum 	ret = rte_errno; /* Save rte_errno before cleanup. */
1080f49f4483SMichael Baum 	if (txq_obj->cq)
1081f49f4483SMichael Baum 		claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
1082f49f4483SMichael Baum 	if (txq_obj->qp)
1083f49f4483SMichael Baum 		claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
108486d259ceSMichael Baum 	rte_errno = ret; /* Restore rte_errno. */
1085f49f4483SMichael Baum 	return -rte_errno;
108686d259ceSMichael Baum }
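/*
 * Illustrative sketch (not part of the driver; the example_* names are
 * hypothetical): the error path above follows the usual DPDK idiom of
 * saving rte_errno before the cleanup calls and restoring it afterwards,
 * because the destroy verbs may clobber errno themselves. Its minimal
 * shape:
 *
 *	static int
 *	example_create(struct example_obj *obj)
 *	{
 *		int ret;
 *
 *		obj->cq = example_cq_create();
 *		if (obj->cq == NULL) {
 *			rte_errno = errno;	// propagate the failure code
 *			goto error;
 *		}
 *		return 0;
 *	error:
 *		ret = rte_errno;	// save before cleanup can clobber it
 *		if (obj->cq != NULL)
 *			claim_zero(example_cq_destroy(obj->cq));
 *		rte_errno = ret;	// restore for the caller
 *		return -rte_errno;	// negative errno convention
 *	}
 */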
108786d259ceSMichael Baum 
108823233fd6SBing Zhao /**
108923233fd6SBing Zhao  * Create the dummy QP with minimal resources for loopback.
109023233fd6SBing Zhao  *
109123233fd6SBing Zhao  * @param dev
109223233fd6SBing Zhao  *   Pointer to Ethernet device.
109323233fd6SBing Zhao  *
109423233fd6SBing Zhao  * @return
109523233fd6SBing Zhao  *   0 on success, a negative errno value otherwise and rte_errno is set.
109623233fd6SBing Zhao  */
109723233fd6SBing Zhao int
109823233fd6SBing Zhao mlx5_rxq_ibv_obj_dummy_lb_create(struct rte_eth_dev *dev)
109923233fd6SBing Zhao {
110023233fd6SBing Zhao #if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)
110123233fd6SBing Zhao 	struct mlx5_priv *priv = dev->data->dev_private;
110223233fd6SBing Zhao 	struct mlx5_dev_ctx_shared *sh = priv->sh;
1103ca1418ceSMichael Baum 	struct ibv_context *ctx = sh->cdev->ctx;
110423233fd6SBing Zhao 	struct mlx5dv_qp_init_attr qp_init_attr = {0};
110523233fd6SBing Zhao 	struct {
110623233fd6SBing Zhao 		struct ibv_cq_init_attr_ex ibv;
110723233fd6SBing Zhao 		struct mlx5dv_cq_init_attr mlx5;
110823233fd6SBing Zhao 	} cq_attr = {{0}};
110923233fd6SBing Zhao 
111023233fd6SBing Zhao 	if (dev->data->dev_conf.lpbk_mode) {
111123233fd6SBing Zhao 	/* Allow packets sent from NIC to loop back w/o source MAC check. */
111223233fd6SBing Zhao 		qp_init_attr.comp_mask |=
111323233fd6SBing Zhao 				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
111423233fd6SBing Zhao 		qp_init_attr.create_flags |=
111523233fd6SBing Zhao 				MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
111623233fd6SBing Zhao 	} else {
111723233fd6SBing Zhao 		return 0;
111823233fd6SBing Zhao 	}
111923233fd6SBing Zhao 	/* Only check the refcnt; it is 0 right after "sh" is allocated. */
1120*e12a0166STyler Retzlaff 	if (!!(rte_atomic_fetch_add_explicit(&sh->self_lb.refcnt, 1, rte_memory_order_relaxed))) {
112123233fd6SBing Zhao 		MLX5_ASSERT(sh->self_lb.ibv_cq && sh->self_lb.qp);
112223233fd6SBing Zhao 		priv->lb_used = 1;
112323233fd6SBing Zhao 		return 0;
112423233fd6SBing Zhao 	}
112523233fd6SBing Zhao 	cq_attr.ibv = (struct ibv_cq_init_attr_ex){
112623233fd6SBing Zhao 		.cqe = 1,
112723233fd6SBing Zhao 		.channel = NULL,
112823233fd6SBing Zhao 		.comp_mask = 0,
112923233fd6SBing Zhao 	};
113023233fd6SBing Zhao 	cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
113123233fd6SBing Zhao 		.comp_mask = 0,
113223233fd6SBing Zhao 	};
113323233fd6SBing Zhao 	/* Only CQ is needed, no WQ(RQ) is required in this case. */
113423233fd6SBing Zhao 	sh->self_lb.ibv_cq = mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(ctx,
113523233fd6SBing Zhao 							&cq_attr.ibv,
113623233fd6SBing Zhao 							&cq_attr.mlx5));
113723233fd6SBing Zhao 	if (!sh->self_lb.ibv_cq) {
113823233fd6SBing Zhao 		DRV_LOG(ERR, "Port %u cannot allocate CQ for loopback.",
113923233fd6SBing Zhao 			dev->data->port_id);
114023233fd6SBing Zhao 		rte_errno = errno;
114123233fd6SBing Zhao 		goto error;
114223233fd6SBing Zhao 	}
114323233fd6SBing Zhao 	sh->self_lb.qp = mlx5_glue->dv_create_qp(ctx,
114423233fd6SBing Zhao 				&(struct ibv_qp_init_attr_ex){
114523233fd6SBing Zhao 					.qp_type = IBV_QPT_RAW_PACKET,
114623233fd6SBing Zhao 					.comp_mask = IBV_QP_INIT_ATTR_PD,
1147e35ccf24SMichael Baum 					.pd = sh->cdev->pd,
114823233fd6SBing Zhao 					.send_cq = sh->self_lb.ibv_cq,
114923233fd6SBing Zhao 					.recv_cq = sh->self_lb.ibv_cq,
115023233fd6SBing Zhao 					.cap.max_recv_wr = 1,
115123233fd6SBing Zhao 				},
115223233fd6SBing Zhao 				&qp_init_attr);
115323233fd6SBing Zhao 	if (!sh->self_lb.qp) {
115423233fd6SBing Zhao 		DRV_LOG(ERR, "Port %u cannot allocate QP for loopback.",
115523233fd6SBing Zhao 			dev->data->port_id);
115623233fd6SBing Zhao 		rte_errno = errno;
115723233fd6SBing Zhao 		goto error;
115823233fd6SBing Zhao 	}
115923233fd6SBing Zhao 	priv->lb_used = 1;
116023233fd6SBing Zhao 	return 0;
116123233fd6SBing Zhao error:
116223233fd6SBing Zhao 	if (sh->self_lb.ibv_cq) {
116323233fd6SBing Zhao 		claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
116423233fd6SBing Zhao 		sh->self_lb.ibv_cq = NULL;
116523233fd6SBing Zhao 	}
1166*e12a0166STyler Retzlaff 	rte_atomic_fetch_sub_explicit(&sh->self_lb.refcnt, 1, rte_memory_order_relaxed);
116723233fd6SBing Zhao 	return -rte_errno;
116823233fd6SBing Zhao #else
116923233fd6SBing Zhao 	RTE_SET_USED(dev);
117023233fd6SBing Zhao 	return 0;
117123233fd6SBing Zhao #endif
117223233fd6SBing Zhao }
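/*
 * Illustrative sketch (the shared_* names are hypothetical): the refcnt
 * handling above is a "first caller creates, later callers reuse" pattern
 * for resources shared by all ports on one context:
 *
 *	static int
 *	shared_get(struct shared_res *res)
 *	{
 *		// Only the caller that moves the counter from 0 to 1
 *		// actually allocates the shared CQ/QP pair.
 *		if (rte_atomic_fetch_add_explicit(&res->refcnt, 1,
 *				rte_memory_order_relaxed) != 0)
 *			return 0;	// already created, just reuse
 *		return shared_alloc(res);
 *	}
 *
 * Relaxed ordering is presumably sufficient only because ethdev
 * configuration calls are serialized externally; a concurrent control
 * path would need acquire/release semantics here.
 */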
117323233fd6SBing Zhao 
117423233fd6SBing Zhao /**
117523233fd6SBing Zhao  * Release the dummy queue resources for loopback.
117623233fd6SBing Zhao  *
117723233fd6SBing Zhao  * @param dev
117823233fd6SBing Zhao  *   Pointer to Ethernet device.
117923233fd6SBing Zhao  */
118023233fd6SBing Zhao void
118123233fd6SBing Zhao mlx5_rxq_ibv_obj_dummy_lb_release(struct rte_eth_dev *dev)
118223233fd6SBing Zhao {
118323233fd6SBing Zhao #if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)
118423233fd6SBing Zhao 	struct mlx5_priv *priv = dev->data->dev_private;
118523233fd6SBing Zhao 	struct mlx5_dev_ctx_shared *sh = priv->sh;
118623233fd6SBing Zhao 
118723233fd6SBing Zhao 	if (!priv->lb_used)
118823233fd6SBing Zhao 		return;
1189*e12a0166STyler Retzlaff 	MLX5_ASSERT(rte_atomic_load_explicit(&sh->self_lb.refcnt, rte_memory_order_relaxed));
1190*e12a0166STyler Retzlaff 	if (!(rte_atomic_fetch_sub_explicit(&sh->self_lb.refcnt, 1,
1191*e12a0166STyler Retzlaff 			rte_memory_order_relaxed) - 1)) {
119223233fd6SBing Zhao 		if (sh->self_lb.qp) {
119323233fd6SBing Zhao 			claim_zero(mlx5_glue->destroy_qp(sh->self_lb.qp));
119423233fd6SBing Zhao 			sh->self_lb.qp = NULL;
119523233fd6SBing Zhao 		}
119623233fd6SBing Zhao 		if (sh->self_lb.ibv_cq) {
119723233fd6SBing Zhao 			claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
119823233fd6SBing Zhao 			sh->self_lb.ibv_cq = NULL;
119923233fd6SBing Zhao 		}
120023233fd6SBing Zhao 	}
120123233fd6SBing Zhao 	priv->lb_used = 0;
120223233fd6SBing Zhao #else
120323233fd6SBing Zhao 	RTE_SET_USED(dev);
120423233fd6SBing Zhao 	return;
120523233fd6SBing Zhao #endif
120623233fd6SBing Zhao }
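/*
 * Illustrative sketch (same hypothetical shared_* names as above):
 * release mirrors creation, and only the caller that drops the shared
 * reference count to zero performs the teardown:
 *
 *	static void
 *	shared_put(struct shared_res *res)
 *	{
 *		if (rte_atomic_fetch_sub_explicit(&res->refcnt, 1,
 *				rte_memory_order_relaxed) - 1 == 0)
 *			shared_free(res);	// last user tears down
 *	}
 */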
120723233fd6SBing Zhao 
120886d259ceSMichael Baum /**
120986d259ceSMichael Baum  * Release a Verbs Tx queue object.
121086d259ceSMichael Baum  *
121186d259ceSMichael Baum  * @param txq_obj
121286d259ceSMichael Baum  *   Verbs Tx queue object.
121386d259ceSMichael Baum  */
121486d259ceSMichael Baum void
121586d259ceSMichael Baum mlx5_txq_ibv_obj_release(struct mlx5_txq_obj *txq_obj)
121686d259ceSMichael Baum {
121786d259ceSMichael Baum 	MLX5_ASSERT(txq_obj);
121886d259ceSMichael Baum 	claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
121986d259ceSMichael Baum 	claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
122086d259ceSMichael Baum }
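/*
 * Note on ordering: the QP is destroyed before the CQ because a Verbs CQ
 * cannot be destroyed while a QP still references it (ibv_destroy_cq()
 * would fail with EBUSY); claim_zero() asserts that both destroy calls
 * succeed.
 */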
122186d259ceSMichael Baum 
12228bb2410eSOphir Munk struct mlx5_obj_ops ibv_obj_ops = {
12238bb2410eSOphir Munk 	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_wq_vlan_strip,
12246deb19e1SMichael Baum 	.rxq_obj_new = mlx5_rxq_ibv_obj_new,
122532287079SMichael Baum 	.rxq_event_get = mlx5_rx_ibv_get_event,
1226c279f187SMichael Baum 	.rxq_obj_modify = mlx5_ibv_modify_wq,
12276deb19e1SMichael Baum 	.rxq_obj_release = mlx5_rxq_ibv_obj_release,
122825ae7f1aSMichael Baum 	.ind_table_new = mlx5_ibv_ind_table_new,
122925ae7f1aSMichael Baum 	.ind_table_destroy = mlx5_ibv_ind_table_destroy,
123085552726SMichael Baum 	.hrxq_new = mlx5_ibv_hrxq_new,
123185552726SMichael Baum 	.hrxq_destroy = mlx5_ibv_qp_destroy,
12320c762e81SMichael Baum 	.drop_action_create = mlx5_ibv_drop_action_create,
12330c762e81SMichael Baum 	.drop_action_destroy = mlx5_ibv_drop_action_destroy,
123486d259ceSMichael Baum 	.txq_obj_new = mlx5_txq_ibv_obj_new,
12355d9f3c3fSMichael Baum 	.txq_obj_modify = mlx5_ibv_modify_qp,
123686d259ceSMichael Baum 	.txq_obj_release = mlx5_txq_ibv_obj_release,
123723233fd6SBing Zhao 	.lb_dummy_queue_create = NULL,
123823233fd6SBing Zhao 	.lb_dummy_queue_release = NULL,
12398bb2410eSOphir Munk };
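/*
 * Illustrative sketch (hypothetical caller code, not part of the driver):
 * the driver core selects one ops table per device, this Verbs table or
 * its DevX counterpart, and dispatches every queue-object operation
 * through it:
 *
 *	const struct mlx5_obj_ops *ops = &ibv_obj_ops;	// or a DevX table
 *
 *	if (ops->rxq_obj_modify_vlan_strip(rxq, 1) != 0)
 *		DRV_LOG(ERR, "VLAN stripping toggle failed.");
 *	// Optional hooks may be NULL, as lb_dummy_queue_create is here.
 *	if (ops->lb_dummy_queue_create != NULL)
 *		ops->lb_dummy_queue_create(dev);
 */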
1240