/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"
#include "mlx5_flow.h"

/**
 * Modify RQ VLAN stripping offload.
 *
 * @param rxq_obj
 *   Rx queue object.
 * @param on
 *   Enable/disable VLAN stripping.
 *
 * @return
 *   0 on success, non-0 otherwise.
 */
static int
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	rq_attr.state = MLX5_RQC_STATE_RDY;
	rq_attr.vsd = (on ? 0 : 1);
	rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}
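
/*
 * Illustrative note (editor sketch, not part of the driver): VSD stands
 * for "VLAN Strip Disable", so the field is the inverse of the requested
 * state and the RQ stays in the ready state for the whole operation:
 *
 *	mlx5_rxq_obj_modify_rq_vlan_strip(rxq_obj, 1);	vsd = 0, RDY -> RDY
 *	mlx5_rxq_obj_modify_rq_vlan_strip(rxq_obj, 0);	vsd = 1, RDY -> RDY
 *
 * Only the VSD bit is selected by the modify bitmask, so stripping can be
 * toggled without a queue restart.
 */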

/**
 * Modify RQ using DevX API.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 * @param is_start
 *   If true, move the RQ from the reset to the ready state, otherwise
 *   from ready back to reset.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, bool is_start)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	if (is_start) {
		rq_attr.rq_state = MLX5_RQC_STATE_RST;
		rq_attr.state = MLX5_RQC_STATE_RDY;
	} else {
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_RST;
	}
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}
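
/*
 * Illustrative note (editor sketch, not part of the driver): the RQ only
 * moves between the RST and RDY hardware states here, so starting and
 * stopping are single modify commands in opposite directions:
 *
 *	mlx5_devx_modify_rq(rxq_obj, true);	RST -> RDY (start)
 *	mlx5_devx_modify_rq(rxq_obj, false);	RDY -> RST (stop)
 */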

/**
 * Modify SQ using DevX API.
 *
 * @param obj
 *   DevX Tx queue object.
 * @param type
 *   Type of queue state change.
 * @param dev_port
 *   Unused, kept to match the prototype of the parallel Verbs routine.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_modify_sq(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
		    uint8_t dev_port)
{
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	int ret;

	if (type != MLX5_TXQ_MOD_RST2RDY) {
		/* Change queue state to reset. */
		if (type == MLX5_TXQ_MOD_ERR2RDY)
			msq_attr.sq_state = MLX5_SQC_STATE_ERR;
		else
			msq_attr.sq_state = MLX5_SQC_STATE_RDY;
		msq_attr.state = MLX5_SQC_STATE_RST;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_devx, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to RESET"
				" %s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	if (type != MLX5_TXQ_MOD_RDY2RST) {
		/* Change queue state to ready. */
		msq_attr.sq_state = MLX5_SQC_STATE_RST;
		msq_attr.state = MLX5_SQC_STATE_RDY;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_devx, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to READY"
				" %s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	/*
	 * The dev_port argument is used only by the Verbs API. A callback
	 * pointer may refer either to this function or to its Verbs
	 * counterpart, so both must share the same prototype.
	 */
	(void)dev_port;
	return 0;
}
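
/*
 * Illustrative note (editor sketch, not part of the driver): each modify
 * type maps onto one or two hardware state transitions, error recovery
 * being the two-step case:
 *
 *	MLX5_TXQ_MOD_RST2RDY:	RST -> RDY
 *	MLX5_TXQ_MOD_RDY2RST:	RDY -> RST
 *	MLX5_TXQ_MOD_ERR2RDY:	ERR -> RST, then RST -> RDY
 *
 * Any other type passes through both blocks above, i.e. a full
 * RDY -> RST -> RDY restart.
 */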

/**
 * Release the resources allocated for an RQ DevX object.
 *
 * @param rxq_ctrl
 *   Rx queue control structure.
 */
static void
mlx5_rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->rq_dbrec_page;

	if (rxq_ctrl->rxq.wqes) {
		mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
		rxq_ctrl->rxq.wqes = NULL;
	}
	if (rxq_ctrl->wq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
		rxq_ctrl->wq_umem = NULL;
	}
	if (dbr_page) {
		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id(dbr_page->umem),
					    rxq_ctrl->rq_dbr_offset));
		rxq_ctrl->rq_dbrec_page = NULL;
	}
}

/**
 * Release the resources allocated for the Rx CQ DevX object.
 *
 * @param rxq_ctrl
 *   Rx queue control structure.
 */
static void
mlx5_rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->cq_dbrec_page;

	if (rxq_ctrl->rxq.cqes) {
		rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes);
		rxq_ctrl->rxq.cqes = NULL;
	}
	if (rxq_ctrl->cq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->cq_umem);
		rxq_ctrl->cq_umem = NULL;
	}
	if (dbr_page) {
		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id(dbr_page->umem),
					    rxq_ctrl->cq_dbr_offset));
		rxq_ctrl->cq_dbrec_page = NULL;
	}
}

/**
 * Release an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
	MLX5_ASSERT(rxq_obj);
	MLX5_ASSERT(rxq_obj->rq);
	if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN) {
		mlx5_devx_modify_rq(rxq_obj, false);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
	} else {
		MLX5_ASSERT(rxq_obj->devx_cq);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
		if (rxq_obj->devx_channel)
			mlx5_glue->devx_destroy_event_channel
							(rxq_obj->devx_channel);
		mlx5_rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl);
		mlx5_rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl);
	}
}

/**
 * Get event for an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
{
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
					    &out.event_resp,
					    sizeof(out.buf));

	if (ret < 0) {
		rte_errno = errno;
		return -rte_errno;
	}
	if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->devx_cq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
#else
	(void)rxq_obj;
	rte_errno = ENOTSUP;
	return -rte_errno;
#endif /* HAVE_IBV_DEVX_EVENT */
}
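
/*
 * Illustrative note (editor sketch, not part of the driver): a typical
 * caller is the Rx interrupt handler, which drains the pending event and
 * checks that its cookie matches this queue's CQ before re-arming it.
 * Hypothetical flow:
 *
 *	if (mlx5_rx_devx_get_event(rxq_obj) == 0)
 *		ack the interrupt and re-arm the CQ;
 *	else
 *		inspect rte_errno (EINVAL means a foreign cookie).
 */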

/**
 * Fill common fields of create RQ attributes structure.
 *
 * @param rxq_data
 *   Pointer to Rx queue data.
 * @param cqn
 *   CQ number to use with this RQ.
 * @param rq_attr
 *   RQ attributes structure to fill.
 */
static void
mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
			      struct mlx5_devx_create_rq_attr *rq_attr)
{
	rq_attr->state = MLX5_RQC_STATE_RST;
	rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
	rq_attr->cqn = cqn;
	rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
}

/**
 * Fill common fields of DevX WQ attributes structure.
 *
 * @param priv
 *   Pointer to device private data.
 * @param rxq_ctrl
 *   Pointer to Rx queue control structure.
 * @param wq_attr
 *   WQ attributes structure to fill.
 */
static void
mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
		       struct mlx5_devx_wq_attr *wq_attr)
{
	wq_attr->end_padding_mode = priv->config.cqe_pad ?
					MLX5_WQ_END_PAD_MODE_ALIGN :
					MLX5_WQ_END_PAD_MODE_NONE;
	wq_attr->pd = priv->sh->pdn;
	wq_attr->dbr_addr = rxq_ctrl->rq_dbr_offset;
	wq_attr->dbr_umem_id =
			mlx5_os_get_umem_id(rxq_ctrl->rq_dbrec_page->umem);
	wq_attr->dbr_umem_valid = 1;
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(rxq_ctrl->wq_umem);
	wq_attr->wq_umem_valid = 1;
}

/**
 * Create an RQ object using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX RQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
	uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
	uint32_t cqn = rxq_ctrl->obj->devx_cq->id;
	struct mlx5_devx_dbr_page *dbr_page;
	int64_t dbr_offset;
	uint32_t wq_size = 0;
	uint32_t wqe_size = 0;
	uint32_t log_wqe_size = 0;
	void *buf = NULL;
	struct mlx5_devx_obj *rq;

	/* Fill RQ attributes. */
	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
	rq_attr.flush_in_error_en = 1;
	mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
	/* Fill WQ attributes for this RQ. */
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
		/*
		 * Number of strides in each WQE:
		 * 512*2^single_wqe_log_num_of_strides.
		 */
		rq_attr.wq_attr.single_wqe_log_num_of_strides =
				rxq_data->strd_num_n -
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
		rq_attr.wq_attr.single_stride_log_num_of_bytes =
				rxq_data->strd_sz_n -
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
		wqe_size = sizeof(struct mlx5_wqe_mprq);
	} else {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
		wqe_size = sizeof(struct mlx5_wqe_data_seg);
	}
	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
	rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
	/* Calculate and allocate WQ memory space. */
	wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
	wq_size = wqe_n * wqe_size;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size.");
		rte_errno = ENOMEM;
		return NULL;
	}
	buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
			  alignment, rxq_ctrl->socket);
	if (!buf)
		return NULL;
	rxq_data->wqes = buf;
	rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
						     buf, wq_size, 0);
	if (!rxq_ctrl->wq_umem)
		goto error;
	/* Allocate RQ door-bell. */
	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
	if (dbr_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate RQ door-bell.");
		goto error;
	}
	rxq_ctrl->rq_dbr_offset = dbr_offset;
	rxq_ctrl->rq_dbrec_page = dbr_page;
	rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
			  (uintptr_t)rxq_ctrl->rq_dbr_offset);
	/* Create RQ using DevX API. */
	mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
	rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
	if (!rq)
		goto error;
	return rq;
error:
	mlx5_rxq_release_devx_rq_resources(rxq_ctrl);
	return NULL;
}
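
/*
 * Worked example (editor sketch with hypothetical values, non-MPRQ path):
 * with elts_n = 10 and sges_n = 2 the queue holds
 * wqe_n = 1 << (10 - 2) = 256 WQEs. Each WQE carries 2^sges_n data
 * segments of sizeof(struct mlx5_wqe_data_seg) = 16 bytes, so
 * log_wqe_size = log2above(16) + 2 = 6, wqe_size = 64 bytes and the
 * registered umem spans wq_size = 256 * 64 = 16 KB.
 */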

/**
 * Create a DevX CQ object for an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX CQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_devx_obj *cq_obj = NULL;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	size_t page_size = rte_mem_page_size();
	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
	struct mlx5_devx_dbr_page *dbr_page;
	int64_t dbr_offset;
	void *buf = NULL;
	uint16_t event_nums[1] = {0};
	uint32_t log_cqe_n;
	uint32_t cq_size;
	int ret = 0;

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get page_size.");
		goto error;
	}
	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
	    !rxq_data->lro) {
		cq_attr.cqe_comp_en = 1u;
		cq_attr.mini_cqe_res_format =
				mlx5_rxq_mprq_enabled(rxq_data) ?
					MLX5_CQE_RESP_FORMAT_CSUM_STRIDX :
					MLX5_CQE_RESP_FORMAT_HASH;
		/*
		 * Double the CQ size only for non-vectorized Rx; the
		 * vectorized path requires cq_ci and rq_ci to stay aligned.
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cqe_n *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for HW"
			" timestamp.",
			dev->data->port_id);
	} else if (priv->config.cqe_comp && rxq_data->lro) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for LRO.",
			dev->data->port_id);
	}
	if (priv->config.cqe_pad)
		cq_attr.cqe_size = MLX5_CQE_SIZE_128B;
	log_cqe_n = log2above(cqe_n);
	cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
	buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
				rxq_ctrl->socket);
	if (!buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		goto error;
	}
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
	rxq_ctrl->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, buf,
						     cq_size,
						     IBV_ACCESS_LOCAL_WRITE);
	if (!rxq_ctrl->cq_umem) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		goto error;
	}
	/* Allocate CQ door-bell. */
	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
	if (dbr_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
		goto error;
	}
	rxq_ctrl->cq_dbr_offset = dbr_offset;
	rxq_ctrl->cq_dbrec_page = dbr_page;
	rxq_data->cq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
			  (uintptr_t)rxq_ctrl->cq_dbr_offset);
	rxq_data->cq_uar =
			mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar);
	/* Create CQ using DevX API. */
	cq_attr.eqn = priv->sh->eqn;
	cq_attr.uar_page_id =
			mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar);
	cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem);
	cq_attr.q_umem_valid = 1;
	cq_attr.log_cq_size = log_cqe_n;
	cq_attr.log_page_size = rte_log2_u32(page_size);
	cq_attr.db_umem_offset = rxq_ctrl->cq_dbr_offset;
	cq_attr.db_umem_id = mlx5_os_get_umem_id(dbr_page->umem);
	cq_attr.db_umem_valid = 1;
	cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
	if (!cq_obj)
		goto error;
	rxq_data->cqe_n = log_cqe_n;
	rxq_data->cqn = cq_obj->id;
	if (rxq_ctrl->obj->devx_channel) {
		ret = mlx5_glue->devx_subscribe_devx_event
						(rxq_ctrl->obj->devx_channel,
						 cq_obj->obj,
						 sizeof(event_nums),
						 event_nums,
						 (uint64_t)(uintptr_t)cq_obj);
		if (ret) {
			DRV_LOG(ERR, "Failed to subscribe CQ to event"
				" channel.");
			rte_errno = errno;
			goto error;
		}
	}
	/* Initialise CQ to 1's to mark HW ownership for all CQEs. */
	memset((void *)(uintptr_t)rxq_data->cqes, 0xFF, cq_size);
	return cq_obj;
error:
	if (cq_obj)
		mlx5_devx_cmd_destroy(cq_obj);
	mlx5_rxq_release_devx_cq_resources(rxq_ctrl);
	return NULL;
}
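
/*
 * Worked example (editor sketch with hypothetical values, 64-byte CQEs
 * assumed): with cqe_n = 512 returned by mlx5_rxq_cqe_num(), CQE
 * compression enabled and no vector Rx support, cqe_n is doubled to 1024
 * and log_cqe_n = 10, so the CQ buffer takes
 * cq_size = 64 * (1 << 10) = 64 KB, page aligned and set to all ones to
 * hand every CQE to hardware ownership.
 */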

/**
 * Create the Rx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr attr = { 0 };
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
	tmpl->rxq_ctrl = rxq_ctrl;
	attr.hairpin = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9KB and more packets should be supported. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
					   rxq_ctrl->socket);
	if (!tmpl->rq) {
		DRV_LOG(ERR,
			"Port %u Rx hairpin queue %u can't create RQ object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return 0;
}
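
/*
 * Worked example (editor sketch; the constant values are assumptions and
 * may differ between releases): with MLX5_HAIRPIN_JUMBO_LOG_SIZE = 16 and
 * MLX5_HAIRPIN_QUEUE_STRIDE = 6, a device reporting max_wq_data >= 16 and
 * no explicit log_hp_size gets log_hairpin_data_sz = 16 (64 KB of hairpin
 * data) and log_hairpin_num_packets = 16 - 6 = 10, i.e. 1024 packet slots.
 */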

/**
 * Create the Rx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	int ret = 0;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		return mlx5_rxq_obj_hairpin_new(dev, idx);
	tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
	tmpl->rxq_ctrl = rxq_ctrl;
	if (rxq_ctrl->irq) {
		int devx_ev_flag =
			  MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

		tmpl->devx_channel = mlx5_glue->devx_create_event_channel
								(priv->sh->ctx,
								 devx_ev_flag);
		if (!tmpl->devx_channel) {
			rte_errno = errno;
			DRV_LOG(ERR, "Failed to create event channel %d.",
				rte_errno);
			goto error;
		}
		tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
	}
	/* Create CQ using DevX API. */
	tmpl->devx_cq = mlx5_rxq_create_devx_cq_resources(dev, idx);
	if (!tmpl->devx_cq) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto error;
	}
	/* Create RQ using DevX API. */
	tmpl->rq = mlx5_rxq_create_devx_rq_resources(dev, idx);
	if (!tmpl->rq) {
		DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_devx_modify_rq(tmpl, true);
	if (ret)
		goto error;
	rxq_data->cq_arm_sn = 0;
	mlx5_rxq_initialize(rxq_data);
	rxq_data->cq_ci = 0;
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	rxq_ctrl->wqn = tmpl->rq->id;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (tmpl->rq)
		claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
	if (tmpl->devx_cq)
		claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq));
	if (tmpl->devx_channel)
		mlx5_glue->devx_destroy_event_channel(tmpl->devx_channel);
	mlx5_rxq_release_devx_rq_resources(rxq_ctrl);
	mlx5_rxq_release_devx_cq_resources(rxq_ctrl);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}
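
/*
 * Illustrative summary (editor sketch, not part of the driver): the
 * creation order above matters because each object references the
 * previous one:
 *
 *	1. event channel - only when IRQs are requested
 *	2. CQ            - subscribed to the channel, referenced via cqn
 *	3. RQ            - created in RST state over the registered umem
 *	4. modify RQ RST -> RDY, then initialize the SW ring and doorbells
 */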

/**
 * Create RQT using DevX API as a field of the indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
			struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
	const unsigned int rqt_n = 1 << log_n;
	unsigned int i, j;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
			      rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
	if (!rqt_attr) {
		DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
	rqt_attr->rqt_actual_size = rqt_n;
	for (i = 0; i != ind_tbl->queues_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[ind_tbl->queues[i]];
		struct mlx5_rxq_ctrl *rxq_ctrl =
				container_of(rxq, struct mlx5_rxq_ctrl, rxq);

		rqt_attr->rq_list[i] = rxq_ctrl->obj->rq->id;
	}
	MLX5_ASSERT(i > 0);
	for (j = 0; i != rqt_n; ++j, ++i)
		rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
	ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);
	mlx5_free(rqt_attr);
	if (!ind_tbl->rqt) {
		DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
			dev->data->port_id);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}
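
/*
 * Worked example (editor sketch with hypothetical values): log_n = 2
 * gives rqt_n = 4 entries. For an indirection table built over three
 * queues {q0, q1, q2} the tail of rq_list wraps around to the beginning:
 *
 *	rq_list = { RQ(q0), RQ(q1), RQ(q2), RQ(q0) }
 */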

/**
 * Destroy the DevX RQT object.
 *
 * @param ind_tbl
 *   Indirection table to release.
 */
static void
mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
	claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
}

/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Pointer to Rx Hash queue.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		   int tunnel __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ind_table_obj *ind_tbl = hrxq->ind_table;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_tir_attr tir_attr;
	const uint8_t *rss_key = hrxq->rss_key;
	uint64_t hash_fields = hrxq->hash_fields;
	bool lro = true;
	uint32_t i;
	int err;

	/* Enable TIR LRO only if all the queues were configured for it. */
	for (i = 0; i < ind_tbl->queues_n; ++i) {
		if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
			lro = false;
			break;
		}
	}
	memset(&tir_attr, 0, sizeof(tir_attr));
	tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
	tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
	tir_attr.tunneled_offload_en = !!tunnel;
	/* If needed, translate hash_fields bitmap to PRM format. */
	if (hash_fields) {
		struct mlx5_rx_hash_field_select *rx_hash_field_select = NULL;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
		rx_hash_field_select = hash_fields & IBV_RX_HASH_INNER ?
				       &tir_attr.rx_hash_field_selector_inner :
				       &tir_attr.rx_hash_field_selector_outer;
#else
		rx_hash_field_select = &tir_attr.rx_hash_field_selector_outer;
#endif
		/* 1 bit: 0: IPv4, 1: IPv6. */
		rx_hash_field_select->l3_prot_type =
					!!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
		/* 1 bit: 0: TCP, 1: UDP. */
		rx_hash_field_select->l4_prot_type =
					 !!(hash_fields & MLX5_UDP_IBV_RX_HASH);
		/* Bitmask which sets which fields to use in RX Hash. */
		rx_hash_field_select->selected_fields =
			((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
			(!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
			(!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
			(!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
	}
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		tir_attr.transport_domain = priv->sh->td->id;
	else
		tir_attr.transport_domain = priv->sh->tdn;
	memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	tir_attr.indirect_table = ind_tbl->rqt->id;
	if (dev->data->dev_conf.lpbk_mode)
		tir_attr.self_lb_block = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
	if (lro) {
		tir_attr.lro_timeout_period_usecs = priv->config.lro.timeout;
		tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
		tir_attr.lro_enable_mask = MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
					   MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
	}
	hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
	if (!hrxq->tir) {
		DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
			dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
							       (hrxq->tir->obj);
	if (!hrxq->action) {
		rte_errno = errno;
		goto error;
	}
#endif
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	if (hrxq->tir)
		claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
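
/*
 * Worked example (editor sketch, not part of the driver): for an outer
 * IPv4/UDP 4-tuple RSS request the translation above yields
 *
 *	l3_prot_type = 0 (IPv4), l4_prot_type = 1 (UDP),
 *	selected_fields = SRC_IP | DST_IP | L4_SPORT | L4_DPORT,
 *
 * all written to rx_hash_field_selector_outer since IBV_RX_HASH_INNER is
 * not requested.
 */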

/**
 * Destroy a DevX TIR object.
 *
 * @param hrxq
 *   Hash Rx queue whose TIR is released.
 */
static void
mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
{
	claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
}

/**
 * Create a DevX drop action for Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
{
	(void)dev;
	DRV_LOG(ERR, "DevX drop action is not supported yet.");
	rte_errno = ENOTSUP;
	return -rte_errno;
}

/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
{
	(void)dev;
	DRV_LOG(ERR, "DevX drop action is not supported yet.");
	rte_errno = ENOTSUP;
}

/**
 * Create the Tx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_devx_create_sq_attr attr = { 0 };
	struct mlx5_txq_obj *tmpl = txq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(tmpl);
	tmpl->txq_ctrl = txq_ctrl;
	attr.hairpin = 1;
	attr.tis_lst_sz = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9KB and more packets should be supported. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	attr.tis_num = priv->sh->tis->id;
	tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
	if (!tmpl->sq) {
		DRV_LOG(ERR,
			"Port %u Tx hairpin queue %u can't create SQ object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
/**
 * Release DevX SQ resources.
 *
 * @param txq_obj
 *   DevX Tx queue object.
 */
static void
mlx5_txq_release_devx_sq_resources(struct mlx5_txq_obj *txq_obj)
{
	if (txq_obj->sq_devx)
		claim_zero(mlx5_devx_cmd_destroy(txq_obj->sq_devx));
	if (txq_obj->sq_umem)
		claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->sq_umem));
	if (txq_obj->sq_buf)
		mlx5_free(txq_obj->sq_buf);
	if (txq_obj->sq_dbrec_page)
		claim_zero(mlx5_release_dbr(&txq_obj->txq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id
						 (txq_obj->sq_dbrec_page->umem),
					    txq_obj->sq_dbrec_offset));
}

/**
 * Release DevX Tx CQ resources.
 *
 * @param txq_obj
 *   DevX Tx queue object.
 */
static void
mlx5_txq_release_devx_cq_resources(struct mlx5_txq_obj *txq_obj)
{
	if (txq_obj->cq_devx)
		claim_zero(mlx5_devx_cmd_destroy(txq_obj->cq_devx));
	if (txq_obj->cq_umem)
		claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->cq_umem));
	if (txq_obj->cq_buf)
		mlx5_free(txq_obj->cq_buf);
	if (txq_obj->cq_dbrec_page)
		claim_zero(mlx5_release_dbr(&txq_obj->txq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id
						 (txq_obj->cq_dbrec_page->umem),
					    txq_obj->cq_dbrec_offset));
}

/**
 * Release DevX Tx queue resources.
 *
 * @param txq_obj
 *   Txq object to release.
 */
static void
mlx5_txq_release_devx_resources(struct mlx5_txq_obj *txq_obj)
{
	mlx5_txq_release_devx_cq_resources(txq_obj);
	mlx5_txq_release_devx_sq_resources(txq_obj);
}

/**
 * Create a DevX CQ object and its resources for a Tx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   Number of CQEs in CQ, 0 otherwise and rte_errno is set.
 */
99788f2e3f1SMichael Baum static uint32_t
99888f2e3f1SMichael Baum mlx5_txq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
99986d259ceSMichael Baum {
100086d259ceSMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
100186d259ceSMichael Baum 	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
100288f2e3f1SMichael Baum 	struct mlx5_txq_ctrl *txq_ctrl =
100388f2e3f1SMichael Baum 			container_of(txq_data, struct mlx5_txq_ctrl, txq);
100488f2e3f1SMichael Baum 	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
100586d259ceSMichael Baum 	struct mlx5_devx_cq_attr cq_attr = { 0 };
100686d259ceSMichael Baum 	struct mlx5_cqe *cqe;
100786d259ceSMichael Baum 	size_t page_size;
100886d259ceSMichael Baum 	size_t alignment;
100988f2e3f1SMichael Baum 	uint32_t cqe_n;
101086d259ceSMichael Baum 	uint32_t i;
101186d259ceSMichael Baum 	int ret;
101286d259ceSMichael Baum 
101386d259ceSMichael Baum 	MLX5_ASSERT(txq_data);
101486d259ceSMichael Baum 	MLX5_ASSERT(txq_obj);
101586d259ceSMichael Baum 	page_size = rte_mem_page_size();
101686d259ceSMichael Baum 	if (page_size == (size_t)-1) {
101786d259ceSMichael Baum 		DRV_LOG(ERR, "Failed to get mem page size.");
101886d259ceSMichael Baum 		rte_errno = ENOMEM;
101988f2e3f1SMichael Baum 		return 0;
102086d259ceSMichael Baum 	}
102186d259ceSMichael Baum 	/* Allocate memory buffer for CQEs. */
102286d259ceSMichael Baum 	alignment = MLX5_CQE_BUF_ALIGNMENT;
102386d259ceSMichael Baum 	if (alignment == (size_t)-1) {
102486d259ceSMichael Baum 		DRV_LOG(ERR, "Failed to get CQE buf alignment.");
102586d259ceSMichael Baum 		rte_errno = ENOMEM;
102688f2e3f1SMichael Baum 		return 0;
102786d259ceSMichael Baum 	}
102888f2e3f1SMichael Baum 	/* Create the Completion Queue. */
102988f2e3f1SMichael Baum 	cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
103088f2e3f1SMichael Baum 		1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
103186d259ceSMichael Baum 	cqe_n = 1UL << log2above(cqe_n);
103286d259ceSMichael Baum 	if (cqe_n > UINT16_MAX) {
103386d259ceSMichael Baum 		DRV_LOG(ERR,
103486d259ceSMichael Baum 			"Port %u Tx queue %u requests to many CQEs %u.",
103586d259ceSMichael Baum 			dev->data->port_id, txq_data->idx, cqe_n);
103686d259ceSMichael Baum 		rte_errno = EINVAL;
103788f2e3f1SMichael Baum 		return 0;
103886d259ceSMichael Baum 	}
103986d259ceSMichael Baum 	txq_obj->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
104086d259ceSMichael Baum 				      cqe_n * sizeof(struct mlx5_cqe),
104186d259ceSMichael Baum 				      alignment,
104286d259ceSMichael Baum 				      priv->sh->numa_node);
104386d259ceSMichael Baum 	if (!txq_obj->cq_buf) {
104486d259ceSMichael Baum 		DRV_LOG(ERR,
104586d259ceSMichael Baum 			"Port %u Tx queue %u cannot allocate memory (CQ).",
104686d259ceSMichael Baum 			dev->data->port_id, txq_data->idx);
104786d259ceSMichael Baum 		rte_errno = ENOMEM;
104888f2e3f1SMichael Baum 		return 0;
104986d259ceSMichael Baum 	}
105086d259ceSMichael Baum 	/* Register allocated buffer in user space with DevX. */
105186d259ceSMichael Baum 	txq_obj->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
105286d259ceSMichael Baum 						(void *)txq_obj->cq_buf,
105386d259ceSMichael Baum 						cqe_n * sizeof(struct mlx5_cqe),
105486d259ceSMichael Baum 						IBV_ACCESS_LOCAL_WRITE);
105586d259ceSMichael Baum 	if (!txq_obj->cq_umem) {
105686d259ceSMichael Baum 		rte_errno = errno;
105786d259ceSMichael Baum 		DRV_LOG(ERR,
105886d259ceSMichael Baum 			"Port %u Tx queue %u cannot register memory (CQ).",
105986d259ceSMichael Baum 			dev->data->port_id, txq_data->idx);
106086d259ceSMichael Baum 		goto error;
106186d259ceSMichael Baum 	}
106286d259ceSMichael Baum 	/* Allocate doorbell record for completion queue. */
106386d259ceSMichael Baum 	txq_obj->cq_dbrec_offset = mlx5_get_dbr(priv->sh->ctx,
106486d259ceSMichael Baum 						&priv->dbrpgs,
106586d259ceSMichael Baum 						&txq_obj->cq_dbrec_page);
106686d259ceSMichael Baum 	if (txq_obj->cq_dbrec_offset < 0) {
106786d259ceSMichael Baum 		rte_errno = errno;
106886d259ceSMichael Baum 		DRV_LOG(ERR, "Failed to allocate CQ doorbell.");
106986d259ceSMichael Baum 		goto error;
107086d259ceSMichael Baum 	}
107186d259ceSMichael Baum 	cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
107286d259ceSMichael Baum 			    MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
107386d259ceSMichael Baum 	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar);
107486d259ceSMichael Baum 	cq_attr.eqn = priv->sh->eqn;
107586d259ceSMichael Baum 	cq_attr.q_umem_valid = 1;
107686d259ceSMichael Baum 	cq_attr.q_umem_offset = (uintptr_t)txq_obj->cq_buf % page_size;
107786d259ceSMichael Baum 	cq_attr.q_umem_id = mlx5_os_get_umem_id(txq_obj->cq_umem);
107886d259ceSMichael Baum 	cq_attr.db_umem_valid = 1;
107986d259ceSMichael Baum 	cq_attr.db_umem_offset = txq_obj->cq_dbrec_offset;
108086d259ceSMichael Baum 	cq_attr.db_umem_id = mlx5_os_get_umem_id(txq_obj->cq_dbrec_page->umem);
108186d259ceSMichael Baum 	cq_attr.log_cq_size = rte_log2_u32(cqe_n);
108286d259ceSMichael Baum 	cq_attr.log_page_size = rte_log2_u32(page_size);
108386d259ceSMichael Baum 	/* Create completion queue object with DevX. */
108488f2e3f1SMichael Baum 	txq_obj->cq_devx = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
108588f2e3f1SMichael Baum 	if (!txq_obj->cq_devx) {
108686d259ceSMichael Baum 		rte_errno = errno;
108786d259ceSMichael Baum 		DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
108886d259ceSMichael Baum 			dev->data->port_id, idx);
108986d259ceSMichael Baum 		goto error;
109086d259ceSMichael Baum 	}
109186d259ceSMichael Baum 	/* Initially fill the CQ buffer with invalid CQE opcodes. */
109286d259ceSMichael Baum 	cqe = (struct mlx5_cqe *)txq_obj->cq_buf;
109388f2e3f1SMichael Baum 	for (i = 0; i < cqe_n; i++) {
109486d259ceSMichael Baum 		cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
109586d259ceSMichael Baum 		++cqe;
109686d259ceSMichael Baum 	}
109788f2e3f1SMichael Baum 	return cqe_n;
109886d259ceSMichael Baum error:
109986d259ceSMichael Baum 	ret = rte_errno;
110088f2e3f1SMichael Baum 	mlx5_txq_release_devx_cq_resources(txq_obj);
110186d259ceSMichael Baum 	rte_errno = ret;
110288f2e3f1SMichael Baum 	return 0;
110386d259ceSMichael Baum }
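
/*
 * The CQE count above provides one completion per MLX5_TX_COMP_THRESH
 * packets plus headroom, rounded up to a power of two. A minimal sketch
 * of that sizing, assuming the default thresholds
 * (MLX5_TX_COMP_THRESH == 32, MLX5_TX_COMP_THRESH_INLINE_DIV == 8);
 * the helper name is illustrative only:
 */
static inline uint32_t
mlx5_txq_cqe_n_sketch(uint16_t elts_n)
{
	/* E.g. elts_n == 11 (2048 elements): 2048 / 32 + 1 + 8 = 73 -> 128. */
	uint32_t cqe_n = (1UL << elts_n) / MLX5_TX_COMP_THRESH +
			 1 + MLX5_TX_COMP_THRESH_INLINE_DIV;

	return 1UL << log2above(cqe_n);
}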
110486d259ceSMichael Baum 
110586d259ceSMichael Baum /**
110688f2e3f1SMichael Baum  * Create an SQ object and its resources using DevX.
110786d259ceSMichael Baum  *
110886d259ceSMichael Baum  * @param dev
110986d259ceSMichael Baum  *   Pointer to Ethernet device.
111086d259ceSMichael Baum  * @param idx
111186d259ceSMichael Baum  *   Queue index in DPDK Tx queue array.
111286d259ceSMichael Baum  *
111386d259ceSMichael Baum  * @return
111488f2e3f1SMichael Baum  *   Number of WQEs in SQ on success, 0 otherwise and rte_errno is set.
111586d259ceSMichael Baum  */
111688f2e3f1SMichael Baum static uint32_t
111788f2e3f1SMichael Baum mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx)
111886d259ceSMichael Baum {
111986d259ceSMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
112086d259ceSMichael Baum 	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
112188f2e3f1SMichael Baum 	struct mlx5_txq_ctrl *txq_ctrl =
112288f2e3f1SMichael Baum 			container_of(txq_data, struct mlx5_txq_ctrl, txq);
112388f2e3f1SMichael Baum 	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
112486d259ceSMichael Baum 	struct mlx5_devx_create_sq_attr sq_attr = { 0 };
112586d259ceSMichael Baum 	size_t page_size;
112686d259ceSMichael Baum 	uint32_t wqe_n;
112786d259ceSMichael Baum 	int ret;
112886d259ceSMichael Baum 
112986d259ceSMichael Baum 	MLX5_ASSERT(txq_data);
113086d259ceSMichael Baum 	MLX5_ASSERT(txq_obj);
113186d259ceSMichael Baum 	page_size = rte_mem_page_size();
113286d259ceSMichael Baum 	if (page_size == (size_t)-1) {
113386d259ceSMichael Baum 		DRV_LOG(ERR, "Failed to get mem page size.");
113486d259ceSMichael Baum 		rte_errno = ENOMEM;
113588f2e3f1SMichael Baum 		return 0;
113686d259ceSMichael Baum 	}
113786d259ceSMichael Baum 	wqe_n = RTE_MIN(1UL << txq_data->elts_n,
113886d259ceSMichael Baum 			(uint32_t)priv->sh->device_attr.max_qp_wr);
113986d259ceSMichael Baum 	txq_obj->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
114086d259ceSMichael Baum 				      wqe_n * sizeof(struct mlx5_wqe),
114186d259ceSMichael Baum 				      page_size, priv->sh->numa_node);
114286d259ceSMichael Baum 	if (!txq_obj->sq_buf) {
114386d259ceSMichael Baum 		DRV_LOG(ERR,
114486d259ceSMichael Baum 			"Port %u Tx queue %u cannot allocate memory (SQ).",
114586d259ceSMichael Baum 			dev->data->port_id, txq_data->idx);
114686d259ceSMichael Baum 		rte_errno = ENOMEM;
114786d259ceSMichael Baum 		goto error;
114886d259ceSMichael Baum 	}
114986d259ceSMichael Baum 	/* Register allocated buffer in user space with DevX. */
115086d259ceSMichael Baum 	txq_obj->sq_umem = mlx5_glue->devx_umem_reg
115186d259ceSMichael Baum 					(priv->sh->ctx,
115286d259ceSMichael Baum 					 (void *)txq_obj->sq_buf,
115386d259ceSMichael Baum 					 wqe_n * sizeof(struct mlx5_wqe),
115486d259ceSMichael Baum 					 IBV_ACCESS_LOCAL_WRITE);
115586d259ceSMichael Baum 	if (!txq_obj->sq_umem) {
115686d259ceSMichael Baum 		rte_errno = errno;
115786d259ceSMichael Baum 		DRV_LOG(ERR,
115886d259ceSMichael Baum 			"Port %u Tx queue %u cannot register memory (SQ).",
115986d259ceSMichael Baum 			dev->data->port_id, txq_data->idx);
116086d259ceSMichael Baum 		goto error;
116186d259ceSMichael Baum 	}
116286d259ceSMichael Baum 	/* Allocate doorbell record for send queue. */
116386d259ceSMichael Baum 	txq_obj->sq_dbrec_offset = mlx5_get_dbr(priv->sh->ctx,
116486d259ceSMichael Baum 						&priv->dbrpgs,
116586d259ceSMichael Baum 						&txq_obj->sq_dbrec_page);
116686d259ceSMichael Baum 	if (txq_obj->sq_dbrec_offset < 0) {
116786d259ceSMichael Baum 		rte_errno = errno;
116886d259ceSMichael Baum 		DRV_LOG(ERR, "Failed to allocate SQ doorbell.");
116986d259ceSMichael Baum 		goto error;
117086d259ceSMichael Baum 	}
117186d259ceSMichael Baum 	sq_attr.tis_lst_sz = 1;
117286d259ceSMichael Baum 	sq_attr.tis_num = priv->sh->tis->id;
117386d259ceSMichael Baum 	sq_attr.state = MLX5_SQC_STATE_RST;
117486d259ceSMichael Baum 	sq_attr.cqn = txq_obj->cq_devx->id;
117586d259ceSMichael Baum 	sq_attr.flush_in_error_en = 1;
117686d259ceSMichael Baum 	sq_attr.allow_multi_pkt_send_wqe = !!priv->config.mps;
117786d259ceSMichael Baum 	sq_attr.allow_swp = !!priv->config.swp;
117886d259ceSMichael Baum 	sq_attr.min_wqe_inline_mode = priv->config.hca_attr.vport_inline_mode;
117986d259ceSMichael Baum 	sq_attr.wq_attr.uar_page =
118086d259ceSMichael Baum 				mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar);
118186d259ceSMichael Baum 	sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
118286d259ceSMichael Baum 	sq_attr.wq_attr.pd = priv->sh->pdn;
118386d259ceSMichael Baum 	sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
118488f2e3f1SMichael Baum 	sq_attr.wq_attr.log_wq_sz = log2above(wqe_n);
118586d259ceSMichael Baum 	sq_attr.wq_attr.dbr_umem_valid = 1;
118686d259ceSMichael Baum 	sq_attr.wq_attr.dbr_addr = txq_obj->sq_dbrec_offset;
118786d259ceSMichael Baum 	sq_attr.wq_attr.dbr_umem_id =
118886d259ceSMichael Baum 			mlx5_os_get_umem_id(txq_obj->sq_dbrec_page->umem);
118986d259ceSMichael Baum 	sq_attr.wq_attr.wq_umem_valid = 1;
119086d259ceSMichael Baum 	sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(txq_obj->sq_umem);
119186d259ceSMichael Baum 	sq_attr.wq_attr.wq_umem_offset = (uintptr_t)txq_obj->sq_buf % page_size;
119286d259ceSMichael Baum 	/* Create Send Queue object with DevX. */
119388f2e3f1SMichael Baum 	txq_obj->sq_devx = mlx5_devx_cmd_create_sq(priv->sh->ctx, &sq_attr);
119488f2e3f1SMichael Baum 	if (!txq_obj->sq_devx) {
119586d259ceSMichael Baum 		rte_errno = errno;
119686d259ceSMichael Baum 		DRV_LOG(ERR, "Port %u Tx queue %u SQ creation failure.",
119786d259ceSMichael Baum 			dev->data->port_id, idx);
119886d259ceSMichael Baum 		goto error;
119986d259ceSMichael Baum 	}
120088f2e3f1SMichael Baum 	return wqe_n;
120186d259ceSMichael Baum error:
120286d259ceSMichael Baum 	ret = rte_errno;
120388f2e3f1SMichael Baum 	mlx5_txq_release_devx_sq_resources(txq_obj);
120486d259ceSMichael Baum 	rte_errno = ret;
120588f2e3f1SMichael Baum 	return 0;
120686d259ceSMichael Baum }
120786d259ceSMichael Baum #endif
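
/*
 * Both helpers above share one DevX pattern: the queue buffer is
 * registered with devx_umem_reg(), which works on whole pages, so the
 * q_umem_offset/wq_umem_offset attributes carry the byte offset of the
 * queue start within its first registered page. A minimal sketch of
 * that computation; the helper name is illustrative only:
 */
static inline uintptr_t
mlx5_umem_page_offset_sketch(const void *buf)
{
	/* E.g. buf == 0x7f2a10342340 with 4096B pages -> offset 0x340. */
	return (uintptr_t)buf % rte_mem_page_size();
}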
120886d259ceSMichael Baum 
120986d259ceSMichael Baum /**
121086d259ceSMichael Baum  * Create the Tx queue DevX object.
121186d259ceSMichael Baum  *
121286d259ceSMichael Baum  * @param dev
121386d259ceSMichael Baum  *   Pointer to Ethernet device.
121486d259ceSMichael Baum  * @param idx
121586d259ceSMichael Baum  *   Queue index in DPDK Tx queue array.
121686d259ceSMichael Baum  *
121786d259ceSMichael Baum  * @return
1218f49f4483SMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
121986d259ceSMichael Baum  */
1220f49f4483SMichael Baum int
122186d259ceSMichael Baum mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
122286d259ceSMichael Baum {
122386d259ceSMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
122486d259ceSMichael Baum 	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
122586d259ceSMichael Baum 	struct mlx5_txq_ctrl *txq_ctrl =
122686d259ceSMichael Baum 			container_of(txq_data, struct mlx5_txq_ctrl, txq);
122786d259ceSMichael Baum 
122886d259ceSMichael Baum 	if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
122986d259ceSMichael Baum 		return mlx5_txq_obj_hairpin_new(dev, idx);
123086d259ceSMichael Baum #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
123186d259ceSMichael Baum 	DRV_LOG(ERR, "Port %u Tx queue %u cannot be created with DevX, no UAR.",
123286d259ceSMichael Baum 		     dev->data->port_id, idx);
123386d259ceSMichael Baum 	rte_errno = ENOMEM;
1234f49f4483SMichael Baum 	return -rte_errno;
123586d259ceSMichael Baum #else
123686d259ceSMichael Baum 	struct mlx5_dev_ctx_shared *sh = priv->sh;
1237f49f4483SMichael Baum 	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
123886d259ceSMichael Baum 	void *reg_addr;
123986d259ceSMichael Baum 	uint32_t cqe_n;
124088f2e3f1SMichael Baum 	uint32_t wqe_n;
124186d259ceSMichael Baum 	int ret = 0;
124286d259ceSMichael Baum 
124386d259ceSMichael Baum 	MLX5_ASSERT(txq_data);
1244f49f4483SMichael Baum 	MLX5_ASSERT(txq_obj);
124586d259ceSMichael Baum 	txq_obj->txq_ctrl = txq_ctrl;
124686d259ceSMichael Baum 	txq_obj->dev = dev;
124788f2e3f1SMichael Baum 	cqe_n = mlx5_txq_create_devx_cq_resources(dev, idx);
124888f2e3f1SMichael Baum 	if (!cqe_n) {
124986d259ceSMichael Baum 		/* rte_errno is already set by the failed CQ creation. */
125086d259ceSMichael Baum 		goto error;
125186d259ceSMichael Baum 	}
125288f2e3f1SMichael Baum 	txq_data->cqe_n = log2above(cqe_n);
125388f2e3f1SMichael Baum 	txq_data->cqe_s = 1 << txq_data->cqe_n;
125486d259ceSMichael Baum 	txq_data->cqe_m = txq_data->cqe_s - 1;
125586d259ceSMichael Baum 	txq_data->cqes = (volatile struct mlx5_cqe *)txq_obj->cq_buf;
125686d259ceSMichael Baum 	txq_data->cq_ci = 0;
125786d259ceSMichael Baum 	txq_data->cq_pi = 0;
125886d259ceSMichael Baum 	txq_data->cq_db = (volatile uint32_t *)(txq_obj->cq_dbrec_page->dbrs +
125986d259ceSMichael Baum 						txq_obj->cq_dbrec_offset);
126086d259ceSMichael Baum 	*txq_data->cq_db = 0;
126186d259ceSMichael Baum 	/* Create Send Queue object with DevX. */
126288f2e3f1SMichael Baum 	wqe_n = mlx5_txq_create_devx_sq_resources(dev, idx);
126388f2e3f1SMichael Baum 	if (!wqe_n) {
126486d259ceSMichael Baum 		/* rte_errno is already set by the failed SQ creation. */
126586d259ceSMichael Baum 		goto error;
126686d259ceSMichael Baum 	}
126786d259ceSMichael Baum 	/* Set up the software view of the Work Queue. */
126888f2e3f1SMichael Baum 	txq_data->wqe_n = log2above(wqe_n);
126986d259ceSMichael Baum 	txq_data->wqe_s = 1 << txq_data->wqe_n;
127086d259ceSMichael Baum 	txq_data->wqe_m = txq_data->wqe_s - 1;
127186d259ceSMichael Baum 	txq_data->wqes = (struct mlx5_wqe *)txq_obj->sq_buf;
127286d259ceSMichael Baum 	txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
127386d259ceSMichael Baum 	txq_data->wqe_ci = 0;
127486d259ceSMichael Baum 	txq_data->wqe_pi = 0;
127586d259ceSMichael Baum 	txq_data->wqe_comp = 0;
127686d259ceSMichael Baum 	txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
127786d259ceSMichael Baum 	txq_data->qp_db = (volatile uint32_t *)
127886d259ceSMichael Baum 					(txq_obj->sq_dbrec_page->dbrs +
127986d259ceSMichael Baum 					 txq_obj->sq_dbrec_offset +
128086d259ceSMichael Baum 					 MLX5_SND_DBR * sizeof(uint32_t));
128186d259ceSMichael Baum 	*txq_data->qp_db = 0;
128286d259ceSMichael Baum 	txq_data->qp_num_8s = txq_obj->sq_devx->id << 8;
128386d259ceSMichael Baum 	/* Change Send Queue state to Ready-to-Send. */
1284a9c79306SMichael Baum 	ret = mlx5_devx_modify_sq(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0);
128586d259ceSMichael Baum 	if (ret) {
128686d259ceSMichael Baum 		rte_errno = errno;
128786d259ceSMichael Baum 		DRV_LOG(ERR,
1288a9c79306SMichael Baum 			"Port %u Tx queue %u failed to set SQ state to SQC_STATE_RDY.",
128986d259ceSMichael Baum 			dev->data->port_id, idx);
129086d259ceSMichael Baum 		goto error;
129186d259ceSMichael Baum 	}
129286d259ceSMichael Baum #ifdef HAVE_IBV_FLOW_DV_SUPPORT
129386d259ceSMichael Baum 	/*
129486d259ceSMichael Baum 	 * If using DevX, query and store the TIS transport domain value
129586d259ceSMichael Baum 	 * once per port.
129686d259ceSMichael Baum 	 * It will be used on Rx, when creating the matching TIR.
129786d259ceSMichael Baum 	 */
129886d259ceSMichael Baum 	if (!priv->sh->tdn)
129986d259ceSMichael Baum 		priv->sh->tdn = priv->sh->td->id;
130086d259ceSMichael Baum #endif
130186d259ceSMichael Baum 	MLX5_ASSERT(sh->tx_uar);
130286d259ceSMichael Baum 	reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
130386d259ceSMichael Baum 	MLX5_ASSERT(reg_addr);
130486d259ceSMichael Baum 	txq_ctrl->bf_reg = reg_addr;
130586d259ceSMichael Baum 	txq_ctrl->uar_mmap_offset =
130686d259ceSMichael Baum 				mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
130786d259ceSMichael Baum 	txq_uar_init(txq_ctrl);
1308f49f4483SMichael Baum 	return 0;
130986d259ceSMichael Baum error:
131086d259ceSMichael Baum 	ret = rte_errno; /* Save rte_errno before cleanup. */
131188f2e3f1SMichael Baum 	mlx5_txq_release_devx_resources(txq_obj);
131286d259ceSMichael Baum 	rte_errno = ret; /* Restore rte_errno. */
1313f49f4483SMichael Baum 	return -rte_errno;
131486d259ceSMichael Baum #endif
131586d259ceSMichael Baum }
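
/*
 * The qp_db pointer initialized above addresses the send slot (index
 * MLX5_SND_DBR) of the queue's doorbell record. A minimal sketch of how
 * a Tx burst would publish a new producer index through it, assuming
 * the doorbell takes the big-endian 32-bit WQE counter as in
 * mlx5_rxtx.c (the real datapath adds BlueFlame writes via bf_reg; the
 * helper name is illustrative only):
 */
static __rte_always_inline void
mlx5_txq_ring_db_sketch(struct mlx5_txq_data *txq)
{
	/* Make WQE stores visible to the device before the doorbell. */
	rte_io_wmb();
	*txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
}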
131686d259ceSMichael Baum 
131786d259ceSMichael Baum /**
131886d259ceSMichael Baum  * Release a Tx DevX queue object.
131986d259ceSMichael Baum  *
132086d259ceSMichael Baum  * @param txq_obj
132186d259ceSMichael Baum  *   DevX Tx queue object.
132286d259ceSMichael Baum  */
132386d259ceSMichael Baum void
132486d259ceSMichael Baum mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj)
132586d259ceSMichael Baum {
132686d259ceSMichael Baum 	MLX5_ASSERT(txq_obj);
1327*354cc08aSMichael Baum 	if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
132886d259ceSMichael Baum 		if (txq_obj->tis)
132986d259ceSMichael Baum 			claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
133086d259ceSMichael Baum #ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
133186d259ceSMichael Baum 	} else {
133288f2e3f1SMichael Baum 		mlx5_txq_release_devx_resources(txq_obj);
133386d259ceSMichael Baum #endif
133486d259ceSMichael Baum 	}
133586d259ceSMichael Baum }
133686d259ceSMichael Baum 
13378bb2410eSOphir Munk struct mlx5_obj_ops devx_obj_ops = {
13388bb2410eSOphir Munk 	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
13396deb19e1SMichael Baum 	.rxq_obj_new = mlx5_rxq_devx_obj_new,
134032287079SMichael Baum 	.rxq_event_get = mlx5_rx_devx_get_event,
1341c279f187SMichael Baum 	.rxq_obj_modify = mlx5_devx_modify_rq,
13426deb19e1SMichael Baum 	.rxq_obj_release = mlx5_rxq_devx_obj_release,
134325ae7f1aSMichael Baum 	.ind_table_new = mlx5_devx_ind_table_new,
134425ae7f1aSMichael Baum 	.ind_table_destroy = mlx5_devx_ind_table_destroy,
134585552726SMichael Baum 	.hrxq_new = mlx5_devx_hrxq_new,
134685552726SMichael Baum 	.hrxq_destroy = mlx5_devx_tir_destroy,
13470c762e81SMichael Baum 	.drop_action_create = mlx5_devx_drop_action_create,
13480c762e81SMichael Baum 	.drop_action_destroy = mlx5_devx_drop_action_destroy,
134986d259ceSMichael Baum 	.txq_obj_new = mlx5_txq_devx_obj_new,
13505d9f3c3fSMichael Baum 	.txq_obj_modify = mlx5_devx_modify_sq,
135186d259ceSMichael Baum 	.txq_obj_release = mlx5_txq_devx_obj_release,
13528bb2410eSOphir Munk };
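
/*
 * This table is the DevX implementation of the mlx5_obj_ops interface;
 * the Verbs backend provides a matching table (ibv_obj_ops) and one of
 * the two is selected at device spawn. Queue code stays backend
 * agnostic by dispatching through priv->obj_ops; a minimal sketch of
 * such a call site, modeled on the VLAN-strip toggle (function name is
 * illustrative only):
 */
static int
mlx5_vlan_strip_sketch(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq = (*priv->rxqs)[queue];
	struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of(rxq, struct mlx5_rxq_ctrl, rxq);

	/* Dispatch through whichever backend was selected at spawn. */
	return priv->obj_ops.rxq_obj_modify_vlan_strip(rxq_ctrl->obj, on);
}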
1353