xref: /dpdk/drivers/net/mlx5/mlx5_devx.c (revision fa7ad49e96b5dca8fcb774a27d47593b1b6c1bed)
18bb2410eSOphir Munk /* SPDX-License-Identifier: BSD-3-Clause
28bb2410eSOphir Munk  * Copyright 2020 Mellanox Technologies, Ltd
38bb2410eSOphir Munk  */
48bb2410eSOphir Munk 
58bb2410eSOphir Munk #include <stddef.h>
68bb2410eSOphir Munk #include <errno.h>
7c279f187SMichael Baum #include <stdbool.h>
88bb2410eSOphir Munk #include <string.h>
98bb2410eSOphir Munk #include <stdint.h>
108bb2410eSOphir Munk #include <sys/queue.h>
118bb2410eSOphir Munk 
128bb2410eSOphir Munk #include <rte_malloc.h>
138bb2410eSOphir Munk #include <rte_common.h>
148bb2410eSOphir Munk #include <rte_eal_paging.h>
158bb2410eSOphir Munk 
168bb2410eSOphir Munk #include <mlx5_glue.h>
178bb2410eSOphir Munk #include <mlx5_devx_cmds.h>
188bb2410eSOphir Munk #include <mlx5_malloc.h>
198bb2410eSOphir Munk 
208bb2410eSOphir Munk #include "mlx5.h"
218bb2410eSOphir Munk #include "mlx5_common_os.h"
228bb2410eSOphir Munk #include "mlx5_rxtx.h"
238bb2410eSOphir Munk #include "mlx5_utils.h"
248bb2410eSOphir Munk #include "mlx5_devx.h"
2587e2db37SMichael Baum #include "mlx5_flow.h"
268bb2410eSOphir Munk 
27f6dee900SMichael Baum 
28f6dee900SMichael Baum /**
298bb2410eSOphir Munk  * Modify RQ VLAN stripping offload.
308bb2410eSOphir Munk  *
318bb2410eSOphir Munk  * @param rxq_obj
328bb2410eSOphir Munk  *   Rx queue object.
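 * @param on
 *   Enable VLAN stripping when nonzero, disable it when zero.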
338bb2410eSOphir Munk  *
34f6dee900SMichael Baum  * @return
35f6dee900SMichael Baum  *   0 on success, non-0 otherwise
368bb2410eSOphir Munk  */
378bb2410eSOphir Munk static int
388bb2410eSOphir Munk mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
398bb2410eSOphir Munk {
408bb2410eSOphir Munk 	struct mlx5_devx_modify_rq_attr rq_attr;
418bb2410eSOphir Munk 
428bb2410eSOphir Munk 	memset(&rq_attr, 0, sizeof(rq_attr));
438bb2410eSOphir Munk 	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
448bb2410eSOphir Munk 	rq_attr.state = MLX5_RQC_STATE_RDY;
458bb2410eSOphir Munk 	rq_attr.vsd = (on ? 0 : 1);
468bb2410eSOphir Munk 	rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
478bb2410eSOphir Munk 	return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
488bb2410eSOphir Munk }
498bb2410eSOphir Munk 
506deb19e1SMichael Baum /**
51fa2c85ccSMichael Baum  * Modify RQ using DevX API.
52fa2c85ccSMichael Baum  *
53fa2c85ccSMichael Baum  * @param rxq_obj
54fa2c85ccSMichael Baum  *   DevX Rx queue object.
554c6d80f1SMichael Baum  * @param type
564c6d80f1SMichael Baum  *   Type of queue state change.
57fa2c85ccSMichael Baum  *
58fa2c85ccSMichael Baum  * @return
59fa2c85ccSMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
60fa2c85ccSMichael Baum  */
61fa2c85ccSMichael Baum static int
624c6d80f1SMichael Baum mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)
63fa2c85ccSMichael Baum {
64fa2c85ccSMichael Baum 	struct mlx5_devx_modify_rq_attr rq_attr;
65fa2c85ccSMichael Baum 
66fa2c85ccSMichael Baum 	memset(&rq_attr, 0, sizeof(rq_attr));
674c6d80f1SMichael Baum 	switch (type) {
684c6d80f1SMichael Baum 	case MLX5_RXQ_MOD_ERR2RST:
694c6d80f1SMichael Baum 		rq_attr.rq_state = MLX5_RQC_STATE_ERR;
704c6d80f1SMichael Baum 		rq_attr.state = MLX5_RQC_STATE_RST;
714c6d80f1SMichael Baum 		break;
724c6d80f1SMichael Baum 	case MLX5_RXQ_MOD_RST2RDY:
73fa2c85ccSMichael Baum 		rq_attr.rq_state = MLX5_RQC_STATE_RST;
74fa2c85ccSMichael Baum 		rq_attr.state = MLX5_RQC_STATE_RDY;
754c6d80f1SMichael Baum 		break;
764c6d80f1SMichael Baum 	case MLX5_RXQ_MOD_RDY2ERR:
774c6d80f1SMichael Baum 		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
784c6d80f1SMichael Baum 		rq_attr.state = MLX5_RQC_STATE_ERR;
794c6d80f1SMichael Baum 		break;
804c6d80f1SMichael Baum 	case MLX5_RXQ_MOD_RDY2RST:
81fa2c85ccSMichael Baum 		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
82fa2c85ccSMichael Baum 		rq_attr.state = MLX5_RQC_STATE_RST;
834c6d80f1SMichael Baum 		break;
844c6d80f1SMichael Baum 	default:
854c6d80f1SMichael Baum 		break;
86fa2c85ccSMichael Baum 	}
87fa2c85ccSMichael Baum 	return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
88fa2c85ccSMichael Baum }
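
/*
 * Illustrative sketch only, not part of the driver: how a caller could
 * bounce an RQ through RESET and back to READY with the helper above.
 * The MLX5_DEVX_EXAMPLES guard is hypothetical and never defined.
 */
#ifdef MLX5_DEVX_EXAMPLES
static int
mlx5_devx_example_restart_rq(struct mlx5_rxq_obj *rxq_obj)
{
	int ret;

	/* READY -> RESET drains the queue. */
	ret = mlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_RDY2RST);
	if (ret)
		return ret;
	/* RESET -> READY makes it usable again. */
	return mlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_RST2RDY);
}
#endif /* MLX5_DEVX_EXAMPLES */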
89fa2c85ccSMichael Baum 
90fa2c85ccSMichael Baum /**
915d9f3c3fSMichael Baum  * Modify SQ using DevX API.
925d9f3c3fSMichael Baum  *
935d9f3c3fSMichael Baum  * @param obj
945d9f3c3fSMichael Baum  *   DevX Tx queue object.
955d9f3c3fSMichael Baum  * @param type
965d9f3c3fSMichael Baum  *   Type of queue state change.
975d9f3c3fSMichael Baum  * @param dev_port
985d9f3c3fSMichael Baum  *   Unused by DevX; kept so the prototype matches the Verbs callback.
995d9f3c3fSMichael Baum  *
1005d9f3c3fSMichael Baum  * @return
1015d9f3c3fSMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
1025d9f3c3fSMichael Baum  */
1035d9f3c3fSMichael Baum static int
1045d9f3c3fSMichael Baum mlx5_devx_modify_sq(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
1055d9f3c3fSMichael Baum 		    uint8_t dev_port)
1065d9f3c3fSMichael Baum {
1075d9f3c3fSMichael Baum 	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
1085d9f3c3fSMichael Baum 	int ret;
1095d9f3c3fSMichael Baum 
1105d9f3c3fSMichael Baum 	if (type != MLX5_TXQ_MOD_RST2RDY) {
1115d9f3c3fSMichael Baum 		/* Change queue state to reset. */
1125d9f3c3fSMichael Baum 		if (type == MLX5_TXQ_MOD_ERR2RDY)
1135d9f3c3fSMichael Baum 			msq_attr.sq_state = MLX5_SQC_STATE_ERR;
1145d9f3c3fSMichael Baum 		else
1155d9f3c3fSMichael Baum 			msq_attr.sq_state = MLX5_SQC_STATE_RDY;
1165d9f3c3fSMichael Baum 		msq_attr.state = MLX5_SQC_STATE_RST;
1175d9f3c3fSMichael Baum 		ret = mlx5_devx_cmd_modify_sq(obj->sq_devx, &msq_attr);
1185d9f3c3fSMichael Baum 		if (ret) {
1195d9f3c3fSMichael Baum 			DRV_LOG(ERR, "Cannot change the Tx SQ state to RESET:"
1205d9f3c3fSMichael Baum 				" %s", strerror(errno));
1215d9f3c3fSMichael Baum 			rte_errno = errno;
1225d9f3c3fSMichael Baum 			return ret;
1235d9f3c3fSMichael Baum 		}
1245d9f3c3fSMichael Baum 	}
1255d9f3c3fSMichael Baum 	if (type != MLX5_TXQ_MOD_RDY2RST) {
1265d9f3c3fSMichael Baum 		/* Change queue state to ready. */
1275d9f3c3fSMichael Baum 		msq_attr.sq_state = MLX5_SQC_STATE_RST;
1285d9f3c3fSMichael Baum 		msq_attr.state = MLX5_SQC_STATE_RDY;
1295d9f3c3fSMichael Baum 		ret = mlx5_devx_cmd_modify_sq(obj->sq_devx, &msq_attr);
1305d9f3c3fSMichael Baum 		if (ret) {
1315d9f3c3fSMichael Baum 			DRV_LOG(ERR, "Cannot change the Tx SQ state to READY:"
1325d9f3c3fSMichael Baum 				" %s", strerror(errno));
1335d9f3c3fSMichael Baum 			rte_errno = errno;
1345d9f3c3fSMichael Baum 			return ret;
1355d9f3c3fSMichael Baum 		}
1365d9f3c3fSMichael Baum 	}
1375d9f3c3fSMichael Baum 	/*
1385d9f3c3fSMichael Baum 	 * The dev_port argument is meaningful only in the Verbs API. This
1395d9f3c3fSMichael Baum 	 * function and its Verbs counterpart are invoked through the same
1405d9f3c3fSMichael Baum 	 * function pointer, so both must keep the same prototype.
1415d9f3c3fSMichael Baum 	 */
1425d9f3c3fSMichael Baum 	(void)dev_port;
1435d9f3c3fSMichael Baum 	return 0;
1445d9f3c3fSMichael Baum }
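
/*
 * Illustrative sketch only: recovering a Tx SQ from the error state with
 * the helper above. MLX5_TXQ_MOD_ERR2RDY performs ERR -> RST -> RDY in a
 * single call, and dev_port is ignored by this DevX implementation.
 * The MLX5_DEVX_EXAMPLES guard is hypothetical and never defined.
 */
#ifdef MLX5_DEVX_EXAMPLES
static int
mlx5_devx_example_recover_sq(struct mlx5_txq_obj *txq_obj)
{
	return mlx5_devx_modify_sq(txq_obj, MLX5_TXQ_MOD_ERR2RDY, 0);
}
#endif /* MLX5_DEVX_EXAMPLES */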
1455d9f3c3fSMichael Baum 
1465d9f3c3fSMichael Baum /**
1476deb19e1SMichael Baum  * Release the resources allocated for an RQ DevX object.
1486deb19e1SMichael Baum  *
1496deb19e1SMichael Baum  * @param rxq_ctrl
1506deb19e1SMichael Baum  *   DevX Rx queue object.
1516deb19e1SMichael Baum  */
1526deb19e1SMichael Baum static void
15388f2e3f1SMichael Baum mlx5_rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
1546deb19e1SMichael Baum {
155f6dee900SMichael Baum 	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->rq_dbrec_page;
156f6dee900SMichael Baum 
1576deb19e1SMichael Baum 	if (rxq_ctrl->rxq.wqes) {
1586deb19e1SMichael Baum 		mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
1596deb19e1SMichael Baum 		rxq_ctrl->rxq.wqes = NULL;
1606deb19e1SMichael Baum 	}
1616deb19e1SMichael Baum 	if (rxq_ctrl->wq_umem) {
1626deb19e1SMichael Baum 		mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
1636deb19e1SMichael Baum 		rxq_ctrl->wq_umem = NULL;
1646deb19e1SMichael Baum 	}
165f6dee900SMichael Baum 	if (dbr_page) {
166f6dee900SMichael Baum 		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
167f6dee900SMichael Baum 					    mlx5_os_get_umem_id(dbr_page->umem),
168f6dee900SMichael Baum 					    rxq_ctrl->rq_dbr_offset));
169f6dee900SMichael Baum 		rxq_ctrl->rq_dbrec_page = NULL;
170f6dee900SMichael Baum 	}
1716deb19e1SMichael Baum }
1726deb19e1SMichael Baum 
1736deb19e1SMichael Baum /**
1746deb19e1SMichael Baum  * Release the resources allocated for the Rx CQ DevX object.
1756deb19e1SMichael Baum  *
1766deb19e1SMichael Baum  * @param rxq_ctrl
1776deb19e1SMichael Baum  *   DevX Rx queue object.
1786deb19e1SMichael Baum  */
1796deb19e1SMichael Baum static void
18088f2e3f1SMichael Baum mlx5_rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
1816deb19e1SMichael Baum {
182f6dee900SMichael Baum 	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->cq_dbrec_page;
183f6dee900SMichael Baum 
1846deb19e1SMichael Baum 	if (rxq_ctrl->rxq.cqes) {
1856deb19e1SMichael Baum 		rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes);
1866deb19e1SMichael Baum 		rxq_ctrl->rxq.cqes = NULL;
1876deb19e1SMichael Baum 	}
1886deb19e1SMichael Baum 	if (rxq_ctrl->cq_umem) {
1896deb19e1SMichael Baum 		mlx5_glue->devx_umem_dereg(rxq_ctrl->cq_umem);
1906deb19e1SMichael Baum 		rxq_ctrl->cq_umem = NULL;
1916deb19e1SMichael Baum 	}
192f6dee900SMichael Baum 	if (dbr_page) {
193f6dee900SMichael Baum 		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
194f6dee900SMichael Baum 					    mlx5_os_get_umem_id(dbr_page->umem),
195f6dee900SMichael Baum 					    rxq_ctrl->cq_dbr_offset));
196f6dee900SMichael Baum 		rxq_ctrl->cq_dbrec_page = NULL;
197f6dee900SMichael Baum 	}
1986deb19e1SMichael Baum }
1996deb19e1SMichael Baum 
2006deb19e1SMichael Baum /**
2016deb19e1SMichael Baum  * Release an Rx DevX queue object.
2026deb19e1SMichael Baum  *
2036deb19e1SMichael Baum  * @param rxq_obj
2046deb19e1SMichael Baum  *   DevX Rx queue object.
2056deb19e1SMichael Baum  */
2066deb19e1SMichael Baum static void
2076deb19e1SMichael Baum mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
2086deb19e1SMichael Baum {
2096deb19e1SMichael Baum 	MLX5_ASSERT(rxq_obj);
2106deb19e1SMichael Baum 	MLX5_ASSERT(rxq_obj->rq);
211e96242efSMichael Baum 	if (rxq_obj->rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
2124c6d80f1SMichael Baum 		mlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_RDY2RST);
213fa2c85ccSMichael Baum 		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
2146deb19e1SMichael Baum 	} else {
2156deb19e1SMichael Baum 		MLX5_ASSERT(rxq_obj->devx_cq);
2166deb19e1SMichael Baum 		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
2176deb19e1SMichael Baum 		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
2186deb19e1SMichael Baum 		if (rxq_obj->devx_channel)
2196deb19e1SMichael Baum 			mlx5_glue->devx_destroy_event_channel
2206deb19e1SMichael Baum 							(rxq_obj->devx_channel);
22188f2e3f1SMichael Baum 		mlx5_rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl);
22288f2e3f1SMichael Baum 		mlx5_rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl);
2236deb19e1SMichael Baum 	}
2246deb19e1SMichael Baum }
2256deb19e1SMichael Baum 
2266deb19e1SMichael Baum /**
22732287079SMichael Baum  * Get event for an Rx DevX queue object.
22832287079SMichael Baum  *
22932287079SMichael Baum  * @param rxq_obj
23032287079SMichael Baum  *   DevX Rx queue object.
23132287079SMichael Baum  *
23232287079SMichael Baum  * @return
23332287079SMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
23432287079SMichael Baum  */
23532287079SMichael Baum static int
23632287079SMichael Baum mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
23732287079SMichael Baum {
23832287079SMichael Baum #ifdef HAVE_IBV_DEVX_EVENT
23932287079SMichael Baum 	union {
24032287079SMichael Baum 		struct mlx5dv_devx_async_event_hdr event_resp;
24132287079SMichael Baum 		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
24232287079SMichael Baum 	} out;
24332287079SMichael Baum 	int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
24432287079SMichael Baum 					    &out.event_resp,
24532287079SMichael Baum 					    sizeof(out.buf));
24632287079SMichael Baum 
24732287079SMichael Baum 	if (ret < 0) {
24832287079SMichael Baum 		rte_errno = errno;
24932287079SMichael Baum 		return -rte_errno;
25032287079SMichael Baum 	}
25132287079SMichael Baum 	if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->devx_cq) {
25232287079SMichael Baum 		rte_errno = EINVAL;
25332287079SMichael Baum 		return -rte_errno;
25432287079SMichael Baum 	}
25532287079SMichael Baum 	return 0;
25632287079SMichael Baum #else
25732287079SMichael Baum 	(void)rxq_obj;
25832287079SMichael Baum 	rte_errno = ENOTSUP;
25932287079SMichael Baum 	return -rte_errno;
26032287079SMichael Baum #endif /* HAVE_IBV_DEVX_EVENT */
26132287079SMichael Baum }
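
/*
 * Illustrative sketch only: a caller is expected to wait on the event
 * channel fd (filled in mlx5_rxq_devx_obj_new() below) before consuming
 * the CQ event with the helper above. Assumes <poll.h>; error handling
 * is trimmed, and the MLX5_DEVX_EXAMPLES guard is hypothetical.
 */
#ifdef MLX5_DEVX_EXAMPLES
#include <poll.h>

static int
mlx5_devx_example_wait_rx_event(struct mlx5_rxq_obj *rxq_obj, int timeout_ms)
{
	struct pollfd pfd = { .fd = rxq_obj->fd, .events = POLLIN };

	if (poll(&pfd, 1, timeout_ms) <= 0)
		return -1; /* Timeout or poll() failure. */
	/* Drain and validate the completion event for this queue. */
	return mlx5_rx_devx_get_event(rxq_obj);
}
#endif /* MLX5_DEVX_EXAMPLES */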
26232287079SMichael Baum 
26332287079SMichael Baum /**
2646deb19e1SMichael Baum  * Fill common fields of create RQ attributes structure.
2656deb19e1SMichael Baum  *
2666deb19e1SMichael Baum  * @param rxq_data
2676deb19e1SMichael Baum  *   Pointer to Rx queue data.
2686deb19e1SMichael Baum  * @param cqn
2696deb19e1SMichael Baum  *   CQ number to use with this RQ.
2706deb19e1SMichael Baum  * @param rq_attr
2716deb19e1SMichael Baum  *   RQ attributes structure to fill.
2726deb19e1SMichael Baum  */
2736deb19e1SMichael Baum static void
2746deb19e1SMichael Baum mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
2756deb19e1SMichael Baum 			      struct mlx5_devx_create_rq_attr *rq_attr)
2766deb19e1SMichael Baum {
2776deb19e1SMichael Baum 	rq_attr->state = MLX5_RQC_STATE_RST;
2786deb19e1SMichael Baum 	rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
2796deb19e1SMichael Baum 	rq_attr->cqn = cqn;
2806deb19e1SMichael Baum 	rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
2816deb19e1SMichael Baum }
2826deb19e1SMichael Baum 
2836deb19e1SMichael Baum /**
2846deb19e1SMichael Baum  * Fill common fields of DevX WQ attributes structure.
2856deb19e1SMichael Baum  *
2866deb19e1SMichael Baum  * @param priv
2876deb19e1SMichael Baum  *   Pointer to device private data.
2886deb19e1SMichael Baum  * @param rxq_ctrl
2896deb19e1SMichael Baum  *   Pointer to Rx queue control structure.
2906deb19e1SMichael Baum  * @param wq_attr
2916deb19e1SMichael Baum  *   WQ attributes structure to fill.
2926deb19e1SMichael Baum  */
2936deb19e1SMichael Baum static void
2946deb19e1SMichael Baum mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
2956deb19e1SMichael Baum 		       struct mlx5_devx_wq_attr *wq_attr)
2966deb19e1SMichael Baum {
297ff2deadaSAlexander Kozyrev 	wq_attr->end_padding_mode = priv->config.hw_padding ?
2986deb19e1SMichael Baum 					MLX5_WQ_END_PAD_MODE_ALIGN :
2996deb19e1SMichael Baum 					MLX5_WQ_END_PAD_MODE_NONE;
3006deb19e1SMichael Baum 	wq_attr->pd = priv->sh->pdn;
3016deb19e1SMichael Baum 	wq_attr->dbr_addr = rxq_ctrl->rq_dbr_offset;
302f6dee900SMichael Baum 	wq_attr->dbr_umem_id =
303f6dee900SMichael Baum 			mlx5_os_get_umem_id(rxq_ctrl->rq_dbrec_page->umem);
3046deb19e1SMichael Baum 	wq_attr->dbr_umem_valid = 1;
3056deb19e1SMichael Baum 	wq_attr->wq_umem_id = mlx5_os_get_umem_id(rxq_ctrl->wq_umem);
3066deb19e1SMichael Baum 	wq_attr->wq_umem_valid = 1;
3076deb19e1SMichael Baum }
3086deb19e1SMichael Baum 
3096deb19e1SMichael Baum /**
3106deb19e1SMichael Baum  * Create a RQ object using DevX.
3116deb19e1SMichael Baum  *
3126deb19e1SMichael Baum  * @param dev
3136deb19e1SMichael Baum  *   Pointer to Ethernet device.
3146deb19e1SMichael Baum  * @param idx
3156deb19e1SMichael Baum  *   Queue index in DPDK Rx queue array.
3166deb19e1SMichael Baum  *
3176deb19e1SMichael Baum  * @return
318f6dee900SMichael Baum  *   The DevX RQ object initialized, NULL otherwise and rte_errno is set.
3196deb19e1SMichael Baum  */
3206deb19e1SMichael Baum static struct mlx5_devx_obj *
32188f2e3f1SMichael Baum mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
3226deb19e1SMichael Baum {
3236deb19e1SMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
3246deb19e1SMichael Baum 	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
3256deb19e1SMichael Baum 	struct mlx5_rxq_ctrl *rxq_ctrl =
3266deb19e1SMichael Baum 		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
3276deb19e1SMichael Baum 	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
3286deb19e1SMichael Baum 	uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
329f6dee900SMichael Baum 	uint32_t cqn = rxq_ctrl->obj->devx_cq->id;
330f6dee900SMichael Baum 	struct mlx5_devx_dbr_page *dbr_page;
331f6dee900SMichael Baum 	int64_t dbr_offset;
3326deb19e1SMichael Baum 	uint32_t wq_size = 0;
3336deb19e1SMichael Baum 	uint32_t wqe_size = 0;
3346deb19e1SMichael Baum 	uint32_t log_wqe_size = 0;
3356deb19e1SMichael Baum 	void *buf = NULL;
3366deb19e1SMichael Baum 	struct mlx5_devx_obj *rq;
3376deb19e1SMichael Baum 
3386deb19e1SMichael Baum 	/* Fill RQ attributes. */
3396deb19e1SMichael Baum 	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
3406deb19e1SMichael Baum 	rq_attr.flush_in_error_en = 1;
3416deb19e1SMichael Baum 	mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
3426deb19e1SMichael Baum 	/* Fill WQ attributes for this RQ. */
3436deb19e1SMichael Baum 	if (mlx5_rxq_mprq_enabled(rxq_data)) {
3446deb19e1SMichael Baum 		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
3456deb19e1SMichael Baum 		/*
3466deb19e1SMichael Baum 		 * Number of strides in each WQE:
3476deb19e1SMichael Baum 		 * 512*2^single_wqe_log_num_of_strides.
3486deb19e1SMichael Baum 		 */
3496deb19e1SMichael Baum 		rq_attr.wq_attr.single_wqe_log_num_of_strides =
3506deb19e1SMichael Baum 				rxq_data->strd_num_n -
3516deb19e1SMichael Baum 				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
3526deb19e1SMichael Baum 		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
3536deb19e1SMichael Baum 		rq_attr.wq_attr.single_stride_log_num_of_bytes =
3546deb19e1SMichael Baum 				rxq_data->strd_sz_n -
3556deb19e1SMichael Baum 				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
3566deb19e1SMichael Baum 		wqe_size = sizeof(struct mlx5_wqe_mprq);
3576deb19e1SMichael Baum 	} else {
3586deb19e1SMichael Baum 		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
3596deb19e1SMichael Baum 		wqe_size = sizeof(struct mlx5_wqe_data_seg);
3606deb19e1SMichael Baum 	}
3616deb19e1SMichael Baum 	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
3626deb19e1SMichael Baum 	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
3636deb19e1SMichael Baum 	rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
3646deb19e1SMichael Baum 	/* Calculate and allocate WQ memory space. */
3656deb19e1SMichael Baum 	wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
3666deb19e1SMichael Baum 	wq_size = wqe_n * wqe_size;
3676deb19e1SMichael Baum 	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
3686deb19e1SMichael Baum 	if (alignment == (size_t)-1) {
3696deb19e1SMichael Baum 		DRV_LOG(ERR, "Failed to get mem page size");
3706deb19e1SMichael Baum 		rte_errno = ENOMEM;
3716deb19e1SMichael Baum 		return NULL;
3726deb19e1SMichael Baum 	}
3736deb19e1SMichael Baum 	buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
3746deb19e1SMichael Baum 			  alignment, rxq_ctrl->socket);
3756deb19e1SMichael Baum 	if (!buf)
3766deb19e1SMichael Baum 		return NULL;
3776deb19e1SMichael Baum 	rxq_data->wqes = buf;
3786deb19e1SMichael Baum 	rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
3796deb19e1SMichael Baum 						     buf, wq_size, 0);
380f6dee900SMichael Baum 	if (!rxq_ctrl->wq_umem)
381f6dee900SMichael Baum 		goto error;
382f6dee900SMichael Baum 	/* Allocate RQ door-bell. */
383f6dee900SMichael Baum 	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
384f6dee900SMichael Baum 	if (dbr_offset < 0) {
385f6dee900SMichael Baum 		DRV_LOG(ERR, "Failed to allocate RQ door-bell.");
386f6dee900SMichael Baum 		goto error;
3876deb19e1SMichael Baum 	}
388f6dee900SMichael Baum 	rxq_ctrl->rq_dbr_offset = dbr_offset;
389f6dee900SMichael Baum 	rxq_ctrl->rq_dbrec_page = dbr_page;
390f6dee900SMichael Baum 	rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
391f6dee900SMichael Baum 			  (uintptr_t)rxq_ctrl->rq_dbr_offset);
392f6dee900SMichael Baum 	/* Create RQ using DevX API. */
3936deb19e1SMichael Baum 	mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
3946deb19e1SMichael Baum 	rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
3956deb19e1SMichael Baum 	if (!rq)
396f6dee900SMichael Baum 		goto error;
3976deb19e1SMichael Baum 	return rq;
398f6dee900SMichael Baum error:
39988f2e3f1SMichael Baum 	mlx5_rxq_release_devx_rq_resources(rxq_ctrl);
400f6dee900SMichael Baum 	return NULL;
4016deb19e1SMichael Baum }
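
/*
 * Worked sizing example for the function above (illustrative numbers):
 * with elts_n = 10, sges_n = 1 and the non-MPRQ path,
 *   wqe_n        = 1 << (10 - 1) = 512 WQEs,
 *   log_wqe_size = log2above(sizeof(struct mlx5_wqe_data_seg)) + 1 = 4 + 1,
 *   wqe_size     = 1 << 5 = 32 bytes,
 *   wq_size      = 512 * 32 = 16384 bytes,
 * so a 16 KiB buffer is allocated and registered as the work queue umem.
 */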
4026deb19e1SMichael Baum 
4036deb19e1SMichael Baum /**
4046deb19e1SMichael Baum  * Create a DevX CQ object for an Rx queue.
4056deb19e1SMichael Baum  *
4066deb19e1SMichael Baum  * @param dev
4076deb19e1SMichael Baum  *   Pointer to Ethernet device.
4086deb19e1SMichael Baum  * @param idx
4096deb19e1SMichael Baum  *   Queue index in DPDK Rx queue array.
4106deb19e1SMichael Baum  *
4116deb19e1SMichael Baum  * @return
412f6dee900SMichael Baum  *   The DevX CQ object initialized, NULL otherwise and rte_errno is set.
4136deb19e1SMichael Baum  */
4146deb19e1SMichael Baum static struct mlx5_devx_obj *
41588f2e3f1SMichael Baum mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
4166deb19e1SMichael Baum {
4176deb19e1SMichael Baum 	struct mlx5_devx_obj *cq_obj = NULL;
4186deb19e1SMichael Baum 	struct mlx5_devx_cq_attr cq_attr = { 0 };
4196deb19e1SMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
4206deb19e1SMichael Baum 	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
4216deb19e1SMichael Baum 	struct mlx5_rxq_ctrl *rxq_ctrl =
4226deb19e1SMichael Baum 		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
4236deb19e1SMichael Baum 	size_t page_size = rte_mem_page_size();
424f6dee900SMichael Baum 	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
425f6dee900SMichael Baum 	struct mlx5_devx_dbr_page *dbr_page;
426f6dee900SMichael Baum 	int64_t dbr_offset;
4276deb19e1SMichael Baum 	void *buf = NULL;
4286deb19e1SMichael Baum 	uint16_t event_nums[1] = {0};
4296deb19e1SMichael Baum 	uint32_t log_cqe_n;
4306deb19e1SMichael Baum 	uint32_t cq_size;
4316deb19e1SMichael Baum 	int ret = 0;
4326deb19e1SMichael Baum 
4336deb19e1SMichael Baum 	if (page_size == (size_t)-1) {
4346deb19e1SMichael Baum 		DRV_LOG(ERR, "Failed to get page_size.");
4356deb19e1SMichael Baum 		goto error;
4366deb19e1SMichael Baum 	}
4376deb19e1SMichael Baum 	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
4386deb19e1SMichael Baum 	    !rxq_data->lro) {
43938f9369dSDekel Peled 		cq_attr.cqe_comp_en = 1u;
44054c2d46bSAlexander Kozyrev 		rxq_data->mcqe_format = priv->config.cqe_comp_fmt;
44154c2d46bSAlexander Kozyrev 		rxq_data->byte_mask = UINT32_MAX;
44254c2d46bSAlexander Kozyrev 		switch (priv->config.cqe_comp_fmt) {
44354c2d46bSAlexander Kozyrev 		case MLX5_CQE_RESP_FORMAT_HASH:
44454c2d46bSAlexander Kozyrev 			/* fallthrough */
44554c2d46bSAlexander Kozyrev 		case MLX5_CQE_RESP_FORMAT_CSUM:
4460f20acbfSAlexander Kozyrev 			/*
44754c2d46bSAlexander Kozyrev 			 * Select CSUM miniCQE format only for non-vectorized
44854c2d46bSAlexander Kozyrev 			 * MPRQ Rx burst, use HASH miniCQE format for others.
4490f20acbfSAlexander Kozyrev 			 */
4500f20acbfSAlexander Kozyrev 			if (mlx5_rxq_check_vec_support(rxq_data) < 0 &&
4510f20acbfSAlexander Kozyrev 			    mlx5_rxq_mprq_enabled(rxq_data))
4526deb19e1SMichael Baum 				cq_attr.mini_cqe_res_format =
4530f20acbfSAlexander Kozyrev 					MLX5_CQE_RESP_FORMAT_CSUM_STRIDX;
4540f20acbfSAlexander Kozyrev 			else
4550f20acbfSAlexander Kozyrev 				cq_attr.mini_cqe_res_format =
45638f9369dSDekel Peled 					MLX5_CQE_RESP_FORMAT_HASH;
45754c2d46bSAlexander Kozyrev 			rxq_data->mcqe_format = cq_attr.mini_cqe_res_format;
45854c2d46bSAlexander Kozyrev 			break;
45954c2d46bSAlexander Kozyrev 		case MLX5_CQE_RESP_FORMAT_FTAG_STRIDX:
46054c2d46bSAlexander Kozyrev 			rxq_data->byte_mask = MLX5_LEN_WITH_MARK_MASK;
46154c2d46bSAlexander Kozyrev 			/* fallthrough */
46254c2d46bSAlexander Kozyrev 		case MLX5_CQE_RESP_FORMAT_CSUM_STRIDX:
46354c2d46bSAlexander Kozyrev 			cq_attr.mini_cqe_res_format = priv->config.cqe_comp_fmt;
46454c2d46bSAlexander Kozyrev 			break;
46554c2d46bSAlexander Kozyrev 		case MLX5_CQE_RESP_FORMAT_L34H_STRIDX:
46654c2d46bSAlexander Kozyrev 			cq_attr.mini_cqe_res_format = 0;
46754c2d46bSAlexander Kozyrev 			cq_attr.mini_cqe_res_format_ext = 1;
46854c2d46bSAlexander Kozyrev 			break;
46954c2d46bSAlexander Kozyrev 		}
47054c2d46bSAlexander Kozyrev 		DRV_LOG(DEBUG,
47154c2d46bSAlexander Kozyrev 			"Port %u Rx CQE compression is enabled, format %d.",
47254c2d46bSAlexander Kozyrev 			dev->data->port_id, priv->config.cqe_comp_fmt);
4736deb19e1SMichael Baum 		/*
4746deb19e1SMichael Baum 		 * For vectorized Rx, it must not be doubled in order to
4756deb19e1SMichael Baum 		 * make cq_ci and rq_ci aligned.
4766deb19e1SMichael Baum 		 */
4776deb19e1SMichael Baum 		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
4786deb19e1SMichael Baum 			cqe_n *= 2;
4796deb19e1SMichael Baum 	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
4806deb19e1SMichael Baum 		DRV_LOG(DEBUG,
4816deb19e1SMichael Baum 			"Port %u Rx CQE compression is disabled for HW"
4826deb19e1SMichael Baum 			" timestamp.",
4836deb19e1SMichael Baum 			dev->data->port_id);
4846deb19e1SMichael Baum 	} else if (priv->config.cqe_comp && rxq_data->lro) {
4856deb19e1SMichael Baum 		DRV_LOG(DEBUG,
4866deb19e1SMichael Baum 			"Port %u Rx CQE compression is disabled for LRO.",
4876deb19e1SMichael Baum 			dev->data->port_id);
4886deb19e1SMichael Baum 	}
4896deb19e1SMichael Baum 	if (priv->config.cqe_pad)
49038f9369dSDekel Peled 		cq_attr.cqe_size = MLX5_CQE_SIZE_128B;
4916deb19e1SMichael Baum 	log_cqe_n = log2above(cqe_n);
4926deb19e1SMichael Baum 	cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
4936deb19e1SMichael Baum 	buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
4946deb19e1SMichael Baum 				rxq_ctrl->socket);
4956deb19e1SMichael Baum 	if (!buf) {
4966deb19e1SMichael Baum 		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
4976deb19e1SMichael Baum 		goto error;
4986deb19e1SMichael Baum 	}
4996deb19e1SMichael Baum 	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
5006deb19e1SMichael Baum 	rxq_ctrl->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, buf,
5016deb19e1SMichael Baum 						     cq_size,
5026deb19e1SMichael Baum 						     IBV_ACCESS_LOCAL_WRITE);
5036deb19e1SMichael Baum 	if (!rxq_ctrl->cq_umem) {
5046deb19e1SMichael Baum 		DRV_LOG(ERR, "Failed to register umem for CQ.");
5056deb19e1SMichael Baum 		goto error;
5066deb19e1SMichael Baum 	}
507f6dee900SMichael Baum 	/* Allocate CQ door-bell. */
508f6dee900SMichael Baum 	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
509f6dee900SMichael Baum 	if (dbr_offset < 0) {
510f6dee900SMichael Baum 		DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
511f6dee900SMichael Baum 		goto error;
512f6dee900SMichael Baum 	}
513f6dee900SMichael Baum 	rxq_ctrl->cq_dbr_offset = dbr_offset;
514f6dee900SMichael Baum 	rxq_ctrl->cq_dbrec_page = dbr_page;
515f6dee900SMichael Baum 	rxq_data->cq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
516f6dee900SMichael Baum 			  (uintptr_t)rxq_ctrl->cq_dbr_offset);
517f6dee900SMichael Baum 	rxq_data->cq_uar =
518f6dee900SMichael Baum 			mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar);
519f6dee900SMichael Baum 	/* Create CQ using DevX API. */
520e7055bbfSMichael Baum 	cq_attr.eqn = priv->sh->eqn;
5216deb19e1SMichael Baum 	cq_attr.uar_page_id =
5226deb19e1SMichael Baum 			mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar);
5236deb19e1SMichael Baum 	cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem);
5246deb19e1SMichael Baum 	cq_attr.q_umem_valid = 1;
5256deb19e1SMichael Baum 	cq_attr.log_cq_size = log_cqe_n;
5266deb19e1SMichael Baum 	cq_attr.log_page_size = rte_log2_u32(page_size);
5276deb19e1SMichael Baum 	cq_attr.db_umem_offset = rxq_ctrl->cq_dbr_offset;
528f6dee900SMichael Baum 	cq_attr.db_umem_id = mlx5_os_get_umem_id(dbr_page->umem);
5296deb19e1SMichael Baum 	cq_attr.db_umem_valid = 1;
5306deb19e1SMichael Baum 	cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
5316deb19e1SMichael Baum 	if (!cq_obj)
5326deb19e1SMichael Baum 		goto error;
5336deb19e1SMichael Baum 	rxq_data->cqe_n = log_cqe_n;
5346deb19e1SMichael Baum 	rxq_data->cqn = cq_obj->id;
535f6dee900SMichael Baum 	if (rxq_ctrl->obj->devx_channel) {
5366deb19e1SMichael Baum 		ret = mlx5_glue->devx_subscribe_devx_event
537f6dee900SMichael Baum 						(rxq_ctrl->obj->devx_channel,
5386deb19e1SMichael Baum 						 cq_obj->obj,
5396deb19e1SMichael Baum 						 sizeof(event_nums),
5406deb19e1SMichael Baum 						 event_nums,
5416deb19e1SMichael Baum 						 (uint64_t)(uintptr_t)cq_obj);
5426deb19e1SMichael Baum 		if (ret) {
5436deb19e1SMichael Baum 			DRV_LOG(ERR, "Failed to subscribe CQ to event channel.");
5446deb19e1SMichael Baum 			rte_errno = errno;
5456deb19e1SMichael Baum 			goto error;
5466deb19e1SMichael Baum 		}
5476deb19e1SMichael Baum 	}
5486deb19e1SMichael Baum 	/* Initialise CQ to 1's to mark HW ownership for all CQEs. */
5496deb19e1SMichael Baum 	memset((void *)(uintptr_t)rxq_data->cqes, 0xFF, cq_size);
5506deb19e1SMichael Baum 	return cq_obj;
5516deb19e1SMichael Baum error:
5526deb19e1SMichael Baum 	if (cq_obj)
5536deb19e1SMichael Baum 		mlx5_devx_cmd_destroy(cq_obj);
55488f2e3f1SMichael Baum 	mlx5_rxq_release_devx_cq_resources(rxq_ctrl);
5556deb19e1SMichael Baum 	return NULL;
5566deb19e1SMichael Baum }
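
/*
 * CQ sizing note for the function above (illustrative numbers): with CQE
 * compression enabled and a non-vectorized Rx burst, cqe_n is doubled,
 * e.g. cqe_n = 512 becomes 1024, giving log_cqe_n = 10 and
 * cq_size = sizeof(struct mlx5_cqe) * 1024. Vectorized Rx skips the
 * doubling so that cq_ci and rq_ci stay aligned.
 */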
5576deb19e1SMichael Baum 
5586deb19e1SMichael Baum /**
5596deb19e1SMichael Baum  * Create the Rx hairpin queue object.
5606deb19e1SMichael Baum  *
5616deb19e1SMichael Baum  * @param dev
5626deb19e1SMichael Baum  *   Pointer to Ethernet device.
5636deb19e1SMichael Baum  * @param idx
5646deb19e1SMichael Baum  *   Queue index in DPDK Rx queue array.
5656deb19e1SMichael Baum  *
5666deb19e1SMichael Baum  * @return
5671260a87bSMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
5686deb19e1SMichael Baum  */
5691260a87bSMichael Baum static int
5706deb19e1SMichael Baum mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
5716deb19e1SMichael Baum {
5726deb19e1SMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
5736deb19e1SMichael Baum 	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
5746deb19e1SMichael Baum 	struct mlx5_rxq_ctrl *rxq_ctrl =
5756deb19e1SMichael Baum 		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
5766deb19e1SMichael Baum 	struct mlx5_devx_create_rq_attr attr = { 0 };
5771260a87bSMichael Baum 	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
5786deb19e1SMichael Baum 	uint32_t max_wq_data;
5796deb19e1SMichael Baum 
5806deb19e1SMichael Baum 	MLX5_ASSERT(rxq_data);
5811260a87bSMichael Baum 	MLX5_ASSERT(tmpl);
5826deb19e1SMichael Baum 	tmpl->rxq_ctrl = rxq_ctrl;
5836deb19e1SMichael Baum 	attr.hairpin = 1;
5846deb19e1SMichael Baum 	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
5856deb19e1SMichael Baum 	/* Jumbo frames > 9KB should be supported, and as many packets as possible. */
5866deb19e1SMichael Baum 	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
5876deb19e1SMichael Baum 		if (priv->config.log_hp_size > max_wq_data) {
5886deb19e1SMichael Baum 			DRV_LOG(ERR, "Total data size 2^%u is too large "
5896deb19e1SMichael Baum 				"for hairpin.",
5906deb19e1SMichael Baum 				priv->config.log_hp_size);
5916deb19e1SMichael Baum 			rte_errno = ERANGE;
5921260a87bSMichael Baum 			return -rte_errno;
5936deb19e1SMichael Baum 		}
5946deb19e1SMichael Baum 		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
5956deb19e1SMichael Baum 	} else {
5966deb19e1SMichael Baum 		attr.wq_attr.log_hairpin_data_sz =
5976deb19e1SMichael Baum 				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
5986deb19e1SMichael Baum 				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
5996deb19e1SMichael Baum 	}
6006deb19e1SMichael Baum 	/* Set the number of packets to the maximum value for performance. */
6016deb19e1SMichael Baum 	attr.wq_attr.log_hairpin_num_packets =
6026deb19e1SMichael Baum 			attr.wq_attr.log_hairpin_data_sz -
6036deb19e1SMichael Baum 			MLX5_HAIRPIN_QUEUE_STRIDE;
6046deb19e1SMichael Baum 	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
6056deb19e1SMichael Baum 					   rxq_ctrl->socket);
6066deb19e1SMichael Baum 	if (!tmpl->rq) {
6076deb19e1SMichael Baum 		DRV_LOG(ERR,
6086deb19e1SMichael Baum 			"Port %u Rx hairpin queue %u can't create RQ object.",
6096deb19e1SMichael Baum 			dev->data->port_id, idx);
6106deb19e1SMichael Baum 		rte_errno = errno;
6111260a87bSMichael Baum 		return -rte_errno;
6126deb19e1SMichael Baum 	}
6136deb19e1SMichael Baum 	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
6141260a87bSMichael Baum 	return 0;
6156deb19e1SMichael Baum }
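
/*
 * Hairpin sizing example for the function above (illustrative numbers,
 * assuming MLX5_HAIRPIN_QUEUE_STRIDE is 6): with
 * log_hairpin_data_sz = 15 (32 KiB of hairpin data),
 * log_hairpin_num_packets = 15 - 6 = 9, i.e. up to 512 packets.
 */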
6166deb19e1SMichael Baum 
6176deb19e1SMichael Baum /**
6186deb19e1SMichael Baum  * Create the Rx queue DevX object.
6196deb19e1SMichael Baum  *
6206deb19e1SMichael Baum  * @param dev
6216deb19e1SMichael Baum  *   Pointer to Ethernet device.
6226deb19e1SMichael Baum  * @param idx
6236deb19e1SMichael Baum  *   Queue index in DPDK Rx queue array.
6246deb19e1SMichael Baum  *
6256deb19e1SMichael Baum  * @return
6261260a87bSMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
6276deb19e1SMichael Baum  */
6281260a87bSMichael Baum static int
6296deb19e1SMichael Baum mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
6306deb19e1SMichael Baum {
6316deb19e1SMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
6326deb19e1SMichael Baum 	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
6336deb19e1SMichael Baum 	struct mlx5_rxq_ctrl *rxq_ctrl =
6346deb19e1SMichael Baum 		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
6351260a87bSMichael Baum 	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
6366deb19e1SMichael Baum 	int ret = 0;
6376deb19e1SMichael Baum 
6386deb19e1SMichael Baum 	MLX5_ASSERT(rxq_data);
6391260a87bSMichael Baum 	MLX5_ASSERT(tmpl);
6406deb19e1SMichael Baum 	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
6416deb19e1SMichael Baum 		return mlx5_rxq_obj_hairpin_new(dev, idx);
6426deb19e1SMichael Baum 	tmpl->rxq_ctrl = rxq_ctrl;
6436deb19e1SMichael Baum 	if (rxq_ctrl->irq) {
6446deb19e1SMichael Baum 		int devx_ev_flag =
6456deb19e1SMichael Baum 			  MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;
6466deb19e1SMichael Baum 
6476deb19e1SMichael Baum 		tmpl->devx_channel = mlx5_glue->devx_create_event_channel
6486deb19e1SMichael Baum 								(priv->sh->ctx,
6496deb19e1SMichael Baum 								 devx_ev_flag);
6506deb19e1SMichael Baum 		if (!tmpl->devx_channel) {
6516deb19e1SMichael Baum 			rte_errno = errno;
6526deb19e1SMichael Baum 			DRV_LOG(ERR, "Failed to create event channel %d.",
6536deb19e1SMichael Baum 				rte_errno);
6546deb19e1SMichael Baum 			goto error;
6556deb19e1SMichael Baum 		}
6566deb19e1SMichael Baum 		tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
6576deb19e1SMichael Baum 	}
6586deb19e1SMichael Baum 	/* Create CQ using DevX API. */
65988f2e3f1SMichael Baum 	tmpl->devx_cq = mlx5_rxq_create_devx_cq_resources(dev, idx);
6606deb19e1SMichael Baum 	if (!tmpl->devx_cq) {
6616deb19e1SMichael Baum 		DRV_LOG(ERR, "Failed to create CQ.");
6626deb19e1SMichael Baum 		goto error;
6636deb19e1SMichael Baum 	}
6646deb19e1SMichael Baum 	/* Create RQ using DevX API. */
66588f2e3f1SMichael Baum 	tmpl->rq = mlx5_rxq_create_devx_rq_resources(dev, idx);
6666deb19e1SMichael Baum 	if (!tmpl->rq) {
6676deb19e1SMichael Baum 		DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
6686deb19e1SMichael Baum 			dev->data->port_id, idx);
6696deb19e1SMichael Baum 		rte_errno = ENOMEM;
6706deb19e1SMichael Baum 		goto error;
6716deb19e1SMichael Baum 	}
6726deb19e1SMichael Baum 	/* Change queue state to ready. */
6734c6d80f1SMichael Baum 	ret = mlx5_devx_modify_rq(tmpl, MLX5_RXQ_MOD_RST2RDY);
6746deb19e1SMichael Baum 	if (ret)
6756deb19e1SMichael Baum 		goto error;
6766deb19e1SMichael Baum 	rxq_data->cq_arm_sn = 0;
6776deb19e1SMichael Baum 	mlx5_rxq_initialize(rxq_data);
6786deb19e1SMichael Baum 	rxq_data->cq_ci = 0;
6796deb19e1SMichael Baum 	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
6806deb19e1SMichael Baum 	rxq_ctrl->wqn = tmpl->rq->id;
6811260a87bSMichael Baum 	return 0;
6826deb19e1SMichael Baum error:
6836deb19e1SMichael Baum 	ret = rte_errno; /* Save rte_errno before cleanup. */
6846deb19e1SMichael Baum 	if (tmpl->rq)
6856deb19e1SMichael Baum 		claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
6866deb19e1SMichael Baum 	if (tmpl->devx_cq)
6876deb19e1SMichael Baum 		claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq));
6886deb19e1SMichael Baum 	if (tmpl->devx_channel)
6891260a87bSMichael Baum 		mlx5_glue->devx_destroy_event_channel(tmpl->devx_channel);
69088f2e3f1SMichael Baum 	mlx5_rxq_release_devx_rq_resources(rxq_ctrl);
69188f2e3f1SMichael Baum 	mlx5_rxq_release_devx_cq_resources(rxq_ctrl);
6921260a87bSMichael Baum 	rte_errno = ret; /* Restore rte_errno. */
6931260a87bSMichael Baum 	return -rte_errno;
6946deb19e1SMichael Baum }
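
/*
 * Creation order recap for the function above: (1) an event channel when
 * IRQs are requested, (2) the CQ, (3) the RQ pointing at that CQ, (4) the
 * RQ moved from RESET to READY, (5) queue descriptors initialized. The
 * error path tears the objects down in reverse order.
 */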
6956deb19e1SMichael Baum 
69687e2db37SMichael Baum /**
697*fa7ad49eSAndrey Vesnovaty  * Prepare RQT attribute structure for DevX RQT API.
698*fa7ad49eSAndrey Vesnovaty  *
699*fa7ad49eSAndrey Vesnovaty  * @param dev
700*fa7ad49eSAndrey Vesnovaty  *   Pointer to Ethernet device.
701*fa7ad49eSAndrey Vesnovaty  * @param log_n
702*fa7ad49eSAndrey Vesnovaty  *   Log of number of queues in the array.
703*fa7ad49eSAndrey Vesnovaty  * @param queues
704*fa7ad49eSAndrey Vesnovaty  *   Queue indices (queues_n entries) to populate the RQT with.
705*fa7ad49eSAndrey Vesnovaty  *
706*fa7ad49eSAndrey Vesnovaty  * @return
707*fa7ad49eSAndrey Vesnovaty  *   The RQT attr object initialized, NULL otherwise and rte_errno is set.
708*fa7ad49eSAndrey Vesnovaty  */
709*fa7ad49eSAndrey Vesnovaty static struct mlx5_devx_rqt_attr *
710*fa7ad49eSAndrey Vesnovaty mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
711*fa7ad49eSAndrey Vesnovaty 				     const unsigned int log_n,
712*fa7ad49eSAndrey Vesnovaty 				     const uint16_t *queues,
713*fa7ad49eSAndrey Vesnovaty 				     const uint32_t queues_n)
714*fa7ad49eSAndrey Vesnovaty {
715*fa7ad49eSAndrey Vesnovaty 	struct mlx5_priv *priv = dev->data->dev_private;
716*fa7ad49eSAndrey Vesnovaty 	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
717*fa7ad49eSAndrey Vesnovaty 	const unsigned int rqt_n = 1 << log_n;
718*fa7ad49eSAndrey Vesnovaty 	unsigned int i, j;
719*fa7ad49eSAndrey Vesnovaty 
720*fa7ad49eSAndrey Vesnovaty 	rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
721*fa7ad49eSAndrey Vesnovaty 			      rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
722*fa7ad49eSAndrey Vesnovaty 	if (!rqt_attr) {
723*fa7ad49eSAndrey Vesnovaty 		DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
724*fa7ad49eSAndrey Vesnovaty 			dev->data->port_id);
725*fa7ad49eSAndrey Vesnovaty 		rte_errno = ENOMEM;
726*fa7ad49eSAndrey Vesnovaty 		return NULL;
727*fa7ad49eSAndrey Vesnovaty 	}
728*fa7ad49eSAndrey Vesnovaty 	rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
729*fa7ad49eSAndrey Vesnovaty 	rqt_attr->rqt_actual_size = rqt_n;
730*fa7ad49eSAndrey Vesnovaty 	for (i = 0; i != queues_n; ++i) {
731*fa7ad49eSAndrey Vesnovaty 		struct mlx5_rxq_data *rxq = (*priv->rxqs)[queues[i]];
732*fa7ad49eSAndrey Vesnovaty 		struct mlx5_rxq_ctrl *rxq_ctrl =
733*fa7ad49eSAndrey Vesnovaty 				container_of(rxq, struct mlx5_rxq_ctrl, rxq);
734*fa7ad49eSAndrey Vesnovaty 
735*fa7ad49eSAndrey Vesnovaty 		rqt_attr->rq_list[i] = rxq_ctrl->obj->rq->id;
736*fa7ad49eSAndrey Vesnovaty 	}
737*fa7ad49eSAndrey Vesnovaty 	MLX5_ASSERT(i > 0);
738*fa7ad49eSAndrey Vesnovaty 	for (j = 0; i != rqt_n; ++j, ++i)
739*fa7ad49eSAndrey Vesnovaty 		rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
740*fa7ad49eSAndrey Vesnovaty 	return rqt_attr;
741*fa7ad49eSAndrey Vesnovaty }
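
/*
 * Fill-out example for the wrap-around loop above: with log_n = 2
 * (rqt_n = 4) and queues = {3, 5, 7} (queues_n = 3), rq_list becomes
 * {RQ(3), RQ(5), RQ(7), RQ(3)}; the configured queues repeat from the
 * beginning until the power-of-two RQT size is filled.
 */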
742*fa7ad49eSAndrey Vesnovaty 
743*fa7ad49eSAndrey Vesnovaty /**
74425ae7f1aSMichael Baum  * Create RQT using DevX API as a field of the indirection table.
74587e2db37SMichael Baum  *
74687e2db37SMichael Baum  * @param dev
74787e2db37SMichael Baum  *   Pointer to Ethernet device.
74825ae7f1aSMichael Baum  * @param log_n
74925ae7f1aSMichael Baum  *   Log of number of queues in the array.
75025ae7f1aSMichael Baum  * @param ind_tbl
75125ae7f1aSMichael Baum  *   DevX indirection table object.
75287e2db37SMichael Baum  *
75387e2db37SMichael Baum  * @return
75425ae7f1aSMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
75587e2db37SMichael Baum  */
75625ae7f1aSMichael Baum static int
75725ae7f1aSMichael Baum mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
75825ae7f1aSMichael Baum 			struct mlx5_ind_table_obj *ind_tbl)
75987e2db37SMichael Baum {
76087e2db37SMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
76187e2db37SMichael Baum 	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
76287e2db37SMichael Baum 
76325ae7f1aSMichael Baum 	MLX5_ASSERT(ind_tbl);
764*fa7ad49eSAndrey Vesnovaty 	rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
765*fa7ad49eSAndrey Vesnovaty 							ind_tbl->queues,
766*fa7ad49eSAndrey Vesnovaty 							ind_tbl->queues_n);
767*fa7ad49eSAndrey Vesnovaty 	if (!rqt_attr)
76825ae7f1aSMichael Baum 		return -rte_errno;
76987e2db37SMichael Baum 	ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);
77087e2db37SMichael Baum 	mlx5_free(rqt_attr);
77187e2db37SMichael Baum 	if (!ind_tbl->rqt) {
77287e2db37SMichael Baum 		DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
77387e2db37SMichael Baum 			dev->data->port_id);
77487e2db37SMichael Baum 		rte_errno = errno;
77525ae7f1aSMichael Baum 		return -rte_errno;
77687e2db37SMichael Baum 	}
77725ae7f1aSMichael Baum 	return 0;
77887e2db37SMichael Baum }
77987e2db37SMichael Baum 
78087e2db37SMichael Baum /**
781*fa7ad49eSAndrey Vesnovaty  * Modify RQT using DevX API as a field of the indirection table.
782*fa7ad49eSAndrey Vesnovaty  *
783*fa7ad49eSAndrey Vesnovaty  * @param dev
784*fa7ad49eSAndrey Vesnovaty  *   Pointer to Ethernet device.
785*fa7ad49eSAndrey Vesnovaty  * @param log_n
786*fa7ad49eSAndrey Vesnovaty  *   Log of number of queues in the array.
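 * @param queues
 *   Queue indices (queues_n entries) to build the RQT from.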
787*fa7ad49eSAndrey Vesnovaty  * @param ind_tbl
788*fa7ad49eSAndrey Vesnovaty  *   DevX indirection table object.
789*fa7ad49eSAndrey Vesnovaty  *
790*fa7ad49eSAndrey Vesnovaty  * @return
791*fa7ad49eSAndrey Vesnovaty  *   0 on success, a negative errno value otherwise and rte_errno is set.
792*fa7ad49eSAndrey Vesnovaty  */
793*fa7ad49eSAndrey Vesnovaty static int
794*fa7ad49eSAndrey Vesnovaty mlx5_devx_ind_table_modify(struct rte_eth_dev *dev, const unsigned int log_n,
795*fa7ad49eSAndrey Vesnovaty 			   const uint16_t *queues, const uint32_t queues_n,
796*fa7ad49eSAndrey Vesnovaty 			   struct mlx5_ind_table_obj *ind_tbl)
797*fa7ad49eSAndrey Vesnovaty {
798*fa7ad49eSAndrey Vesnovaty 	int ret = 0;
799*fa7ad49eSAndrey Vesnovaty 	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
800*fa7ad49eSAndrey Vesnovaty 
801*fa7ad49eSAndrey Vesnovaty 	MLX5_ASSERT(ind_tbl);
802*fa7ad49eSAndrey Vesnovaty 	rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
803*fa7ad49eSAndrey Vesnovaty 							queues,
804*fa7ad49eSAndrey Vesnovaty 							queues_n);
805*fa7ad49eSAndrey Vesnovaty 	if (!rqt_attr)
806*fa7ad49eSAndrey Vesnovaty 		return -rte_errno;
807*fa7ad49eSAndrey Vesnovaty 	ret = mlx5_devx_cmd_modify_rqt(ind_tbl->rqt, rqt_attr);
808*fa7ad49eSAndrey Vesnovaty 	mlx5_free(rqt_attr);
809*fa7ad49eSAndrey Vesnovaty 	if (ret)
810*fa7ad49eSAndrey Vesnovaty 		DRV_LOG(ERR, "Port %u cannot modify DevX RQT.",
811*fa7ad49eSAndrey Vesnovaty 			dev->data->port_id);
812*fa7ad49eSAndrey Vesnovaty 	return ret;
813*fa7ad49eSAndrey Vesnovaty }
814*fa7ad49eSAndrey Vesnovaty 
815*fa7ad49eSAndrey Vesnovaty /**
81687e2db37SMichael Baum  * Destroy the DevX RQT object.
81787e2db37SMichael Baum  *
81887e2db37SMichael Baum  * @param ind_table
81987e2db37SMichael Baum  *   Indirection table to release.
82087e2db37SMichael Baum  */
82187e2db37SMichael Baum static void
82225ae7f1aSMichael Baum mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
82387e2db37SMichael Baum {
82487e2db37SMichael Baum 	claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
82587e2db37SMichael Baum }
82687e2db37SMichael Baum 
82785552726SMichael Baum /**
828b8cc58c1SAndrey Vesnovaty  * Set TIR attribute struct with relevant input values.
82985552726SMichael Baum  *
830b8cc58c1SAndrey Vesnovaty  * @param[in] dev
83185552726SMichael Baum  *   Pointer to Ethernet device.
832b8cc58c1SAndrey Vesnovaty  * @param[in] rss_key
833b8cc58c1SAndrey Vesnovaty  *   RSS key for the Rx hash queue.
834b8cc58c1SAndrey Vesnovaty  * @param[in] hash_fields
835b8cc58c1SAndrey Vesnovaty  *   Verbs protocol hash field to make the RSS on.
836b8cc58c1SAndrey Vesnovaty  * @param[in] ind_tbl
837b8cc58c1SAndrey Vesnovaty  *   Indirection table for TIR.
838b8cc58c1SAndrey Vesnovaty  * @param[in] tunnel
83985552726SMichael Baum  *   Tunnel type.
840b8cc58c1SAndrey Vesnovaty  * @param[out] tir_attr
841b8cc58c1SAndrey Vesnovaty  *   Parameters structure for TIR creation/modification.
84285552726SMichael Baum  *
84385552726SMichael Baum  * @return
844b8cc58c1SAndrey Vesnovaty  *   The Verbs/DevX object initialised index, 0 otherwise and rte_errno is set.
84585552726SMichael Baum  */
846b8cc58c1SAndrey Vesnovaty static void
847b8cc58c1SAndrey Vesnovaty mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
848b8cc58c1SAndrey Vesnovaty 		       uint64_t hash_fields,
849b8cc58c1SAndrey Vesnovaty 		       const struct mlx5_ind_table_obj *ind_tbl,
850b8cc58c1SAndrey Vesnovaty 		       int tunnel, struct mlx5_devx_tir_attr *tir_attr)
85185552726SMichael Baum {
85285552726SMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
8535a959cbfSMichael Baum 	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]];
85485552726SMichael Baum 	struct mlx5_rxq_ctrl *rxq_ctrl =
85585552726SMichael Baum 		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
856b8cc58c1SAndrey Vesnovaty 	enum mlx5_rxq_type rxq_obj_type = rxq_ctrl->type;
85785552726SMichael Baum 	bool lro = true;
8585a959cbfSMichael Baum 	uint32_t i;
85985552726SMichael Baum 
86085552726SMichael Baum 	/* Enable TIR LRO only if all the queues were configured for. */
8615a959cbfSMichael Baum 	for (i = 0; i < ind_tbl->queues_n; ++i) {
8625a959cbfSMichael Baum 		if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
86385552726SMichael Baum 			lro = false;
86485552726SMichael Baum 			break;
86585552726SMichael Baum 		}
86685552726SMichael Baum 	}
867b8cc58c1SAndrey Vesnovaty 	memset(tir_attr, 0, sizeof(*tir_attr));
868b8cc58c1SAndrey Vesnovaty 	tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
869b8cc58c1SAndrey Vesnovaty 	tir_attr->rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
870b8cc58c1SAndrey Vesnovaty 	tir_attr->tunneled_offload_en = !!tunnel;
87185552726SMichael Baum 	/* If needed, translate hash_fields bitmap to PRM format. */
87285552726SMichael Baum 	if (hash_fields) {
873b8cc58c1SAndrey Vesnovaty 		struct mlx5_rx_hash_field_select *rx_hash_field_select =
87485552726SMichael Baum #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
875b8cc58c1SAndrey Vesnovaty 			hash_fields & IBV_RX_HASH_INNER ?
876b8cc58c1SAndrey Vesnovaty 				&tir_attr->rx_hash_field_selector_inner :
87785552726SMichael Baum #endif
878b8cc58c1SAndrey Vesnovaty 				&tir_attr->rx_hash_field_selector_outer;
87985552726SMichael Baum 		/* 1 bit: 0: IPv4, 1: IPv6. */
88085552726SMichael Baum 		rx_hash_field_select->l3_prot_type =
88185552726SMichael Baum 					!!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
88285552726SMichael Baum 		/* 1 bit: 0: TCP, 1: UDP. */
88385552726SMichael Baum 		rx_hash_field_select->l4_prot_type =
88485552726SMichael Baum 					!!(hash_fields & MLX5_UDP_IBV_RX_HASH);
88585552726SMichael Baum 		/* Bitmask which sets which fields to use in RX Hash. */
88685552726SMichael Baum 		rx_hash_field_select->selected_fields =
88785552726SMichael Baum 			((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
88885552726SMichael Baum 			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
88985552726SMichael Baum 			(!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
89085552726SMichael Baum 			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
89185552726SMichael Baum 			(!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
89285552726SMichael Baum 			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
89385552726SMichael Baum 			(!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
89485552726SMichael Baum 			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
89585552726SMichael Baum 	}
896b8cc58c1SAndrey Vesnovaty 	if (rxq_obj_type == MLX5_RXQ_TYPE_HAIRPIN)
897b8cc58c1SAndrey Vesnovaty 		tir_attr->transport_domain = priv->sh->td->id;
89885552726SMichael Baum 	else
899b8cc58c1SAndrey Vesnovaty 		tir_attr->transport_domain = priv->sh->tdn;
900b8cc58c1SAndrey Vesnovaty 	memcpy(tir_attr->rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
901b8cc58c1SAndrey Vesnovaty 	tir_attr->indirect_table = ind_tbl->rqt->id;
90285552726SMichael Baum 	if (dev->data->dev_conf.lpbk_mode)
903b8cc58c1SAndrey Vesnovaty 		tir_attr->self_lb_block =
904b8cc58c1SAndrey Vesnovaty 					MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
90585552726SMichael Baum 	if (lro) {
906b8cc58c1SAndrey Vesnovaty 		tir_attr->lro_timeout_period_usecs = priv->config.lro.timeout;
907b8cc58c1SAndrey Vesnovaty 		tir_attr->lro_max_msg_sz = priv->max_lro_msg_size;
908b8cc58c1SAndrey Vesnovaty 		tir_attr->lro_enable_mask =
909b8cc58c1SAndrey Vesnovaty 				MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
91085552726SMichael Baum 				MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
91185552726SMichael Baum 	}
912b8cc58c1SAndrey Vesnovaty }
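
/*
 * Bit-packing example for the hash translation above (illustrative): for
 * a TCP/IPv4 4-tuple hash (L3 src/dst plus L4 src/dst over TCP), all four
 * SELECTED_FIELDS bits (SRC_IP, DST_IP, L4_SPORT, L4_DPORT) are set,
 * while l3_prot_type = 0 (IPv4) and l4_prot_type = 0 (TCP).
 */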
913b8cc58c1SAndrey Vesnovaty 
914b8cc58c1SAndrey Vesnovaty /**
915b8cc58c1SAndrey Vesnovaty  * Create an Rx Hash queue.
916b8cc58c1SAndrey Vesnovaty  *
917b8cc58c1SAndrey Vesnovaty  * @param dev
918b8cc58c1SAndrey Vesnovaty  *   Pointer to Ethernet device.
919b8cc58c1SAndrey Vesnovaty  * @param hrxq
920b8cc58c1SAndrey Vesnovaty  *   Pointer to Rx Hash queue.
921b8cc58c1SAndrey Vesnovaty  * @param tunnel
922b8cc58c1SAndrey Vesnovaty  *   Tunnel type.
923b8cc58c1SAndrey Vesnovaty  *
924b8cc58c1SAndrey Vesnovaty  * @return
925b8cc58c1SAndrey Vesnovaty  *   0 on success, a negative errno value otherwise and rte_errno is set.
926b8cc58c1SAndrey Vesnovaty  */
927b8cc58c1SAndrey Vesnovaty static int
928b8cc58c1SAndrey Vesnovaty mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
929b8cc58c1SAndrey Vesnovaty 		   int tunnel __rte_unused)
930b8cc58c1SAndrey Vesnovaty {
931b8cc58c1SAndrey Vesnovaty 	struct mlx5_priv *priv = dev->data->dev_private;
932b8cc58c1SAndrey Vesnovaty 	struct mlx5_devx_tir_attr tir_attr = {0};
933b8cc58c1SAndrey Vesnovaty 	int err;
934b8cc58c1SAndrey Vesnovaty 
935b8cc58c1SAndrey Vesnovaty 	mlx5_devx_tir_attr_set(dev, hrxq->rss_key, hrxq->hash_fields,
936b8cc58c1SAndrey Vesnovaty 			       hrxq->ind_table, tunnel, &tir_attr);
9375a959cbfSMichael Baum 	hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
9385a959cbfSMichael Baum 	if (!hrxq->tir) {
93985552726SMichael Baum 		DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
94085552726SMichael Baum 			dev->data->port_id);
94185552726SMichael Baum 		rte_errno = errno;
94285552726SMichael Baum 		goto error;
94385552726SMichael Baum 	}
94485552726SMichael Baum #ifdef HAVE_IBV_FLOW_DV_SUPPORT
94585552726SMichael Baum 	hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
94685552726SMichael Baum 							       (hrxq->tir->obj);
94785552726SMichael Baum 	if (!hrxq->action) {
94885552726SMichael Baum 		rte_errno = errno;
94985552726SMichael Baum 		goto error;
95085552726SMichael Baum 	}
95185552726SMichael Baum #endif
9525a959cbfSMichael Baum 	return 0;
95385552726SMichael Baum error:
95485552726SMichael Baum 	err = rte_errno; /* Save rte_errno before cleanup. */
9555a959cbfSMichael Baum 	if (hrxq->tir)
9565a959cbfSMichael Baum 		claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
95785552726SMichael Baum 	rte_errno = err; /* Restore rte_errno. */
9585a959cbfSMichael Baum 	return -rte_errno;
95985552726SMichael Baum }
96085552726SMichael Baum 
96185552726SMichael Baum /**
96285552726SMichael Baum  * Destroy a DevX TIR object.
96385552726SMichael Baum  *
96485552726SMichael Baum  * @param hrxq
96585552726SMichael Baum  *   Hash Rx queue to release its tir.
96685552726SMichael Baum  */
96785552726SMichael Baum static void
96885552726SMichael Baum mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
96985552726SMichael Baum {
97085552726SMichael Baum 	claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
97185552726SMichael Baum }
97285552726SMichael Baum 
9735eaf882eSMichael Baum /**
974b8cc58c1SAndrey Vesnovaty  * Modify an Rx Hash queue configuration.
975b8cc58c1SAndrey Vesnovaty  *
976b8cc58c1SAndrey Vesnovaty  * @param dev
977b8cc58c1SAndrey Vesnovaty  *   Pointer to Ethernet device.
978b8cc58c1SAndrey Vesnovaty  * @param hrxq
979b8cc58c1SAndrey Vesnovaty  *   Hash Rx queue to modify.
980b8cc58c1SAndrey Vesnovaty  * @param rss_key
981b8cc58c1SAndrey Vesnovaty  *   RSS key for the Rx hash queue.
982b8cc58c1SAndrey Vesnovaty  * @param hash_fields
983b8cc58c1SAndrey Vesnovaty  *   Verbs protocol hash field to make the RSS on.
984b8cc58c1SAndrey Vesnovaty  * @param[in] ind_tbl
985b8cc58c1SAndrey Vesnovaty  *   Indirection table for TIR.
986b8cc58c1SAndrey Vesnovaty  *
987b8cc58c1SAndrey Vesnovaty  * @return
988b8cc58c1SAndrey Vesnovaty  *   0 on success, a negative errno value otherwise and rte_errno is set.
989b8cc58c1SAndrey Vesnovaty  */
990b8cc58c1SAndrey Vesnovaty static int
991b8cc58c1SAndrey Vesnovaty mlx5_devx_hrxq_modify(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
992b8cc58c1SAndrey Vesnovaty 		       const uint8_t *rss_key,
993b8cc58c1SAndrey Vesnovaty 		       uint64_t hash_fields,
994b8cc58c1SAndrey Vesnovaty 		       const struct mlx5_ind_table_obj *ind_tbl)
995b8cc58c1SAndrey Vesnovaty {
996b8cc58c1SAndrey Vesnovaty 	struct mlx5_devx_modify_tir_attr modify_tir = {0};
997b8cc58c1SAndrey Vesnovaty 
998b8cc58c1SAndrey Vesnovaty 	/*
999b8cc58c1SAndrey Vesnovaty 	 * Untested modification fields:
1000b8cc58c1SAndrey Vesnovaty 	 * - rx_hash_symmetric is not set in hrxq_new(),
1001b8cc58c1SAndrey Vesnovaty 	 * - rx_hash_fn is hard-coded in hrxq_new(),
1002b8cc58c1SAndrey Vesnovaty 	 * - lro_xxx is not set after rxq setup.
1003b8cc58c1SAndrey Vesnovaty 	 */
1004b8cc58c1SAndrey Vesnovaty 	if (ind_tbl != hrxq->ind_table)
1005b8cc58c1SAndrey Vesnovaty 		modify_tir.modify_bitmask |=
1006b8cc58c1SAndrey Vesnovaty 			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_INDIRECT_TABLE;
1007b8cc58c1SAndrey Vesnovaty 	if (hash_fields != hrxq->hash_fields ||
1008b8cc58c1SAndrey Vesnovaty 			memcmp(hrxq->rss_key, rss_key, MLX5_RSS_HASH_KEY_LEN))
1009b8cc58c1SAndrey Vesnovaty 		modify_tir.modify_bitmask |=
1010b8cc58c1SAndrey Vesnovaty 			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_HASH;
1011b8cc58c1SAndrey Vesnovaty 	mlx5_devx_tir_attr_set(dev, rss_key, hash_fields, ind_tbl,
1012b8cc58c1SAndrey Vesnovaty 			       0, /* N/A - tunnel modification unsupported */
1013b8cc58c1SAndrey Vesnovaty 			       &modify_tir.tir);
1014b8cc58c1SAndrey Vesnovaty 	modify_tir.tirn = hrxq->tir->id;
1015b8cc58c1SAndrey Vesnovaty 	if (mlx5_devx_cmd_modify_tir(hrxq->tir, &modify_tir)) {
1016b8cc58c1SAndrey Vesnovaty 		DRV_LOG(ERR, "Port %u cannot modify DevX TIR.",
1017b8cc58c1SAndrey Vesnovaty 			dev->data->port_id);
1018b8cc58c1SAndrey Vesnovaty 		rte_errno = errno;
1019b8cc58c1SAndrey Vesnovaty 		return -rte_errno;
1020b8cc58c1SAndrey Vesnovaty 	}
1021b8cc58c1SAndrey Vesnovaty 	return 0;
1022b8cc58c1SAndrey Vesnovaty }
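
/*
 * Illustrative sketch only: refreshing just the RSS key of an existing
 * hash queue with the helper above. The indirection table and hash fields
 * are passed through unchanged, so only the HASH modify bit gets set.
 * The MLX5_DEVX_EXAMPLES guard is hypothetical and never defined.
 */
#ifdef MLX5_DEVX_EXAMPLES
static int
mlx5_devx_example_update_rss_key(struct rte_eth_dev *dev,
				 struct mlx5_hrxq *hrxq,
				 const uint8_t *new_key)
{
	return mlx5_devx_hrxq_modify(dev, hrxq, new_key, hrxq->hash_fields,
				     hrxq->ind_table);
}
#endif /* MLX5_DEVX_EXAMPLES */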
1023b8cc58c1SAndrey Vesnovaty 
1024b8cc58c1SAndrey Vesnovaty /**
10250c762e81SMichael Baum  * Create a DevX drop action for Rx Hash queue.
10265eaf882eSMichael Baum  *
10275eaf882eSMichael Baum  * @param dev
10285eaf882eSMichael Baum  *   Pointer to Ethernet device.
10295eaf882eSMichael Baum  *
10305eaf882eSMichael Baum  * @return
10310c762e81SMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
10325eaf882eSMichael Baum  */
10330c762e81SMichael Baum static int
10340c762e81SMichael Baum mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
10355eaf882eSMichael Baum {
10365eaf882eSMichael Baum 	(void)dev;
103786d259ceSMichael Baum 	DRV_LOG(ERR, "DevX drop action is not supported yet.");
10385eaf882eSMichael Baum 	rte_errno = ENOTSUP;
10390c762e81SMichael Baum 	return -rte_errno;
10405eaf882eSMichael Baum }
10415eaf882eSMichael Baum 
10425eaf882eSMichael Baum /**
10435eaf882eSMichael Baum  * Release a drop hash Rx queue.
10445eaf882eSMichael Baum  *
10455eaf882eSMichael Baum  * @param dev
10465eaf882eSMichael Baum  *   Pointer to Ethernet device.
10475eaf882eSMichael Baum  */
10485eaf882eSMichael Baum static void
10490c762e81SMichael Baum mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
10505eaf882eSMichael Baum {
10515eaf882eSMichael Baum 	(void)dev;
105286d259ceSMichael Baum 	DRV_LOG(ERR, "DevX drop action is not supported yet.");
10535eaf882eSMichael Baum 	rte_errno = ENOTSUP;
10545eaf882eSMichael Baum }
10555eaf882eSMichael Baum 
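/*
 * Caller-side sketch (assumed, not driver code): until DevX drop actions
 * are implemented, any caller of the drop_action_create op must be ready
 * for an ENOTSUP failure and either fall back or propagate the error.
 */
static int
mlx5_devx_drop_action_try_sketch(struct rte_eth_dev *dev)
{
	if (mlx5_devx_drop_action_create(dev) < 0)
		return -rte_errno; /* rte_errno == ENOTSUP for now. */
	return 0;
}
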
105686d259ceSMichael Baum /**
105786d259ceSMichael Baum  * Create the Tx hairpin queue object.
105886d259ceSMichael Baum  *
105986d259ceSMichael Baum  * @param dev
106086d259ceSMichael Baum  *   Pointer to Ethernet device.
106186d259ceSMichael Baum  * @param idx
106286d259ceSMichael Baum  *   Queue index in DPDK Tx queue array.
106386d259ceSMichael Baum  *
106486d259ceSMichael Baum  * @return
1065f49f4483SMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
106686d259ceSMichael Baum  */
1067f49f4483SMichael Baum static int
106886d259ceSMichael Baum mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
106986d259ceSMichael Baum {
107086d259ceSMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
107186d259ceSMichael Baum 	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
107286d259ceSMichael Baum 	struct mlx5_txq_ctrl *txq_ctrl =
107386d259ceSMichael Baum 		container_of(txq_data, struct mlx5_txq_ctrl, txq);
107486d259ceSMichael Baum 	struct mlx5_devx_create_sq_attr attr = { 0 };
1075f49f4483SMichael Baum 	struct mlx5_txq_obj *tmpl = txq_ctrl->obj;
107686d259ceSMichael Baum 	uint32_t max_wq_data;
107786d259ceSMichael Baum 
107886d259ceSMichael Baum 	MLX5_ASSERT(txq_data);
1079f49f4483SMichael Baum 	MLX5_ASSERT(tmpl);
108086d259ceSMichael Baum 	tmpl->txq_ctrl = txq_ctrl;
108186d259ceSMichael Baum 	attr.hairpin = 1;
108286d259ceSMichael Baum 	attr.tis_lst_sz = 1;
108386d259ceSMichael Baum 	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
108486d259ceSMichael Baum 	/* Jumbo frames > 9 KB should be supported, as well as multiple packets. */
108586d259ceSMichael Baum 	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
108686d259ceSMichael Baum 		if (priv->config.log_hp_size > max_wq_data) {
108786d259ceSMichael Baum 			DRV_LOG(ERR, "Total data size 2^%u is "
108886d259ceSMichael Baum 				"too large for hairpin.",
108986d259ceSMichael Baum 				priv->config.log_hp_size);
109086d259ceSMichael Baum 			rte_errno = ERANGE;
1091f49f4483SMichael Baum 			return -rte_errno;
109286d259ceSMichael Baum 		}
109386d259ceSMichael Baum 		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
109486d259ceSMichael Baum 	} else {
109586d259ceSMichael Baum 		attr.wq_attr.log_hairpin_data_sz =
109686d259ceSMichael Baum 				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
109786d259ceSMichael Baum 				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
109886d259ceSMichael Baum 	}
109986d259ceSMichael Baum 	/* Set the packets number to the maximum value for performance. */
110086d259ceSMichael Baum 	attr.wq_attr.log_hairpin_num_packets =
110186d259ceSMichael Baum 			attr.wq_attr.log_hairpin_data_sz -
110286d259ceSMichael Baum 			MLX5_HAIRPIN_QUEUE_STRIDE;
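	/*
	 * Worked example (constant values assumed): with
	 * log_hairpin_data_sz = 16 (64 KB of hairpin data) and
	 * MLX5_HAIRPIN_QUEUE_STRIDE = 6 (64 B strides),
	 * log_hairpin_num_packets = 16 - 6 = 10, i.e. up to 1024 packets.
	 */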
110386d259ceSMichael Baum 	attr.tis_num = priv->sh->tis->id;
110486d259ceSMichael Baum 	tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
110586d259ceSMichael Baum 	if (!tmpl->sq) {
110686d259ceSMichael Baum 		DRV_LOG(ERR,
110786d259ceSMichael Baum 			"Port %u Tx hairpin queue %u cannot create SQ object.",
110886d259ceSMichael Baum 			dev->data->port_id, idx);
110986d259ceSMichael Baum 		rte_errno = errno;
1110f49f4483SMichael Baum 		return -rte_errno;
111186d259ceSMichael Baum 	}
1112f49f4483SMichael Baum 	return 0;
111386d259ceSMichael Baum }
111486d259ceSMichael Baum 
111586d259ceSMichael Baum #ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
111686d259ceSMichael Baum /**
111786d259ceSMichael Baum  * Release DevX SQ resources.
111886d259ceSMichael Baum  *
111986d259ceSMichael Baum  * @param txq_obj
112086d259ceSMichael Baum  *   DevX Tx queue object.
112186d259ceSMichael Baum  */
112286d259ceSMichael Baum static void
112388f2e3f1SMichael Baum mlx5_txq_release_devx_sq_resources(struct mlx5_txq_obj *txq_obj)
112486d259ceSMichael Baum {
11258178d9beSTal Shnaiderman 	if (txq_obj->sq_devx) {
112686d259ceSMichael Baum 		claim_zero(mlx5_devx_cmd_destroy(txq_obj->sq_devx));
11278178d9beSTal Shnaiderman 		txq_obj->sq_devx = NULL;
11288178d9beSTal Shnaiderman 	}
11298178d9beSTal Shnaiderman 	if (txq_obj->sq_umem) {
113086d259ceSMichael Baum 		claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->sq_umem));
11318178d9beSTal Shnaiderman 		txq_obj->sq_umem = NULL;
11328178d9beSTal Shnaiderman 	}
11338178d9beSTal Shnaiderman 	if (txq_obj->sq_buf) {
113486d259ceSMichael Baum 		mlx5_free(txq_obj->sq_buf);
11358178d9beSTal Shnaiderman 		txq_obj->sq_buf = NULL;
11368178d9beSTal Shnaiderman 	}
11378178d9beSTal Shnaiderman 	if (txq_obj->sq_dbrec_page) {
113886d259ceSMichael Baum 		claim_zero(mlx5_release_dbr(&txq_obj->txq_ctrl->priv->dbrpgs,
113986d259ceSMichael Baum 					    mlx5_os_get_umem_id
114086d259ceSMichael Baum 						 (txq_obj->sq_dbrec_page->umem),
114186d259ceSMichael Baum 					    txq_obj->sq_dbrec_offset));
11428178d9beSTal Shnaiderman 		txq_obj->sq_dbrec_page = NULL;
11438178d9beSTal Shnaiderman 	}
114486d259ceSMichael Baum }
114586d259ceSMichael Baum 
114686d259ceSMichael Baum /**
114786d259ceSMichael Baum  * Release DevX Tx CQ resources.
114886d259ceSMichael Baum  *
114986d259ceSMichael Baum  * @param txq_obj
115086d259ceSMichael Baum  *   DevX Tx queue object.
115186d259ceSMichael Baum  */
115286d259ceSMichael Baum static void
115388f2e3f1SMichael Baum mlx5_txq_release_devx_cq_resources(struct mlx5_txq_obj *txq_obj)
115486d259ceSMichael Baum {
115586d259ceSMichael Baum 	if (txq_obj->cq_devx)
115686d259ceSMichael Baum 		claim_zero(mlx5_devx_cmd_destroy(txq_obj->cq_devx));
115786d259ceSMichael Baum 	if (txq_obj->cq_umem)
115886d259ceSMichael Baum 		claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->cq_umem));
115986d259ceSMichael Baum 	if (txq_obj->cq_buf)
116086d259ceSMichael Baum 		mlx5_free(txq_obj->cq_buf);
116186d259ceSMichael Baum 	if (txq_obj->cq_dbrec_page)
116286d259ceSMichael Baum 		claim_zero(mlx5_release_dbr(&txq_obj->txq_ctrl->priv->dbrpgs,
116386d259ceSMichael Baum 					    mlx5_os_get_umem_id
116486d259ceSMichael Baum 						 (txq_obj->cq_dbrec_page->umem),
116586d259ceSMichael Baum 					    txq_obj->cq_dbrec_offset));
116686d259ceSMichael Baum }
116786d259ceSMichael Baum 
116886d259ceSMichael Baum /**
116986d259ceSMichael Baum  * Destroy the Tx queue DevX object.
117086d259ceSMichael Baum  *
117186d259ceSMichael Baum  * @param txq_obj
117286d259ceSMichael Baum  *   Txq object to destroy.
117386d259ceSMichael Baum  */
117486d259ceSMichael Baum static void
117588f2e3f1SMichael Baum mlx5_txq_release_devx_resources(struct mlx5_txq_obj *txq_obj)
117686d259ceSMichael Baum {
117788f2e3f1SMichael Baum 	mlx5_txq_release_devx_cq_resources(txq_obj);
117888f2e3f1SMichael Baum 	mlx5_txq_release_devx_sq_resources(txq_obj);
117986d259ceSMichael Baum }
118086d259ceSMichael Baum 
118186d259ceSMichael Baum /**
118288f2e3f1SMichael Baum  * Create a DevX CQ object and its resources for a Tx queue.
118386d259ceSMichael Baum  *
118486d259ceSMichael Baum  * @param dev
118586d259ceSMichael Baum  *   Pointer to Ethernet device.
118686d259ceSMichael Baum  * @param idx
118786d259ceSMichael Baum  *   Queue index in DPDK Tx queue array.
118886d259ceSMichael Baum  *
118986d259ceSMichael Baum  * @return
119088f2e3f1SMichael Baum  *   Number of CQEs in the CQ on success, 0 otherwise and rte_errno is set.
119186d259ceSMichael Baum  */
119288f2e3f1SMichael Baum static uint32_t
119388f2e3f1SMichael Baum mlx5_txq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
119486d259ceSMichael Baum {
119586d259ceSMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
119686d259ceSMichael Baum 	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
119788f2e3f1SMichael Baum 	struct mlx5_txq_ctrl *txq_ctrl =
119888f2e3f1SMichael Baum 			container_of(txq_data, struct mlx5_txq_ctrl, txq);
119988f2e3f1SMichael Baum 	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
120086d259ceSMichael Baum 	struct mlx5_devx_cq_attr cq_attr = { 0 };
120186d259ceSMichael Baum 	struct mlx5_cqe *cqe;
120286d259ceSMichael Baum 	size_t page_size;
120386d259ceSMichael Baum 	size_t alignment;
120488f2e3f1SMichael Baum 	uint32_t cqe_n;
120586d259ceSMichael Baum 	uint32_t i;
120686d259ceSMichael Baum 	int ret;
120786d259ceSMichael Baum 
120886d259ceSMichael Baum 	MLX5_ASSERT(txq_data);
120986d259ceSMichael Baum 	MLX5_ASSERT(txq_obj);
121086d259ceSMichael Baum 	page_size = rte_mem_page_size();
121186d259ceSMichael Baum 	if (page_size == (size_t)-1) {
121286d259ceSMichael Baum 		DRV_LOG(ERR, "Failed to get mem page size.");
121386d259ceSMichael Baum 		rte_errno = ENOMEM;
121488f2e3f1SMichael Baum 		return 0;
121586d259ceSMichael Baum 	}
121686d259ceSMichael Baum 	/* Allocate memory buffer for CQEs. */
121786d259ceSMichael Baum 	alignment = MLX5_CQE_BUF_ALIGNMENT;
121886d259ceSMichael Baum 	if (alignment == (size_t)-1) {
121986d259ceSMichael Baum 		DRV_LOG(ERR, "Failed to get CQE buf alignment.");
122086d259ceSMichael Baum 		rte_errno = ENOMEM;
122188f2e3f1SMichael Baum 		return 0;
122286d259ceSMichael Baum 	}
122388f2e3f1SMichael Baum 	/* Create the Completion Queue. */
122488f2e3f1SMichael Baum 	cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
122588f2e3f1SMichael Baum 		1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
122686d259ceSMichael Baum 	cqe_n = 1UL << log2above(cqe_n);
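	/*
	 * Worked example (threshold values assumed): with elts_n = 10
	 * (1024 elements), MLX5_TX_COMP_THRESH = 32 and
	 * MLX5_TX_COMP_THRESH_INLINE_DIV = 8, the raw count is
	 * 1024 / 32 + 1 + 8 = 41, rounded up to the next power of two: 64.
	 */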
122786d259ceSMichael Baum 	if (cqe_n > UINT16_MAX) {
122886d259ceSMichael Baum 		DRV_LOG(ERR,
122986d259ceSMichael Baum 			"Port %u Tx queue %u requests too many CQEs %u.",
123086d259ceSMichael Baum 			dev->data->port_id, txq_data->idx, cqe_n);
123186d259ceSMichael Baum 		rte_errno = EINVAL;
123288f2e3f1SMichael Baum 		return 0;
123386d259ceSMichael Baum 	}
123486d259ceSMichael Baum 	txq_obj->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
123586d259ceSMichael Baum 				      cqe_n * sizeof(struct mlx5_cqe),
123686d259ceSMichael Baum 				      alignment,
123786d259ceSMichael Baum 				      priv->sh->numa_node);
123886d259ceSMichael Baum 	if (!txq_obj->cq_buf) {
123986d259ceSMichael Baum 		DRV_LOG(ERR,
124086d259ceSMichael Baum 			"Port %u Tx queue %u cannot allocate memory (CQ).",
124186d259ceSMichael Baum 			dev->data->port_id, txq_data->idx);
124286d259ceSMichael Baum 		rte_errno = ENOMEM;
124388f2e3f1SMichael Baum 		return 0;
124486d259ceSMichael Baum 	}
124586d259ceSMichael Baum 	/* Register allocated buffer in user space with DevX. */
124686d259ceSMichael Baum 	txq_obj->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
124786d259ceSMichael Baum 						(void *)txq_obj->cq_buf,
124886d259ceSMichael Baum 						cqe_n * sizeof(struct mlx5_cqe),
124986d259ceSMichael Baum 						IBV_ACCESS_LOCAL_WRITE);
125086d259ceSMichael Baum 	if (!txq_obj->cq_umem) {
125186d259ceSMichael Baum 		rte_errno = errno;
125286d259ceSMichael Baum 		DRV_LOG(ERR,
125386d259ceSMichael Baum 			"Port %u Tx queue %u cannot register memory (CQ).",
125486d259ceSMichael Baum 			dev->data->port_id, txq_data->idx);
125586d259ceSMichael Baum 		goto error;
125686d259ceSMichael Baum 	}
125786d259ceSMichael Baum 	/* Allocate doorbell record for completion queue. */
125886d259ceSMichael Baum 	txq_obj->cq_dbrec_offset = mlx5_get_dbr(priv->sh->ctx,
125986d259ceSMichael Baum 						&priv->dbrpgs,
126086d259ceSMichael Baum 						&txq_obj->cq_dbrec_page);
126186d259ceSMichael Baum 	if (txq_obj->cq_dbrec_offset < 0) {
126286d259ceSMichael Baum 		rte_errno = errno;
126386d259ceSMichael Baum 		DRV_LOG(ERR, "Failed to allocate CQ doorbell record.");
126486d259ceSMichael Baum 		goto error;
126586d259ceSMichael Baum 	}
126686d259ceSMichael Baum 	cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
126786d259ceSMichael Baum 			    MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
126886d259ceSMichael Baum 	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar);
126986d259ceSMichael Baum 	cq_attr.eqn = priv->sh->eqn;
127086d259ceSMichael Baum 	cq_attr.q_umem_valid = 1;
127186d259ceSMichael Baum 	cq_attr.q_umem_offset = (uintptr_t)txq_obj->cq_buf % page_size;
127286d259ceSMichael Baum 	cq_attr.q_umem_id = mlx5_os_get_umem_id(txq_obj->cq_umem);
127386d259ceSMichael Baum 	cq_attr.db_umem_valid = 1;
127486d259ceSMichael Baum 	cq_attr.db_umem_offset = txq_obj->cq_dbrec_offset;
127586d259ceSMichael Baum 	cq_attr.db_umem_id = mlx5_os_get_umem_id(txq_obj->cq_dbrec_page->umem);
127686d259ceSMichael Baum 	cq_attr.log_cq_size = rte_log2_u32(cqe_n);
127786d259ceSMichael Baum 	cq_attr.log_page_size = rte_log2_u32(page_size);
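	/*
	 * The CQE buffer is registered as a page-granular umem, so
	 * q_umem_offset records where the CQE array starts within the
	 * first page. The doorbell record lives in a shared dbrec page
	 * and is addressed by its own umem id and byte offset.
	 */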
127886d259ceSMichael Baum 	/* Create completion queue object with DevX. */
127988f2e3f1SMichael Baum 	txq_obj->cq_devx = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
128088f2e3f1SMichael Baum 	if (!txq_obj->cq_devx) {
128186d259ceSMichael Baum 		rte_errno = errno;
128286d259ceSMichael Baum 		DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
128386d259ceSMichael Baum 			dev->data->port_id, idx);
128486d259ceSMichael Baum 		goto error;
128586d259ceSMichael Baum 	}
128686d259ceSMichael Baum 	/* Initially fill the CQ buffer with the invalid CQE opcode. */
128786d259ceSMichael Baum 	cqe = (struct mlx5_cqe *)txq_obj->cq_buf;
128888f2e3f1SMichael Baum 	for (i = 0; i < cqe_n; i++) {
128986d259ceSMichael Baum 		cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
129086d259ceSMichael Baum 		++cqe;
129186d259ceSMichael Baum 	}
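	/*
	 * Marking every CQE invalid and hardware-owned keeps the Tx
	 * datapath from treating stale buffer contents as completions
	 * before the HCA writes the first real CQE.
	 */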
129288f2e3f1SMichael Baum 	return cqe_n;
129386d259ceSMichael Baum error:
129486d259ceSMichael Baum 	ret = rte_errno;
129588f2e3f1SMichael Baum 	mlx5_txq_release_devx_cq_resources(txq_obj);
129686d259ceSMichael Baum 	rte_errno = ret;
129788f2e3f1SMichael Baum 	return 0;
129886d259ceSMichael Baum }
129986d259ceSMichael Baum 
130086d259ceSMichael Baum /**
130188f2e3f1SMichael Baum  * Create a SQ object and its resources using DevX.
130286d259ceSMichael Baum  *
130386d259ceSMichael Baum  * @param dev
130486d259ceSMichael Baum  *   Pointer to Ethernet device.
130586d259ceSMichael Baum  * @param idx
130686d259ceSMichael Baum  *   Queue index in DPDK Tx queue array.
130786d259ceSMichael Baum  *
130886d259ceSMichael Baum  * @return
130988f2e3f1SMichael Baum  *   Number of WQEs in the SQ on success, 0 otherwise and rte_errno is set.
131086d259ceSMichael Baum  */
131188f2e3f1SMichael Baum static uint32_t
131288f2e3f1SMichael Baum mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx)
131386d259ceSMichael Baum {
131486d259ceSMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
131586d259ceSMichael Baum 	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
131688f2e3f1SMichael Baum 	struct mlx5_txq_ctrl *txq_ctrl =
131788f2e3f1SMichael Baum 			container_of(txq_data, struct mlx5_txq_ctrl, txq);
131888f2e3f1SMichael Baum 	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
131986d259ceSMichael Baum 	struct mlx5_devx_create_sq_attr sq_attr = { 0 };
132086d259ceSMichael Baum 	size_t page_size;
132186d259ceSMichael Baum 	uint32_t wqe_n;
132286d259ceSMichael Baum 	int ret;
132386d259ceSMichael Baum 
132486d259ceSMichael Baum 	MLX5_ASSERT(txq_data);
132586d259ceSMichael Baum 	MLX5_ASSERT(txq_obj);
132686d259ceSMichael Baum 	page_size = rte_mem_page_size();
132786d259ceSMichael Baum 	if (page_size == (size_t)-1) {
132886d259ceSMichael Baum 		DRV_LOG(ERR, "Failed to get mem page size.");
132986d259ceSMichael Baum 		rte_errno = ENOMEM;
133088f2e3f1SMichael Baum 		return 0;
133186d259ceSMichael Baum 	}
133286d259ceSMichael Baum 	wqe_n = RTE_MIN(1UL << txq_data->elts_n,
133386d259ceSMichael Baum 			(uint32_t)priv->sh->device_attr.max_qp_wr);
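	/*
	 * Worked example (device limit assumed): with elts_n = 12 (4096
	 * descriptors) and the device reporting max_qp_wr = 1024, the SQ
	 * is clamped to wqe_n = 1024 work queue entries.
	 */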
133486d259ceSMichael Baum 	txq_obj->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
133586d259ceSMichael Baum 				      wqe_n * sizeof(struct mlx5_wqe),
133686d259ceSMichael Baum 				      page_size, priv->sh->numa_node);
133786d259ceSMichael Baum 	if (!txq_obj->sq_buf) {
133886d259ceSMichael Baum 		DRV_LOG(ERR,
133986d259ceSMichael Baum 			"Port %u Tx queue %u cannot allocate memory (SQ).",
134086d259ceSMichael Baum 			dev->data->port_id, txq_data->idx);
134186d259ceSMichael Baum 		rte_errno = ENOMEM;
134286d259ceSMichael Baum 		goto error;
134386d259ceSMichael Baum 	}
134486d259ceSMichael Baum 	/* Register allocated buffer in user space with DevX. */
134586d259ceSMichael Baum 	txq_obj->sq_umem = mlx5_glue->devx_umem_reg
134686d259ceSMichael Baum 					(priv->sh->ctx,
134786d259ceSMichael Baum 					 (void *)txq_obj->sq_buf,
134886d259ceSMichael Baum 					 wqe_n * sizeof(struct mlx5_wqe),
134986d259ceSMichael Baum 					 IBV_ACCESS_LOCAL_WRITE);
135086d259ceSMichael Baum 	if (!txq_obj->sq_umem) {
135186d259ceSMichael Baum 		rte_errno = errno;
135286d259ceSMichael Baum 		DRV_LOG(ERR,
135386d259ceSMichael Baum 			"Port %u Tx queue %u cannot register memory (SQ).",
135486d259ceSMichael Baum 			dev->data->port_id, txq_data->idx);
135586d259ceSMichael Baum 		goto error;
135686d259ceSMichael Baum 	}
135786d259ceSMichael Baum 	/* Allocate doorbell record for send queue. */
135886d259ceSMichael Baum 	txq_obj->sq_dbrec_offset = mlx5_get_dbr(priv->sh->ctx,
135986d259ceSMichael Baum 						&priv->dbrpgs,
136086d259ceSMichael Baum 						&txq_obj->sq_dbrec_page);
136186d259ceSMichael Baum 	if (txq_obj->sq_dbrec_offset < 0) {
136286d259ceSMichael Baum 		rte_errno = errno;
136386d259ceSMichael Baum 		DRV_LOG(ERR, "Failed to allocate SQ doorbell record.");
136486d259ceSMichael Baum 		goto error;
136586d259ceSMichael Baum 	}
136686d259ceSMichael Baum 	sq_attr.tis_lst_sz = 1;
136786d259ceSMichael Baum 	sq_attr.tis_num = priv->sh->tis->id;
136886d259ceSMichael Baum 	sq_attr.state = MLX5_SQC_STATE_RST;
136986d259ceSMichael Baum 	sq_attr.cqn = txq_obj->cq_devx->id;
137086d259ceSMichael Baum 	sq_attr.flush_in_error_en = 1;
137186d259ceSMichael Baum 	sq_attr.allow_multi_pkt_send_wqe = !!priv->config.mps;
137286d259ceSMichael Baum 	sq_attr.allow_swp = !!priv->config.swp;
137386d259ceSMichael Baum 	sq_attr.min_wqe_inline_mode = priv->config.hca_attr.vport_inline_mode;
137486d259ceSMichael Baum 	sq_attr.wq_attr.uar_page =
137586d259ceSMichael Baum 				mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar);
137686d259ceSMichael Baum 	sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
137786d259ceSMichael Baum 	sq_attr.wq_attr.pd = priv->sh->pdn;
137886d259ceSMichael Baum 	sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
137988f2e3f1SMichael Baum 	sq_attr.wq_attr.log_wq_sz = log2above(wqe_n);
138086d259ceSMichael Baum 	sq_attr.wq_attr.dbr_umem_valid = 1;
138186d259ceSMichael Baum 	sq_attr.wq_attr.dbr_addr = txq_obj->sq_dbrec_offset;
138286d259ceSMichael Baum 	sq_attr.wq_attr.dbr_umem_id =
138386d259ceSMichael Baum 			mlx5_os_get_umem_id(txq_obj->sq_dbrec_page->umem);
138486d259ceSMichael Baum 	sq_attr.wq_attr.wq_umem_valid = 1;
138586d259ceSMichael Baum 	sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(txq_obj->sq_umem);
138686d259ceSMichael Baum 	sq_attr.wq_attr.wq_umem_offset = (uintptr_t)txq_obj->sq_buf % page_size;
138786d259ceSMichael Baum 	/* Create Send Queue object with DevX. */
138888f2e3f1SMichael Baum 	txq_obj->sq_devx = mlx5_devx_cmd_create_sq(priv->sh->ctx, &sq_attr);
138988f2e3f1SMichael Baum 	if (!txq_obj->sq_devx) {
139086d259ceSMichael Baum 		rte_errno = errno;
139186d259ceSMichael Baum 		DRV_LOG(ERR, "Port %u Tx queue %u SQ creation failure.",
139286d259ceSMichael Baum 			dev->data->port_id, idx);
139386d259ceSMichael Baum 		goto error;
139486d259ceSMichael Baum 	}
139588f2e3f1SMichael Baum 	return wqe_n;
139686d259ceSMichael Baum error:
139786d259ceSMichael Baum 	ret = rte_errno;
139888f2e3f1SMichael Baum 	mlx5_txq_release_devx_sq_resources(txq_obj);
139986d259ceSMichael Baum 	rte_errno = ret;
140088f2e3f1SMichael Baum 	return 0;
140186d259ceSMichael Baum }
140286d259ceSMichael Baum #endif
140386d259ceSMichael Baum 
140486d259ceSMichael Baum /**
140586d259ceSMichael Baum  * Create the Tx queue DevX object.
140686d259ceSMichael Baum  *
140786d259ceSMichael Baum  * @param dev
140886d259ceSMichael Baum  *   Pointer to Ethernet device.
140986d259ceSMichael Baum  * @param idx
141086d259ceSMichael Baum  *   Queue index in DPDK Tx queue array.
141186d259ceSMichael Baum  *
141286d259ceSMichael Baum  * @return
1413f49f4483SMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
141486d259ceSMichael Baum  */
1415f49f4483SMichael Baum int
141686d259ceSMichael Baum mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
141786d259ceSMichael Baum {
141886d259ceSMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
141986d259ceSMichael Baum 	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
142086d259ceSMichael Baum 	struct mlx5_txq_ctrl *txq_ctrl =
142186d259ceSMichael Baum 			container_of(txq_data, struct mlx5_txq_ctrl, txq);
142286d259ceSMichael Baum 
142386d259ceSMichael Baum 	if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
142486d259ceSMichael Baum 		return mlx5_txq_obj_hairpin_new(dev, idx);
142586d259ceSMichael Baum #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
142686d259ceSMichael Baum 	DRV_LOG(ERR, "Port %u Tx queue %u cannot create with DevX, no UAR.",
142786d259ceSMichael Baum 		     dev->data->port_id, idx);
142886d259ceSMichael Baum 	rte_errno = ENOMEM;
1429f49f4483SMichael Baum 	return -rte_errno;
143086d259ceSMichael Baum #else
143186d259ceSMichael Baum 	struct mlx5_dev_ctx_shared *sh = priv->sh;
1432f49f4483SMichael Baum 	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
143386d259ceSMichael Baum 	void *reg_addr;
143486d259ceSMichael Baum 	uint32_t cqe_n;
143588f2e3f1SMichael Baum 	uint32_t wqe_n;
143686d259ceSMichael Baum 	int ret = 0;
143786d259ceSMichael Baum 
143886d259ceSMichael Baum 	MLX5_ASSERT(txq_data);
1439f49f4483SMichael Baum 	MLX5_ASSERT(txq_obj);
144086d259ceSMichael Baum 	txq_obj->txq_ctrl = txq_ctrl;
144186d259ceSMichael Baum 	txq_obj->dev = dev;
144288f2e3f1SMichael Baum 	cqe_n = mlx5_txq_create_devx_cq_resources(dev, idx);
144388f2e3f1SMichael Baum 	if (!cqe_n) {
144486d259ceSMichael Baum 		rte_errno = errno;
144586d259ceSMichael Baum 		goto error;
144686d259ceSMichael Baum 	}
144788f2e3f1SMichael Baum 	txq_data->cqe_n = log2above(cqe_n);
144888f2e3f1SMichael Baum 	txq_data->cqe_s = 1 << txq_data->cqe_n;
144986d259ceSMichael Baum 	txq_data->cqe_m = txq_data->cqe_s - 1;
145086d259ceSMichael Baum 	txq_data->cqes = (volatile struct mlx5_cqe *)txq_obj->cq_buf;
145186d259ceSMichael Baum 	txq_data->cq_ci = 0;
145286d259ceSMichael Baum 	txq_data->cq_pi = 0;
145386d259ceSMichael Baum 	txq_data->cq_db = (volatile uint32_t *)(txq_obj->cq_dbrec_page->dbrs +
145486d259ceSMichael Baum 						txq_obj->cq_dbrec_offset);
145586d259ceSMichael Baum 	*txq_data->cq_db = 0;
145686d259ceSMichael Baum 	/* Create Send Queue object with DevX. */
145788f2e3f1SMichael Baum 	wqe_n = mlx5_txq_create_devx_sq_resources(dev, idx);
145888f2e3f1SMichael Baum 	if (!wqe_n) {
145986d259ceSMichael Baum 		rte_errno = errno;
146086d259ceSMichael Baum 		goto error;
146186d259ceSMichael Baum 	}
146286d259ceSMichael Baum 	/* Create the Work Queue. */
146388f2e3f1SMichael Baum 	txq_data->wqe_n = log2above(wqe_n);
146486d259ceSMichael Baum 	txq_data->wqe_s = 1 << txq_data->wqe_n;
146586d259ceSMichael Baum 	txq_data->wqe_m = txq_data->wqe_s - 1;
146686d259ceSMichael Baum 	txq_data->wqes = (struct mlx5_wqe *)txq_obj->sq_buf;
146786d259ceSMichael Baum 	txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
146886d259ceSMichael Baum 	txq_data->wqe_ci = 0;
146986d259ceSMichael Baum 	txq_data->wqe_pi = 0;
147086d259ceSMichael Baum 	txq_data->wqe_comp = 0;
147186d259ceSMichael Baum 	txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
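	/*
	 * The doorbell record holds a pair of 32-bit counters; assuming
	 * the conventional layout (receive counter first, MLX5_SND_DBR
	 * selecting the second dword), the send doorbell is addressed at
	 * an offset of MLX5_SND_DBR * sizeof(uint32_t) within the record.
	 */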
147286d259ceSMichael Baum 	txq_data->qp_db = (volatile uint32_t *)
147386d259ceSMichael Baum 					(txq_obj->sq_dbrec_page->dbrs +
147486d259ceSMichael Baum 					 txq_obj->sq_dbrec_offset +
147586d259ceSMichael Baum 					 MLX5_SND_DBR * sizeof(uint32_t));
147686d259ceSMichael Baum 	*txq_data->qp_db = 0;
147786d259ceSMichael Baum 	txq_data->qp_num_8s = txq_obj->sq_devx->id << 8;
147886d259ceSMichael Baum 	/* Change Send Queue state to Ready-to-Send. */
1479a9c79306SMichael Baum 	ret = mlx5_devx_modify_sq(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0);
148086d259ceSMichael Baum 	if (ret) {
148186d259ceSMichael Baum 		rte_errno = errno;
148286d259ceSMichael Baum 		DRV_LOG(ERR,
1483a9c79306SMichael Baum 			"Port %u Tx queue %u failed to set SQ state to SQC_STATE_RDY.",
148486d259ceSMichael Baum 			dev->data->port_id, idx);
148586d259ceSMichael Baum 		goto error;
148686d259ceSMichael Baum 	}
148786d259ceSMichael Baum #ifdef HAVE_IBV_FLOW_DV_SUPPORT
148886d259ceSMichael Baum 	/*
148986d259ceSMichael Baum 	 * When using DevX, the TIS transport domain value must be queried
149086d259ceSMichael Baum 	 * and stored. This is done once per port, and the value is later
149186d259ceSMichael Baum 	 * used on Rx, when creating the matching TIR.
149286d259ceSMichael Baum 	 */
149386d259ceSMichael Baum 	if (!priv->sh->tdn)
149486d259ceSMichael Baum 		priv->sh->tdn = priv->sh->td->id;
149586d259ceSMichael Baum #endif
149686d259ceSMichael Baum 	MLX5_ASSERT(sh->tx_uar);
149786d259ceSMichael Baum 	reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
149886d259ceSMichael Baum 	MLX5_ASSERT(reg_addr);
149986d259ceSMichael Baum 	txq_ctrl->bf_reg = reg_addr;
150086d259ceSMichael Baum 	txq_ctrl->uar_mmap_offset =
150186d259ceSMichael Baum 				mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
150286d259ceSMichael Baum 	txq_uar_init(txq_ctrl);
1503876b5d52SMatan Azrad 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
1504f49f4483SMichael Baum 	return 0;
150586d259ceSMichael Baum error:
150686d259ceSMichael Baum 	ret = rte_errno; /* Save rte_errno before cleanup. */
150788f2e3f1SMichael Baum 	mlx5_txq_release_devx_resources(txq_obj);
150886d259ceSMichael Baum 	rte_errno = ret; /* Restore rte_errno. */
1509f49f4483SMichael Baum 	return -rte_errno;
151086d259ceSMichael Baum #endif
151186d259ceSMichael Baum }
151286d259ceSMichael Baum 
151386d259ceSMichael Baum /**
151486d259ceSMichael Baum  * Release a Tx DevX queue object.
151586d259ceSMichael Baum  *
151686d259ceSMichael Baum  * @param txq_obj
151786d259ceSMichael Baum  *   DevX Tx queue object.
151886d259ceSMichael Baum  */
151986d259ceSMichael Baum void
152086d259ceSMichael Baum mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj)
152186d259ceSMichael Baum {
152286d259ceSMichael Baum 	MLX5_ASSERT(txq_obj);
1523354cc08aSMichael Baum 	if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
152486d259ceSMichael Baum 		if (txq_obj->tis)
152586d259ceSMichael Baum 			claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
152686d259ceSMichael Baum #ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
152786d259ceSMichael Baum 	} else {
152888f2e3f1SMichael Baum 		mlx5_txq_release_devx_resources(txq_obj);
152986d259ceSMichael Baum #endif
153086d259ceSMichael Baum 	}
153186d259ceSMichael Baum }
153286d259ceSMichael Baum 
15338bb2410eSOphir Munk struct mlx5_obj_ops devx_obj_ops = {
15348bb2410eSOphir Munk 	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
15356deb19e1SMichael Baum 	.rxq_obj_new = mlx5_rxq_devx_obj_new,
153632287079SMichael Baum 	.rxq_event_get = mlx5_rx_devx_get_event,
1537c279f187SMichael Baum 	.rxq_obj_modify = mlx5_devx_modify_rq,
15386deb19e1SMichael Baum 	.rxq_obj_release = mlx5_rxq_devx_obj_release,
153925ae7f1aSMichael Baum 	.ind_table_new = mlx5_devx_ind_table_new,
1540*fa7ad49eSAndrey Vesnovaty 	.ind_table_modify = mlx5_devx_ind_table_modify,
154125ae7f1aSMichael Baum 	.ind_table_destroy = mlx5_devx_ind_table_destroy,
154285552726SMichael Baum 	.hrxq_new = mlx5_devx_hrxq_new,
154385552726SMichael Baum 	.hrxq_destroy = mlx5_devx_tir_destroy,
1544b8cc58c1SAndrey Vesnovaty 	.hrxq_modify = mlx5_devx_hrxq_modify,
15450c762e81SMichael Baum 	.drop_action_create = mlx5_devx_drop_action_create,
15460c762e81SMichael Baum 	.drop_action_destroy = mlx5_devx_drop_action_destroy,
154786d259ceSMichael Baum 	.txq_obj_new = mlx5_txq_devx_obj_new,
15485d9f3c3fSMichael Baum 	.txq_obj_modify = mlx5_devx_modify_sq,
154986d259ceSMichael Baum 	.txq_obj_release = mlx5_txq_devx_obj_release,
15508bb2410eSOphir Munk };
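
/*
 * Dispatch sketch (illustrative, not driver code): the PMD is expected to
 * select either this DevX ops vector or its Verbs counterpart at probe
 * time and store it in the per-port private data; the helper below and
 * the priv->obj_ops access are assumptions made for this example.
 */
static int
mlx5_txq_obj_new_dispatch_sketch(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	/* Resolves to mlx5_txq_devx_obj_new() when devx_obj_ops is used. */
	return priv->obj_ops.txq_obj_new(dev, idx);
}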
1551