xref: /dpdk/drivers/net/mlx5/mlx5_devx.c (revision 2f5122dfc41f4493a33d8ccd65ba89dd26624b6b)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common_devx.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_tx.h"
#include "mlx5_rx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"

/**
 * Modify RQ VLAN stripping offload.
 *
 * @param rxq
 *   Rx queue.
 * @param on
 *   Enable/disable VLAN stripping.
 *
 * @return
 *   0 on success, non-0 otherwise.
 */
static int
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_priv *rxq, int on)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	rq_attr.state = MLX5_RQC_STATE_RDY;
	rq_attr.vsd = (on ? 0 : 1);
	rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
	return mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr);
}
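
/*
 * Illustrative usage sketch, compiled out of any real build: how the
 * VLAN-strip toggle above could be driven per queue. mlx5_rxq_get() is
 * the per-queue lookup used elsewhere in this file; the wrapper name
 * and the guard macro are hypothetical.
 */
#ifdef MLX5_DEVX_USAGE_SKETCH
static int
mlx5_rxq_vlan_strip_sketch(struct rte_eth_dev *dev, uint16_t idx, int on)
{
	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);

	if (rxq == NULL)
		return -EINVAL;
	/* VSD means "VLAN strip disable", hence the inverted bit above. */
	return mlx5_rxq_obj_modify_rq_vlan_strip(rxq, on);
}
#endif /* MLX5_DEVX_USAGE_SKETCH */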

/**
 * Modify RQ using DevX API.
 *
 * @param rxq
 *   DevX Rx queue.
 * @param type
 *   Type of queue state change.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_modify_rq(struct mlx5_rxq_priv *rxq, uint8_t type)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	switch (type) {
	case MLX5_RXQ_MOD_ERR2RST:
		rq_attr.rq_state = MLX5_RQC_STATE_ERR;
		rq_attr.state = MLX5_RQC_STATE_RST;
		break;
	case MLX5_RXQ_MOD_RST2RDY:
		rq_attr.rq_state = MLX5_RQC_STATE_RST;
		rq_attr.state = MLX5_RQC_STATE_RDY;
		break;
	case MLX5_RXQ_MOD_RDY2ERR:
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_ERR;
		break;
	case MLX5_RXQ_MOD_RDY2RST:
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_RST;
		break;
	default:
		break;
	}
	if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		return mlx5_devx_cmd_modify_rq(rxq->ctrl->obj->rq, &rq_attr);
	return mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr);
}
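
/*
 * State transitions driven above, keyed by the MLX5_RXQ_MOD_* type:
 *
 *   ERR2RST: ERR -> RST        RST2RDY: RST -> RDY
 *   RDY2ERR: RDY -> ERR        RDY2RST: RDY -> RST
 *
 * Hairpin queues keep their RQ in rxq->ctrl->obj->rq, while regular
 * DevX queues use rxq->devx_rq.rq, hence the final dispatch above.
 */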

/**
 * Modify SQ using DevX API.
 *
 * @param obj
 *   DevX Tx queue object.
 * @param type
 *   Type of queue state change.
 * @param dev_port
 *   Unused; kept so the prototype matches the Verbs implementation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_txq_devx_modify(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
		     uint8_t dev_port)
{
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	int ret;

	if (type != MLX5_TXQ_MOD_RST2RDY) {
		/* Change queue state to reset. */
		if (type == MLX5_TXQ_MOD_ERR2RDY)
			msq_attr.sq_state = MLX5_SQC_STATE_ERR;
		else
			msq_attr.sq_state = MLX5_SQC_STATE_RDY;
		msq_attr.state = MLX5_SQC_STATE_RST;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_obj.sq, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to RESET"
				" %s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	if (type != MLX5_TXQ_MOD_RDY2RST) {
		/* Change queue state to ready. */
		msq_attr.sq_state = MLX5_SQC_STATE_RST;
		msq_attr.state = MLX5_SQC_STATE_RDY;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_obj.sq, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to READY"
				" %s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	/*
	 * The dev_port argument is used only by the Verbs implementation.
	 * Function pointers may reference either this function or its
	 * Verbs counterpart interchangeably, so both must share the same
	 * prototype.
	 */
	(void)dev_port;
	return 0;
}
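
/*
 * Note that MLX5_TXQ_MOD_ERR2RDY matches neither guard above, so both
 * blocks execute and the SQ is recovered in two steps: ERR -> RST,
 * then RST -> RDY.
 */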

/**
 * Release an Rx DevX queue object.
 *
 * @param rxq
 *   DevX Rx queue.
 */
static void
mlx5_rxq_devx_obj_release(struct mlx5_rxq_priv *rxq)
{
	struct mlx5_rxq_obj *rxq_obj = rxq->ctrl->obj;

	if (rxq_obj == NULL)
		return;
	if (rxq_obj->rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
		if (rxq_obj->rq == NULL)
			return;
		mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RDY2RST);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
	} else {
		if (rxq->devx_rq.rq == NULL)
			return;
		mlx5_devx_rq_destroy(&rxq->devx_rq);
		if (rxq->devx_rq.rmp != NULL && rxq->devx_rq.rmp->ref_cnt > 0)
			return;
		mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
		memset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));
		if (rxq_obj->devx_channel) {
			mlx5_os_devx_destroy_event_channel
							(rxq_obj->devx_channel);
			rxq_obj->devx_channel = NULL;
		}
	}
	rxq->ctrl->started = false;
}

/**
 * Get event for an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
{
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
					    &out.event_resp,
					    sizeof(out.buf));

	if (ret < 0) {
		rte_errno = errno;
		return -rte_errno;
	}
	if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->cq_obj.cq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
#else
	(void)rxq_obj;
	rte_errno = ENOTSUP;
	return -rte_errno;
#endif /* HAVE_IBV_DEVX_EVENT */
}
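
/*
 * The cookie compared above is the one registered for this CQ via
 * mlx5_os_devx_subscribe_devx_event() in
 * mlx5_rxq_create_devx_cq_resources() below; a mismatch means the
 * event read from the channel does not belong to this Rx CQ.
 */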

/**
 * Create an RQ object using DevX.
 *
 * @param rxq
 *   Pointer to Rx queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_create_devx_rq_resources(struct mlx5_rxq_priv *rxq)
{
	struct mlx5_priv *priv = rxq->priv;
	struct mlx5_common_device *cdev = priv->sh->cdev;
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
	struct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq;
	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
	uint16_t log_desc_n = rxq_data->elts_n - rxq_data->sges_n;
	uint32_t wqe_size, log_wqe_size;

	/* Fill RQ attributes. */
	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
	rq_attr.flush_in_error_en = 1;
	rq_attr.vsd = (rxq_data->vlan_strip) ? 0 : 1;
	rq_attr.cqn = rxq_ctrl->obj->cq_obj.cq->id;
	rq_attr.scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
	rq_attr.ts_format =
			mlx5_ts_format_conv(cdev->config.hca_attr.rq_ts_format);
	/* Fill WQ attributes for this RQ. */
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
		/*
		 * Number of strides in each WQE:
		 * 512*2^single_wqe_log_num_of_strides.
		 */
		rq_attr.wq_attr.single_wqe_log_num_of_strides =
				rxq_data->log_strd_num -
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
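		/*
		 * Worked example: with log_strd_num == 11 and the 2^9
		 * (512) stride minimum implied by the comment above,
		 * the attribute is 11 - 9 == 2, i.e.
		 * 512 * 2^2 == 2^11 strides per WQE.
		 */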
		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
		rq_attr.wq_attr.single_stride_log_num_of_bytes =
				rxq_data->log_strd_sz -
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
		wqe_size = sizeof(struct mlx5_wqe_mprq);
	} else {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
		wqe_size = sizeof(struct mlx5_wqe_data_seg);
	}
	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
	wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
	rq_attr.wq_attr.log_wq_sz = log_desc_n;
	rq_attr.wq_attr.end_padding_mode = priv->config.hw_padding ?
						MLX5_WQ_END_PAD_MODE_ALIGN :
						MLX5_WQ_END_PAD_MODE_NONE;
	rq_attr.wq_attr.pd = cdev->pdn;
	rq_attr.counter_set_id = priv->counter_set_id;
	rq_attr.delay_drop_en = rxq_data->delay_drop;
	rq_attr.user_index = rte_cpu_to_be_16(priv->dev_data->port_id);
	if (rxq_data->shared) /* Create RMP based RQ. */
		rxq->devx_rq.rmp = &rxq_ctrl->obj->devx_rmp;
	/* Create RQ using DevX API. */
	return mlx5_devx_rq_create(cdev->ctx, &rxq->devx_rq, wqe_size,
				   log_desc_n, &rq_attr, rxq_ctrl->socket);
}

/**
 * Create a DevX CQ object for an Rx queue.
 *
 * @param rxq
 *   Pointer to Rx queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_create_devx_cq_resources(struct mlx5_rxq_priv *rxq)
{
	struct mlx5_devx_cq *cq_obj = 0;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_priv *priv = rxq->priv;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	uint16_t port_id = priv->dev_data->port_id;
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
	struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
	uint32_t log_cqe_n;
	uint16_t event_nums[1] = { 0 };
	int ret = 0;

	if (rxq_ctrl->started)
		return 0;
	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
	    !rxq_data->lro) {
		cq_attr.cqe_comp_en = 1u;
		rxq_data->mcqe_format = priv->config.cqe_comp_fmt;
		rxq_data->byte_mask = UINT32_MAX;
		switch (priv->config.cqe_comp_fmt) {
		case MLX5_CQE_RESP_FORMAT_HASH:
			/* fallthrough */
		case MLX5_CQE_RESP_FORMAT_CSUM:
			/*
			 * Select CSUM miniCQE format only for non-vectorized
			 * MPRQ Rx burst, use HASH miniCQE format for others.
			 */
			if (mlx5_rxq_check_vec_support(rxq_data) < 0 &&
			    mlx5_rxq_mprq_enabled(rxq_data))
				cq_attr.mini_cqe_res_format =
					MLX5_CQE_RESP_FORMAT_CSUM_STRIDX;
			else
				cq_attr.mini_cqe_res_format =
					MLX5_CQE_RESP_FORMAT_HASH;
			rxq_data->mcqe_format = cq_attr.mini_cqe_res_format;
			break;
		case MLX5_CQE_RESP_FORMAT_FTAG_STRIDX:
			rxq_data->byte_mask = MLX5_LEN_WITH_MARK_MASK;
			/* fallthrough */
		case MLX5_CQE_RESP_FORMAT_CSUM_STRIDX:
			cq_attr.mini_cqe_res_format = priv->config.cqe_comp_fmt;
			break;
		case MLX5_CQE_RESP_FORMAT_L34H_STRIDX:
			cq_attr.mini_cqe_res_format = 0;
			cq_attr.mini_cqe_res_format_ext = 1;
			break;
		}
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is enabled, format %d.",
			port_id, priv->config.cqe_comp_fmt);
		/*
		 * For vectorized Rx, it must not be doubled in order to
		 * make cq_ci and rq_ci aligned.
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cqe_n *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for HW timestamp.",
			port_id);
	} else if (priv->config.cqe_comp && rxq_data->lro) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for LRO.",
			port_id);
	}
	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->rx_uar.obj);
	log_cqe_n = log2above(cqe_n);
	/* Create CQ using DevX API. */
	ret = mlx5_devx_cq_create(sh->cdev->ctx, &rxq_ctrl->obj->cq_obj,
				  log_cqe_n, &cq_attr, sh->numa_node);
	if (ret)
		return ret;
	cq_obj = &rxq_ctrl->obj->cq_obj;
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])
							(uintptr_t)cq_obj->cqes;
	rxq_data->cq_db = cq_obj->db_rec;
	rxq_data->uar_data = sh->rx_uar.cq_db;
	rxq_data->cqe_n = log_cqe_n;
	rxq_data->cqn = cq_obj->cq->id;
	rxq_data->cq_ci = 0;
	if (rxq_ctrl->obj->devx_channel) {
		ret = mlx5_os_devx_subscribe_devx_event
					      (rxq_ctrl->obj->devx_channel,
					       cq_obj->cq->obj,
					       sizeof(event_nums),
					       event_nums,
					       (uint64_t)(uintptr_t)cq_obj->cq);
		if (ret) {
			DRV_LOG(ERR, "Failed to subscribe CQ to event channel.");
			ret = errno;
			mlx5_devx_cq_destroy(cq_obj);
			memset(cq_obj, 0, sizeof(*cq_obj));
			rte_errno = ret;
			return -ret;
		}
	}
	return 0;
}

/**
 * Create the Rx hairpin queue object.
 *
 * @param rxq
 *   Pointer to Rx queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_obj_hairpin_new(struct mlx5_rxq_priv *rxq)
{
	uint16_t idx = rxq->idx;
	struct mlx5_priv *priv = rxq->priv;
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
	struct mlx5_devx_create_rq_attr attr = { 0 };
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL && tmpl != NULL);
	tmpl->rxq_ctrl = rxq_ctrl;
	attr.hairpin = 1;
	max_wq_data =
		priv->sh->cdev->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
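	/*
	 * Each packet occupies at least one queue stride, so the packet
	 * count log above is the data size log minus the stride log;
	 * e.g. a stride log of 6 (64B) with a data size log of 16 (64KB)
	 * would give 2^10 packets (illustrative values).
	 */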
	attr.counter_set_id = priv->counter_set_id;
	rxq_ctrl->rxq.delay_drop = priv->config.hp_delay_drop;
	attr.delay_drop_en = priv->config.hp_delay_drop;
	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->cdev->ctx, &attr,
					   rxq_ctrl->socket);
	if (!tmpl->rq) {
		DRV_LOG(ERR,
			"Port %u Rx hairpin queue %u can't create rq object.",
			priv->dev_data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	priv->dev_data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return 0;
}

/**
 * Create the Rx queue DevX object.
 *
 * @param rxq
 *   Pointer to Rx queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq)
{
	struct mlx5_priv *priv = rxq->priv;
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
	struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	int ret = 0;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		return mlx5_rxq_obj_hairpin_new(rxq);
	tmpl->rxq_ctrl = rxq_ctrl;
	if (rxq_ctrl->irq && !rxq_ctrl->started) {
		int devx_ev_flag =
			  MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

		tmpl->devx_channel = mlx5_os_devx_create_event_channel
							(priv->sh->cdev->ctx,
							 devx_ev_flag);
		if (!tmpl->devx_channel) {
			rte_errno = errno;
			DRV_LOG(ERR, "Failed to create event channel %d.",
				rte_errno);
			goto error;
		}
		tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
	}
	/* Create CQ using DevX API. */
	ret = mlx5_rxq_create_devx_cq_resources(rxq);
	if (ret) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto error;
	}
	rxq_data->delay_drop = priv->config.std_delay_drop;
	/* Create RQ using DevX API. */
	ret = mlx5_rxq_create_devx_rq_resources(rxq);
	if (ret) {
		DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
			priv->dev_data->port_id, rxq->idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY);
	if (ret)
		goto error;
	if (!rxq_data->shared) {
		rxq_data->wqes = (void *)(uintptr_t)rxq->devx_rq.wq.umem_buf;
		rxq_data->rq_db = (uint32_t *)(uintptr_t)rxq->devx_rq.wq.db_rec;
	} else if (!rxq_ctrl->started) {
		rxq_data->wqes = (void *)(uintptr_t)tmpl->devx_rmp.wq.umem_buf;
		rxq_data->rq_db =
				(uint32_t *)(uintptr_t)tmpl->devx_rmp.wq.db_rec;
	}
	if (!rxq_ctrl->started) {
		mlx5_rxq_initialize(rxq_data);
		rxq_ctrl->wqn = rxq->devx_rq.rq->id;
	}
	priv->dev_data->rx_queue_state[rxq->idx] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_rxq_devx_obj_release(rxq);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}
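
/*
 * This constructor is presumably installed as the rxq_obj_new callback
 * of the DevX mlx5_obj_ops table (defined near the end of this file),
 * making it the DevX counterpart of the Verbs Rx queue object
 * constructor that shares the same callback prototype.
 */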

/**
 * Prepare RQT attribute structure for DevX RQT API.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param queues
 *   List of Rx queue indices or NULL, in which case
 *   the attribute will be filled with the drop queue ID.
 * @param queues_n
 *   Size of @p queues array or 0 if it is NULL.
 *
 * @return
 *   The initialized RQT attribute object on success, NULL otherwise and
 *   rte_errno is set.
 */
static struct mlx5_devx_rqt_attr *
mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
				     const unsigned int log_n,
				     const uint16_t *queues,
				     const uint32_t queues_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
	const unsigned int rqt_n = 1 << log_n;
	unsigned int i, j;

	rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
			      rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
	if (!rqt_attr) {
		DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		return NULL;
	}
	rqt_attr->rqt_max_size = priv->sh->dev_cap.ind_table_max_size;
	rqt_attr->rqt_actual_size = rqt_n;
	if (queues == NULL) {
		for (i = 0; i < rqt_n; i++)
			rqt_attr->rq_list[i] =
					priv->drop_queue.rxq->devx_rq.rq->id;
		return rqt_attr;
	}
	for (i = 0; i != queues_n; ++i) {
		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, queues[i]);

		MLX5_ASSERT(rxq != NULL);
		if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
			rqt_attr->rq_list[i] = rxq->ctrl->obj->rq->id;
		else
			rqt_attr->rq_list[i] = rxq->devx_rq.rq->id;
	}
	MLX5_ASSERT(i > 0);
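	/*
	 * Pad the rest of the table by wrapping around the queue list;
	 * e.g. queues_n == 3 with rqt_n == 4 yields { q0, q1, q2, q0 }.
	 */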
	for (j = 0; i != rqt_n; ++j, ++i)
		rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
	return rqt_attr;
}

/**
 * Create RQT using DevX API as a field of the indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
			struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
	const uint16_t *queues = dev->data->dev_started ? ind_tbl->queues :
							  NULL;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n, queues,
						       ind_tbl->queues_n);
	if (!rqt_attr)
		return -rte_errno;
	ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->cdev->ctx, rqt_attr);
	mlx5_free(rqt_attr);
	if (!ind_tbl->rqt) {
		DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
			dev->data->port_id);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

/**
 * Modify RQT using DevX API as a field of the indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param queues
 *   List of Rx queue indices.
 * @param queues_n
 *   Size of @p queues array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_modify(struct rte_eth_dev *dev, const unsigned int log_n,
			   const uint16_t *queues, const uint32_t queues_n,
			   struct mlx5_ind_table_obj *ind_tbl)
{
	int ret = 0;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
							queues,
							queues_n);
	if (!rqt_attr)
		return -rte_errno;
	ret = mlx5_devx_cmd_modify_rqt(ind_tbl->rqt, rqt_attr);
	mlx5_free(rqt_attr);
	if (ret)
		DRV_LOG(ERR, "Port %u cannot modify DevX RQT.",
			dev->data->port_id);
	return ret;
}

/**
 * Destroy the DevX RQT object.
 *
 * @param ind_tbl
 *   Indirection table to release.
 */
static void
mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
	claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
}

/**
 * Set TIR attribute struct with relevant input values.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] rss_key
 *   RSS key for the Rx hash queue.
 * @param[in] hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param[in] ind_tbl
 *   Indirection table for TIR. If table queues array is NULL,
 *   a TIR for the drop queue is assumed.
 * @param[in] tunnel
 *   Tunnel type.
 * @param[out] tir_attr
 *   Parameters structure for TIR creation/modification.
 */
static void
mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
		       uint64_t hash_fields,
		       const struct mlx5_ind_table_obj *ind_tbl,
		       int tunnel, struct mlx5_devx_tir_attr *tir_attr)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	enum mlx5_rxq_type rxq_obj_type;
	bool lro = true;
	uint32_t i;

	/* NULL queues designate drop queue. */
	if (ind_tbl->queues != NULL) {
		struct mlx5_rxq_ctrl *rxq_ctrl =
				mlx5_rxq_ctrl_get(dev, ind_tbl->queues[0]);
		rxq_obj_type = rxq_ctrl != NULL ? rxq_ctrl->type :
						  MLX5_RXQ_TYPE_STANDARD;

		/* Enable TIR LRO only if all the queues were configured for it. */
		for (i = 0; i < ind_tbl->queues_n; ++i) {
			struct mlx5_rxq_data *rxq_i =
				mlx5_rxq_data_get(dev, ind_tbl->queues[i]);

			if (rxq_i != NULL && !rxq_i->lro) {
				lro = false;
				break;
			}
		}
	} else {
		rxq_obj_type = priv->drop_queue.rxq->ctrl->type;
	}
	memset(tir_attr, 0, sizeof(*tir_attr));
	tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
	tir_attr->rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
	tir_attr->tunneled_offload_en = !!tunnel;
	/* If needed, translate hash_fields bitmap to PRM format. */
	if (hash_fields) {
		struct mlx5_rx_hash_field_select *rx_hash_field_select =
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
			hash_fields & IBV_RX_HASH_INNER ?
				&tir_attr->rx_hash_field_selector_inner :
#endif
				&tir_attr->rx_hash_field_selector_outer;
		/* 1 bit: 0: IPv4, 1: IPv6. */
		rx_hash_field_select->l3_prot_type =
					!!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
		/* 1 bit: 0: TCP, 1: UDP. */
		rx_hash_field_select->l4_prot_type =
					!!(hash_fields & MLX5_UDP_IBV_RX_HASH);
		/* Bitmask which sets which fields to use in RX Hash. */
		rx_hash_field_select->selected_fields =
			((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
			(!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
			(!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
			(!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
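		/*
		 * Example: an outer IPv4/TCP 4-tuple hash sets all four
		 * SELECTED_FIELDS bits above while leaving l3_prot_type
		 * and l4_prot_type at 0 (IPv4, TCP).
		 */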
	}
	if (rxq_obj_type == MLX5_RXQ_TYPE_HAIRPIN)
		tir_attr->transport_domain = priv->sh->td->id;
	else
		tir_attr->transport_domain = priv->sh->tdn;
	memcpy(tir_attr->rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	tir_attr->indirect_table = ind_tbl->rqt->id;
	if (dev->data->dev_conf.lpbk_mode)
		tir_attr->self_lb_block = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
	if (lro) {
		tir_attr->lro_timeout_period_usecs = priv->config.lro_timeout;
		tir_attr->lro_max_msg_sz = priv->max_lro_msg_size;
		tir_attr->lro_enable_mask =
				MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
				MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
	}
}

/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Pointer to Rx Hash queue.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		   int tunnel __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_tir_attr tir_attr = {0};
	int err;

	mlx5_devx_tir_attr_set(dev, hrxq->rss_key, hrxq->hash_fields,
			       hrxq->ind_table, tunnel, &tir_attr);
	hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->cdev->ctx, &tir_attr);
	if (!hrxq->tir) {
		DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
			dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	if (mlx5_flow_os_create_flow_action_dest_devx_tir(hrxq->tir,
							  &hrxq->action)) {
		rte_errno = errno;
		goto error;
	}
#endif
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	if (hrxq->tir)
		claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Destroy a DevX TIR object.
 *
 * @param hrxq
 *   Hash Rx queue whose TIR is released.
 */
static void
mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
{
	claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
}

8375eaf882eSMichael Baum /**
838b8cc58c1SAndrey Vesnovaty  * Modify an Rx Hash queue configuration.
839b8cc58c1SAndrey Vesnovaty  *
840b8cc58c1SAndrey Vesnovaty  * @param dev
841b8cc58c1SAndrey Vesnovaty  *   Pointer to Ethernet device.
842b8cc58c1SAndrey Vesnovaty  * @param hrxq
843b8cc58c1SAndrey Vesnovaty  *   Hash Rx queue to modify.
844b8cc58c1SAndrey Vesnovaty  * @param rss_key
845b8cc58c1SAndrey Vesnovaty  *   RSS key for the Rx hash queue.
846b8cc58c1SAndrey Vesnovaty  * @param hash_fields
847b8cc58c1SAndrey Vesnovaty  *   Verbs protocol hash field to make the RSS on.
848b8cc58c1SAndrey Vesnovaty  * @param[in] ind_tbl
849b8cc58c1SAndrey Vesnovaty  *   Indirection table for TIR.
850b8cc58c1SAndrey Vesnovaty  *
851b8cc58c1SAndrey Vesnovaty  * @return
852b8cc58c1SAndrey Vesnovaty  *   0 on success, a negative errno value otherwise and rte_errno is set.
853b8cc58c1SAndrey Vesnovaty  */
static int
mlx5_devx_hrxq_modify(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		       const uint8_t *rss_key,
		       uint64_t hash_fields,
		       const struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_devx_modify_tir_attr modify_tir = {0};

	/*
	 * Fields untested for modification:
	 * - rx_hash_symmetric, not set in hrxq_new(),
	 * - rx_hash_fn, hard-coded in hrxq_new(),
	 * - lro_xxx, not set after rxq setup.
	 */
	if (ind_tbl != hrxq->ind_table)
		modify_tir.modify_bitmask |=
			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_INDIRECT_TABLE;
	if (hash_fields != hrxq->hash_fields ||
			memcmp(hrxq->rss_key, rss_key, MLX5_RSS_HASH_KEY_LEN))
		modify_tir.modify_bitmask |=
			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_HASH;
	mlx5_devx_tir_attr_set(dev, rss_key, hash_fields, ind_tbl,
			       0, /* N/A - tunnel modification unsupported */
			       &modify_tir.tir);
	modify_tir.tirn = hrxq->tir->id;
	if (mlx5_devx_cmd_modify_tir(hrxq->tir, &modify_tir)) {
		DRV_LOG(ERR, "port %u cannot modify DevX TIR",
			dev->data->port_id);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

/**
 * Create a DevX drop Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int socket_id = dev->device->numa_node;
	struct mlx5_rxq_priv *rxq;
	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
	struct mlx5_rxq_obj *rxq_obj = NULL;
	int ret;

	/*
	 * Initialize dummy control structures.
	 * They are required to hold pointers for cleanup
	 * and are only accessible via drop queue DevX objects.
	 */
	rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, socket_id);
	if (rxq == NULL) {
		DRV_LOG(ERR, "Port %u could not allocate drop queue private",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	rxq_ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_ctrl),
			       0, socket_id);
	if (rxq_ctrl == NULL) {
		DRV_LOG(ERR, "Port %u could not allocate drop queue control",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	rxq_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_obj), 0, socket_id);
	if (rxq_obj == NULL) {
		DRV_LOG(ERR, "Port %u could not allocate drop queue object",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	rxq_obj->rxq_ctrl = rxq_ctrl;
	rxq_ctrl->type = MLX5_RXQ_TYPE_STANDARD;
	rxq_ctrl->sh = priv->sh;
	rxq_ctrl->obj = rxq_obj;
	rxq->ctrl = rxq_ctrl;
	rxq->priv = priv;
	LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);
	/* Create CQ using DevX API. */
	ret = mlx5_rxq_create_devx_cq_resources(rxq);
	if (ret != 0) {
		DRV_LOG(ERR, "Port %u drop queue CQ creation failed.",
			dev->data->port_id);
		goto error;
	}
	rxq_ctrl->rxq.delay_drop = 0;
	/* Create RQ using DevX API. */
	ret = mlx5_rxq_create_devx_rq_resources(rxq);
	if (ret != 0) {
		DRV_LOG(ERR, "Port %u drop queue RQ creation failed.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY);
	if (ret != 0)
		goto error;
	/* Initialize drop queue. */
	priv->drop_queue.rxq = rxq;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (rxq != NULL && rxq->devx_rq.rq != NULL)
		mlx5_devx_rq_destroy(&rxq->devx_rq);
	if (rxq_obj != NULL) {
		if (rxq_obj->cq_obj.cq != NULL)
			mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
		if (rxq_obj->devx_channel)
			mlx5_os_devx_destroy_event_channel
							(rxq_obj->devx_channel);
		mlx5_free(rxq_obj);
	}
	if (rxq_ctrl != NULL)
		mlx5_free(rxq_ctrl);
	if (rxq != NULL)
		mlx5_free(rxq);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Release drop Rx queue resources.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_rxq_devx_obj_drop_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;

	mlx5_rxq_devx_obj_release(rxq);
	mlx5_free(rxq_ctrl->obj);
	mlx5_free(rxq_ctrl);
	mlx5_free(rxq);
	priv->drop_queue.rxq = NULL;
}

/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;

	if (hrxq->tir != NULL)
		mlx5_devx_tir_destroy(hrxq);
	if (hrxq->ind_table->ind_table != NULL)
		mlx5_devx_ind_table_destroy(hrxq->ind_table);
	if (priv->drop_queue.rxq->devx_rq.rq != NULL)
		mlx5_rxq_devx_obj_drop_release(dev);
}

1024bc5bee02SDmitry Kozlyuk /**
1025bc5bee02SDmitry Kozlyuk  * Create a DevX drop action for Rx Hash queue.
1026bc5bee02SDmitry Kozlyuk  *
1027bc5bee02SDmitry Kozlyuk  * @param dev
1028bc5bee02SDmitry Kozlyuk  *   Pointer to Ethernet device.
1029bc5bee02SDmitry Kozlyuk  *
1030bc5bee02SDmitry Kozlyuk  * @return
1031bc5bee02SDmitry Kozlyuk  *   0 on success, a negative errno value otherwise and rte_errno is set.
1032bc5bee02SDmitry Kozlyuk  */
1033bc5bee02SDmitry Kozlyuk static int
1034bc5bee02SDmitry Kozlyuk mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
1035bc5bee02SDmitry Kozlyuk {
1036bc5bee02SDmitry Kozlyuk 	struct mlx5_priv *priv = dev->data->dev_private;
1037bc5bee02SDmitry Kozlyuk 	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
1038bc5bee02SDmitry Kozlyuk 	int ret;
1039bc5bee02SDmitry Kozlyuk 
1040bc5bee02SDmitry Kozlyuk 	ret = mlx5_rxq_devx_obj_drop_create(dev);
1041bc5bee02SDmitry Kozlyuk 	if (ret != 0) {
1042bc5bee02SDmitry Kozlyuk 		DRV_LOG(ERR, "Cannot create drop RX queue");
1043bc5bee02SDmitry Kozlyuk 		return ret;
1044bc5bee02SDmitry Kozlyuk 	}
1045bc5bee02SDmitry Kozlyuk 	/* hrxq->ind_table queues are NULL, drop RX queue ID will be used */
1046bc5bee02SDmitry Kozlyuk 	ret = mlx5_devx_ind_table_new(dev, 0, hrxq->ind_table);
1047bc5bee02SDmitry Kozlyuk 	if (ret != 0) {
1048bc5bee02SDmitry Kozlyuk 		DRV_LOG(ERR, "Cannot create drop hash RX queue indirection table");
1049bc5bee02SDmitry Kozlyuk 		goto error;
1050bc5bee02SDmitry Kozlyuk 	}
1051bc5bee02SDmitry Kozlyuk 	ret = mlx5_devx_hrxq_new(dev, hrxq, /* tunnel */ false);
1052bc5bee02SDmitry Kozlyuk 	if (ret != 0) {
1053bc5bee02SDmitry Kozlyuk 		DRV_LOG(ERR, "Cannot create drop hash RX queue");
1054bc5bee02SDmitry Kozlyuk 		goto error;
1055bc5bee02SDmitry Kozlyuk 	}
1056bc5bee02SDmitry Kozlyuk 	return 0;
1057bc5bee02SDmitry Kozlyuk error:
1058bc5bee02SDmitry Kozlyuk 	mlx5_devx_drop_action_destroy(dev);
1059bc5bee02SDmitry Kozlyuk 	return ret;
10605eaf882eSMichael Baum }
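/*
 * Illustrative sketch (hypothetical names, not driver code): the
 * create/destroy pair above follows the "construct in order, unwind on
 * error" idiom. Because mlx5_devx_drop_action_destroy() NULL-checks each
 * sub-object before releasing it, it doubles as the unwind path no
 * matter how far creation got:
 *
 *	int create_all(struct ctx *c)
 *	{
 *		int ret = create_rq(c);
 *
 *		if (ret != 0)
 *			return ret;
 *		ret = create_ind_table(c);
 *		if (ret != 0)
 *			goto error;
 *		ret = create_tir(c);
 *		if (ret != 0)
 *			goto error;
 *		return 0;
 *	error:
 *		destroy_all(c);
 *		return ret;
 *	}
 */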
10615eaf882eSMichael Baum 
106286d259ceSMichael Baum /**
1063a89f6433SRongwei Liu  * Select the TIS number for a Tx queue.
1064a89f6433SRongwei Liu  *
1065a89f6433SRongwei Liu  * @param dev
1066a89f6433SRongwei Liu  *   Pointer to Ethernet device.
1067a89f6433SRongwei Liu  * @param queue_idx
1068a89f6433SRongwei Liu  *   Queue index in DPDK Tx queue array.
1069a89f6433SRongwei Liu  *
1070a89f6433SRongwei Liu  * @return
1071a89f6433SRongwei Liu  *   TIS number to be used by the Tx queue.
1072a89f6433SRongwei Liu  */
1073a89f6433SRongwei Liu static uint32_t
1074a89f6433SRongwei Liu mlx5_get_txq_tis_num(struct rte_eth_dev *dev, uint16_t queue_idx)
1075a89f6433SRongwei Liu {
1076a89f6433SRongwei Liu 	struct mlx5_priv *priv = dev->data->dev_private;
1077a89f6433SRongwei Liu 	int tis_idx;
1078a89f6433SRongwei Liu 
1079a89f6433SRongwei Liu 	if (priv->sh->bond.n_port && priv->sh->lag.affinity_mode ==
1080a89f6433SRongwei Liu 			MLX5_LAG_MODE_TIS) {
1081a89f6433SRongwei Liu 		tis_idx = (priv->lag_affinity_idx + queue_idx) %
1082a89f6433SRongwei Liu 			priv->sh->bond.n_port;
1083a89f6433SRongwei Liu 		DRV_LOG(INFO, "port %d txq %d gets affinity %d and maps to PF %d.",
1084a89f6433SRongwei Liu 			dev->data->port_id, queue_idx, tis_idx + 1,
1085a89f6433SRongwei Liu 			priv->sh->lag.tx_remap_affinity[tis_idx]);
1086a89f6433SRongwei Liu 	} else {
1087a89f6433SRongwei Liu 		tis_idx = 0;
1088a89f6433SRongwei Liu 	}
1089a89f6433SRongwei Liu 	MLX5_ASSERT(priv->sh->tis[tis_idx]);
1090a89f6433SRongwei Liu 	return priv->sh->tis[tis_idx]->id;
1091a89f6433SRongwei Liu }
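/*
 * Worked example (hypothetical values): with two bonded ports
 * (n_port == 2), lag_affinity_idx == 1 and MLX5_LAG_MODE_TIS, Tx queues
 * rotate over the TIS array as (1 + queue_idx) % 2:
 *
 *	queue_idx: 0 1 2 3
 *	tis_idx:   1 0 1 0
 *
 * so consecutive queues alternate between the bonded ports' TIS objects;
 * without bonding every queue uses tis[0].
 */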
1092a89f6433SRongwei Liu 
1093a89f6433SRongwei Liu /**
109486d259ceSMichael Baum  * Create the Tx hairpin queue object.
109586d259ceSMichael Baum  *
109686d259ceSMichael Baum  * @param dev
109786d259ceSMichael Baum  *   Pointer to Ethernet device.
109886d259ceSMichael Baum  * @param idx
109986d259ceSMichael Baum  *   Queue index in DPDK Tx queue array.
110086d259ceSMichael Baum  *
110186d259ceSMichael Baum  * @return
1102f49f4483SMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
110386d259ceSMichael Baum  */
1104f49f4483SMichael Baum static int
110586d259ceSMichael Baum mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
110686d259ceSMichael Baum {
110786d259ceSMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
110886d259ceSMichael Baum 	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
110986d259ceSMichael Baum 	struct mlx5_txq_ctrl *txq_ctrl =
111086d259ceSMichael Baum 		container_of(txq_data, struct mlx5_txq_ctrl, txq);
111186d259ceSMichael Baum 	struct mlx5_devx_create_sq_attr attr = { 0 };
1112f49f4483SMichael Baum 	struct mlx5_txq_obj *tmpl = txq_ctrl->obj;
111386d259ceSMichael Baum 	uint32_t max_wq_data;
111486d259ceSMichael Baum 
111586d259ceSMichael Baum 	MLX5_ASSERT(txq_data);
1116f49f4483SMichael Baum 	MLX5_ASSERT(tmpl);
111786d259ceSMichael Baum 	tmpl->txq_ctrl = txq_ctrl;
111886d259ceSMichael Baum 	attr.hairpin = 1;
111986d259ceSMichael Baum 	attr.tis_lst_sz = 1;
112053820561SMichael Baum 	max_wq_data =
112153820561SMichael Baum 		priv->sh->cdev->config.hca_attr.log_max_hairpin_wq_data_sz;
112286d259ceSMichael Baum 	/* Jumbo frames > 9KB should be supported, as well as many packets. */
112386d259ceSMichael Baum 	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
112486d259ceSMichael Baum 		if (priv->config.log_hp_size > max_wq_data) {
112586d259ceSMichael Baum 			DRV_LOG(ERR, "Total hairpin data size of 2^%u bytes "
112686d259ceSMichael Baum 				"is too large for the device.",
112786d259ceSMichael Baum 				priv->config.log_hp_size);
112886d259ceSMichael Baum 			rte_errno = ERANGE;
1129f49f4483SMichael Baum 			return -rte_errno;
113086d259ceSMichael Baum 		}
113186d259ceSMichael Baum 		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
113286d259ceSMichael Baum 	} else {
113386d259ceSMichael Baum 		attr.wq_attr.log_hairpin_data_sz =
113486d259ceSMichael Baum 				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
113586d259ceSMichael Baum 				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
113686d259ceSMichael Baum 	}
113786d259ceSMichael Baum 	/* Set the number of packets to the maximum value for performance. */
113886d259ceSMichael Baum 	attr.wq_attr.log_hairpin_num_packets =
113986d259ceSMichael Baum 			attr.wq_attr.log_hairpin_data_sz -
114086d259ceSMichael Baum 			MLX5_HAIRPIN_QUEUE_STRIDE;
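	/*
	 * Worked example (assumed values, not taken from this file): with
	 * MLX5_HAIRPIN_QUEUE_STRIDE == 6 (64-byte strides) and
	 * log_hairpin_data_sz resolving to, say, 15 (32 KiB of data):
	 *
	 *	log_hairpin_num_packets = 15 - 6 = 9
	 *
	 * i.e. the hairpin queue is sized for up to 512 packets.
	 */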
1141a89f6433SRongwei Liu 
1142a89f6433SRongwei Liu 	attr.tis_num = mlx5_get_txq_tis_num(dev, idx);
1143ca1418ceSMichael Baum 	tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->cdev->ctx, &attr);
114486d259ceSMichael Baum 	if (!tmpl->sq) {
114586d259ceSMichael Baum 		DRV_LOG(ERR,
114686d259ceSMichael Baum 			"Port %u Tx hairpin queue %u can't create SQ object.",
114786d259ceSMichael Baum 			dev->data->port_id, idx);
114886d259ceSMichael Baum 		rte_errno = errno;
1149f49f4483SMichael Baum 		return -rte_errno;
115086d259ceSMichael Baum 	}
1151f49f4483SMichael Baum 	return 0;
115286d259ceSMichael Baum }
115386d259ceSMichael Baum 
1154f1ae0b35SOphir Munk #if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
115586d259ceSMichael Baum /**
115686d259ceSMichael Baum  * Release DevX resources (CQ and SQ) of a Tx queue object.
115786d259ceSMichael Baum  *
115886d259ceSMichael Baum  * @param txq_obj
115986d259ceSMichael Baum  *   Txq object to destroy.
116086d259ceSMichael Baum  */
116186d259ceSMichael Baum static void
116288f2e3f1SMichael Baum mlx5_txq_release_devx_resources(struct mlx5_txq_obj *txq_obj)
116386d259ceSMichael Baum {
116474e91860SMichael Baum 	mlx5_devx_sq_destroy(&txq_obj->sq_obj);
116574e91860SMichael Baum 	memset(&txq_obj->sq_obj, 0, sizeof(txq_obj->sq_obj));
11665f04f70cSMichael Baum 	mlx5_devx_cq_destroy(&txq_obj->cq_obj);
11675f04f70cSMichael Baum 	memset(&txq_obj->cq_obj, 0, sizeof(txq_obj->cq_obj));
116886d259ceSMichael Baum }
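/*
 * Minimal sketch of the destroy-then-zero idiom used above (hypothetical
 * type and helper, assuming the destroy helpers tolerate already-cleared
 * objects):
 *
 *	static void release_res(struct res *r)
 *	{
 *		destroy_hw_object(&r->obj);
 *		memset(&r->obj, 0, sizeof(r->obj));
 *	}
 *
 * Zeroing after destroy keeps a repeated release call harmless and lets
 * later code test the members against NULL.
 */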
116986d259ceSMichael Baum 
117086d259ceSMichael Baum /**
117188f2e3f1SMichael Baum  * Create an SQ object and its resources using DevX.
117286d259ceSMichael Baum  *
117386d259ceSMichael Baum  * @param dev
117486d259ceSMichael Baum  *   Pointer to Ethernet device.
117586d259ceSMichael Baum  * @param idx
117686d259ceSMichael Baum  *   Queue index in DPDK Tx queue array.
117774e91860SMichael Baum  * @param[in] log_desc_n
117874e91860SMichael Baum  *   Log of number of descriptors in queue.
117986d259ceSMichael Baum  *
118086d259ceSMichael Baum  * @return
118174e91860SMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
118286d259ceSMichael Baum  */
118374e91860SMichael Baum static int
118474e91860SMichael Baum mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
118574e91860SMichael Baum 				  uint16_t log_desc_n)
118686d259ceSMichael Baum {
118786d259ceSMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
1188fe46b20cSMichael Baum 	struct mlx5_common_device *cdev = priv->sh->cdev;
11895dfa003dSMichael Baum 	struct mlx5_uar *uar = &priv->sh->tx_uar;
119086d259ceSMichael Baum 	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
119188f2e3f1SMichael Baum 	struct mlx5_txq_ctrl *txq_ctrl =
119288f2e3f1SMichael Baum 			container_of(txq_data, struct mlx5_txq_ctrl, txq);
119388f2e3f1SMichael Baum 	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
119474e91860SMichael Baum 	struct mlx5_devx_create_sq_attr sq_attr = {
119574e91860SMichael Baum 		.flush_in_error_en = 1,
119674e91860SMichael Baum 		.allow_multi_pkt_send_wqe = !!priv->config.mps,
119753820561SMichael Baum 		.min_wqe_inline_mode = cdev->config.hca_attr.vport_inline_mode,
119887af0d1eSMichael Baum 		.allow_swp = !!priv->sh->dev_cap.swp,
119974e91860SMichael Baum 		.cqn = txq_obj->cq_obj.cq->id,
120074e91860SMichael Baum 		.tis_lst_sz = 1,
120174e91860SMichael Baum 		.wq_attr = (struct mlx5_devx_wq_attr){
1202fe46b20cSMichael Baum 			.pd = cdev->pdn,
12035dfa003dSMichael Baum 			.uar_page = mlx5_os_get_devx_uar_page_id(uar->obj),
120474e91860SMichael Baum 		},
1205fe46b20cSMichael Baum 		.ts_format =
1206fe46b20cSMichael Baum 			mlx5_ts_format_conv(cdev->config.hca_attr.sq_ts_format),
1207a89f6433SRongwei Liu 		.tis_num = mlx5_get_txq_tis_num(dev, idx),
120874e91860SMichael Baum 	};
1209a89f6433SRongwei Liu 
121086d259ceSMichael Baum 	/* Create Send Queue object with DevX. */
1211fe46b20cSMichael Baum 	return mlx5_devx_sq_create(cdev->ctx, &txq_obj->sq_obj,
1212ca1418ceSMichael Baum 				   log_desc_n, &sq_attr, priv->sh->numa_node);
121386d259ceSMichael Baum }
121486d259ceSMichael Baum #endif
121586d259ceSMichael Baum 
121686d259ceSMichael Baum /**
121786d259ceSMichael Baum  * Create the Tx queue DevX object.
121886d259ceSMichael Baum  *
121986d259ceSMichael Baum  * @param dev
122086d259ceSMichael Baum  *   Pointer to Ethernet device.
122186d259ceSMichael Baum  * @param idx
122286d259ceSMichael Baum  *   Queue index in DPDK Tx queue array.
122386d259ceSMichael Baum  *
122486d259ceSMichael Baum  * @return
1225f49f4483SMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
122686d259ceSMichael Baum  */
1227f49f4483SMichael Baum int
122886d259ceSMichael Baum mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
122986d259ceSMichael Baum {
123086d259ceSMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
123186d259ceSMichael Baum 	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
123286d259ceSMichael Baum 	struct mlx5_txq_ctrl *txq_ctrl =
123386d259ceSMichael Baum 			container_of(txq_data, struct mlx5_txq_ctrl, txq);
123486d259ceSMichael Baum 
123586d259ceSMichael Baum 	if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
123686d259ceSMichael Baum 		return mlx5_txq_obj_hairpin_new(dev, idx);
1237f1ae0b35SOphir Munk #if !defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) && defined(HAVE_INFINIBAND_VERBS_H)
123886d259ceSMichael Baum 	DRV_LOG(ERR, "Port %u Tx queue %u cannot create with DevX, no UAR.",
123986d259ceSMichael Baum 		     dev->data->port_id, idx);
124086d259ceSMichael Baum 	rte_errno = ENOMEM;
1241f49f4483SMichael Baum 	return -rte_errno;
124286d259ceSMichael Baum #else
12435dfa003dSMichael Baum 	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
124486d259ceSMichael Baum 	struct mlx5_dev_ctx_shared *sh = priv->sh;
1245f49f4483SMichael Baum 	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
12465f04f70cSMichael Baum 	struct mlx5_devx_cq_attr cq_attr = {
12475dfa003dSMichael Baum 		.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar.obj),
12485f04f70cSMichael Baum 	};
12495f04f70cSMichael Baum 	uint32_t cqe_n, log_desc_n;
125000984de5SViacheslav Ovsiienko 	uint32_t wqe_n, wqe_size;
125186d259ceSMichael Baum 	int ret = 0;
125286d259ceSMichael Baum 
125386d259ceSMichael Baum 	MLX5_ASSERT(txq_data);
1254f49f4483SMichael Baum 	MLX5_ASSERT(txq_obj);
12555dfa003dSMichael Baum 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
12565dfa003dSMichael Baum 	MLX5_ASSERT(ppriv);
125786d259ceSMichael Baum 	txq_obj->txq_ctrl = txq_ctrl;
125886d259ceSMichael Baum 	txq_obj->dev = dev;
12595f04f70cSMichael Baum 	cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
12605f04f70cSMichael Baum 		1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
12615f04f70cSMichael Baum 	log_desc_n = log2above(cqe_n);
12625f04f70cSMichael Baum 	cqe_n = 1UL << log_desc_n;
12635f04f70cSMichael Baum 	if (cqe_n > UINT16_MAX) {
12645f04f70cSMichael Baum 		DRV_LOG(ERR, "Port %u Tx queue %u requests too many CQEs %u.",
12655f04f70cSMichael Baum 			dev->data->port_id, txq_data->idx, cqe_n);
12665f04f70cSMichael Baum 		rte_errno = EINVAL;
12675f04f70cSMichael Baum 		return -rte_errno;
12685f04f70cSMichael Baum 	}
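	/*
	 * Worked example (assuming the usual MLX5_TX_COMP_THRESH == 32 and
	 * MLX5_TX_COMP_THRESH_INLINE_DIV == 8): for a queue with 2048
	 * element descriptors (elts_n == 11):
	 *
	 *	cqe_n      = 2048 / 32 + 1 + 8 = 73
	 *	log_desc_n = log2above(73)     = 7
	 *	cqe_n      = 1 << 7            = 128
	 *
	 * so the CQ is rounded up to the next power of two above the
	 * worst-case number of outstanding completions.
	 */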
12695f04f70cSMichael Baum 	/* Create completion queue object with DevX. */
1270ca1418ceSMichael Baum 	ret = mlx5_devx_cq_create(sh->cdev->ctx, &txq_obj->cq_obj, log_desc_n,
12715f04f70cSMichael Baum 				  &cq_attr, priv->sh->numa_node);
12725f04f70cSMichael Baum 	if (ret) {
12735f04f70cSMichael Baum 		DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
12745f04f70cSMichael Baum 			dev->data->port_id, idx);
127586d259ceSMichael Baum 		goto error;
127686d259ceSMichael Baum 	}
12775f04f70cSMichael Baum 	txq_data->cqe_n = log_desc_n;
12785f04f70cSMichael Baum 	txq_data->cqe_s = cqe_n;
127986d259ceSMichael Baum 	txq_data->cqe_m = txq_data->cqe_s - 1;
12805f04f70cSMichael Baum 	txq_data->cqes = txq_obj->cq_obj.cqes;
128186d259ceSMichael Baum 	txq_data->cq_ci = 0;
128286d259ceSMichael Baum 	txq_data->cq_pi = 0;
12835f04f70cSMichael Baum 	txq_data->cq_db = txq_obj->cq_obj.db_rec;
128486d259ceSMichael Baum 	*txq_data->cq_db = 0;
128500984de5SViacheslav Ovsiienko 	/*
128600984de5SViacheslav Ovsiienko 	 * Adjust the number of WQEs depending on inline settings.
128700984de5SViacheslav Ovsiienko 	 * The number of descriptors should be enough to handle
128800984de5SViacheslav Ovsiienko 	 * the specified number of packets. When a queue is created
128900984de5SViacheslav Ovsiienko 	 * with Verbs, rdma-core adjusts the queue size internally
129000984de5SViacheslav Ovsiienko 	 * in mlx5_calc_sq_size(); do the same adjustment here for
129100984de5SViacheslav Ovsiienko 	 * a queue created with DevX.
129200984de5SViacheslav Ovsiienko 	 */
129300984de5SViacheslav Ovsiienko 	wqe_size = txq_data->tso_en ?
129400984de5SViacheslav Ovsiienko 		   RTE_ALIGN(txq_ctrl->max_tso_header, MLX5_WSEG_SIZE) : 0;
129500984de5SViacheslav Ovsiienko 	wqe_size += sizeof(struct mlx5_wqe_cseg) +
129600984de5SViacheslav Ovsiienko 		    sizeof(struct mlx5_wqe_eseg) +
129700984de5SViacheslav Ovsiienko 		    sizeof(struct mlx5_wqe_dseg);
129800984de5SViacheslav Ovsiienko 	if (txq_data->inlen_send)
129900984de5SViacheslav Ovsiienko 		wqe_size = RTE_MAX(wqe_size, sizeof(struct mlx5_wqe_cseg) +
130000984de5SViacheslav Ovsiienko 					     sizeof(struct mlx5_wqe_eseg) +
130100984de5SViacheslav Ovsiienko 					     RTE_ALIGN(txq_data->inlen_send +
130200984de5SViacheslav Ovsiienko 						       sizeof(uint32_t),
130300984de5SViacheslav Ovsiienko 						       MLX5_WSEG_SIZE));
130400984de5SViacheslav Ovsiienko 	wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
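	/*
	 * Worked example (assuming 16-byte cseg/eseg/dseg segments and a
	 * 64-byte MLX5_WQE_SIZE): with TSO disabled and inlen_send == 64:
	 *
	 *	base WQE   = 16 + 16 + 16                    = 48 bytes
	 *	inline WQE = 16 + 16 + RTE_ALIGN(64 + 4, 16) = 112 bytes
	 *	wqe_size   = RTE_ALIGN(112, 64) / 64         = 2 WQEBBs
	 *
	 * i.e. each descriptor may take up to two WQE basic blocks, and the
	 * SQ below is sized accordingly, capped by max_qp_wr.
	 */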
130586d259ceSMichael Baum 	/* Create Send Queue object with DevX. */
130600984de5SViacheslav Ovsiienko 	wqe_n = RTE_MIN((1UL << txq_data->elts_n) * wqe_size,
130791d1cfafSMichael Baum 			(uint32_t)priv->sh->dev_cap.max_qp_wr);
130874e91860SMichael Baum 	log_desc_n = log2above(wqe_n);
130974e91860SMichael Baum 	ret = mlx5_txq_create_devx_sq_resources(dev, idx, log_desc_n);
131074e91860SMichael Baum 	if (ret) {
131174e91860SMichael Baum 		DRV_LOG(ERR, "Port %u Tx queue %u SQ creation failure.",
131274e91860SMichael Baum 			dev->data->port_id, idx);
131386d259ceSMichael Baum 		rte_errno = errno;
131486d259ceSMichael Baum 		goto error;
131586d259ceSMichael Baum 	}
131686d259ceSMichael Baum 	/* Create the Work Queue. */
131774e91860SMichael Baum 	txq_data->wqe_n = log_desc_n;
131886d259ceSMichael Baum 	txq_data->wqe_s = 1 << txq_data->wqe_n;
131986d259ceSMichael Baum 	txq_data->wqe_m = txq_data->wqe_s - 1;
132074e91860SMichael Baum 	txq_data->wqes = (struct mlx5_wqe *)(uintptr_t)txq_obj->sq_obj.wqes;
132186d259ceSMichael Baum 	txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
132286d259ceSMichael Baum 	txq_data->wqe_ci = 0;
132386d259ceSMichael Baum 	txq_data->wqe_pi = 0;
132486d259ceSMichael Baum 	txq_data->wqe_comp = 0;
132586d259ceSMichael Baum 	txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
132631625e62SViacheslav Ovsiienko 	txq_data->qp_db = &txq_obj->sq_obj.db_rec[MLX5_SND_DBR];
132786d259ceSMichael Baum 	*txq_data->qp_db = 0;
132874e91860SMichael Baum 	txq_data->qp_num_8s = txq_obj->sq_obj.sq->id << 8;
1329a6b9d5a5SMichael Baum 	txq_data->db_heu = sh->cdev->config.dbnc == MLX5_SQ_DB_HEURISTIC;
13305dfa003dSMichael Baum 	txq_data->db_nc = sh->tx_uar.dbnc;
1331*2f5122dfSViacheslav Ovsiienko 	txq_data->wait_on_time = !!(!sh->config.tx_pp &&
1332*2f5122dfSViacheslav Ovsiienko 				    sh->cdev->config.hca_attr.wait_on_time);
133386d259ceSMichael Baum 	/* Change Send Queue state to Ready-to-Send. */
1334686d05b6SXueming Li 	ret = mlx5_txq_devx_modify(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0);
133586d259ceSMichael Baum 	if (ret) {
133686d259ceSMichael Baum 		rte_errno = errno;
133786d259ceSMichael Baum 		DRV_LOG(ERR,
1338a9c79306SMichael Baum 			"Port %u Tx queue %u SQ state change to SQC_STATE_RDY failed.",
133986d259ceSMichael Baum 			dev->data->port_id, idx);
134086d259ceSMichael Baum 		goto error;
134186d259ceSMichael Baum 	}
134286d259ceSMichael Baum #ifdef HAVE_IBV_FLOW_DV_SUPPORT
134386d259ceSMichael Baum 	/*
134486d259ceSMichael Baum 	 * When using DevX, the TIS transport domain value must be queried
134586d259ceSMichael Baum 	 * and stored. This is done once per port.
134686d259ceSMichael Baum 	 * The value is used later on Rx, when creating a matching TIR.
134786d259ceSMichael Baum 	 */
134886d259ceSMichael Baum 	if (!priv->sh->tdn)
134986d259ceSMichael Baum 		priv->sh->tdn = priv->sh->td->id;
135086d259ceSMichael Baum #endif
135186d259ceSMichael Baum 	txq_ctrl->uar_mmap_offset =
13525dfa003dSMichael Baum 			mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar.obj);
13535dfa003dSMichael Baum 	ppriv->uar_table[txq_data->idx] = sh->tx_uar.bf_db;
1354876b5d52SMatan Azrad 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
1355f49f4483SMichael Baum 	return 0;
135686d259ceSMichael Baum error:
135786d259ceSMichael Baum 	ret = rte_errno; /* Save rte_errno before cleanup. */
135888f2e3f1SMichael Baum 	mlx5_txq_release_devx_resources(txq_obj);
135986d259ceSMichael Baum 	rte_errno = ret; /* Restore rte_errno. */
1360f49f4483SMichael Baum 	return -rte_errno;
136186d259ceSMichael Baum #endif
136286d259ceSMichael Baum }
136386d259ceSMichael Baum 
136486d259ceSMichael Baum /**
136586d259ceSMichael Baum  * Release a Tx DevX queue object.
136686d259ceSMichael Baum  *
136786d259ceSMichael Baum  * @param txq_obj
136886d259ceSMichael Baum  *   DevX Tx queue object.
136986d259ceSMichael Baum  */
137086d259ceSMichael Baum void
137186d259ceSMichael Baum mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj)
137286d259ceSMichael Baum {
137386d259ceSMichael Baum 	MLX5_ASSERT(txq_obj);
1374354cc08aSMichael Baum 	if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
137586d259ceSMichael Baum 		if (txq_obj->tis)
137686d259ceSMichael Baum 			claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
1377f1ae0b35SOphir Munk #if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
137886d259ceSMichael Baum 	} else {
137988f2e3f1SMichael Baum 		mlx5_txq_release_devx_resources(txq_obj);
138086d259ceSMichael Baum #endif
138186d259ceSMichael Baum 	}
138286d259ceSMichael Baum }
138386d259ceSMichael Baum 
13848bb2410eSOphir Munk struct mlx5_obj_ops devx_obj_ops = {
13858bb2410eSOphir Munk 	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
13866deb19e1SMichael Baum 	.rxq_obj_new = mlx5_rxq_devx_obj_new,
138732287079SMichael Baum 	.rxq_event_get = mlx5_rx_devx_get_event,
1388c279f187SMichael Baum 	.rxq_obj_modify = mlx5_devx_modify_rq,
13896deb19e1SMichael Baum 	.rxq_obj_release = mlx5_rxq_devx_obj_release,
139025ae7f1aSMichael Baum 	.ind_table_new = mlx5_devx_ind_table_new,
1391fa7ad49eSAndrey Vesnovaty 	.ind_table_modify = mlx5_devx_ind_table_modify,
139225ae7f1aSMichael Baum 	.ind_table_destroy = mlx5_devx_ind_table_destroy,
139385552726SMichael Baum 	.hrxq_new = mlx5_devx_hrxq_new,
139485552726SMichael Baum 	.hrxq_destroy = mlx5_devx_tir_destroy,
1395b8cc58c1SAndrey Vesnovaty 	.hrxq_modify = mlx5_devx_hrxq_modify,
13960c762e81SMichael Baum 	.drop_action_create = mlx5_devx_drop_action_create,
13970c762e81SMichael Baum 	.drop_action_destroy = mlx5_devx_drop_action_destroy,
139886d259ceSMichael Baum 	.txq_obj_new = mlx5_txq_devx_obj_new,
1399686d05b6SXueming Li 	.txq_obj_modify = mlx5_txq_devx_modify,
140086d259ceSMichael Baum 	.txq_obj_release = mlx5_txq_devx_obj_release,
140123233fd6SBing Zhao 	.lb_dummy_queue_create = NULL,
140223233fd6SBing Zhao 	.lb_dummy_queue_release = NULL,
14038bb2410eSOphir Munk };
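/*
 * Usage sketch (assumed to match how the shared mlx5 probe code selects a
 * backend; the call sites are outside this file): one ops table is copied
 * into the per-port private data and all queue operations dispatch through
 * it, so the Verbs and DevX backends stay interchangeable:
 *
 *	priv->obj_ops = devx_obj_ops;
 *	ret = priv->obj_ops.rxq_obj_new(rxq);
 *	...
 *	priv->obj_ops.rxq_obj_release(rxq);
 */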
1404