/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include "mlx5dr_internal.h"

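/* Dependent rule WQEs are not posted to HW right away; they are staged in
 * the per-SQ dep_wqe software ring (indexed by head_dep_idx/tail_dep_idx)
 * and written to the SQ later by mlx5dr_send_all_dep_wqe(). The match tag
 * of the new slot is cleared so no stale data is ever sent.
 */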
struct mlx5dr_send_ring_dep_wqe *
mlx5dr_send_add_new_dep_wqe(struct mlx5dr_send_engine *queue)
{
	struct mlx5dr_send_ring_sq *send_sq = &queue->send_ring->send_sq;
	unsigned int idx = send_sq->head_dep_idx++ & (queue->num_entries - 1);

	memset(&send_sq->dep_wqe[idx].wqe_data.tag, 0, MLX5DR_MATCH_TAG_SZ);

	return &send_sq->dep_wqe[idx];
}

void mlx5dr_send_abort_new_dep_wqe(struct mlx5dr_send_engine *queue)
{
	queue->send_ring->send_sq.head_dep_idx--;
}

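/* Flush all staged dependent WQEs to the SQ. The first WQE is fenced so it
 * waits for the WQEs it depends on to complete, and only the last one sets
 * notify_hw, so the doorbell is rung once for the entire batch.
 */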
void mlx5dr_send_all_dep_wqe(struct mlx5dr_send_engine *queue)
{
	struct mlx5dr_send_ring_sq *send_sq = &queue->send_ring->send_sq;
	struct mlx5dr_send_ste_attr ste_attr = {0};
	struct mlx5dr_send_ring_dep_wqe *dep_wqe;

	ste_attr.send_attr.opmod = MLX5DR_WQE_GTA_OPMOD_STE;
	ste_attr.send_attr.opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;
	ste_attr.send_attr.len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;
	ste_attr.gta_opcode = MLX5DR_WQE_GTA_OP_ACTIVATE;

	/* Fence the first WQE against the previous WQEs it depends on */
	ste_attr.send_attr.fence = 1;

	while (send_sq->head_dep_idx != send_sq->tail_dep_idx) {
		dep_wqe = &send_sq->dep_wqe[send_sq->tail_dep_idx++ & (queue->num_entries - 1)];

		/* Notify HW on the last WQE */
		ste_attr.send_attr.notify_hw = (send_sq->tail_dep_idx == send_sq->head_dep_idx);
		ste_attr.send_attr.user_data = dep_wqe->user_data;
		ste_attr.send_attr.rule = dep_wqe->rule;

		ste_attr.rtc_0 = dep_wqe->rtc_0;
		ste_attr.rtc_1 = dep_wqe->rtc_1;
		ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
		ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
		ste_attr.used_id_rtc_0 = &dep_wqe->rule->rtc_0;
		ste_attr.used_id_rtc_1 = &dep_wqe->rule->rtc_1;
		ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
		ste_attr.wqe_data = &dep_wqe->wqe_data;
		ste_attr.direct_index = dep_wqe->direct_index;

		mlx5dr_send_ste(queue, &ste_attr);

		/* Fencing is done only on the first WQE */
		ste_attr.send_attr.fence = 0;
	}
}

struct mlx5dr_send_engine_post_ctrl
mlx5dr_send_engine_post_start(struct mlx5dr_send_engine *queue)
{
	struct mlx5dr_send_engine_post_ctrl ctrl;

	ctrl.queue = queue;
	/* Currently only one send ring is supported */
	ctrl.send_ring = &queue->send_ring[0];
	ctrl.num_wqebbs = 0;

	return ctrl;
}

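/* Reserve the next WQE basic block (WQEBB) of the current post and return
 * a pointer into it. In the first WQEBB, room is left for the WQE control
 * segment, which is filled in later by mlx5dr_send_engine_post_end().
 */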
void mlx5dr_send_engine_post_req_wqe(struct mlx5dr_send_engine_post_ctrl *ctrl,
				     char **buf, size_t *len)
{
	struct mlx5dr_send_ring_sq *send_sq = &ctrl->send_ring->send_sq;
	unsigned int idx;

	idx = (send_sq->cur_post + ctrl->num_wqebbs) & send_sq->buf_mask;

	*buf = send_sq->buf + (idx << MLX5_SEND_WQE_SHIFT);
	*len = MLX5_SEND_WQE_BB;

	if (!ctrl->num_wqebbs) {
		*buf += sizeof(struct mlx5dr_wqe_ctrl_seg);
		*len -= sizeof(struct mlx5dr_wqe_ctrl_seg);
	}

	ctrl->num_wqebbs++;
}

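/* Ring the SQ doorbell: a compiler barrier makes sure the WQE stores are
 * issued before the doorbell record update, and the write memory barriers
 * order the doorbell record and the 64-bit UAR write as seen by the device.
 */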
static void mlx5dr_send_engine_post_ring(struct mlx5dr_send_ring_sq *sq,
					 struct mlx5dv_devx_uar *uar,
					 struct mlx5dr_wqe_ctrl_seg *wqe_ctrl)
{
	rte_compiler_barrier();
	sq->db[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->cur_post);

	rte_wmb();
	mlx5dr_uar_write64_relaxed(*((uint64_t *)wqe_ctrl), uar->reg_addr);
	rte_wmb();
}

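/* Write the match tag into the GTA data segment. A jumbo tag extends into
 * the area otherwise used for actions, so only the control area is cleared
 * for it; for a regular tag the possibly dirty action area is cleared too.
 */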
static void
mlx5dr_send_wqe_set_tag(struct mlx5dr_wqe_gta_data_seg_ste *wqe_data,
			struct mlx5dr_rule_match_tag *tag,
			bool is_jumbo)
{
	if (is_jumbo) {
		/* Clear previous possibly dirty control */
		memset(wqe_data, 0, MLX5DR_STE_CTRL_SZ);
		memcpy(wqe_data->jumbo, tag->jumbo, MLX5DR_JUMBO_TAG_SZ);
	} else {
		/* Clear previous possibly dirty control and actions */
		memset(wqe_data, 0, MLX5DR_STE_CTRL_SZ + MLX5DR_ACTIONS_SZ);
		memcpy(wqe_data->tag, tag->match, MLX5DR_MATCH_TAG_SZ);
	}
}

void mlx5dr_send_engine_post_end(struct mlx5dr_send_engine_post_ctrl *ctrl,
				 struct mlx5dr_send_engine_post_attr *attr)
{
	struct mlx5dr_wqe_ctrl_seg *wqe_ctrl;
	struct mlx5dr_send_ring_sq *sq;
	uint32_t flags = 0;
	unsigned int idx;

	sq = &ctrl->send_ring->send_sq;
	idx = sq->cur_post & sq->buf_mask;
	sq->last_idx = idx;

	wqe_ctrl = (void *)(sq->buf + (idx << MLX5_SEND_WQE_SHIFT));

	wqe_ctrl->opmod_idx_opcode =
		rte_cpu_to_be_32((attr->opmod << 24) |
				 ((sq->cur_post & 0xffff) << 8) |
				 attr->opcode);
	wqe_ctrl->qpn_ds =
		rte_cpu_to_be_32((attr->len + sizeof(struct mlx5dr_wqe_ctrl_seg)) / 16 |
				 sq->sqn << 8);

	wqe_ctrl->imm = rte_cpu_to_be_32(attr->id);

	flags |= attr->notify_hw ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
	flags |= attr->fence ? MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE : 0;
	wqe_ctrl->flags = rte_cpu_to_be_32(flags);

	sq->wr_priv[idx].id = attr->id;
	sq->wr_priv[idx].retry_id = attr->retry_id;

	sq->wr_priv[idx].rule = attr->rule;
	sq->wr_priv[idx].user_data = attr->user_data;
	sq->wr_priv[idx].num_wqebbs = ctrl->num_wqebbs;

	if (attr->rule) {
		sq->wr_priv[idx].rule->pending_wqes++;
		sq->wr_priv[idx].used_id = attr->used_id;
	}

	sq->cur_post += ctrl->num_wqebbs;

	if (attr->notify_hw)
		mlx5dr_send_engine_post_ring(sq, ctrl->queue->uar, wqe_ctrl);
}

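/* Post a single GTA (generate-table-access) WQE built from two WQEBBs:
 * one holding the GTA control segment and one holding the GTA data
 * segment (either caller-provided raw data or the rule match tag).
 */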
static void mlx5dr_send_wqe(struct mlx5dr_send_engine *queue,
			    struct mlx5dr_send_engine_post_attr *send_attr,
			    struct mlx5dr_wqe_gta_ctrl_seg *send_wqe_ctrl,
			    void *send_wqe_data,
			    void *send_wqe_tag,
			    bool is_jumbo,
			    uint8_t gta_opcode,
			    uint32_t direct_index)
{
	struct mlx5dr_wqe_gta_data_seg_ste *wqe_data;
	struct mlx5dr_wqe_gta_ctrl_seg *wqe_ctrl;
	struct mlx5dr_send_engine_post_ctrl ctrl;
	size_t wqe_len;

	ctrl = mlx5dr_send_engine_post_start(queue);
	mlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
	mlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_data, &wqe_len);

	wqe_ctrl->op_dirix = htobe32(gta_opcode << 28 | direct_index);
	memcpy(wqe_ctrl->stc_ix, send_wqe_ctrl->stc_ix, sizeof(send_wqe_ctrl->stc_ix));

	if (send_wqe_data)
		memcpy(wqe_data, send_wqe_data, sizeof(*wqe_data));
	else
		mlx5dr_send_wqe_set_tag(wqe_data, send_wqe_tag, is_jumbo);

	mlx5dr_send_engine_post_end(&ctrl, send_attr);
}

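/* Send the STE WQE(s) for a rule. When both RTC 0 and RTC 1 are set, the
 * same WQE is posted once per RTC: the fence is applied only to the first
 * post and HW is notified only on the last, so the mirrored pair behaves
 * like a single request.
 */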
void mlx5dr_send_ste(struct mlx5dr_send_engine *queue,
		     struct mlx5dr_send_ste_attr *ste_attr)
{
	struct mlx5dr_send_engine_post_attr *send_attr = &ste_attr->send_attr;
	uint8_t notify_hw = send_attr->notify_hw;
	uint8_t fence = send_attr->fence;

	if (ste_attr->rtc_1) {
		send_attr->id = ste_attr->rtc_1;
		send_attr->used_id = ste_attr->used_id_rtc_1;
		send_attr->retry_id = ste_attr->retry_rtc_1;
		send_attr->fence = fence;
		send_attr->notify_hw = notify_hw && !ste_attr->rtc_0;
		mlx5dr_send_wqe(queue, send_attr,
				ste_attr->wqe_ctrl,
				ste_attr->wqe_data,
				ste_attr->wqe_tag,
				ste_attr->wqe_tag_is_jumbo,
				ste_attr->gta_opcode,
				ste_attr->direct_index);
	}

	if (ste_attr->rtc_0) {
		send_attr->id = ste_attr->rtc_0;
		send_attr->used_id = ste_attr->used_id_rtc_0;
		send_attr->retry_id = ste_attr->retry_rtc_0;
		send_attr->fence = fence && !ste_attr->rtc_1;
		send_attr->notify_hw = notify_hw;
		mlx5dr_send_wqe(queue, send_attr,
				ste_attr->wqe_ctrl,
				ste_attr->wqe_data,
				ste_attr->wqe_tag,
				ste_attr->wqe_tag_is_jumbo,
				ste_attr->gta_opcode,
				ste_attr->direct_index);
	}

	/* Restore the original requested values */
	send_attr->notify_hw = notify_hw;
	send_attr->fence = fence;
}

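/* Post a WQE through the FW GENERATE_WQE command instead of the SQ. The
 * WQE segments are built in host memory and handed to FW, which executes
 * them and returns the resulting CQE synchronously. This path carries an
 * optional second (range) GTA data segment with its own definer, e.g. for
 * range matching, which the regular SQ post above does not take.
 */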
static
int mlx5dr_send_wqe_fw(struct ibv_context *ibv_ctx,
		       uint32_t pd_num,
		       struct mlx5dr_send_engine_post_attr *send_attr,
		       struct mlx5dr_wqe_gta_ctrl_seg *send_wqe_ctrl,
		       void *send_wqe_match_data,
		       void *send_wqe_match_tag,
		       void *send_wqe_range_data,
		       void *send_wqe_range_tag,
		       bool is_jumbo,
		       uint8_t gta_opcode)
{
	bool has_range = send_wqe_range_data || send_wqe_range_tag;
	bool has_match = send_wqe_match_data || send_wqe_match_tag;
	struct mlx5dr_wqe_gta_data_seg_ste gta_wqe_data0 = {0};
	struct mlx5dr_wqe_gta_data_seg_ste gta_wqe_data1 = {0};
	struct mlx5dr_wqe_gta_ctrl_seg gta_wqe_ctrl = {0};
	struct mlx5dr_cmd_generate_wqe_attr attr = {0};
	struct mlx5dr_wqe_ctrl_seg wqe_ctrl = {0};
	struct mlx5_cqe64 cqe;
	uint32_t flags = 0;
	int ret;

	/* Set WQE control */
	wqe_ctrl.opmod_idx_opcode =
		rte_cpu_to_be_32((send_attr->opmod << 24) | send_attr->opcode);
	wqe_ctrl.qpn_ds =
		rte_cpu_to_be_32((send_attr->len + sizeof(struct mlx5dr_wqe_ctrl_seg)) / 16);
	flags |= send_attr->notify_hw ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
	wqe_ctrl.flags = rte_cpu_to_be_32(flags);
	wqe_ctrl.imm = rte_cpu_to_be_32(send_attr->id);

	/* Set GTA WQE CTRL */
	memcpy(gta_wqe_ctrl.stc_ix, send_wqe_ctrl->stc_ix, sizeof(send_wqe_ctrl->stc_ix));
	gta_wqe_ctrl.op_dirix = htobe32(gta_opcode << 28);

	/* Set GTA match WQE DATA */
	if (has_match) {
		if (send_wqe_match_data)
			memcpy(&gta_wqe_data0, send_wqe_match_data, sizeof(gta_wqe_data0));
		else
			mlx5dr_send_wqe_set_tag(&gta_wqe_data0, send_wqe_match_tag, is_jumbo);

		gta_wqe_data0.rsvd1_definer = htobe32(send_attr->match_definer_id << 8);
		attr.gta_data_0 = (uint8_t *)&gta_wqe_data0;
	}

	/* Set GTA range WQE DATA */
	if (has_range) {
		if (send_wqe_range_data)
			memcpy(&gta_wqe_data1, send_wqe_range_data, sizeof(gta_wqe_data1));
		else
			mlx5dr_send_wqe_set_tag(&gta_wqe_data1, send_wqe_range_tag, false);

		gta_wqe_data1.rsvd1_definer = htobe32(send_attr->range_definer_id << 8);
		attr.gta_data_1 = (uint8_t *)&gta_wqe_data1;
	}

	attr.pdn = pd_num;
	attr.wqe_ctrl = (uint8_t *)&wqe_ctrl;
	attr.gta_ctrl = (uint8_t *)&gta_wqe_ctrl;

send_wqe:
	ret = mlx5dr_cmd_generate_wqe(ibv_ctx, &attr, &cqe);
	if (ret) {
		DR_LOG(ERR, "Failed to write WQE using command");
		return ret;
	}

	if ((mlx5dv_get_cqe_opcode(&cqe) == MLX5_CQE_REQ) &&
	    (rte_be_to_cpu_32(cqe.byte_cnt) >> 31 == 0)) {
		*send_attr->used_id = send_attr->id;
		return 0;
	}

	/* Retry if rule failed */
	if (send_attr->retry_id) {
		wqe_ctrl.imm = rte_cpu_to_be_32(send_attr->retry_id);
		send_attr->id = send_attr->retry_id;
		send_attr->retry_id = 0;
		goto send_wqe;
	}

	return -1;
}

void mlx5dr_send_stes_fw(struct mlx5dr_send_engine *queue,
			 struct mlx5dr_send_ste_attr *ste_attr)
{
	struct mlx5dr_send_engine_post_attr *send_attr = &ste_attr->send_attr;
	struct mlx5dr_rule *rule = send_attr->rule;
	struct ibv_context *ibv_ctx;
	struct mlx5dr_context *ctx;
	uint16_t queue_id;
	uint32_t pdn;
	int ret;

	ctx = rule->matcher->tbl->ctx;
	queue_id = queue - ctx->send_queue;
	ibv_ctx = ctx->ibv_ctx;
	pdn = ctx->pd_num;

	/* Writing through FW cannot be fenced in HW, therefore drain the queue */
	if (send_attr->fence)
		mlx5dr_send_queue_action(ctx,
					 queue_id,
					 MLX5DR_SEND_QUEUE_ACTION_DRAIN_SYNC);

	if (ste_attr->rtc_1) {
		send_attr->id = ste_attr->rtc_1;
		send_attr->used_id = ste_attr->used_id_rtc_1;
		send_attr->retry_id = ste_attr->retry_rtc_1;
		ret = mlx5dr_send_wqe_fw(ibv_ctx, pdn, send_attr,
					 ste_attr->wqe_ctrl,
					 ste_attr->wqe_data,
					 ste_attr->wqe_tag,
					 ste_attr->range_wqe_data,
					 ste_attr->range_wqe_tag,
					 ste_attr->wqe_tag_is_jumbo,
					 ste_attr->gta_opcode);
		if (ret)
			goto fail_rule;
	}

	if (ste_attr->rtc_0) {
		send_attr->id = ste_attr->rtc_0;
		send_attr->used_id = ste_attr->used_id_rtc_0;
		send_attr->retry_id = ste_attr->retry_rtc_0;
		ret = mlx5dr_send_wqe_fw(ibv_ctx, pdn, send_attr,
					 ste_attr->wqe_ctrl,
					 ste_attr->wqe_data,
					 ste_attr->wqe_tag,
					 ste_attr->range_wqe_data,
					 ste_attr->range_wqe_tag,
					 ste_attr->wqe_tag_is_jumbo,
					 ste_attr->gta_opcode);
		if (ret)
			goto fail_rule;
	}

	/* Increment the status; this only works on the good flow since the
	 * enum is ordered: creating -> created -> deleting -> deleted
	 */
	rule->status++;
	mlx5dr_send_engine_gen_comp(queue, send_attr->user_data, RTE_FLOW_OP_SUCCESS);
	return;

fail_rule:
	rule->status = !rule->rtc_0 && !rule->rtc_1 ?
		MLX5DR_RULE_STATUS_FAILED : MLX5DR_RULE_STATUS_FAILING;
	mlx5dr_send_engine_gen_comp(queue, send_attr->user_data, RTE_FLOW_OP_ERROR);
}

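/* Repost a WQE that completed with an error, this time targeting the
 * retry RTC. The original GTA control and data segments are copied from
 * their old SQ buffer slots (located via the CQE WQE counter) into a
 * freshly reserved pair of WQEBBs.
 */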
static void mlx5dr_send_engine_retry_post_send(struct mlx5dr_send_engine *queue,
					       struct mlx5dr_send_ring_priv *priv,
					       uint16_t wqe_cnt)
{
	struct mlx5dr_send_engine_post_attr send_attr = {0};
	struct mlx5dr_wqe_gta_data_seg_ste *wqe_data;
	struct mlx5dr_wqe_gta_ctrl_seg *wqe_ctrl;
	struct mlx5dr_send_engine_post_ctrl ctrl;
	struct mlx5dr_send_ring_sq *send_sq;
	unsigned int idx;
	size_t wqe_len;
	char *p;

	send_attr.rule = priv->rule;
	send_attr.opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;
	send_attr.opmod = MLX5DR_WQE_GTA_OPMOD_STE;
	send_attr.len = MLX5_SEND_WQE_BB * 2 - sizeof(struct mlx5dr_wqe_ctrl_seg);
	send_attr.notify_hw = 1;
	send_attr.fence = 0;
	send_attr.user_data = priv->user_data;
	send_attr.id = priv->retry_id;
	send_attr.used_id = priv->used_id;

	ctrl = mlx5dr_send_engine_post_start(queue);
	mlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
	mlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_data, &wqe_len);

	send_sq = &ctrl.send_ring->send_sq;
	idx = wqe_cnt & send_sq->buf_mask;
	p = send_sq->buf + (idx << MLX5_SEND_WQE_SHIFT);

	/* Copy old GTA ctrl */
	memcpy(wqe_ctrl, p + sizeof(struct mlx5dr_wqe_ctrl_seg),
	       MLX5_SEND_WQE_BB - sizeof(struct mlx5dr_wqe_ctrl_seg));

	idx = (wqe_cnt + 1) & send_sq->buf_mask;
	p = send_sq->buf + (idx << MLX5_SEND_WQE_SHIFT);

	/* Copy old GTA data */
	memcpy(wqe_data, p, MLX5_SEND_WQE_BB);

	mlx5dr_send_engine_post_end(&ctrl, &send_attr);
}

void mlx5dr_send_engine_flush_queue(struct mlx5dr_send_engine *queue)
{
	struct mlx5dr_send_ring_sq *sq = &queue->send_ring[0].send_sq;
	struct mlx5dr_wqe_ctrl_seg *wqe_ctrl;

	wqe_ctrl = (void *)(sq->buf + (sq->last_idx << MLX5_SEND_WQE_SHIFT));

	wqe_ctrl->flags |= rte_cpu_to_be_32(MLX5_WQE_CTRL_CQ_UPDATE);

	mlx5dr_send_engine_post_ring(sq, queue->uar, wqe_ctrl);
}

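/* Handle a completion for a rule that is being moved as part of a matcher
 * resize. In the WRITING state the rule was being copied to the resized
 * matcher; on failure the original RTCs are restored so the partial copy
 * can still be deleted. In the DELETING state the move is finalized and,
 * on success, the rule is attached to the destination matcher.
 */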
static void
mlx5dr_send_engine_update_rule_resize(struct mlx5dr_send_engine *queue,
				      struct mlx5dr_send_ring_priv *priv,
				      enum rte_flow_op_status *status)
{
	switch (priv->rule->resize_info->state) {
	case MLX5DR_RULE_RESIZE_STATE_WRITING:
		if (priv->rule->status == MLX5DR_RULE_STATUS_FAILING) {
			/* Backup original RTCs */
			uint32_t orig_rtc_0 = priv->rule->resize_info->rtc_0;
			uint32_t orig_rtc_1 = priv->rule->resize_info->rtc_1;

			/* Delete the partially failed moved rule using resize_info */
			priv->rule->resize_info->rtc_0 = priv->rule->rtc_0;
			priv->rule->resize_info->rtc_1 = priv->rule->rtc_1;

			/* Move rule to original RTC for future delete */
			priv->rule->rtc_0 = orig_rtc_0;
			priv->rule->rtc_1 = orig_rtc_1;
		}
		/* Clean leftovers */
		mlx5dr_rule_move_hws_remove(priv->rule, queue, priv->user_data);
		break;

	case MLX5DR_RULE_RESIZE_STATE_DELETING:
		if (priv->rule->status == MLX5DR_RULE_STATUS_FAILING) {
			*status = RTE_FLOW_OP_ERROR;
		} else {
			*status = RTE_FLOW_OP_SUCCESS;
			priv->rule->matcher = priv->rule->matcher->resize_dst;
		}
		priv->rule->resize_info->state = MLX5DR_RULE_RESIZE_STATE_IDLE;
		priv->rule->status = MLX5DR_RULE_STATUS_CREATED;
		break;

	default:
		break;
	}
}

static void mlx5dr_send_engine_update_rule(struct mlx5dr_send_engine *queue,
					   struct mlx5dr_send_ring_priv *priv,
					   uint16_t wqe_cnt,
					   enum rte_flow_op_status *status)
{
	priv->rule->pending_wqes--;

	if (*status == RTE_FLOW_OP_ERROR) {
		if (priv->retry_id) {
			mlx5dr_send_engine_retry_post_send(queue, priv, wqe_cnt);
			return;
		}
		/* Some part of the rule failed */
		priv->rule->status = MLX5DR_RULE_STATUS_FAILING;
		*priv->used_id = 0;
	} else {
		*priv->used_id = priv->id;
	}

	/* Update rule status for the last completion */
	if (!priv->rule->pending_wqes) {
		if (unlikely(mlx5dr_rule_move_in_progress(priv->rule))) {
			mlx5dr_send_engine_update_rule_resize(queue, priv, status);
			return;
		}

		if (unlikely(priv->rule->status == MLX5DR_RULE_STATUS_FAILING)) {
			/* Rule completely failed and doesn't require cleanup */
			if (!priv->rule->rtc_0 && !priv->rule->rtc_1)
				priv->rule->status = MLX5DR_RULE_STATUS_FAILED;

			*status = RTE_FLOW_OP_ERROR;
		} else {
			/* Increment the status; this only works on the good flow
			 * since the enum is ordered:
			 * creating -> created -> deleting -> deleted
			 */
			priv->rule->status++;
			*status = RTE_FLOW_OP_SUCCESS;
			/* Rule was deleted, now we can safely release action STEs
			 * and clear resize info
			 */
			if (priv->rule->status == MLX5DR_RULE_STATUS_DELETED) {
				mlx5dr_rule_free_action_ste_idx(priv->rule);
				mlx5dr_rule_clear_resize_info(priv->rule);
			}
		}
	}
}

static void mlx5dr_send_engine_update(struct mlx5dr_send_engine *queue,
				      struct mlx5_cqe64 *cqe,
				      struct mlx5dr_send_ring_priv *priv,
				      struct rte_flow_op_result res[],
				      int64_t *i,
				      uint32_t res_nb,
				      uint16_t wqe_cnt)
{
	enum rte_flow_op_status status;

	if (!cqe || (likely(rte_be_to_cpu_32(cqe->byte_cnt) >> 31 == 0) &&
	    likely(mlx5dv_get_cqe_opcode(cqe) == MLX5_CQE_REQ))) {
		status = RTE_FLOW_OP_SUCCESS;
	} else {
		status = RTE_FLOW_OP_ERROR;
	}

	if (priv->user_data) {
		if (priv->rule) {
			mlx5dr_send_engine_update_rule(queue, priv, wqe_cnt, &status);
			/* Completion is provided on the last rule WQE */
			if (priv->rule->pending_wqes)
				return;
		}

		if (*i < res_nb) {
			res[*i].user_data = priv->user_data;
			res[*i].status = status;
			(*i)++;
			mlx5dr_send_engine_dec_rule(queue);
		} else {
			mlx5dr_send_engine_gen_comp(queue, priv->user_data, status);
		}
	}
}

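/* Poll one CQ for a completion. CQE ownership is detected by comparing
 * the CQE owner bit with the expected SW owner bit, which flips on every
 * CQ wrap-around. A single CQE may complete several posts: every WQE
 * between the last polled position and the CQE's WQE counter is completed
 * implicitly, without a CQE of its own.
 */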
static void mlx5dr_send_engine_poll_cq(struct mlx5dr_send_engine *queue,
				       struct mlx5dr_send_ring *send_ring,
				       struct rte_flow_op_result res[],
				       int64_t *i,
				       uint32_t res_nb)
{
	struct mlx5dr_send_ring_cq *cq = &send_ring->send_cq;
	struct mlx5dr_send_ring_sq *sq = &send_ring->send_sq;
	uint32_t cq_idx = cq->cons_index & cq->ncqe_mask;
	struct mlx5dr_send_ring_priv *priv;
	struct mlx5_cqe64 *cqe;
	uint32_t offset_cqe64;
	uint8_t cqe_opcode;
	uint8_t cqe_owner;
	uint16_t wqe_cnt;
	uint8_t sw_own;

	offset_cqe64 = RTE_CACHE_LINE_SIZE - sizeof(struct mlx5_cqe64);
	cqe = (void *)(cq->buf + (cq_idx << cq->cqe_log_sz) + offset_cqe64);

	sw_own = (cq->cons_index & cq->ncqe) ? 1 : 0;
	cqe_opcode = mlx5dv_get_cqe_opcode(cqe);
	cqe_owner = mlx5dv_get_cqe_owner(cqe);

	if (cqe_opcode == MLX5_CQE_INVALID ||
	    cqe_owner != sw_own)
		return;

	if (unlikely(cqe_opcode != MLX5_CQE_REQ)) {
		struct mlx5_error_cqe *err_cqe = (struct mlx5_error_cqe *)cqe;

		DR_LOG(ERR, "CQE ERR:0x%x, Vendor_ERR:0x%x, OP:0x%x, QPN:0x%x, WQE_CNT:0x%x",
			err_cqe->syndrome, err_cqe->vendor_err_synd, cqe_opcode,
			(rte_be_to_cpu_32(err_cqe->s_wqe_opcode_qpn) & 0xffffff),
			rte_be_to_cpu_16(err_cqe->wqe_counter));
		queue->err = true;
	}

	rte_io_rmb();

	wqe_cnt = be16toh(cqe->wqe_counter) & sq->buf_mask;

	while (cq->poll_wqe != wqe_cnt) {
		priv = &sq->wr_priv[cq->poll_wqe];
		mlx5dr_send_engine_update(queue, NULL, priv, res, i, res_nb, 0);
		cq->poll_wqe = (cq->poll_wqe + priv->num_wqebbs) & sq->buf_mask;
	}

	priv = &sq->wr_priv[wqe_cnt];
	cq->poll_wqe = (wqe_cnt + priv->num_wqebbs) & sq->buf_mask;
	mlx5dr_send_engine_update(queue, cqe, priv, res, i, res_nb, wqe_cnt);
	cq->cons_index++;
	*cq->db = htobe32(cq->cons_index & 0xffffff);
}

static void mlx5dr_send_engine_poll_cqs(struct mlx5dr_send_engine *queue,
					struct rte_flow_op_result res[],
					int64_t *polled,
					uint32_t res_nb)
{
	int j;

	for (j = 0; j < MLX5DR_NUM_SEND_RINGS; j++)
		mlx5dr_send_engine_poll_cq(queue, &queue->send_ring[j],
					   res, polled, res_nb);
}

static void mlx5dr_send_engine_poll_list(struct mlx5dr_send_engine *queue,
					 struct rte_flow_op_result res[],
					 int64_t *polled,
					 uint32_t res_nb)
{
	struct mlx5dr_completed_poll *comp = &queue->completed;

	while (comp->ci != comp->pi) {
		if (*polled < res_nb) {
			res[*polled].status =
				comp->entries[comp->ci].status;
			res[*polled].user_data =
				comp->entries[comp->ci].user_data;
			(*polled)++;
			comp->ci = (comp->ci + 1) & comp->mask;
			mlx5dr_send_engine_dec_rule(queue);
		} else {
			return;
		}
	}
}

static int mlx5dr_send_engine_poll(struct mlx5dr_send_engine *queue,
				   struct rte_flow_op_result res[],
				   uint32_t res_nb)
{
	int64_t polled = 0;

	mlx5dr_send_engine_poll_list(queue, res, &polled, res_nb);

	if (polled >= res_nb)
		return polled;

	mlx5dr_send_engine_poll_cqs(queue, res, &polled, res_nb);

	return polled;
}

int mlx5dr_send_queue_poll(struct mlx5dr_context *ctx,
			   uint16_t queue_id,
			   struct rte_flow_op_result res[],
			   uint32_t res_nb)
{
	return mlx5dr_send_engine_poll(&ctx->send_queue[queue_id],
				       res, res_nb);
}

static int mlx5dr_send_ring_create_sq_obj(struct mlx5dr_context *ctx,
					  struct mlx5dr_send_engine *queue,
					  struct mlx5dr_send_ring_sq *sq,
					  struct mlx5dr_send_ring_cq *cq,
					  size_t log_wq_sz)
{
	struct mlx5dr_cmd_sq_create_attr attr = {0};
	int err;

	attr.cqn = cq->cqn;
	attr.pdn = ctx->pd_num;
	attr.page_id = queue->uar->page_id;
	attr.dbr_id = sq->db_umem->umem_id;
	attr.wq_id = sq->buf_umem->umem_id;
	attr.log_wq_sz = log_wq_sz;
	if (ctx->caps->sq_ts_format == MLX5_HCA_CAP_TIMESTAMP_FORMAT_FR)
		attr.ts_format = MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING;
	else
		attr.ts_format = MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT;

	sq->obj = mlx5dr_cmd_sq_create(ctx->ibv_ctx, &attr);
	if (!sq->obj)
		return rte_errno;

	sq->sqn = sq->obj->id;

	err = mlx5dr_cmd_sq_modify_rdy(sq->obj);
	if (err)
		goto free_sq;

	return 0;

free_sq:
	mlx5dr_cmd_destroy_obj(sq->obj);

	return err;
}

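/* Open the SQ resources. The buffer is sized for the worst case of
 * MAX_WQES_PER_RULE WQEBBs per queue entry, rounded up to a power of two,
 * and is registered together with the doorbell record as DevX umems.
 */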
static int mlx5dr_send_ring_open_sq(struct mlx5dr_context *ctx,
				    struct mlx5dr_send_engine *queue,
				    struct mlx5dr_send_ring_sq *sq,
				    struct mlx5dr_send_ring_cq *cq)
{
	size_t sq_log_buf_sz;
	size_t buf_aligned;
	size_t sq_buf_sz;
	size_t page_size;
	size_t buf_sz;
	int err;

	buf_sz = queue->num_entries * MAX_WQES_PER_RULE;
	sq_log_buf_sz = log2above(buf_sz);
	sq_buf_sz = 1 << (sq_log_buf_sz + log2above(MLX5_SEND_WQE_BB));

	page_size = sysconf(_SC_PAGESIZE);
	buf_aligned = align(sq_buf_sz, page_size);
	err = posix_memalign((void **)&sq->buf, page_size, buf_aligned);
	if (err) {
		rte_errno = ENOMEM;
		return err;
	}
	memset(sq->buf, 0, buf_aligned);

	err = posix_memalign((void **)&sq->db, 8, 8);
	if (err)
		goto free_buf;

	sq->buf_umem = mlx5_glue->devx_umem_reg(ctx->ibv_ctx, sq->buf, sq_buf_sz, 0);

	if (!sq->buf_umem) {
		err = errno;
		goto free_db;
	}

	sq->db_umem = mlx5_glue->devx_umem_reg(ctx->ibv_ctx, sq->db, 8, 0);
	if (!sq->db_umem) {
		err = errno;
		goto free_buf_umem;
	}

	err = mlx5dr_send_ring_create_sq_obj(ctx, queue, sq, cq, sq_log_buf_sz);

	if (err)
		goto free_db_umem;

	sq->wr_priv = simple_malloc(sizeof(*sq->wr_priv) * buf_sz);
	if (!sq->wr_priv) {
		err = ENOMEM;
		goto destroy_sq_obj;
	}

	sq->dep_wqe = simple_calloc(queue->num_entries, sizeof(*sq->dep_wqe));
	if (!sq->dep_wqe) {
		err = ENOMEM;
		goto destroy_wr_priv;
	}

	sq->buf_mask = buf_sz - 1;

	return 0;

destroy_wr_priv:
	simple_free(sq->wr_priv);
destroy_sq_obj:
	mlx5dr_cmd_destroy_obj(sq->obj);
free_db_umem:
	mlx5_glue->devx_umem_dereg(sq->db_umem);
free_buf_umem:
	mlx5_glue->devx_umem_dereg(sq->buf_umem);
free_db:
	free(sq->db);
free_buf:
	free(sq->buf);
	rte_errno = err;
	return err;
}

static void mlx5dr_send_ring_close_sq(struct mlx5dr_send_ring_sq *sq)
{
	simple_free(sq->dep_wqe);
	mlx5dr_cmd_destroy_obj(sq->obj);
	mlx5_glue->devx_umem_dereg(sq->db_umem);
	mlx5_glue->devx_umem_dereg(sq->buf_umem);
	simple_free(sq->wr_priv);
	free(sq->db);
	free(sq->buf);
}

static int mlx5dr_send_ring_open_cq(struct mlx5dr_context *ctx,
				    struct mlx5dr_send_engine *queue,
				    struct mlx5dr_send_ring_cq *cq)
{
	struct mlx5dv_cq mlx5_cq = {0};
	struct mlx5dv_obj obj;
	struct ibv_cq *ibv_cq;
	size_t cq_size;
	int err;

	cq_size = queue->num_entries;
	ibv_cq = mlx5_glue->create_cq(ctx->ibv_ctx, cq_size, NULL, NULL, 0);
	if (!ibv_cq) {
		DR_LOG(ERR, "Failed to create CQ");
		rte_errno = errno;
		return rte_errno;
	}

	obj.cq.in = ibv_cq;
	obj.cq.out = &mlx5_cq;
	err = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
	if (err) {
		err = errno;
		goto close_cq;
	}

	cq->buf = mlx5_cq.buf;
	cq->db = mlx5_cq.dbrec;
	cq->ncqe = mlx5_cq.cqe_cnt;
	cq->cqe_sz = mlx5_cq.cqe_size;
	cq->cqe_log_sz = log2above(cq->cqe_sz);
	cq->ncqe_mask = cq->ncqe - 1;
	cq->buf_sz = cq->cqe_sz * cq->ncqe;
	cq->cqn = mlx5_cq.cqn;
	cq->ibv_cq = ibv_cq;

	return 0;

close_cq:
	mlx5_glue->destroy_cq(ibv_cq);
	rte_errno = err;
	return err;
}

static void mlx5dr_send_ring_close_cq(struct mlx5dr_send_ring_cq *cq)
{
	mlx5_glue->destroy_cq(cq->ibv_cq);
}

static void mlx5dr_send_ring_close(struct mlx5dr_send_ring *ring)
{
	mlx5dr_send_ring_close_sq(&ring->send_sq);
	mlx5dr_send_ring_close_cq(&ring->send_cq);
}

static int mlx5dr_send_ring_open(struct mlx5dr_context *ctx,
				 struct mlx5dr_send_engine *queue,
				 struct mlx5dr_send_ring *ring)
{
	int err;

	err = mlx5dr_send_ring_open_cq(ctx, queue, &ring->send_cq);
	if (err)
		return err;

	err = mlx5dr_send_ring_open_sq(ctx, queue, &ring->send_sq, &ring->send_cq);
	if (err)
		goto close_cq;

	return err;

close_cq:
	mlx5dr_send_ring_close_cq(&ring->send_cq);

	return err;
}

static void __mlx5dr_send_rings_close(struct mlx5dr_send_engine *queue,
				      uint16_t i)
{
	while (i--)
		mlx5dr_send_ring_close(&queue->send_ring[i]);
}

static void mlx5dr_send_rings_close(struct mlx5dr_send_engine *queue)
{
	__mlx5dr_send_rings_close(queue, queue->rings);
}

static int mlx5dr_send_rings_open(struct mlx5dr_context *ctx,
				  struct mlx5dr_send_engine *queue)
{
	uint16_t i;
	int err;

	for (i = 0; i < queue->rings; i++) {
		err = mlx5dr_send_ring_open(ctx, queue, &queue->send_ring[i]);
		if (err)
			goto free_rings;
	}

	return 0;

free_rings:
	__mlx5dr_send_rings_close(queue, i);

	return err;
}

void mlx5dr_send_queue_close(struct mlx5dr_send_engine *queue)
{
	mlx5dr_send_rings_close(queue);
	simple_free(queue->completed.entries);
	mlx5_glue->devx_free_uar(queue->uar);
}

int mlx5dr_send_queue_open(struct mlx5dr_context *ctx,
			   struct mlx5dr_send_engine *queue,
			   uint16_t queue_size)
{
	struct mlx5dv_devx_uar *uar;
	int err;

#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
	uar = mlx5_glue->devx_alloc_uar(ctx->ibv_ctx, MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC);
	if (!uar) {
		rte_errno = errno;
		return rte_errno;
	}
#else
	uar = NULL;
	rte_errno = ENOTSUP;
	return rte_errno;
#endif

	queue->uar = uar;
	queue->rings = MLX5DR_NUM_SEND_RINGS;
	queue->num_entries = roundup_pow_of_two(queue_size);
	queue->used_entries = 0;
	queue->th_entries = queue->num_entries;

	queue->completed.entries = simple_calloc(queue->num_entries,
						 sizeof(queue->completed.entries[0]));
	if (!queue->completed.entries) {
		rte_errno = ENOMEM;
		goto free_uar;
	}
	queue->completed.pi = 0;
	queue->completed.ci = 0;
	queue->completed.mask = queue->num_entries - 1;

	err = mlx5dr_send_rings_open(ctx, queue);
	if (err)
		goto free_completed_entries;

	return 0;

free_completed_entries:
	simple_free(queue->completed.entries);
free_uar:
	mlx5_glue->devx_free_uar(uar);
	return rte_errno;
}

static void __mlx5dr_send_queues_close(struct mlx5dr_context *ctx, uint16_t queues)
{
	while (queues--)
		mlx5dr_send_queue_close(&ctx->send_queue[queues]);
}

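/* The BWC (backward-compatible) API gets its own set of send queues: one
 * extra queue per data queue, each paired with a spinlock that serializes
 * access to it.
 */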
static int mlx5dr_bwc_send_queues_init(struct mlx5dr_context *ctx)
{
	int bwc_queues = ctx->queues - 1;
	int i;

	if (!mlx5dr_context_bwc_supported(ctx))
		return 0;

	ctx->queues += bwc_queues;

	ctx->bwc_send_queue_locks = simple_calloc(bwc_queues,
						  sizeof(*ctx->bwc_send_queue_locks));
	if (!ctx->bwc_send_queue_locks) {
		rte_errno = ENOMEM;
		return rte_errno;
	}

	for (i = 0; i < bwc_queues; i++)
		rte_spinlock_init(&ctx->bwc_send_queue_locks[i]);

	return 0;
}

static void mlx5dr_send_queues_bwc_locks_destroy(struct mlx5dr_context *ctx)
{
	if (!mlx5dr_context_bwc_supported(ctx))
		return;

	simple_free(ctx->bwc_send_queue_locks);
}

void mlx5dr_send_queues_close(struct mlx5dr_context *ctx)
{
	__mlx5dr_send_queues_close(ctx, ctx->queues);
	simple_free(ctx->send_queue);
	mlx5dr_send_queues_bwc_locks_destroy(ctx);
}

int mlx5dr_send_queues_open(struct mlx5dr_context *ctx,
			    uint16_t queues,
			    uint16_t queue_size)
{
	int err = 0;
	uint32_t i;

	/* Open one extra queue for the control path */
	ctx->queues = queues + 1;

	/* Open a separate set of queues and locks for the BWC API */
	err = mlx5dr_bwc_send_queues_init(ctx);
	if (err)
		return err;

	ctx->send_queue = simple_calloc(ctx->queues, sizeof(*ctx->send_queue));
	if (!ctx->send_queue) {
		rte_errno = ENOMEM;
		err = rte_errno;
		goto free_bwc_locks;
	}

	for (i = 0; i < ctx->queues; i++) {
		err = mlx5dr_send_queue_open(ctx, &ctx->send_queue[i], queue_size);
		if (err)
			goto close_send_queues;
	}

	return 0;

close_send_queues:
	__mlx5dr_send_queues_close(ctx, i);

	simple_free(ctx->send_queue);

free_bwc_locks:
	mlx5dr_send_queues_bwc_locks_destroy(ctx);

	return err;
}

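/* Drain a send queue: post any staged dependent WQEs or, if there are
 * none, request a completion on the last posted WQE. For the SYNC action
 * the CQs are then polled until the queue is completely empty.
 */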
int mlx5dr_send_queue_action(struct mlx5dr_context *ctx,
			     uint16_t queue_id,
			     uint32_t actions)
{
	struct mlx5dr_send_ring_sq *send_sq;
	struct mlx5dr_send_engine *queue;
	bool wait_comp = false;
	int64_t polled = 0;

	queue = &ctx->send_queue[queue_id];
	send_sq = &queue->send_ring->send_sq;

	switch (actions) {
	case MLX5DR_SEND_QUEUE_ACTION_DRAIN_SYNC:
		wait_comp = true;
		/* FALLTHROUGH */
	case MLX5DR_SEND_QUEUE_ACTION_DRAIN_ASYNC:
		if (send_sq->head_dep_idx != send_sq->tail_dep_idx)
			/* Send dependent WQEs to drain the queue */
			mlx5dr_send_all_dep_wqe(queue);
		else
			/* Signal on the last posted WQE */
			mlx5dr_send_engine_flush_queue(queue);

		/* Poll queue until empty */
		while (wait_comp && !mlx5dr_send_engine_empty(queue))
			mlx5dr_send_engine_poll_cqs(queue, NULL, &polled, 0);

		break;
	default:
		rte_errno = EINVAL;
		return -rte_errno;
	}

	return 0;
}