/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#ifndef MLX5DR_SEND_H_
#define MLX5DR_SEND_H_

#define MLX5DR_NUM_SEND_RINGS 1

/* A single operation requires at least two WQEBBs, which means
 * a maximum of 16 such operations per rule.
 */
#define MAX_WQES_PER_RULE 32
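
/*
 * Worked arithmetic (illustrative only, not used by the code): with at
 * least two WQEBBs consumed per operation, the bound above allows
 * MAX_WQES_PER_RULE / 2 = 32 / 2 = 16 operations per rule. A hedged
 * compile-time sketch of that relation:
 *
 *	RTE_BUILD_BUG_ON(MAX_WQES_PER_RULE / 2 != 16);
 */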

/* WQE Control segment. */
struct mlx5dr_wqe_ctrl_seg {
	__be32 opmod_idx_opcode;
	__be32 qpn_ds;
	__be32 flags;
	__be32 imm;
};

enum mlx5dr_wqe_opcode {
	MLX5DR_WQE_OPCODE_TBL_ACCESS = 0x2c,
};

enum mlx5dr_wqe_opmod {
	MLX5DR_WQE_OPMOD_GTA_STE = 0,
	MLX5DR_WQE_OPMOD_GTA_MOD_ARG = 1,
};

enum mlx5dr_wqe_gta_opcode {
	MLX5DR_WQE_GTA_OP_ACTIVATE = 0,
	MLX5DR_WQE_GTA_OP_DEACTIVATE = 1,
};

enum mlx5dr_wqe_gta_opmod {
	MLX5DR_WQE_GTA_OPMOD_STE = 0,
	MLX5DR_WQE_GTA_OPMOD_MOD_ARG = 1,
};

enum mlx5dr_wqe_gta_sz {
	MLX5DR_WQE_SZ_GTA_CTRL = 48,
	MLX5DR_WQE_SZ_GTA_DATA = 64,
};
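
/*
 * Size sketch (hedged): together with the 16 byte mlx5dr_wqe_ctrl_seg,
 * one GTA control segment plus one GTA data segment fills exactly the
 * two 64 byte WQEBBs mentioned above:
 *
 *	sizeof(struct mlx5dr_wqe_ctrl_seg) +
 *		MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA
 *		== 16 + 48 + 64 == 128 == 2 * 64
 */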

struct mlx5dr_wqe_gta_ctrl_seg {
	__be32 op_dirix;
	__be32 stc_ix[5];
	__be32 rsvd0[6];
};

struct mlx5dr_wqe_gta_data_seg_ste {
	__be32 rsvd0_ctr_id;
	__be32 rsvd1_definer;
	__be32 rsvd2[3];
	union {
		struct {
			__be32 action[3];
			__be32 tag[8];
		};
		__be32 jumbo[11];
	};
};

struct mlx5dr_wqe_gta_data_seg_arg {
	__be32 action_args[8];
};

struct mlx5dr_wqe_gta {
	struct mlx5dr_wqe_gta_ctrl_seg gta_ctrl;
	union {
		struct mlx5dr_wqe_gta_data_seg_ste seg_ste;
		struct mlx5dr_wqe_gta_data_seg_arg seg_arg;
	};
};

struct mlx5dr_send_ring_cq {
	uint8_t *buf;
	uint32_t cons_index;
	uint32_t ncqe_mask;
	uint32_t buf_sz;
	uint32_t ncqe;
	uint32_t cqe_log_sz;
	__be32 *db;
	uint16_t poll_wqe;
	struct ibv_cq *ibv_cq;
	uint32_t cqn;
	uint32_t cqe_sz;
};

struct mlx5dr_send_ring_priv {
	struct mlx5dr_rule *rule;
	void *user_data;
	uint32_t num_wqebbs;
	uint32_t id;
	uint32_t retry_id;
	uint32_t *used_id;
};

struct mlx5dr_send_ring_dep_wqe {
	struct mlx5dr_wqe_gta_ctrl_seg wqe_ctrl;
	struct mlx5dr_wqe_gta_data_seg_ste wqe_data;
	struct mlx5dr_rule *rule;
	uint32_t rtc_0;
	uint32_t rtc_1;
	uint32_t retry_rtc_0;
	uint32_t retry_rtc_1;
	uint32_t direct_index;
	void *user_data;
};

struct mlx5dr_send_ring_sq {
	char *buf;
	uint32_t sqn;
	__be32 *db;
	uint16_t cur_post;
	uint16_t buf_mask;
	struct mlx5dr_send_ring_priv *wr_priv;
	unsigned int last_idx;
	struct mlx5dr_send_ring_dep_wqe *dep_wqe;
	unsigned int head_dep_idx;
	unsigned int tail_dep_idx;
	struct mlx5dr_devx_obj *obj;
	struct mlx5dv_devx_umem *buf_umem;
	struct mlx5dv_devx_umem *db_umem;
};

struct mlx5dr_send_ring {
	struct mlx5dr_send_ring_cq send_cq;
	struct mlx5dr_send_ring_sq send_sq;
};

struct mlx5dr_completed_poll_entry {
	void *user_data;
	enum rte_flow_op_status status;
};

struct mlx5dr_completed_poll {
	struct mlx5dr_completed_poll_entry *entries;
	uint16_t ci;
	uint16_t pi;
	uint16_t mask;
};

struct __rte_cache_aligned mlx5dr_send_engine {
	struct mlx5dr_send_ring send_ring[MLX5DR_NUM_SEND_RINGS]; /* For now 1:1 mapping */
	struct mlx5dv_devx_uar *uar; /* UAR is shared between rings of a queue */
	struct mlx5dr_completed_poll completed;
	uint16_t used_entries;
	uint16_t th_entries;
	uint16_t rings;
	uint16_t num_entries;
	bool err;
};

struct mlx5dr_send_engine_post_ctrl {
	struct mlx5dr_send_engine *queue;
	struct mlx5dr_send_ring *send_ring;
	size_t num_wqebbs;
};

struct mlx5dr_send_engine_post_attr {
	uint8_t opcode;
	uint8_t opmod;
	uint8_t notify_hw;
	uint8_t fence;
	uint8_t match_definer_id;
	uint8_t range_definer_id;
	size_t len;
	struct mlx5dr_rule *rule;
	uint32_t id;
	uint32_t retry_id;
	uint32_t *used_id;
	void *user_data;
};

struct mlx5dr_send_ste_attr {
	/* rtc / retry_rtc / used_id_rtc override send_attr */
	uint32_t rtc_0;
	uint32_t rtc_1;
	uint32_t retry_rtc_0;
	uint32_t retry_rtc_1;
	uint32_t *used_id_rtc_0;
	uint32_t *used_id_rtc_1;
	bool wqe_tag_is_jumbo;
	uint8_t gta_opcode;
	uint32_t direct_index;
	struct mlx5dr_send_engine_post_attr send_attr;
	struct mlx5dr_rule_match_tag *wqe_tag;
	struct mlx5dr_rule_match_tag *range_wqe_tag;
	struct mlx5dr_wqe_gta_ctrl_seg *wqe_ctrl;
	struct mlx5dr_wqe_gta_data_seg_ste *wqe_data;
	struct mlx5dr_wqe_gta_data_seg_ste *range_wqe_data;
};
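
/*
 * Filling sketch (hypothetical, for illustration only): the RTC related
 * fields override the generic ids carried in send_attr, as the comment
 * above notes. The variables queue, user_data, rtc_0_id, used_rtc_0 and
 * tag are assumed caller-side state, not part of this header.
 *
 *	struct mlx5dr_send_ste_attr ste_attr = {0};
 *
 *	ste_attr.send_attr.opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;
 *	ste_attr.send_attr.len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;
 *	ste_attr.send_attr.user_data = user_data;
 *	ste_attr.gta_opcode = MLX5DR_WQE_GTA_OP_ACTIVATE;
 *	ste_attr.rtc_0 = rtc_0_id;
 *	ste_attr.used_id_rtc_0 = &used_rtc_0;
 *	ste_attr.wqe_tag = &tag;
 *	ste_attr.wqe_tag_is_jumbo = false;
 *
 *	mlx5dr_send_ste(queue, &ste_attr);
 */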

/**
 * Provide safe 64bit store operation to mlx5 UAR region for
 * both 32bit and 64bit architectures.
 *
 * @param val
 *   Value to write in CPU endian format.
 * @param addr
 *   Address to write to.
 */
static __rte_always_inline void
mlx5dr_uar_write64_relaxed(uint64_t val, void *addr)
{
#ifdef RTE_ARCH_64
	*(uint64_t *)addr = val;
#else /* !RTE_ARCH_64 */
	*(uint32_t *)addr = val;
	rte_io_wmb();
	*((uint32_t *)addr + 1) = val >> 32;
#endif
}
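
/*
 * Usage sketch (assumption, not taken from this driver): a doorbell
 * write typically copies the first 8 bytes of the WQE control segment
 * to the UAR register page once the WQE is visible in memory. The
 * example_ring_doorbell() helper below is hypothetical.
 *
 *	static void example_ring_doorbell(struct mlx5dr_send_engine *queue,
 *					  struct mlx5dr_wqe_ctrl_seg *ctrl)
 *	{
 *		rte_compiler_barrier();
 *		mlx5dr_uar_write64_relaxed(*(uint64_t *)ctrl,
 *					   queue->uar->reg_addr);
 *	}
 */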

struct mlx5dr_send_ring_dep_wqe *
mlx5dr_send_add_new_dep_wqe(struct mlx5dr_send_engine *queue);

void mlx5dr_send_abort_new_dep_wqe(struct mlx5dr_send_engine *queue);

void mlx5dr_send_all_dep_wqe(struct mlx5dr_send_engine *queue);

void mlx5dr_send_queue_close(struct mlx5dr_send_engine *queue);

int mlx5dr_send_queue_open(struct mlx5dr_context *ctx,
			   struct mlx5dr_send_engine *queue,
			   uint16_t queue_size);

void mlx5dr_send_queues_close(struct mlx5dr_context *ctx);

int mlx5dr_send_queues_open(struct mlx5dr_context *ctx,
			    uint16_t queues,
			    uint16_t queue_size);

struct mlx5dr_send_engine_post_ctrl
mlx5dr_send_engine_post_start(struct mlx5dr_send_engine *queue);

void mlx5dr_send_engine_post_req_wqe(struct mlx5dr_send_engine_post_ctrl *ctrl,
				     char **buf, size_t *len);

void mlx5dr_send_engine_post_end(struct mlx5dr_send_engine_post_ctrl *ctrl,
				 struct mlx5dr_send_engine_post_attr *attr);

void mlx5dr_send_ste(struct mlx5dr_send_engine *queue,
		     struct mlx5dr_send_ste_attr *ste_attr);

void mlx5dr_send_stes_fw(struct mlx5dr_send_engine *queue,
			 struct mlx5dr_send_ste_attr *ste_attr);

void mlx5dr_send_engine_flush_queue(struct mlx5dr_send_engine *queue);

static inline bool mlx5dr_send_engine_empty(struct mlx5dr_send_engine *queue)
{
	struct mlx5dr_send_ring_sq *send_sq = &queue->send_ring->send_sq;
	struct mlx5dr_send_ring_cq *send_cq = &queue->send_ring->send_cq;

	return ((send_sq->cur_post & send_sq->buf_mask) == send_cq->poll_wqe);
}
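
/*
 * Usage sketch (hedged): a queue can be drained by flushing pending
 * work and polling completions until the SQ producer index catches up
 * with the CQ poll position. The completion-poll helper named here is
 * hypothetical; this header only declares the emptiness check itself.
 *
 *	mlx5dr_send_engine_flush_queue(queue);
 *	while (!mlx5dr_send_engine_empty(queue))
 *		example_poll_send_completions(queue);
 */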

static inline bool mlx5dr_send_engine_full(struct mlx5dr_send_engine *queue)
{
	return queue->used_entries >= queue->th_entries;
}

static inline void mlx5dr_send_engine_inc_rule(struct mlx5dr_send_engine *queue)
{
	queue->used_entries++;
}

static inline void mlx5dr_send_engine_dec_rule(struct mlx5dr_send_engine *queue)
{
	queue->used_entries--;
}

static inline void mlx5dr_send_engine_gen_comp(struct mlx5dr_send_engine *queue,
					       void *user_data,
					       int comp_status)
{
	struct mlx5dr_completed_poll *comp = &queue->completed;

	comp->entries[comp->pi].status = comp_status;
	comp->entries[comp->pi].user_data = user_data;

	comp->pi = (comp->pi + 1) & comp->mask;
}
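
/*
 * Consumer-side sketch (assumption): entries produced by
 * mlx5dr_send_engine_gen_comp() are drained from the same software
 * ring by advancing the consumer index with the same mask, mirroring
 * the producer update above. The res[] array and res_nb bound are
 * hypothetical caller-side storage.
 *
 *	struct mlx5dr_completed_poll *comp = &queue->completed;
 *	size_t i = 0;
 *
 *	while (comp->ci != comp->pi && i < res_nb) {
 *		res[i].user_data = comp->entries[comp->ci].user_data;
 *		res[i].status = comp->entries[comp->ci].status;
 *		comp->ci = (comp->ci + 1) & comp->mask;
 *		i++;
 *	}
 */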

static inline bool mlx5dr_send_engine_err(struct mlx5dr_send_engine *queue)
{
	return queue->err;
}

#endif /* MLX5DR_SEND_H_ */