/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include "mlx5dr_internal.h"

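/* Decide whether an FDB rule should skip the RX and/or TX side.
 * The matcher flow source hint takes precedence; otherwise the decision is
 * based on the REPRESENTED_PORT item: matching the wire port keeps only the
 * RX side, matching any other vport keeps only the TX side.
 */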
static void mlx5dr_rule_skip(struct mlx5dr_matcher *matcher,
			     struct mlx5dr_match_template *mt,
			     const struct rte_flow_item *items,
			     bool *skip_rx, bool *skip_tx)
{
	const struct flow_hw_port_info *vport;
	const struct rte_flow_item_ethdev *v;

	/* Flow_src is the 1st priority */
	if (matcher->attr.optimize_flow_src) {
		*skip_tx = matcher->attr.optimize_flow_src == MLX5DR_MATCHER_FLOW_SRC_WIRE;
		*skip_rx = matcher->attr.optimize_flow_src == MLX5DR_MATCHER_FLOW_SRC_VPORT;
		return;
	}

	/* By default FDB rules are added to both RX and TX */
	*skip_rx = false;
	*skip_tx = false;

	if (mt->item_flags & MLX5_FLOW_ITEM_REPRESENTED_PORT) {
		v = items[mt->vport_item_id].spec;
		vport = flow_hw_conv_port_id(v->port_id);
		if (unlikely(!vport)) {
			DR_LOG(ERR, "Failed to map port ID %d, ignoring", v->port_id);
			return;
		}

		if (!vport->is_wire)
			/* Match vport ID is not WIRE -> Skip RX */
			*skip_rx = true;
		else
			/* Match vport ID is WIRE -> Skip TX */
			*skip_tx = true;
	}
}

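/* Fill the dependent WQE with the rule and user data and resolve which match
 * RTC (and optional collision matcher retry RTC) each direction should use.
 * For FDB tables, directions that mlx5dr_rule_skip() marks as skipped get a
 * zero RTC ID.
 */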
static void mlx5dr_rule_init_dep_wqe(struct mlx5dr_send_ring_dep_wqe *dep_wqe,
				     struct mlx5dr_rule *rule,
				     const struct rte_flow_item *items,
				     struct mlx5dr_match_template *mt,
				     void *user_data)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_table *tbl = matcher->tbl;
	bool skip_rx, skip_tx;

	dep_wqe->rule = rule;
	dep_wqe->user_data = user_data;

	switch (tbl->type) {
	case MLX5DR_TABLE_TYPE_NIC_RX:
	case MLX5DR_TABLE_TYPE_NIC_TX:
		dep_wqe->rtc_0 = matcher->match_ste.rtc_0->id;
		dep_wqe->retry_rtc_0 = matcher->col_matcher ?
				       matcher->col_matcher->match_ste.rtc_0->id : 0;
		dep_wqe->rtc_1 = 0;
		dep_wqe->retry_rtc_1 = 0;
		break;

	case MLX5DR_TABLE_TYPE_FDB:
		mlx5dr_rule_skip(matcher, mt, items, &skip_rx, &skip_tx);

		if (!skip_rx) {
			dep_wqe->rtc_0 = matcher->match_ste.rtc_0->id;
			dep_wqe->retry_rtc_0 = matcher->col_matcher ?
					       matcher->col_matcher->match_ste.rtc_0->id : 0;
		} else {
			dep_wqe->rtc_0 = 0;
			dep_wqe->retry_rtc_0 = 0;
		}

		if (!skip_tx) {
			dep_wqe->rtc_1 = matcher->match_ste.rtc_1->id;
			dep_wqe->retry_rtc_1 = matcher->col_matcher ?
					       matcher->col_matcher->match_ste.rtc_1->id : 0;
		} else {
			dep_wqe->rtc_1 = 0;
			dep_wqe->retry_rtc_1 = 0;
		}

		break;

	default:
		assert(false);
		break;
	}
}

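/* Set the rule status and generate a completion on the send engine for rules
 * that are completed without waiting on HW (root table rules and failed-rule
 * cleanup).
 */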
static void mlx5dr_rule_gen_comp(struct mlx5dr_send_engine *queue,
				 struct mlx5dr_rule *rule,
				 bool err,
				 void *user_data,
				 enum mlx5dr_rule_status rule_status_on_succ)
{
	enum rte_flow_op_status comp_status;

	if (!err) {
		comp_status = RTE_FLOW_OP_SUCCESS;
		rule->status = rule_status_on_succ;
	} else {
		comp_status = RTE_FLOW_OP_ERROR;
		rule->status = MLX5DR_RULE_STATUS_FAILED;
	}

	mlx5dr_send_engine_inc_rule(queue);
	mlx5dr_send_engine_gen_comp(queue, user_data, comp_status);
}

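/* Keep a copy of the match TAG (and, for FW WQE matchers, the definer IDs and
 * the optional range TAG) on the rule, so the same entry can be addressed
 * again when the rule is deleted.
 */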
static void
mlx5dr_rule_save_delete_info(struct mlx5dr_rule *rule,
			     struct mlx5dr_send_ste_attr *ste_attr)
{
	if (unlikely(mlx5dr_matcher_req_fw_wqe(rule->matcher))) {
		uint8_t *src_tag;

		/* Save match definer id and tag for delete */
		rule->tag_ptr = simple_calloc(2, sizeof(*rule->tag_ptr));
		assert(rule->tag_ptr);

		src_tag = (uint8_t *)ste_attr->wqe_data->tag;
		memcpy(rule->tag_ptr[0].match, src_tag, MLX5DR_MATCH_TAG_SZ);
		rule->tag_ptr[1].reserved[0] = ste_attr->send_attr.match_definer_id;

		/* Save range definer id and tag for delete */
		if (ste_attr->range_wqe_data) {
			src_tag = (uint8_t *)ste_attr->range_wqe_data->tag;
			memcpy(rule->tag_ptr[1].match, src_tag, MLX5DR_MATCH_TAG_SZ);
			rule->tag_ptr[1].reserved[1] = ste_attr->send_attr.range_definer_id;
		}
		return;
	}

	if (ste_attr->wqe_tag_is_jumbo)
		memcpy(rule->tag.jumbo, ste_attr->wqe_data->jumbo, MLX5DR_JUMBO_TAG_SZ);
	else
		memcpy(rule->tag.match, ste_attr->wqe_data->tag, MLX5DR_MATCH_TAG_SZ);
}

static void
mlx5dr_rule_clear_delete_info(struct mlx5dr_rule *rule)
{
	if (unlikely(mlx5dr_matcher_req_fw_wqe(rule->matcher)))
		simple_free(rule->tag_ptr);
}

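/* Point the STE attributes at the TAG (and the definer IDs for FW WQE
 * matchers) saved at rule creation, so the delete WQE targets the same entry.
 */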
static void
mlx5dr_rule_load_delete_info(struct mlx5dr_rule *rule,
			     struct mlx5dr_send_ste_attr *ste_attr)
{
	if (unlikely(mlx5dr_matcher_req_fw_wqe(rule->matcher))) {
		/* Load match definer id and tag for delete */
		ste_attr->wqe_tag = &rule->tag_ptr[0];
		ste_attr->send_attr.match_definer_id = rule->tag_ptr[1].reserved[0];

		/* Load range definer id and tag for delete */
		if (rule->matcher->flags & MLX5DR_MATCHER_FLAGS_RANGE_DEFINER) {
			ste_attr->range_wqe_tag = &rule->tag_ptr[1];
			ste_attr->send_attr.range_definer_id = rule->tag_ptr[1].reserved[1];
		}
	} else {
		ste_attr->wqe_tag = &rule->tag;
	}
}

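/* Reserve the action STEs needed by the rule: the index is either derived
 * directly from the rule index when rule-index optimization or insert-by-index
 * is used, or allocated from the matcher action STE pool.
 */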
static int mlx5dr_rule_alloc_action_ste(struct mlx5dr_rule *rule,
					struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	int ret;

	/* Use rule_idx for locking optimization, otherwise allocate from pool */
	if (matcher->attr.optimize_using_rule_idx ||
	    mlx5dr_matcher_is_insert_by_idx(matcher)) {
		rule->action_ste_idx = attr->rule_idx * matcher->action_ste.max_stes;
	} else {
		struct mlx5dr_pool_chunk ste = {0};

		ste.order = rte_log2_u32(matcher->action_ste.max_stes);
		ret = mlx5dr_pool_chunk_alloc(matcher->action_ste.pool, &ste);
		if (ret) {
			DR_LOG(ERR, "Failed to allocate STE for rule actions");
			return ret;
		}
		rule->action_ste_idx = ste.offset;
	}
	return 0;
}

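/* Return the rule action STEs to the matcher pool. This is a no-op when the
 * STEs were derived from the rule index rather than allocated from the pool.
 */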
void mlx5dr_rule_free_action_ste_idx(struct mlx5dr_rule *rule)
{
	struct mlx5dr_matcher *matcher = rule->matcher;

	if (rule->action_ste_idx > -1 &&
	    !matcher->attr.optimize_using_rule_idx &&
	    !mlx5dr_matcher_is_insert_by_idx(matcher)) {
		struct mlx5dr_pool_chunk ste = {0};

		/* This release is safe only when the rule match part was deleted */
		ste.order = rte_log2_u32(matcher->action_ste.max_stes);
		ste.offset = rule->action_ste_idx;
		mlx5dr_pool_chunk_free(matcher->action_ste.pool, &ste);
	}
}

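/* Reset the rule state and set up the default send STE attributes and action
 * apply data shared by the rule creation paths.
 */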
static void mlx5dr_rule_create_init(struct mlx5dr_rule *rule,
				    struct mlx5dr_send_ste_attr *ste_attr,
				    struct mlx5dr_actions_apply_data *apply)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_table *tbl = matcher->tbl;
	struct mlx5dr_context *ctx = tbl->ctx;

	/* Init rule before reuse */
	rule->rtc_0 = 0;
	rule->rtc_1 = 0;
	rule->pending_wqes = 0;
	rule->action_ste_idx = -1;
	rule->status = MLX5DR_RULE_STATUS_CREATING;

	/* Init default send STE attributes */
	ste_attr->gta_opcode = MLX5DR_WQE_GTA_OP_ACTIVATE;
	ste_attr->send_attr.opmod = MLX5DR_WQE_GTA_OPMOD_STE;
	ste_attr->send_attr.opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;
	ste_attr->send_attr.len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;

	/* Init default action apply */
	apply->tbl_type = tbl->type;
	apply->common_res = &ctx->common_res[tbl->type];
	apply->jump_to_action_stc = matcher->action_ste.stc.offset;
	apply->require_dep = 0;
}

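/* Create a rule through FW GTA WQEs. This path is used when the matcher
 * cannot be programmed with HW GTA WQEs (e.g. range matching): it builds the
 * match (and optional range) TAGs, applies the rule actions on the last STE
 * and hands the WQEs to FW.
 */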
static int mlx5dr_rule_create_hws_fw_wqe(struct mlx5dr_rule *rule,
					 struct mlx5dr_rule_attr *attr,
					 uint8_t mt_idx,
					 const struct rte_flow_item items[],
					 uint8_t at_idx,
					 struct mlx5dr_rule_action rule_actions[])
{
	struct mlx5dr_action_template *at = &rule->matcher->at[at_idx];
	struct mlx5dr_match_template *mt = &rule->matcher->mt[mt_idx];
	struct mlx5dr_send_ring_dep_wqe range_wqe = {{0}};
	struct mlx5dr_send_ring_dep_wqe match_wqe = {{0}};
	bool is_range = mlx5dr_matcher_mt_is_range(mt);
	bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(mt);
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_context *ctx = matcher->tbl->ctx;
	struct mlx5dr_send_ste_attr ste_attr = {0};
	struct mlx5dr_actions_apply_data apply;
	struct mlx5dr_send_engine *queue;

	queue = &ctx->send_queue[attr->queue_id];
	if (unlikely(mlx5dr_send_engine_err(queue))) {
		rte_errno = EIO;
		return rte_errno;
	}

	mlx5dr_rule_create_init(rule, &ste_attr, &apply);
	mlx5dr_rule_init_dep_wqe(&match_wqe, rule, items, mt, attr->user_data);
	mlx5dr_rule_init_dep_wqe(&range_wqe, rule, items, mt, attr->user_data);

	ste_attr.direct_index = 0;
	ste_attr.rtc_0 = match_wqe.rtc_0;
	ste_attr.rtc_1 = match_wqe.rtc_1;
	ste_attr.used_id_rtc_0 = &rule->rtc_0;
	ste_attr.used_id_rtc_1 = &rule->rtc_1;
	ste_attr.retry_rtc_0 = match_wqe.retry_rtc_0;
	ste_attr.retry_rtc_1 = match_wqe.retry_rtc_1;
	ste_attr.send_attr.rule = match_wqe.rule;
	ste_attr.send_attr.user_data = match_wqe.user_data;

	ste_attr.send_attr.fence = 1;
	ste_attr.send_attr.notify_hw = 1;
	ste_attr.wqe_tag_is_jumbo = is_jumbo;

	/* Prepare match STE TAG */
	ste_attr.wqe_ctrl = &match_wqe.wqe_ctrl;
	ste_attr.wqe_data = &match_wqe.wqe_data;
	ste_attr.send_attr.match_definer_id = mlx5dr_definer_get_id(mt->definer);

	mlx5dr_definer_create_tag(items,
				  mt->fc,
				  mt->fc_sz,
				  (uint8_t *)match_wqe.wqe_data.action);

	/* Prepare range STE TAG */
	if (is_range) {
		ste_attr.range_wqe_data = &range_wqe.wqe_data;
		ste_attr.send_attr.len += MLX5DR_WQE_SZ_GTA_DATA;
		ste_attr.send_attr.range_definer_id = mlx5dr_definer_get_id(mt->range_definer);

		mlx5dr_definer_create_tag_range(items,
						mt->fcr,
						mt->fcr_sz,
						(uint8_t *)range_wqe.wqe_data.action);
	}

	/* Apply the actions on the last STE */
	apply.queue = queue;
	apply.next_direct_idx = 0;
	apply.rule_action = rule_actions;
	apply.wqe_ctrl = &match_wqe.wqe_ctrl;
	apply.wqe_data = (uint32_t *)(is_range ?
				      &range_wqe.wqe_data :
				      &match_wqe.wqe_data);

	/* Skip setters[0] used for jumbo STE since it is not supported with FW WQE */
	mlx5dr_action_apply_setter(&apply, &at->setters[1], 0);

	/* Send WQEs to FW */
	mlx5dr_send_stes_fw(queue, &ste_attr);

	/* Backup TAG on the rule for deletion */
	mlx5dr_rule_save_delete_info(rule, &ste_attr);
	mlx5dr_send_engine_inc_rule(queue);

	/* Send dependent WQEs */
	if (!attr->burst)
		mlx5dr_send_all_dep_wqe(queue);

	return 0;
}

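/* Create a rule through HW GTA WQEs: queue a dependent WQE for the match STE,
 * allocate action STEs when the action template needs them, and emit one WQE
 * per STE from the last action STE down to the match STE. The dependent WQE
 * is aborted and the match WQE sent immediately when the rule has no
 * dependencies.
 */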
static int mlx5dr_rule_create_hws(struct mlx5dr_rule *rule,
				  struct mlx5dr_rule_attr *attr,
				  uint8_t mt_idx,
				  const struct rte_flow_item items[],
				  uint8_t at_idx,
				  struct mlx5dr_rule_action rule_actions[])
{
	struct mlx5dr_action_template *at = &rule->matcher->at[at_idx];
	struct mlx5dr_match_template *mt = &rule->matcher->mt[mt_idx];
	bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(mt);
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_context *ctx = matcher->tbl->ctx;
	struct mlx5dr_send_ste_attr ste_attr = {0};
	struct mlx5dr_send_ring_dep_wqe *dep_wqe;
	struct mlx5dr_actions_wqe_setter *setter;
	struct mlx5dr_actions_apply_data apply;
	struct mlx5dr_send_engine *queue;
	uint8_t total_stes, action_stes;
	int i, ret;

	/* Insert the rule using a FW WQE if a GTA WQE cannot be used */
	if (unlikely(mlx5dr_matcher_req_fw_wqe(matcher)))
		return mlx5dr_rule_create_hws_fw_wqe(rule, attr, mt_idx, items,
						     at_idx, rule_actions);

	queue = &ctx->send_queue[attr->queue_id];
	if (unlikely(mlx5dr_send_engine_err(queue))) {
		rte_errno = EIO;
		return rte_errno;
	}

	mlx5dr_rule_create_init(rule, &ste_attr, &apply);

	/* Allocate a dependent match WQE since the rule might have dependent writes.
	 * The queued dependent WQE can later be aborted or kept as a dependency.
	 * The dep_wqe buffers (ctrl, data) are also reused for all STE writes.
	 */
	dep_wqe = mlx5dr_send_add_new_dep_wqe(queue);
	mlx5dr_rule_init_dep_wqe(dep_wqe, rule, items, mt, attr->user_data);

	ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
	ste_attr.wqe_data = &dep_wqe->wqe_data;
	apply.wqe_ctrl = &dep_wqe->wqe_ctrl;
	apply.wqe_data = (uint32_t *)&dep_wqe->wqe_data;
	apply.rule_action = rule_actions;
	apply.queue = queue;

	setter = &at->setters[at->num_of_action_stes];
	total_stes = at->num_of_action_stes + (is_jumbo && !at->only_term);
	action_stes = total_stes - 1;

	if (action_stes) {
		/* Allocate action STEs for complex rules */
		ret = mlx5dr_rule_alloc_action_ste(rule, attr);
		if (ret) {
			DR_LOG(ERR, "Failed to allocate action memory %d", ret);
			mlx5dr_send_abort_new_dep_wqe(queue);
			return ret;
		}
		/* Skip RX/TX based on the dep_wqe init */
		ste_attr.rtc_0 = dep_wqe->rtc_0 ? matcher->action_ste.rtc_0->id : 0;
		ste_attr.rtc_1 = dep_wqe->rtc_1 ? matcher->action_ste.rtc_1->id : 0;
		/* Action STEs are written to a specific index last to first */
		ste_attr.direct_index = rule->action_ste_idx + action_stes;
		apply.next_direct_idx = ste_attr.direct_index;
	} else {
		apply.next_direct_idx = 0;
	}

	for (i = total_stes; i-- > 0;) {
		mlx5dr_action_apply_setter(&apply, setter--, !i && is_jumbo);

		if (i == 0) {
			/* Handle last match STE.
			 * For hash split / linear lookup RTCs, packets reaching any STE
			 * will always match and perform the specified actions, which
			 * makes the tag irrelevant.
			 */
			if (likely(!mlx5dr_matcher_is_insert_by_idx(matcher)))
				mlx5dr_definer_create_tag(items, mt->fc, mt->fc_sz,
							  (uint8_t *)dep_wqe->wqe_data.action);

			/* Rule has dependent WQEs, match dep_wqe is queued */
			if (action_stes || apply.require_dep)
				break;

			/* Rule has no dependencies, abort dep_wqe and send WQE now */
			mlx5dr_send_abort_new_dep_wqe(queue);
			ste_attr.wqe_tag_is_jumbo = is_jumbo;
			ste_attr.send_attr.notify_hw = !attr->burst;
			ste_attr.send_attr.user_data = dep_wqe->user_data;
			ste_attr.send_attr.rule = dep_wqe->rule;
			ste_attr.rtc_0 = dep_wqe->rtc_0;
			ste_attr.rtc_1 = dep_wqe->rtc_1;
			ste_attr.used_id_rtc_0 = &rule->rtc_0;
			ste_attr.used_id_rtc_1 = &rule->rtc_1;
			ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
			ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
			ste_attr.direct_index = mlx5dr_matcher_is_insert_by_idx(matcher) ?
						attr->rule_idx : 0;
		} else {
			apply.next_direct_idx = --ste_attr.direct_index;
		}

		mlx5dr_send_ste(queue, &ste_attr);
	}

	/* Backup TAG on the rule for deletion */
	mlx5dr_rule_save_delete_info(rule, &ste_attr);
	mlx5dr_send_engine_inc_rule(queue);

	/* Send dependent WQEs */
	if (!attr->burst)
		mlx5dr_send_all_dep_wqe(queue);

	return 0;
}

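/* Complete the destruction of a rule that failed insertion: generate the
 * deletion completion, release the action STEs and the saved delete info,
 * and, for non-burst requests, flush the queue since nothing was written to
 * the WQ.
 */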
static void mlx5dr_rule_destroy_failed_hws(struct mlx5dr_rule *rule,
					   struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	struct mlx5dr_send_engine *queue;

	queue = &ctx->send_queue[attr->queue_id];

	mlx5dr_rule_gen_comp(queue, rule, false,
			     attr->user_data, MLX5DR_RULE_STATUS_DELETED);

	/* Rule failed, now we can safely release action STEs */
	mlx5dr_rule_free_action_ste_idx(rule);

	/* Clear complex tag */
	mlx5dr_rule_clear_delete_info(rule);

	/* A failed insertion writes nothing to the WQ, so there is nothing to
	 * ring the HW for. If the rule was not flagged as burst (HW must be
	 * triggered), update the last WQE and ring the HW with that work.
	 */
	if (attr->burst)
		return;

	mlx5dr_send_all_dep_wqe(queue);
	mlx5dr_send_engine_flush_queue(queue);
}

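/* Delete a HWS rule by sending a deactivating GTA WQE against the TAG and RTC
 * saved at creation. Rules still being created return EBUSY; rules that
 * failed insertion, or whose queue is in an error state, are completed
 * through the failed-destroy path instead.
 */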
static int mlx5dr_rule_destroy_hws(struct mlx5dr_rule *rule,
				   struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	struct mlx5dr_matcher *matcher = rule->matcher;
	bool fw_wqe = mlx5dr_matcher_req_fw_wqe(matcher);
	bool is_range = mlx5dr_matcher_mt_is_range(matcher->mt);
	bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(matcher->mt);
	struct mlx5dr_wqe_gta_ctrl_seg wqe_ctrl = {0};
	struct mlx5dr_send_ste_attr ste_attr = {0};
	struct mlx5dr_send_engine *queue;

	queue = &ctx->send_queue[attr->queue_id];

	/* Rule is not completed yet */
	if (rule->status == MLX5DR_RULE_STATUS_CREATING) {
		rte_errno = EBUSY;
		return rte_errno;
	}

	/* Rule failed and doesn't require cleanup */
	if (rule->status == MLX5DR_RULE_STATUS_FAILED) {
		mlx5dr_rule_destroy_failed_hws(rule, attr);
		return 0;
	}

	if (unlikely(mlx5dr_send_engine_err(queue))) {
		mlx5dr_rule_destroy_failed_hws(rule, attr);
		return 0;
	}

	mlx5dr_send_engine_inc_rule(queue);

	/* Send dependent WQE */
	if (!attr->burst)
		mlx5dr_send_all_dep_wqe(queue);

	rule->status = MLX5DR_RULE_STATUS_DELETING;

	ste_attr.send_attr.opmod = MLX5DR_WQE_GTA_OPMOD_STE;
	ste_attr.send_attr.opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;
	ste_attr.send_attr.len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;
	if (unlikely(is_range))
		ste_attr.send_attr.len += MLX5DR_WQE_SZ_GTA_DATA;

	ste_attr.send_attr.rule = rule;
	ste_attr.send_attr.notify_hw = !attr->burst;
	ste_attr.send_attr.user_data = attr->user_data;

	ste_attr.rtc_0 = rule->rtc_0;
	ste_attr.rtc_1 = rule->rtc_1;
	ste_attr.used_id_rtc_0 = &rule->rtc_0;
	ste_attr.used_id_rtc_1 = &rule->rtc_1;
	ste_attr.wqe_ctrl = &wqe_ctrl;
	ste_attr.wqe_tag_is_jumbo = is_jumbo;
	ste_attr.gta_opcode = MLX5DR_WQE_GTA_OP_DEACTIVATE;
	if (unlikely(mlx5dr_matcher_is_insert_by_idx(matcher)))
		ste_attr.direct_index = attr->rule_idx;

	mlx5dr_rule_load_delete_info(rule, &ste_attr);

	if (unlikely(fw_wqe)) {
		mlx5dr_send_stes_fw(queue, &ste_attr);
		mlx5dr_rule_clear_delete_info(rule);
	} else {
		mlx5dr_send_ste(queue, &ste_attr);
	}

	return 0;
}

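/* Create a rule on the root table through the rdma-core path: translate the
 * items to PRM match values, build the DV flow action attributes, create the
 * flow with mlx5dv and generate a completion immediately.
 */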
static int mlx5dr_rule_create_root(struct mlx5dr_rule *rule,
				   struct mlx5dr_rule_attr *rule_attr,
				   const struct rte_flow_item items[],
				   uint8_t at_idx,
				   struct mlx5dr_rule_action rule_actions[])
{
	struct mlx5dv_flow_matcher *dv_matcher = rule->matcher->dv_matcher;
	uint8_t num_actions = rule->matcher->at[at_idx].num_actions;
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	struct mlx5dv_flow_match_parameters *value;
	struct mlx5_flow_attr flow_attr = {0};
	struct mlx5dv_flow_action_attr *attr;
	struct rte_flow_error error;
	uint8_t match_criteria;
	int ret;

	attr = simple_calloc(num_actions, sizeof(*attr));
	if (!attr) {
		rte_errno = ENOMEM;
		return rte_errno;
	}

	value = simple_calloc(1, MLX5_ST_SZ_BYTES(fte_match_param) +
			      offsetof(struct mlx5dv_flow_match_parameters, match_buf));
	if (!value) {
		rte_errno = ENOMEM;
		goto free_attr;
	}

	flow_attr.tbl_type = rule->matcher->tbl->type;

	ret = flow_dv_translate_items_hws(items, &flow_attr, value->match_buf,
					  MLX5_SET_MATCHER_HS_V, NULL,
					  &match_criteria,
					  &error);
	if (ret) {
		DR_LOG(ERR, "Failed to convert items to PRM [%s]", error.message);
		goto free_value;
	}

	/* Convert actions to verb action attr */
	ret = mlx5dr_action_root_build_attr(rule_actions, num_actions, attr);
	if (ret)
		goto free_value;

	/* Create verb flow */
	value->match_sz = MLX5_ST_SZ_BYTES(fte_match_param);
	rule->flow = mlx5_glue->dv_create_flow_root(dv_matcher,
						    value,
						    num_actions,
						    attr);

	mlx5dr_rule_gen_comp(&ctx->send_queue[rule_attr->queue_id], rule, !rule->flow,
			     rule_attr->user_data, MLX5DR_RULE_STATUS_CREATED);

	simple_free(value);
	simple_free(attr);

	return 0;

free_value:
	simple_free(value);
free_attr:
	simple_free(attr);

	return -rte_errno;
}

static int mlx5dr_rule_destroy_root(struct mlx5dr_rule *rule,
				    struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	int err = 0;

	if (rule->flow)
		err = ibv_destroy_flow(rule->flow);

	mlx5dr_rule_gen_comp(&ctx->send_queue[attr->queue_id], rule, err,
			     attr->user_data, MLX5DR_RULE_STATUS_DELETED);

	return 0;
}

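/* Public entry point for rule creation. Validates that user_data is set and
 * that the queue has room, then dispatches to the root or HWS path based on
 * the matcher table type. Completion is reported through the queue.
 */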
int mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
		       uint8_t mt_idx,
		       const struct rte_flow_item items[],
		       uint8_t at_idx,
		       struct mlx5dr_rule_action rule_actions[],
		       struct mlx5dr_rule_attr *attr,
		       struct mlx5dr_rule *rule_handle)
{
	struct mlx5dr_context *ctx;
	int ret;

	rule_handle->matcher = matcher;
	ctx = matcher->tbl->ctx;

	if (unlikely(!attr->user_data)) {
		rte_errno = EINVAL;
		return -rte_errno;
	}

	/* Check if there is room in queue */
	if (unlikely(mlx5dr_send_engine_full(&ctx->send_queue[attr->queue_id]))) {
		rte_errno = EBUSY;
		return -rte_errno;
	}

	assert(matcher->num_of_mt >= mt_idx);
	assert(matcher->num_of_at >= at_idx);

	if (unlikely(mlx5dr_table_is_root(matcher->tbl)))
		ret = mlx5dr_rule_create_root(rule_handle,
					      attr,
					      items,
					      at_idx,
					      rule_actions);
	else
		ret = mlx5dr_rule_create_hws(rule_handle,
					     attr,
					     mt_idx,
					     items,
					     at_idx,
					     rule_actions);
	return -ret;
}

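/* Public entry point for rule destruction. Mirrors mlx5dr_rule_create():
 * validates user_data and queue room, then dispatches to the root or HWS
 * deletion path.
 */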
int mlx5dr_rule_destroy(struct mlx5dr_rule *rule,
			struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	int ret;

	if (unlikely(!attr->user_data)) {
		rte_errno = EINVAL;
		return -rte_errno;
	}

	/* Check if there is room in queue */
	if (unlikely(mlx5dr_send_engine_full(&ctx->send_queue[attr->queue_id]))) {
		rte_errno = EBUSY;
		return -rte_errno;
	}

	if (unlikely(mlx5dr_table_is_root(rule->matcher->tbl)))
		ret = mlx5dr_rule_destroy_root(rule, attr);
	else
		ret = mlx5dr_rule_destroy_hws(rule, attr);

	return -ret;
}

size_t mlx5dr_rule_get_handle_size(void)
{
	return sizeof(struct mlx5dr_rule);
}