/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include "mlx5dr_internal.h"

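/* Decide whether an FDB rule should skip the RX or TX side.
 * The matcher flow_src attribute takes precedence; otherwise the decision
 * is based on the REPRESENTED_PORT item: matching on the wire port skips
 * TX, matching on a vport skips RX. By default the rule is added to both.
 */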
static void mlx5dr_rule_skip(struct mlx5dr_matcher *matcher,
			     struct mlx5dr_match_template *mt,
			     const struct rte_flow_item *items,
			     bool *skip_rx, bool *skip_tx)
{
	const struct flow_hw_port_info *vport;
	const struct rte_flow_item_ethdev *v;

	/* Flow_src is the 1st priority */
	if (matcher->attr.optimize_flow_src) {
		*skip_tx = matcher->attr.optimize_flow_src == MLX5DR_MATCHER_FLOW_SRC_WIRE;
		*skip_rx = matcher->attr.optimize_flow_src == MLX5DR_MATCHER_FLOW_SRC_VPORT;
		return;
	}

	/* By default FDB rules are added to both RX and TX */
	*skip_rx = false;
	*skip_tx = false;

	if (mt->item_flags & MLX5_FLOW_ITEM_REPRESENTED_PORT) {
		v = items[mt->vport_item_id].spec;
		vport = flow_hw_conv_port_id(v->port_id);
		if (unlikely(!vport)) {
			DR_LOG(ERR, "Fail to map port ID %d, ignoring", v->port_id);
			return;
		}

		if (!vport->is_wire)
			/* Match vport ID is not WIRE -> Skip RX */
			*skip_rx = true;
		else
			/* Match vport ID is WIRE -> Skip TX */
			*skip_tx = true;
	}
}

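/* Copy the match tag previously saved on the rule into the WQE data
 * segment. Used on rule update, where no flow items are provided.
 */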
static void
mlx5dr_rule_update_copy_tag(struct mlx5dr_rule *rule,
			    struct mlx5dr_wqe_gta_data_seg_ste *wqe_data,
			    bool is_jumbo)
{
	if (is_jumbo)
		memcpy(wqe_data->jumbo, rule->tag.jumbo, MLX5DR_JUMBO_TAG_SZ);
	else
		memcpy(wqe_data->tag, rule->tag.match, MLX5DR_MATCH_TAG_SZ);
}

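/* Fill the dependent WQE with the RTC IDs the rule should be written to.
 * On rule update (no items) the RTCs already attached to the rule are
 * reused. Otherwise the match and collision (retry) RTCs are taken from
 * the matcher, skipping the RX or TX side of FDB tables when possible.
 */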
static void mlx5dr_rule_init_dep_wqe(struct mlx5dr_send_ring_dep_wqe *dep_wqe,
				     struct mlx5dr_rule *rule,
				     const struct rte_flow_item *items,
				     struct mlx5dr_match_template *mt,
				     void *user_data)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_table *tbl = matcher->tbl;
	bool skip_rx, skip_tx;

	dep_wqe->rule = rule;
	dep_wqe->user_data = user_data;

	if (!items) { /* rule update */
		dep_wqe->rtc_0 = rule->rtc_0;
		dep_wqe->rtc_1 = rule->rtc_1;
		dep_wqe->retry_rtc_1 = 0;
		dep_wqe->retry_rtc_0 = 0;
		return;
	}

	switch (tbl->type) {
	case MLX5DR_TABLE_TYPE_NIC_RX:
	case MLX5DR_TABLE_TYPE_NIC_TX:
		dep_wqe->rtc_0 = matcher->match_ste.rtc_0->id;
		dep_wqe->retry_rtc_0 = matcher->col_matcher ?
				       matcher->col_matcher->match_ste.rtc_0->id : 0;
		dep_wqe->rtc_1 = 0;
		dep_wqe->retry_rtc_1 = 0;
		break;

	case MLX5DR_TABLE_TYPE_FDB:
		mlx5dr_rule_skip(matcher, mt, items, &skip_rx, &skip_tx);

		if (!skip_rx) {
			dep_wqe->rtc_0 = matcher->match_ste.rtc_0->id;
			dep_wqe->retry_rtc_0 = matcher->col_matcher ?
					       matcher->col_matcher->match_ste.rtc_0->id : 0;
		} else {
			dep_wqe->rtc_0 = 0;
			dep_wqe->retry_rtc_0 = 0;
		}

		if (!skip_tx) {
			dep_wqe->rtc_1 = matcher->match_ste.rtc_1->id;
			dep_wqe->retry_rtc_1 = matcher->col_matcher ?
					       matcher->col_matcher->match_ste.rtc_1->id : 0;
		} else {
			dep_wqe->rtc_1 = 0;
			dep_wqe->retry_rtc_1 = 0;
		}

		break;

	default:
		assert(false);
		break;
	}
}

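/* Set the rule status and generate a completion on the queue for
 * operations that are not completed by HW, such as root table rules or
 * rules destroyed after a failure.
 */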
static void mlx5dr_rule_gen_comp(struct mlx5dr_send_engine *queue,
				 struct mlx5dr_rule *rule,
				 bool err,
				 void *user_data,
				 enum mlx5dr_rule_status rule_status_on_succ)
{
	enum rte_flow_op_status comp_status;

	if (!err) {
		comp_status = RTE_FLOW_OP_SUCCESS;
		rule->status = rule_status_on_succ;
	} else {
		comp_status = RTE_FLOW_OP_ERROR;
		rule->status = MLX5DR_RULE_STATUS_FAILED;
	}

	mlx5dr_send_engine_inc_rule(queue);
	mlx5dr_send_engine_gen_comp(queue, user_data, comp_status);
}

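/* Save the match tag on the rule so it can later be deleted without the
 * original flow items. For FW WQE matchers the definer IDs and the
 * optional range tag are saved as well.
 */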
static void
mlx5dr_rule_save_delete_info(struct mlx5dr_rule *rule,
			     struct mlx5dr_send_ste_attr *ste_attr)
{
	struct mlx5dr_match_template *mt = rule->matcher->mt;
	bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(mt);

	if (unlikely(mlx5dr_matcher_req_fw_wqe(rule->matcher))) {
		uint8_t *src_tag;

		/* Save match definer id and tag for delete */
		rule->tag_ptr = simple_calloc(2, sizeof(*rule->tag_ptr));
		assert(rule->tag_ptr);

		src_tag = (uint8_t *)ste_attr->wqe_data->tag;
		memcpy(rule->tag_ptr[0].match, src_tag, MLX5DR_MATCH_TAG_SZ);
		rule->tag_ptr[1].reserved[0] = ste_attr->send_attr.match_definer_id;

		/* Save range definer id and tag for delete */
		if (ste_attr->range_wqe_data) {
			src_tag = (uint8_t *)ste_attr->range_wqe_data->tag;
			memcpy(rule->tag_ptr[1].match, src_tag, MLX5DR_MATCH_TAG_SZ);
			rule->tag_ptr[1].reserved[1] = ste_attr->send_attr.range_definer_id;
		}
		return;
	}

	if (is_jumbo)
		memcpy(rule->tag.jumbo, ste_attr->wqe_data->jumbo, MLX5DR_JUMBO_TAG_SZ);
	else
		memcpy(rule->tag.match, ste_attr->wqe_data->tag, MLX5DR_MATCH_TAG_SZ);
}

static void
mlx5dr_rule_clear_delete_info(struct mlx5dr_rule *rule)
{
	if (unlikely(mlx5dr_matcher_req_fw_wqe(rule->matcher)))
		simple_free(rule->tag_ptr);
}

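/* Point the send attributes at the tag (and, for FW WQE matchers, the
 * definer IDs) saved at insertion time, to be used by rule deletion.
 */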
static void
mlx5dr_rule_load_delete_info(struct mlx5dr_rule *rule,
			     struct mlx5dr_send_ste_attr *ste_attr)
{
	if (unlikely(mlx5dr_matcher_req_fw_wqe(rule->matcher))) {
		/* Load match definer id and tag for delete */
		ste_attr->wqe_tag = &rule->tag_ptr[0];
		ste_attr->send_attr.match_definer_id = rule->tag_ptr[1].reserved[0];

		/* Load range definer id and tag for delete */
		if (rule->matcher->flags & MLX5DR_MATCHER_FLAGS_RANGE_DEFINER) {
			ste_attr->range_wqe_tag = &rule->tag_ptr[1];
			ste_attr->send_attr.range_definer_id = rule->tag_ptr[1].reserved[1];
		}
	} else {
		ste_attr->wqe_tag = &rule->tag;
	}
}

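/* Reserve action STEs for the rule: derived directly from the rule index
 * when the matcher inserts or optimizes by index, otherwise allocated
 * from the matcher action STE pool.
 */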
static int mlx5dr_rule_alloc_action_ste(struct mlx5dr_rule *rule,
					struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	int ret;

	/* Use rule_idx for locking optimization, otherwise allocate from pool */
	if (matcher->attr.optimize_using_rule_idx ||
	    mlx5dr_matcher_is_insert_by_idx(matcher)) {
		rule->action_ste_idx = attr->rule_idx * matcher->action_ste.max_stes;
	} else {
		struct mlx5dr_pool_chunk ste = {0};

		ste.order = rte_log2_u32(matcher->action_ste.max_stes);
		ret = mlx5dr_pool_chunk_alloc(matcher->action_ste.pool, &ste);
		if (ret) {
			DR_LOG(ERR, "Failed to allocate STE for rule actions");
			return ret;
		}
		rule->action_ste_idx = ste.offset;
	}
	return 0;
}

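/* Return the rule action STEs to the matcher pool. Nothing is freed when
 * the STEs were derived from the rule index and were never pool-allocated.
 */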
void mlx5dr_rule_free_action_ste_idx(struct mlx5dr_rule *rule)
{
	struct mlx5dr_matcher *matcher = rule->matcher;

	if (rule->action_ste_idx > -1 &&
	    !matcher->attr.optimize_using_rule_idx &&
	    !mlx5dr_matcher_is_insert_by_idx(matcher)) {
		struct mlx5dr_pool_chunk ste = {0};

		/* This release is safe only when the rule match part was deleted */
		ste.order = rte_log2_u32(matcher->action_ste.max_stes);
		ste.offset = rule->action_ste_idx;
		mlx5dr_pool_chunk_free(matcher->action_ste.pool, &ste);
	}
}

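/* Reset the rule state and set up the default send STE attributes and
 * action apply data shared by the rule insertion paths.
 */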
static void mlx5dr_rule_create_init(struct mlx5dr_rule *rule,
				    struct mlx5dr_send_ste_attr *ste_attr,
				    struct mlx5dr_actions_apply_data *apply,
				    bool is_update)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_table *tbl = matcher->tbl;
	struct mlx5dr_context *ctx = tbl->ctx;

	/* Init rule before reuse */
	if (!is_update) {
		/* In update the existing RTC IDs are reused, don't clear them */
		rule->rtc_0 = 0;
		rule->rtc_1 = 0;
	}

	rule->pending_wqes = 0;
	rule->action_ste_idx = -1;
	rule->status = MLX5DR_RULE_STATUS_CREATING;

	/* Init default send STE attributes */
	ste_attr->gta_opcode = MLX5DR_WQE_GTA_OP_ACTIVATE;
	ste_attr->send_attr.opmod = MLX5DR_WQE_GTA_OPMOD_STE;
	ste_attr->send_attr.opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;
	ste_attr->send_attr.len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;

	/* Init default action apply */
	apply->tbl_type = tbl->type;
	apply->common_res = &ctx->common_res[tbl->type];
	apply->jump_to_action_stc = matcher->action_ste.stc.offset;
	apply->require_dep = 0;
}

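/* Rule insertion through a FW command WQE, used when the matcher cannot
 * be programmed with regular GTA WQEs (e.g. when range matching is used).
 * Builds the match tag and, if needed, the range tag, then hands the STEs
 * to FW.
 */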
static int mlx5dr_rule_create_hws_fw_wqe(struct mlx5dr_rule *rule,
					 struct mlx5dr_rule_attr *attr,
					 uint8_t mt_idx,
					 const struct rte_flow_item items[],
					 uint8_t at_idx,
					 struct mlx5dr_rule_action rule_actions[])
{
	struct mlx5dr_action_template *at = &rule->matcher->at[at_idx];
	struct mlx5dr_match_template *mt = &rule->matcher->mt[mt_idx];
	struct mlx5dr_send_ring_dep_wqe range_wqe = {{0}};
	struct mlx5dr_send_ring_dep_wqe match_wqe = {{0}};
	bool is_range = mlx5dr_matcher_mt_is_range(mt);
	bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(mt);
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_context *ctx = matcher->tbl->ctx;
	struct mlx5dr_send_ste_attr ste_attr = {0};
	struct mlx5dr_actions_apply_data apply;
	struct mlx5dr_send_engine *queue;

	queue = &ctx->send_queue[attr->queue_id];
	if (unlikely(mlx5dr_send_engine_err(queue))) {
		rte_errno = EIO;
		return rte_errno;
	}

	mlx5dr_rule_create_init(rule, &ste_attr, &apply, false);
	mlx5dr_rule_init_dep_wqe(&match_wqe, rule, items, mt, attr->user_data);
	mlx5dr_rule_init_dep_wqe(&range_wqe, rule, items, mt, attr->user_data);

	ste_attr.direct_index = 0;
	ste_attr.rtc_0 = match_wqe.rtc_0;
	ste_attr.rtc_1 = match_wqe.rtc_1;
	ste_attr.used_id_rtc_0 = &rule->rtc_0;
	ste_attr.used_id_rtc_1 = &rule->rtc_1;
	ste_attr.retry_rtc_0 = match_wqe.retry_rtc_0;
	ste_attr.retry_rtc_1 = match_wqe.retry_rtc_1;
	ste_attr.send_attr.rule = match_wqe.rule;
	ste_attr.send_attr.user_data = match_wqe.user_data;

	ste_attr.send_attr.fence = 1;
	ste_attr.send_attr.notify_hw = 1;
	ste_attr.wqe_tag_is_jumbo = is_jumbo;

	/* Prepare match STE TAG */
	ste_attr.wqe_ctrl = &match_wqe.wqe_ctrl;
	ste_attr.wqe_data = &match_wqe.wqe_data;
	ste_attr.send_attr.match_definer_id = mlx5dr_definer_get_id(mt->definer);

	mlx5dr_definer_create_tag(items,
				  mt->fc,
				  mt->fc_sz,
				  (uint8_t *)match_wqe.wqe_data.action);

	/* Prepare range STE TAG */
	if (is_range) {
		ste_attr.range_wqe_data = &range_wqe.wqe_data;
		ste_attr.send_attr.len += MLX5DR_WQE_SZ_GTA_DATA;
		ste_attr.send_attr.range_definer_id = mlx5dr_definer_get_id(mt->range_definer);

		mlx5dr_definer_create_tag_range(items,
						mt->fcr,
						mt->fcr_sz,
						(uint8_t *)range_wqe.wqe_data.action);
	}

	/* Apply the actions on the last STE */
	apply.queue = queue;
	apply.next_direct_idx = 0;
	apply.rule_action = rule_actions;
	apply.wqe_ctrl = &match_wqe.wqe_ctrl;
	apply.wqe_data = (uint32_t *)(is_range ?
				      &range_wqe.wqe_data :
				      &match_wqe.wqe_data);

	/* Skip setters[0], which is used for the jumbo STE and is not supported with FW WQE */
	mlx5dr_action_apply_setter(&apply, &at->setters[1], 0);

	/* Send WQEs to FW */
	mlx5dr_send_stes_fw(queue, &ste_attr);

	/* Backup TAG on the rule for deletion */
	mlx5dr_rule_save_delete_info(rule, &ste_attr);
	mlx5dr_send_engine_inc_rule(queue);

	/* Send dependent WQEs */
	if (!attr->burst)
		mlx5dr_send_all_dep_wqe(queue);

	return 0;
}

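/* Main HWS rule insertion path. A dependent WQE is queued for the match
 * STE, action STEs are written from last to first, and the match WQE is
 * either kept as a dependency or aborted and sent immediately when the
 * rule has no dependent writes.
 */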
static int mlx5dr_rule_create_hws(struct mlx5dr_rule *rule,
				  struct mlx5dr_rule_attr *attr,
				  uint8_t mt_idx,
				  const struct rte_flow_item items[],
				  uint8_t at_idx,
				  struct mlx5dr_rule_action rule_actions[])
{
	struct mlx5dr_action_template *at = &rule->matcher->at[at_idx];
	struct mlx5dr_match_template *mt = &rule->matcher->mt[mt_idx];
	bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(mt);
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_context *ctx = matcher->tbl->ctx;
	struct mlx5dr_send_ste_attr ste_attr = {0};
	struct mlx5dr_send_ring_dep_wqe *dep_wqe;
	struct mlx5dr_actions_wqe_setter *setter;
	struct mlx5dr_actions_apply_data apply;
	struct mlx5dr_send_engine *queue;
	uint8_t total_stes, action_stes;
	bool is_update;
	int i, ret;

	is_update = (items == NULL);

	/* Insert the rule using a FW WQE if a GTA WQE cannot be used */
	if (unlikely(mlx5dr_matcher_req_fw_wqe(matcher) && !is_update))
		return mlx5dr_rule_create_hws_fw_wqe(rule, attr, mt_idx, items,
						     at_idx, rule_actions);

	queue = &ctx->send_queue[attr->queue_id];
	if (unlikely(mlx5dr_send_engine_err(queue))) {
		rte_errno = EIO;
		return rte_errno;
	}

	mlx5dr_rule_create_init(rule, &ste_attr, &apply, is_update);

	/* Allocate dependent match WQE since rule might have dependent writes.
	 * The queued dependent WQE can be later aborted or kept as a dependency.
	 * dep_wqe buffers (ctrl, data) are also reused for all STE writes.
	 */
	dep_wqe = mlx5dr_send_add_new_dep_wqe(queue);
	mlx5dr_rule_init_dep_wqe(dep_wqe, rule, items, mt, attr->user_data);

	ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
	ste_attr.wqe_data = &dep_wqe->wqe_data;
	apply.wqe_ctrl = &dep_wqe->wqe_ctrl;
	apply.wqe_data = (uint32_t *)&dep_wqe->wqe_data;
	apply.rule_action = rule_actions;
	apply.queue = queue;

	setter = &at->setters[at->num_of_action_stes];
	total_stes = at->num_of_action_stes + (is_jumbo && !at->only_term);
	action_stes = total_stes - 1;

	if (action_stes) {
		/* Allocate action STEs for complex rules */
		ret = mlx5dr_rule_alloc_action_ste(rule, attr);
		if (ret) {
			DR_LOG(ERR, "Failed to allocate action memory %d", ret);
			mlx5dr_send_abort_new_dep_wqe(queue);
			return ret;
		}
		/* Skip RX/TX based on the dep_wqe init */
		ste_attr.rtc_0 = dep_wqe->rtc_0 ? matcher->action_ste.rtc_0->id : 0;
		ste_attr.rtc_1 = dep_wqe->rtc_1 ? matcher->action_ste.rtc_1->id : 0;
		/* Action STEs are written to a specific index last to first */
		ste_attr.direct_index = rule->action_ste_idx + action_stes;
		apply.next_direct_idx = ste_attr.direct_index;
	} else {
		apply.next_direct_idx = 0;
	}

	for (i = total_stes; i-- > 0;) {
		mlx5dr_action_apply_setter(&apply, setter--, !i && is_jumbo);

		if (i == 0) {
			/* Handle last match STE.
			 * For hash split / linear lookup RTCs, packets reaching any STE
			 * will always match and perform the specified actions, which
			 * makes the tag irrelevant.
			 */
			if (likely(!mlx5dr_matcher_is_insert_by_idx(matcher) && !is_update))
				mlx5dr_definer_create_tag(items, mt->fc, mt->fc_sz,
							  (uint8_t *)dep_wqe->wqe_data.action);
			else if (unlikely(is_update))
				mlx5dr_rule_update_copy_tag(rule, &dep_wqe->wqe_data, is_jumbo);

			/* Rule has dependent WQEs, match dep_wqe is queued */
			if (action_stes || apply.require_dep)
				break;

			/* Rule has no dependencies, abort dep_wqe and send WQE now */
			mlx5dr_send_abort_new_dep_wqe(queue);
			ste_attr.wqe_tag_is_jumbo = is_jumbo;
			ste_attr.send_attr.notify_hw = !attr->burst;
			ste_attr.send_attr.user_data = dep_wqe->user_data;
			ste_attr.send_attr.rule = dep_wqe->rule;
			ste_attr.rtc_0 = dep_wqe->rtc_0;
			ste_attr.rtc_1 = dep_wqe->rtc_1;
			ste_attr.used_id_rtc_0 = &rule->rtc_0;
			ste_attr.used_id_rtc_1 = &rule->rtc_1;
			ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
			ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
			ste_attr.direct_index = mlx5dr_matcher_is_insert_by_idx(matcher) ?
						attr->rule_idx : 0;
		} else {
			apply.next_direct_idx = --ste_attr.direct_index;
		}

		mlx5dr_send_ste(queue, &ste_attr);
	}

	/* Backup TAG on the rule for deletion, only after insertion */
	if (!is_update)
		mlx5dr_rule_save_delete_info(rule, &ste_attr);

	mlx5dr_send_engine_inc_rule(queue);

	/* Send dependent WQEs */
	if (!attr->burst)
		mlx5dr_send_all_dep_wqe(queue);

	return 0;
}

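/* Complete the teardown of a rule that failed insertion or whose send
 * queue hit an error: generate a completion, release the action STEs and
 * free the saved delete info. No deletion WQE is sent for the rule itself.
 */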
static void mlx5dr_rule_destroy_failed_hws(struct mlx5dr_rule *rule,
					   struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	struct mlx5dr_send_engine *queue;

	queue = &ctx->send_queue[attr->queue_id];

	mlx5dr_rule_gen_comp(queue, rule, false,
			     attr->user_data, MLX5DR_RULE_STATUS_DELETED);

	/* Rule failed, now we can safely release the action STEs */
	mlx5dr_rule_free_action_ste_idx(rule);

	/* Clear complex tag */
	mlx5dr_rule_clear_delete_info(rule);

	/* If a non-burst rule (one that needs to trigger HW) failed insertion,
	 * nothing was written to the WQ, so the HW would not be rung.
	 * In that case send the pending dependent WQEs and flush the queue
	 * to ring the HW with that work.
	 */
	if (attr->burst)
		return;

	mlx5dr_send_all_dep_wqe(queue);
	mlx5dr_send_engine_flush_queue(queue);
}

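/* Queue a GTA deactivate WQE to remove the rule match STE from HW. Rules
 * still being created cannot be destroyed yet (EBUSY), and rules that
 * failed insertion are cleaned up without issuing a deletion WQE.
 */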
static int mlx5dr_rule_destroy_hws(struct mlx5dr_rule *rule,
				   struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	struct mlx5dr_matcher *matcher = rule->matcher;
	bool fw_wqe = mlx5dr_matcher_req_fw_wqe(matcher);
	bool is_range = mlx5dr_matcher_mt_is_range(matcher->mt);
	bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(matcher->mt);
	struct mlx5dr_wqe_gta_ctrl_seg wqe_ctrl = {0};
	struct mlx5dr_send_ste_attr ste_attr = {0};
	struct mlx5dr_send_engine *queue;

	queue = &ctx->send_queue[attr->queue_id];

	if (unlikely(mlx5dr_send_engine_err(queue))) {
		mlx5dr_rule_destroy_failed_hws(rule, attr);
		return 0;
	}

	/* Rule is not completed yet */
	if (rule->status == MLX5DR_RULE_STATUS_CREATING) {
		rte_errno = EBUSY;
		return rte_errno;
	}

	/* Rule failed and doesn't require cleanup */
	if (rule->status == MLX5DR_RULE_STATUS_FAILED) {
		mlx5dr_rule_destroy_failed_hws(rule, attr);
		return 0;
	}

	mlx5dr_send_engine_inc_rule(queue);

	/* Send dependent WQE */
	if (!attr->burst)
		mlx5dr_send_all_dep_wqe(queue);

	rule->status = MLX5DR_RULE_STATUS_DELETING;

	ste_attr.send_attr.opmod = MLX5DR_WQE_GTA_OPMOD_STE;
	ste_attr.send_attr.opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;
	ste_attr.send_attr.len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;
	if (unlikely(is_range))
		ste_attr.send_attr.len += MLX5DR_WQE_SZ_GTA_DATA;

	ste_attr.send_attr.rule = rule;
	ste_attr.send_attr.notify_hw = !attr->burst;
	ste_attr.send_attr.user_data = attr->user_data;

	ste_attr.rtc_0 = rule->rtc_0;
	ste_attr.rtc_1 = rule->rtc_1;
	ste_attr.used_id_rtc_0 = &rule->rtc_0;
	ste_attr.used_id_rtc_1 = &rule->rtc_1;
	ste_attr.wqe_ctrl = &wqe_ctrl;
	ste_attr.wqe_tag_is_jumbo = is_jumbo;
	ste_attr.gta_opcode = MLX5DR_WQE_GTA_OP_DEACTIVATE;
	if (unlikely(mlx5dr_matcher_is_insert_by_idx(matcher)))
		ste_attr.direct_index = attr->rule_idx;

	mlx5dr_rule_load_delete_info(rule, &ste_attr);

	if (unlikely(fw_wqe)) {
		mlx5dr_send_stes_fw(queue, &ste_attr);
		mlx5dr_rule_clear_delete_info(rule);
	} else {
		mlx5dr_send_ste(queue, &ste_attr);
	}

	return 0;
}

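/* Rule insertion on a root table goes through rdma-core: the items are
 * translated to PRM format, the actions are converted to Verbs action
 * attributes, the flow is created and a completion is generated on the
 * queue.
 */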
static int mlx5dr_rule_create_root(struct mlx5dr_rule *rule,
				   struct mlx5dr_rule_attr *rule_attr,
				   const struct rte_flow_item items[],
				   uint8_t at_idx,
				   struct mlx5dr_rule_action rule_actions[])
{
	struct mlx5dv_flow_matcher *dv_matcher = rule->matcher->dv_matcher;
	uint8_t num_actions = rule->matcher->at[at_idx].num_actions;
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	struct mlx5dv_flow_match_parameters *value;
	struct mlx5_flow_attr flow_attr = {0};
	struct mlx5dv_flow_action_attr *attr;
	struct rte_flow_error error;
	uint8_t match_criteria;
	int ret;

	attr = simple_calloc(num_actions, sizeof(*attr));
	if (!attr) {
		rte_errno = ENOMEM;
		return rte_errno;
	}

	value = simple_calloc(1, MLX5_ST_SZ_BYTES(fte_match_param) +
			      offsetof(struct mlx5dv_flow_match_parameters, match_buf));
	if (!value) {
		rte_errno = ENOMEM;
		goto free_attr;
	}

	flow_attr.tbl_type = rule->matcher->tbl->type;

	ret = flow_dv_translate_items_hws(items, &flow_attr, value->match_buf,
					  MLX5_SET_MATCHER_HS_V, NULL,
					  &match_criteria,
					  &error);
	if (ret) {
		DR_LOG(ERR, "Failed to convert items to PRM [%s]", error.message);
		goto free_value;
	}

	/* Convert actions to Verbs action attributes */
	ret = mlx5dr_action_root_build_attr(rule_actions, num_actions, attr);
	if (ret)
		goto free_value;

	/* Create the Verbs flow */
	value->match_sz = MLX5_ST_SZ_BYTES(fte_match_param);
	rule->flow = mlx5_glue->dv_create_flow_root(dv_matcher,
						    value,
						    num_actions,
						    attr);

	mlx5dr_rule_gen_comp(&ctx->send_queue[rule_attr->queue_id], rule, !rule->flow,
			     rule_attr->user_data, MLX5DR_RULE_STATUS_CREATED);

	simple_free(value);
	simple_free(attr);

	return 0;

free_value:
	simple_free(value);
free_attr:
	simple_free(attr);

	return -rte_errno;
}

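/* Destroy a root table rule through ibv_destroy_flow and generate the
 * matching completion on the queue.
 */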
static int mlx5dr_rule_destroy_root(struct mlx5dr_rule *rule,
				    struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	int err = 0;

	if (rule->flow)
		err = ibv_destroy_flow(rule->flow);

	mlx5dr_rule_gen_comp(&ctx->send_queue[attr->queue_id], rule, err,
			     attr->user_data, MLX5DR_RULE_STATUS_DELETED);

	return 0;
}

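/* Common check before enqueueing a rule operation: user_data must be
 * provided for the completion and the send queue must not be full.
 */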
static int mlx5dr_rule_enqueue_precheck(struct mlx5dr_context *ctx,
					struct mlx5dr_rule_attr *attr)
{
	if (unlikely(!attr->user_data)) {
		rte_errno = EINVAL;
		return rte_errno;
	}

	/* Check if there is room in queue */
	if (unlikely(mlx5dr_send_engine_full(&ctx->send_queue[attr->queue_id]))) {
		rte_errno = EBUSY;
		return rte_errno;
	}

	return 0;
}

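/* Public entry point for rule creation. Dispatches to the root (Verbs)
 * path or to the HWS path depending on the table type.
 */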
int mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
		       uint8_t mt_idx,
		       const struct rte_flow_item items[],
		       uint8_t at_idx,
		       struct mlx5dr_rule_action rule_actions[],
		       struct mlx5dr_rule_attr *attr,
		       struct mlx5dr_rule *rule_handle)
{
	struct mlx5dr_context *ctx;
	int ret;

	rule_handle->matcher = matcher;
	ctx = matcher->tbl->ctx;

	if (mlx5dr_rule_enqueue_precheck(ctx, attr))
		return -rte_errno;

	assert(matcher->num_of_mt > mt_idx);
	assert(matcher->num_of_at > at_idx);
	assert(items);

	if (unlikely(mlx5dr_table_is_root(matcher->tbl)))
		ret = mlx5dr_rule_create_root(rule_handle,
					      attr,
					      items,
					      at_idx,
					      rule_actions);
	else
		ret = mlx5dr_rule_create_hws(rule_handle,
					     attr,
					     mt_idx,
					     items,
					     at_idx,
					     rule_actions);
	return -ret;
}

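/* Public entry point for rule destruction. Dispatches to the root (Verbs)
 * path or to the HWS path depending on the table type.
 */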
int mlx5dr_rule_destroy(struct mlx5dr_rule *rule,
			struct mlx5dr_rule_attr *attr)
{
	int ret;

	if (mlx5dr_rule_enqueue_precheck(rule->matcher->tbl->ctx, attr))
		return -rte_errno;

	if (unlikely(mlx5dr_table_is_root(rule->matcher->tbl)))
		ret = mlx5dr_rule_destroy_root(rule, attr);
	else
		ret = mlx5dr_rule_destroy_hws(rule, attr);

	return -ret;
}

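/* Update the actions of an existing rule by rewriting its STEs in place.
 * Supported only on non-root matchers that insert by index or are
 * optimized for rule index, and that do not require FW WQEs.
 */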
int mlx5dr_rule_action_update(struct mlx5dr_rule *rule_handle,
			      uint8_t at_idx,
			      struct mlx5dr_rule_action rule_actions[],
			      struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_matcher *matcher = rule_handle->matcher;
	int ret;

	if (unlikely(mlx5dr_table_is_root(matcher->tbl)) ||
	    unlikely(mlx5dr_matcher_req_fw_wqe(matcher))) {
		DR_LOG(ERR, "Rule update not supported on current matcher");
		rte_errno = ENOTSUP;
		return -rte_errno;
	}

	if (!matcher->attr.optimize_using_rule_idx &&
	    !mlx5dr_matcher_is_insert_by_idx(matcher)) {
		DR_LOG(ERR, "Rule update requires optimize by idx matcher");
		rte_errno = ENOTSUP;
		return -rte_errno;
	}

	if (mlx5dr_rule_enqueue_precheck(matcher->tbl->ctx, attr))
		return -rte_errno;

	ret = mlx5dr_rule_create_hws(rule_handle,
				     attr,
				     0,
				     NULL,
				     at_idx,
				     rule_actions);

	return -ret;
}

size_t mlx5dr_rule_get_handle_size(void)
{
	return sizeof(struct mlx5dr_rule);
}

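/* Calculate the CRC32 hash of the match tag built from the given items,
 * returning either the raw hash or, in IDX mode, the hash masked to the
 * matcher rule table size.
 */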
int mlx5dr_rule_hash_calculate(struct mlx5dr_matcher *matcher,
			       const struct rte_flow_item items[],
			       uint8_t mt_idx,
			       enum mlx5dr_rule_hash_calc_mode mode,
			       uint32_t *ret_hash)
{
	uint8_t tag[MLX5DR_STE_SZ] = {0};
	struct mlx5dr_match_template *mt;

	if (!matcher || !matcher->mt) {
		rte_errno = EINVAL;
		return -rte_errno;
	}

	mt = &matcher->mt[mt_idx];

	if (mlx5dr_matcher_req_fw_wqe(matcher) ||
	    mlx5dr_table_is_root(matcher->tbl) ||
	    matcher->tbl->ctx->caps->access_index_mode == MLX5DR_MATCHER_INSERT_BY_HASH ||
	    matcher->tbl->ctx->caps->flow_table_hash_type != MLX5_FLOW_TABLE_HASH_TYPE_CRC32) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}

	mlx5dr_definer_create_tag(items, mt->fc, mt->fc_sz, tag);
	if (mlx5dr_matcher_mt_is_jumbo(mt))
		*ret_hash = mlx5dr_crc32_calc(tag, MLX5DR_JUMBO_TAG_SZ);
	else
		*ret_hash = mlx5dr_crc32_calc(tag + MLX5DR_ACTIONS_SZ,
					      MLX5DR_MATCH_TAG_SZ);

	if (mode == MLX5DR_RULE_HASH_CALC_MODE_IDX)
		*ret_hash = *ret_hash & (BIT(matcher->attr.rule.num_log) - 1);

	return 0;
}