/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include "mlx5dr_internal.h"

static void mlx5dr_rule_skip(struct mlx5dr_matcher *matcher,
			     struct mlx5dr_match_template *mt,
			     const struct rte_flow_item *items,
			     bool *skip_rx, bool *skip_tx)
{
	const struct flow_hw_port_info *vport;
	const struct rte_flow_item_ethdev *v;

	/* The optimize_flow_src hint takes precedence over item-based detection */
	if (matcher->attr.optimize_flow_src) {
		*skip_tx = matcher->attr.optimize_flow_src == MLX5DR_MATCHER_FLOW_SRC_WIRE;
		*skip_rx = matcher->attr.optimize_flow_src == MLX5DR_MATCHER_FLOW_SRC_VPORT;
		return;
	}

	/* By default FDB rules are added to both RX and TX */
	*skip_rx = false;
	*skip_tx = false;

	if (unlikely(mlx5dr_matcher_is_insert_by_idx(matcher)))
		return;

	if (mt->item_flags & MLX5_FLOW_ITEM_REPRESENTED_PORT) {
		v = items[mt->vport_item_id].spec;
		vport = flow_hw_conv_port_id(matcher->tbl->ctx, v->port_id);
		if (unlikely(!vport)) {
			DR_LOG(ERR, "Failed to map port ID %d, ignoring", v->port_id);
			return;
		}

		if (!vport->is_wire)
			/* Match vport ID is not WIRE -> Skip RX */
			*skip_rx = true;
		else
			/* Match vport ID is WIRE -> Skip TX */
			*skip_tx = true;
	}
}
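
/* Summary of the decision above: an explicit optimize_flow_src hint wins
 * (WIRE keeps only the RX side, VPORT keeps only the TX side); otherwise,
 * when the template matches on a REPRESENTED_PORT item, a wire port keeps
 * only RX and a non-wire vport keeps only TX. Insert-by-index matchers
 * never skip either direction.
 */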

static void
mlx5dr_rule_update_copy_tag(struct mlx5dr_rule *rule,
			    struct mlx5dr_wqe_gta_data_seg_ste *wqe_data,
			    bool is_jumbo)
{
	if (is_jumbo)
		memcpy(wqe_data->jumbo, rule->tag.jumbo, MLX5DR_JUMBO_TAG_SZ);
	else
		memcpy(wqe_data->tag, rule->tag.match, MLX5DR_MATCH_TAG_SZ);
}

static void mlx5dr_rule_init_dep_wqe(struct mlx5dr_send_ring_dep_wqe *dep_wqe,
				     struct mlx5dr_rule *rule,
				     const struct rte_flow_item *items,
				     struct mlx5dr_match_template *mt,
				     struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_table *tbl = matcher->tbl;
	bool skip_rx, skip_tx;

	dep_wqe->rule = rule;
	dep_wqe->user_data = attr->user_data;
	dep_wqe->direct_index = mlx5dr_matcher_is_insert_by_idx(matcher) ?
		attr->rule_idx : 0;

	if (!items) { /* rule update */
		dep_wqe->rtc_0 = rule->rtc_0;
		dep_wqe->rtc_1 = rule->rtc_1;
		dep_wqe->retry_rtc_1 = 0;
		dep_wqe->retry_rtc_0 = 0;
		return;
	}

	switch (tbl->type) {
	case MLX5DR_TABLE_TYPE_NIC_RX:
	case MLX5DR_TABLE_TYPE_NIC_TX:
		dep_wqe->rtc_0 = matcher->match_ste.rtc_0->id;
		dep_wqe->retry_rtc_0 = matcher->col_matcher ?
				       matcher->col_matcher->match_ste.rtc_0->id : 0;
		dep_wqe->rtc_1 = 0;
		dep_wqe->retry_rtc_1 = 0;
		break;

	case MLX5DR_TABLE_TYPE_FDB:
		mlx5dr_rule_skip(matcher, mt, items, &skip_rx, &skip_tx);

		if (!skip_rx) {
			dep_wqe->rtc_0 = matcher->match_ste.rtc_0->id;
			dep_wqe->retry_rtc_0 = matcher->col_matcher ?
					       matcher->col_matcher->match_ste.rtc_0->id : 0;
		} else {
			dep_wqe->rtc_0 = 0;
			dep_wqe->retry_rtc_0 = 0;
		}

		if (!skip_tx) {
			dep_wqe->rtc_1 = matcher->match_ste.rtc_1->id;
			dep_wqe->retry_rtc_1 = matcher->col_matcher ?
					       matcher->col_matcher->match_ste.rtc_1->id : 0;
		} else {
			dep_wqe->rtc_1 = 0;
			dep_wqe->retry_rtc_1 = 0;
		}

		break;

	default:
		assert(false);
		break;
	}
}
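
/* Note: NIC RX/TX tables use only rtc_0 (rtc_1 stays 0), while FDB tables
 * populate rtc_0 for the RX side and rtc_1 for the TX side, zeroing
 * whichever direction mlx5dr_rule_skip() decided to skip. The retry RTCs
 * point at the collision matcher, when one exists.
 */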

static void mlx5dr_rule_move_get_rtc(struct mlx5dr_rule *rule,
				     struct mlx5dr_send_ste_attr *ste_attr)
{
	struct mlx5dr_matcher *dst_matcher = rule->matcher->resize_dst;

	if (rule->resize_info->rtc_0) {
		ste_attr->rtc_0 = dst_matcher->match_ste.rtc_0->id;
		ste_attr->retry_rtc_0 = dst_matcher->col_matcher ?
					dst_matcher->col_matcher->match_ste.rtc_0->id : 0;
	}
	if (rule->resize_info->rtc_1) {
		ste_attr->rtc_1 = dst_matcher->match_ste.rtc_1->id;
		ste_attr->retry_rtc_1 = dst_matcher->col_matcher ?
					dst_matcher->col_matcher->match_ste.rtc_1->id : 0;
	}
}

static void mlx5dr_rule_gen_comp(struct mlx5dr_send_engine *queue,
				 struct mlx5dr_rule *rule,
				 bool err,
				 void *user_data,
				 enum mlx5dr_rule_status rule_status_on_succ)
{
	enum rte_flow_op_status comp_status;

	if (!err) {
		comp_status = RTE_FLOW_OP_SUCCESS;
		rule->status = rule_status_on_succ;
	} else {
		comp_status = RTE_FLOW_OP_ERROR;
		rule->status = MLX5DR_RULE_STATUS_FAILED;
	}

	mlx5dr_send_engine_inc_rule(queue);
	mlx5dr_send_engine_gen_comp(queue, user_data, comp_status);
}
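
/* Completions generated here (and by the HW completion path) are drained
 * by the application, typically with mlx5dr_send_queue_poll(). A minimal
 * polling sketch, assuming a single queue and the rule handle passed as
 * user_data (handle_failed_rule() is a hypothetical callback):
 *
 *	struct rte_flow_op_result res[64];
 *	int i, n;
 *
 *	n = mlx5dr_send_queue_poll(ctx, queue_id, res, 64);
 *	for (i = 0; i < n; i++)
 *		if (res[i].status == RTE_FLOW_OP_ERROR)
 *			handle_failed_rule(res[i].user_data);
 */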

static void
mlx5dr_rule_save_resize_info(struct mlx5dr_rule *rule,
			     struct mlx5dr_send_ste_attr *ste_attr)
{
	if (likely(!mlx5dr_matcher_is_resizable(rule->matcher)))
		return;

	rule->resize_info = simple_calloc(1, sizeof(*rule->resize_info));
	if (unlikely(!rule->resize_info)) {
		assert(rule->resize_info);
		rte_errno = ENOMEM;
		return;
	}

	memcpy(rule->resize_info->ctrl_seg, ste_attr->wqe_ctrl,
	       sizeof(rule->resize_info->ctrl_seg));
	memcpy(rule->resize_info->data_seg, ste_attr->wqe_data,
	       sizeof(rule->resize_info->data_seg));

	rule->resize_info->max_stes = rule->matcher->action_ste.max_stes;
	rule->resize_info->action_ste_pool = rule->matcher->action_ste.max_stes ?
					     rule->matcher->action_ste.pool :
					     NULL;
}

void mlx5dr_rule_clear_resize_info(struct mlx5dr_rule *rule)
{
	if (unlikely(mlx5dr_matcher_is_resizable(rule->matcher) &&
		     rule->resize_info)) {
		simple_free(rule->resize_info);
		rule->resize_info = NULL;
	}
}

static void
mlx5dr_rule_save_delete_info(struct mlx5dr_rule *rule,
			     struct mlx5dr_send_ste_attr *ste_attr)
{
	struct mlx5dr_match_template *mt = rule->matcher->mt;
	bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(mt);

	if (unlikely(mlx5dr_matcher_req_fw_wqe(rule->matcher))) {
		uint8_t *src_tag;

		/* Save match definer id and tag for delete */
		rule->tag_ptr = simple_calloc(2, sizeof(*rule->tag_ptr));
		assert(rule->tag_ptr);

		if (is_jumbo)
			memcpy(rule->tag_ptr[0].jumbo, ste_attr->wqe_data->action,
			       MLX5DR_JUMBO_TAG_SZ);
		else
			memcpy(rule->tag_ptr[0].match, ste_attr->wqe_data->tag,
			       MLX5DR_MATCH_TAG_SZ);

		rule->tag_ptr[1].reserved[0] = ste_attr->send_attr.match_definer_id;

		/* Save range definer id and tag for delete */
		if (ste_attr->range_wqe_data) {
			src_tag = (uint8_t *)ste_attr->range_wqe_data->tag;
			memcpy(rule->tag_ptr[1].match, src_tag, MLX5DR_MATCH_TAG_SZ);
			rule->tag_ptr[1].reserved[1] = ste_attr->send_attr.range_definer_id;
		}
		return;
	}

	if (likely(!mlx5dr_matcher_is_resizable(rule->matcher))) {
		if (is_jumbo)
			memcpy(&rule->tag.jumbo, ste_attr->wqe_data->action, MLX5DR_JUMBO_TAG_SZ);
		else
			memcpy(&rule->tag.match, ste_attr->wqe_data->tag, MLX5DR_MATCH_TAG_SZ);
		return;
	}
}
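
/* Layout note for the FW WQE case above: tag_ptr[0] holds the match tag,
 * tag_ptr[1].match holds the optional range tag, and the two definer IDs
 * are stashed in tag_ptr[1].reserved[0]/[1]. mlx5dr_rule_load_delete_info()
 * reads the same layout back on delete. Resizable matchers need no copy
 * here since the tag lives in resize_info->data_seg.
 */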

static void
mlx5dr_rule_clear_delete_info(struct mlx5dr_rule *rule)
{
	if (unlikely(mlx5dr_matcher_req_fw_wqe(rule->matcher))) {
		simple_free(rule->tag_ptr);
		return;
	}
}

static void
mlx5dr_rule_load_delete_info(struct mlx5dr_rule *rule,
			     struct mlx5dr_send_ste_attr *ste_attr)
{
	if (unlikely(mlx5dr_matcher_req_fw_wqe(rule->matcher))) {
		/* Load match definer id and tag for delete */
		ste_attr->wqe_tag = &rule->tag_ptr[0];
		ste_attr->send_attr.match_definer_id = rule->tag_ptr[1].reserved[0];

		/* Load range definer id and tag for delete */
		if (rule->matcher->flags & MLX5DR_MATCHER_FLAGS_RANGE_DEFINER) {
			ste_attr->range_wqe_tag = &rule->tag_ptr[1];
			ste_attr->send_attr.range_definer_id = rule->tag_ptr[1].reserved[1];
		}
	} else if (likely(!mlx5dr_matcher_is_resizable(rule->matcher))) {
		ste_attr->wqe_tag = &rule->tag;
	} else {
		ste_attr->wqe_tag = (struct mlx5dr_rule_match_tag *)
			&rule->resize_info->data_seg[MLX5DR_STE_CTRL_SZ];
	}
}

static int mlx5dr_rule_alloc_action_ste(struct mlx5dr_rule *rule,
					struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	int ret;

	/* Use rule_idx for locking optimization, otherwise allocate from pool */
	if (matcher->attr.optimize_using_rule_idx ||
	    mlx5dr_matcher_is_insert_by_idx(matcher)) {
		rule->action_ste_idx = attr->rule_idx * matcher->action_ste.max_stes;
	} else {
		struct mlx5dr_pool_chunk ste = {0};

		ste.order = rte_log2_u32(matcher->action_ste.max_stes);
		ret = mlx5dr_pool_chunk_alloc(matcher->action_ste.pool, &ste);
		if (ret) {
			DR_LOG(ERR, "Failed to allocate STE for rule actions");
			return ret;
		}
		rule->action_ste_idx = ste.offset;
	}
	return 0;
}
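
/* Worked example (assumed sizes): with action_ste.max_stes == 4 and
 * optimize_using_rule_idx set, a rule at rule_idx == 10 gets
 * action_ste_idx = 10 * 4 = 40, a fixed per-rule slice that needs no pool
 * locking. On the pool path the chunk order is rte_log2_u32(4) == 2, so
 * the pool hands back a 4-STE chunk offset instead.
 */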

void mlx5dr_rule_free_action_ste_idx(struct mlx5dr_rule *rule)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_pool *pool;
	uint8_t max_stes;

	if (rule->action_ste_idx > -1 &&
	    !matcher->attr.optimize_using_rule_idx &&
	    !mlx5dr_matcher_is_insert_by_idx(matcher)) {
		struct mlx5dr_pool_chunk ste = {0};

		if (unlikely(mlx5dr_matcher_is_resizable(matcher))) {
			/* Free the original action pool if rule was resized */
			max_stes = rule->resize_info->max_stes;
			pool = rule->resize_info->action_ste_pool;
		} else {
			max_stes = matcher->action_ste.max_stes;
			pool = matcher->action_ste.pool;
		}

		/* This release is safe only when the rule match part was deleted */
		ste.order = rte_log2_u32(max_stes);
		ste.offset = rule->action_ste_idx;

		mlx5dr_pool_chunk_free(pool, &ste);
	}
}

static void mlx5dr_rule_create_init(struct mlx5dr_rule *rule,
				    struct mlx5dr_send_ste_attr *ste_attr,
				    struct mlx5dr_actions_apply_data *apply,
				    bool is_update)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_table *tbl = matcher->tbl;
	struct mlx5dr_context *ctx = tbl->ctx;

	/* Init rule before reuse */
	if (!is_update) {
		/* In update we reuse these RTCs */
		rule->rtc_0 = 0;
		rule->rtc_1 = 0;
	}

	rule->pending_wqes = 0;
	rule->action_ste_idx = -1;
	rule->status = MLX5DR_RULE_STATUS_CREATING;

	/* Init default send STE attributes */
	ste_attr->gta_opcode = MLX5DR_WQE_GTA_OP_ACTIVATE;
	ste_attr->send_attr.opmod = MLX5DR_WQE_GTA_OPMOD_STE;
	ste_attr->send_attr.opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;
	ste_attr->send_attr.len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;

	/* Init default action apply */
	apply->tbl_type = tbl->type;
	apply->common_res = &ctx->common_res[tbl->type];
	apply->jump_to_action_stc = matcher->action_ste.stc.offset;
	apply->require_dep = 0;
}

static void mlx5dr_rule_move_init(struct mlx5dr_rule *rule,
				  struct mlx5dr_rule_attr *attr)
{
	/* Save the old RTC IDs to be later used in match STE delete */
	rule->resize_info->rtc_0 = rule->rtc_0;
	rule->resize_info->rtc_1 = rule->rtc_1;
	rule->resize_info->rule_idx = attr->rule_idx;

	rule->rtc_0 = 0;
	rule->rtc_1 = 0;

	rule->pending_wqes = 0;
	rule->action_ste_idx = -1;
	rule->status = MLX5DR_RULE_STATUS_CREATING;
	rule->resize_info->state = MLX5DR_RULE_RESIZE_STATE_WRITING;
}

bool mlx5dr_rule_move_in_progress(struct mlx5dr_rule *rule)
{
	return mlx5dr_matcher_is_in_resize(rule->matcher) &&
	       rule->resize_info &&
	       rule->resize_info->state != MLX5DR_RULE_RESIZE_STATE_IDLE;
}

static int mlx5dr_rule_create_hws_fw_wqe(struct mlx5dr_rule *rule,
					 struct mlx5dr_rule_attr *attr,
					 uint8_t mt_idx,
					 const struct rte_flow_item items[],
					 uint8_t at_idx,
					 struct mlx5dr_rule_action rule_actions[])
{
	struct mlx5dr_action_template *at = &rule->matcher->at[at_idx];
	struct mlx5dr_match_template *mt = &rule->matcher->mt[mt_idx];
	struct mlx5dr_send_ring_dep_wqe range_wqe = {{0}};
	struct mlx5dr_send_ring_dep_wqe match_wqe = {{0}};
	bool is_range = mlx5dr_matcher_mt_is_range(mt);
	bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(mt);
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_context *ctx = matcher->tbl->ctx;
	struct mlx5dr_send_ste_attr ste_attr = {0};
	struct mlx5dr_actions_apply_data apply;
	struct mlx5dr_send_engine *queue;

	queue = &ctx->send_queue[attr->queue_id];
	if (unlikely(mlx5dr_send_engine_err(queue))) {
		rte_errno = EIO;
		return rte_errno;
	}

	mlx5dr_rule_create_init(rule, &ste_attr, &apply, false);
	mlx5dr_rule_init_dep_wqe(&match_wqe, rule, items, mt, attr);
	mlx5dr_rule_init_dep_wqe(&range_wqe, rule, items, mt, attr);

	ste_attr.direct_index = 0;
	ste_attr.rtc_0 = match_wqe.rtc_0;
	ste_attr.rtc_1 = match_wqe.rtc_1;
	ste_attr.used_id_rtc_0 = &rule->rtc_0;
	ste_attr.used_id_rtc_1 = &rule->rtc_1;
	ste_attr.retry_rtc_0 = match_wqe.retry_rtc_0;
	ste_attr.retry_rtc_1 = match_wqe.retry_rtc_1;
	ste_attr.send_attr.rule = match_wqe.rule;
	ste_attr.send_attr.user_data = match_wqe.user_data;

	ste_attr.send_attr.fence = 1;
	ste_attr.send_attr.notify_hw = 1;
	ste_attr.wqe_tag_is_jumbo = is_jumbo;

	/* Prepare match STE TAG */
	ste_attr.wqe_ctrl = &match_wqe.wqe_ctrl;
	ste_attr.wqe_data = &match_wqe.wqe_data;
	ste_attr.send_attr.match_definer_id = mlx5dr_definer_get_id(mt->definer);

	mlx5dr_definer_create_tag(items,
				  mt->fc,
				  mt->fc_sz,
				  (uint8_t *)match_wqe.wqe_data.action);

	/* Prepare range STE TAG */
	if (is_range) {
		ste_attr.range_wqe_data = &range_wqe.wqe_data;
		ste_attr.send_attr.len += MLX5DR_WQE_SZ_GTA_DATA;
		ste_attr.send_attr.range_definer_id = mlx5dr_definer_get_id(mt->range_definer);

		mlx5dr_definer_create_tag_range(items,
						mt->fcr,
						mt->fcr_sz,
						(uint8_t *)range_wqe.wqe_data.action);
	}

	/* Apply the actions on the last STE */
	apply.queue = queue;
	apply.next_direct_idx = 0;
	apply.rule_action = rule_actions;
	apply.wqe_ctrl = &match_wqe.wqe_ctrl;
	apply.wqe_data = (uint32_t *)(is_range ?
				      &range_wqe.wqe_data :
				      &match_wqe.wqe_data);

	/* Skip setters[0], used for the jumbo STE, since it is not supported with FW WQE */
	mlx5dr_action_apply_setter(&apply, &at->setters[1], 0);

	/* Send WQEs to FW */
	mlx5dr_send_stes_fw(queue, &ste_attr);

	/* Backup TAG on the rule for deletion */
	mlx5dr_rule_save_delete_info(rule, &ste_attr);
	mlx5dr_send_engine_inc_rule(queue);

	/* Send dependent WQEs */
	if (!attr->burst)
		mlx5dr_send_all_dep_wqe(queue);

	return 0;
}

static int mlx5dr_rule_create_hws(struct mlx5dr_rule *rule,
				  struct mlx5dr_rule_attr *attr,
				  uint8_t mt_idx,
				  const struct rte_flow_item items[],
				  uint8_t at_idx,
				  struct mlx5dr_rule_action rule_actions[])
{
	struct mlx5dr_action_template *at = &rule->matcher->at[at_idx];
	struct mlx5dr_match_template *mt = &rule->matcher->mt[mt_idx];
	bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(mt);
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_context *ctx = matcher->tbl->ctx;
	struct mlx5dr_send_ste_attr ste_attr = {0};
	struct mlx5dr_send_ring_dep_wqe *dep_wqe;
	struct mlx5dr_actions_wqe_setter *setter;
	struct mlx5dr_actions_apply_data apply;
	struct mlx5dr_send_engine *queue;
	uint8_t total_stes, action_stes;
	bool is_update;
	int i, ret;

	is_update = (items == NULL);

	/* Insert the rule using an FW WQE when a GTA WQE cannot be used */
	if (unlikely(mlx5dr_matcher_req_fw_wqe(matcher) && !is_update))
		return mlx5dr_rule_create_hws_fw_wqe(rule, attr, mt_idx, items,
						     at_idx, rule_actions);

	queue = &ctx->send_queue[attr->queue_id];
	if (unlikely(mlx5dr_send_engine_err(queue))) {
		rte_errno = EIO;
		return rte_errno;
	}

	mlx5dr_rule_create_init(rule, &ste_attr, &apply, is_update);

	/* Allocate dependent match WQE since rule might have dependent writes.
	 * The queued dependent WQE can be later aborted or kept as a dependency.
	 * dep_wqe buffers (ctrl, data) are also reused for all STE writes.
	 */
	dep_wqe = mlx5dr_send_add_new_dep_wqe(queue);
	mlx5dr_rule_init_dep_wqe(dep_wqe, rule, items, mt, attr);

	ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
	ste_attr.wqe_data = &dep_wqe->wqe_data;
	apply.wqe_ctrl = &dep_wqe->wqe_ctrl;
	apply.wqe_data = (uint32_t *)&dep_wqe->wqe_data;
	apply.rule_action = rule_actions;
	apply.queue = queue;

	setter = &at->setters[at->num_of_action_stes];
	total_stes = at->num_of_action_stes + (is_jumbo && !at->only_term);
	action_stes = total_stes - 1;
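
	/* Worked example (assumed template): for a jumbo, non-termination-only
	 * template with num_of_action_stes == 2, total_stes = 2 + 1 = 3 and
	 * action_stes = 2. The loop further below then consumes setters[2]
	 * down to setters[0], writing the action STEs at decreasing indices
	 * and handling the match STE last (i == 0).
	 */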
514 
	if (action_stes) {
		/* Allocate action STEs for complex rules */
		ret = mlx5dr_rule_alloc_action_ste(rule, attr);
		if (ret) {
			DR_LOG(ERR, "Failed to allocate action memory %d", ret);
			mlx5dr_send_abort_new_dep_wqe(queue);
			return ret;
		}
		/* Skip RX/TX based on the dep_wqe init */
		ste_attr.rtc_0 = dep_wqe->rtc_0 ? matcher->action_ste.rtc_0->id : 0;
		ste_attr.rtc_1 = dep_wqe->rtc_1 ? matcher->action_ste.rtc_1->id : 0;
		/* Action STEs are written to a specific index last to first */
		ste_attr.direct_index = rule->action_ste_idx + action_stes;
		apply.next_direct_idx = ste_attr.direct_index;
	} else {
		apply.next_direct_idx = 0;
	}

	for (i = total_stes; i-- > 0;) {
		mlx5dr_action_apply_setter(&apply, setter--, !i && is_jumbo);

		if (i == 0) {
			/* Handle last match STE.
			 * For hash split / linear lookup RTCs, packets reaching any STE
			 * will always match and perform the specified actions, which
			 * makes the tag irrelevant.
			 */
			if (likely(!mlx5dr_matcher_is_insert_by_idx(matcher) && !is_update))
				mlx5dr_definer_create_tag(items, mt->fc, mt->fc_sz,
							  (uint8_t *)dep_wqe->wqe_data.action);
			else if (unlikely(is_update))
				mlx5dr_rule_update_copy_tag(rule, &dep_wqe->wqe_data, is_jumbo);

			/* Rule has dependent WQEs, match dep_wqe is queued */
			if (action_stes || apply.require_dep)
				break;

			/* Rule has no dependencies, abort dep_wqe and send WQE now */
			mlx5dr_send_abort_new_dep_wqe(queue);
			ste_attr.wqe_tag_is_jumbo = is_jumbo;
			ste_attr.send_attr.notify_hw = !attr->burst;
			ste_attr.send_attr.user_data = dep_wqe->user_data;
			ste_attr.send_attr.rule = dep_wqe->rule;
			ste_attr.rtc_0 = dep_wqe->rtc_0;
			ste_attr.rtc_1 = dep_wqe->rtc_1;
			ste_attr.used_id_rtc_0 = &rule->rtc_0;
			ste_attr.used_id_rtc_1 = &rule->rtc_1;
			ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
			ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
			ste_attr.direct_index = dep_wqe->direct_index;
		} else {
			apply.next_direct_idx = --ste_attr.direct_index;
		}

		mlx5dr_send_ste(queue, &ste_attr);
	}

	/* Backup TAG on the rule for deletion and resize info for
	 * moving rules to a new matcher, only after insertion.
	 */
	if (!is_update) {
		mlx5dr_rule_save_delete_info(rule, &ste_attr);
		mlx5dr_rule_save_resize_info(rule, &ste_attr);
	}

	mlx5dr_send_engine_inc_rule(queue);

	/* Send dependent WQEs */
	if (!attr->burst)
		mlx5dr_send_all_dep_wqe(queue);

	return 0;
}

static void mlx5dr_rule_destroy_failed_hws(struct mlx5dr_rule *rule,
					   struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	struct mlx5dr_send_engine *queue;

	queue = &ctx->send_queue[attr->queue_id];

	mlx5dr_rule_gen_comp(queue, rule, false,
			     attr->user_data, MLX5DR_RULE_STATUS_DELETED);

	/* The rule failed; action STEs can now be safely released */
	mlx5dr_rule_free_action_ste_idx(rule);

	/* Clear complex tag */
	mlx5dr_rule_clear_delete_info(rule);

	/* Clear info that was saved for resizing */
	mlx5dr_rule_clear_resize_info(rule);

	/* A failed insertion writes nothing to the WQ, so the HW was never
	 * rung for this rule. When the rule is not burst (i.e. the HW must
	 * be triggered now), update the last WQE and ring the HW with that
	 * work.
	 */
	if (attr->burst)
		return;

	mlx5dr_send_all_dep_wqe(queue);
	mlx5dr_send_engine_flush_queue(queue);
}

static int mlx5dr_rule_destroy_hws(struct mlx5dr_rule *rule,
				   struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	struct mlx5dr_matcher *matcher = rule->matcher;
	bool fw_wqe = mlx5dr_matcher_req_fw_wqe(matcher);
	bool is_range = mlx5dr_matcher_mt_is_range(matcher->mt);
	bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(matcher->mt);
	struct mlx5dr_wqe_gta_ctrl_seg wqe_ctrl = {0};
	struct mlx5dr_send_ste_attr ste_attr = {0};
	struct mlx5dr_send_engine *queue;

	queue = &ctx->send_queue[attr->queue_id];

	if (unlikely(mlx5dr_send_engine_err(queue))) {
		mlx5dr_rule_destroy_failed_hws(rule, attr);
		return 0;
	}

	/* Rule is not completed yet */
	if (rule->status == MLX5DR_RULE_STATUS_CREATING) {
		rte_errno = EBUSY;
		return rte_errno;
	}

	/* Rule failed and doesn't require cleanup */
	if (rule->status == MLX5DR_RULE_STATUS_FAILED) {
		mlx5dr_rule_destroy_failed_hws(rule, attr);
		return 0;
	}

	mlx5dr_send_engine_inc_rule(queue);

	/* Send dependent WQE */
	if (!attr->burst)
		mlx5dr_send_all_dep_wqe(queue);

	rule->status = MLX5DR_RULE_STATUS_DELETING;

	ste_attr.send_attr.opmod = MLX5DR_WQE_GTA_OPMOD_STE;
	ste_attr.send_attr.opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;
	ste_attr.send_attr.len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;
	if (unlikely(is_range))
		ste_attr.send_attr.len += MLX5DR_WQE_SZ_GTA_DATA;

	ste_attr.send_attr.rule = rule;
	ste_attr.send_attr.notify_hw = !attr->burst;
	ste_attr.send_attr.user_data = attr->user_data;

	ste_attr.rtc_0 = rule->rtc_0;
	ste_attr.rtc_1 = rule->rtc_1;
	ste_attr.used_id_rtc_0 = &rule->rtc_0;
	ste_attr.used_id_rtc_1 = &rule->rtc_1;
	ste_attr.wqe_ctrl = &wqe_ctrl;
	ste_attr.wqe_tag_is_jumbo = is_jumbo;
	ste_attr.gta_opcode = MLX5DR_WQE_GTA_OP_DEACTIVATE;
	if (unlikely(mlx5dr_matcher_is_insert_by_idx(matcher)))
		ste_attr.direct_index = attr->rule_idx;

	mlx5dr_rule_load_delete_info(rule, &ste_attr);

	if (unlikely(fw_wqe))
		mlx5dr_send_stes_fw(queue, &ste_attr);
	else
		mlx5dr_send_ste(queue, &ste_attr);

	mlx5dr_rule_clear_delete_info(rule);

	return 0;
}

int mlx5dr_rule_create_root_no_comp(struct mlx5dr_rule *rule,
				    const struct rte_flow_item items[],
				    uint8_t num_actions,
				    struct mlx5dr_rule_action rule_actions[])
{
	struct mlx5dv_flow_matcher *dv_matcher = rule->matcher->dv_matcher;
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	struct mlx5dv_flow_match_parameters *value;
	struct mlx5_flow_attr flow_attr = {0};
	struct mlx5dv_flow_action_attr *attr;
	struct rte_flow_error error;
	uint8_t match_criteria;
	int ret;

	ret = flow_hw_get_port_id_from_ctx(ctx, &flow_attr.port_id);
	if (ret) {
		DR_LOG(ERR, "Failed to get port id for dev %s", ctx->ibv_ctx->device->name);
		rte_errno = EINVAL;
		return rte_errno;
	}

	attr = simple_calloc(num_actions, sizeof(*attr));
	if (!attr) {
		rte_errno = ENOMEM;
		return rte_errno;
	}

	value = simple_calloc(1, MLX5_ST_SZ_BYTES(fte_match_param) +
			      offsetof(struct mlx5dv_flow_match_parameters, match_buf));
	if (!value) {
		rte_errno = ENOMEM;
		goto free_attr;
	}

	flow_attr.tbl_type = rule->matcher->tbl->type;

	ret = flow_dv_translate_items_hws(items, &flow_attr, value->match_buf,
					  MLX5_SET_MATCHER_HS_V, NULL,
					  &match_criteria,
					  &error);
	if (ret) {
		DR_LOG(ERR, "Failed to convert items to PRM [%s]", error.message);
		goto free_value;
	}

	/* Convert actions to verb action attr */
	ret = mlx5dr_action_root_build_attr(rule_actions, num_actions, attr);
	if (ret)
		goto free_value;

	/* Create verb flow */
	value->match_sz = MLX5_ST_SZ_BYTES(fte_match_param);
	rule->flow = mlx5_glue->dv_create_flow_root(dv_matcher,
						    value,
						    num_actions,
						    attr);

	simple_free(value);
	simple_free(attr);

	return 0;

free_value:
	simple_free(value);
free_attr:
	simple_free(attr);

	return rte_errno;
}
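
/* Root-table rules bypass the HWS send queues entirely: the items are
 * translated to PRM format and the rule is installed through the Verbs
 * flow path (mlx5_glue->dv_create_flow_root), so no HW completion is
 * generated and the callers below emit one themselves.
 */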

static int mlx5dr_rule_create_root(struct mlx5dr_rule *rule,
				   struct mlx5dr_rule_attr *rule_attr,
				   const struct rte_flow_item items[],
				   uint8_t num_actions,
				   struct mlx5dr_rule_action rule_actions[])
{
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	int ret;

	ret = mlx5dr_rule_create_root_no_comp(rule, items,
					      num_actions, rule_actions);
	if (ret)
		return rte_errno;

	mlx5dr_rule_gen_comp(&ctx->send_queue[rule_attr->queue_id], rule, !rule->flow,
			     rule_attr->user_data, MLX5DR_RULE_STATUS_CREATED);

	return 0;
}

int mlx5dr_rule_destroy_root_no_comp(struct mlx5dr_rule *rule)
{
	if (rule->flow)
		return ibv_destroy_flow(rule->flow);

	return 0;
}

static int mlx5dr_rule_destroy_root(struct mlx5dr_rule *rule,
				    struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	int err;

	err = mlx5dr_rule_destroy_root_no_comp(rule);

	mlx5dr_rule_gen_comp(&ctx->send_queue[attr->queue_id], rule, err,
			     attr->user_data, MLX5DR_RULE_STATUS_DELETED);

	return 0;
}

static int mlx5dr_rule_enqueue_precheck(struct mlx5dr_rule *rule,
					struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;

	if (unlikely(!attr->user_data)) {
		rte_errno = EINVAL;
		return rte_errno;
	}

	/* Check if there is room in queue */
	if (unlikely(mlx5dr_send_engine_full(&ctx->send_queue[attr->queue_id]))) {
		rte_errno = EBUSY;
		return rte_errno;
	}

	return 0;
}

static int mlx5dr_rule_enqueue_precheck_move(struct mlx5dr_rule *rule,
					     struct mlx5dr_rule_attr *attr)
{
	if (unlikely(rule->status != MLX5DR_RULE_STATUS_CREATED)) {
		rte_errno = EINVAL;
		return rte_errno;
	}

	return mlx5dr_rule_enqueue_precheck(rule, attr);
}

static int mlx5dr_rule_enqueue_precheck_create(struct mlx5dr_rule *rule,
					       struct mlx5dr_rule_attr *attr)
{
	if (unlikely(mlx5dr_matcher_is_in_resize(rule->matcher))) {
		/* Matcher in resize - new rules are not allowed */
		rte_errno = EAGAIN;
		return rte_errno;
	}

	return mlx5dr_rule_enqueue_precheck(rule, attr);
}

static int mlx5dr_rule_enqueue_precheck_update(struct mlx5dr_rule *rule,
					       struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_matcher *matcher = rule->matcher;

	if (unlikely((mlx5dr_table_is_root(matcher->tbl) ||
		     mlx5dr_matcher_req_fw_wqe(matcher)))) {
		DR_LOG(ERR, "Rule update is not supported on current matcher");
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	if (unlikely(!matcher->attr.optimize_using_rule_idx &&
		     !mlx5dr_matcher_is_insert_by_idx(matcher))) {
		DR_LOG(ERR, "Rule update requires optimize by idx matcher");
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	if (unlikely(mlx5dr_matcher_is_resizable(rule->matcher))) {
		DR_LOG(ERR, "Rule update is not supported on resizable matcher");
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	if (unlikely(rule->status != MLX5DR_RULE_STATUS_CREATED)) {
		DR_LOG(ERR, "Current rule status does not allow update");
		rte_errno = EBUSY;
		return rte_errno;
	}

	return mlx5dr_rule_enqueue_precheck_create(rule, attr);
}

int mlx5dr_rule_move_hws_remove(struct mlx5dr_rule *rule,
				void *queue_ptr,
				void *user_data)
{
	bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(rule->matcher->mt);
	struct mlx5dr_wqe_gta_ctrl_seg empty_wqe_ctrl = {0};
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_send_engine *queue = queue_ptr;
	struct mlx5dr_send_ste_attr ste_attr = {0};

	/* Send dependent WQEs */
	mlx5dr_send_all_dep_wqe(queue);

	rule->resize_info->state = MLX5DR_RULE_RESIZE_STATE_DELETING;

	ste_attr.send_attr.fence = 0;
	ste_attr.send_attr.opmod = MLX5DR_WQE_GTA_OPMOD_STE;
	ste_attr.send_attr.opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;
	ste_attr.send_attr.len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;
	ste_attr.send_attr.rule = rule;
	ste_attr.send_attr.notify_hw = 1;
	ste_attr.send_attr.user_data = user_data;
	ste_attr.rtc_0 = rule->resize_info->rtc_0;
	ste_attr.rtc_1 = rule->resize_info->rtc_1;
	ste_attr.used_id_rtc_0 = &rule->resize_info->rtc_0;
	ste_attr.used_id_rtc_1 = &rule->resize_info->rtc_1;
	ste_attr.wqe_ctrl = &empty_wqe_ctrl;
	ste_attr.wqe_tag_is_jumbo = is_jumbo;
	ste_attr.gta_opcode = MLX5DR_WQE_GTA_OP_DEACTIVATE;

	if (unlikely(mlx5dr_matcher_is_insert_by_idx(matcher)))
		ste_attr.direct_index = rule->resize_info->rule_idx;

	mlx5dr_rule_load_delete_info(rule, &ste_attr);
	mlx5dr_send_ste(queue, &ste_attr);

	return 0;
}

int mlx5dr_rule_move_hws_add(struct mlx5dr_rule *rule,
			     struct mlx5dr_rule_attr *attr)
{
	bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(rule->matcher->mt);
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_send_ste_attr ste_attr = {0};
	struct mlx5dr_send_engine *queue;

	if (unlikely(mlx5dr_rule_enqueue_precheck_move(rule, attr)))
		return -rte_errno;

	queue = &ctx->send_queue[attr->queue_id];

	if (unlikely(mlx5dr_send_engine_err(queue))) {
		rte_errno = EIO;
		return rte_errno;
	}

	mlx5dr_rule_move_init(rule, attr);

	mlx5dr_rule_move_get_rtc(rule, &ste_attr);

	ste_attr.send_attr.opmod = MLX5DR_WQE_GTA_OPMOD_STE;
	ste_attr.send_attr.opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;
	ste_attr.send_attr.len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;
	ste_attr.gta_opcode = MLX5DR_WQE_GTA_OP_ACTIVATE;
	ste_attr.wqe_tag_is_jumbo = is_jumbo;

	ste_attr.send_attr.rule = rule;
	ste_attr.send_attr.fence = 0;
	ste_attr.send_attr.notify_hw = !attr->burst;
	ste_attr.send_attr.user_data = attr->user_data;

	ste_attr.used_id_rtc_0 = &rule->rtc_0;
	ste_attr.used_id_rtc_1 = &rule->rtc_1;
	ste_attr.wqe_ctrl = (struct mlx5dr_wqe_gta_ctrl_seg *)rule->resize_info->ctrl_seg;
	ste_attr.wqe_data = (struct mlx5dr_wqe_gta_data_seg_ste *)rule->resize_info->data_seg;
	ste_attr.direct_index = mlx5dr_matcher_is_insert_by_idx(matcher) ?
				attr->rule_idx : 0;

	mlx5dr_send_ste(queue, &ste_attr);
	mlx5dr_send_engine_inc_rule(queue);

	/* Send dependent WQEs */
	if (!attr->burst)
		mlx5dr_send_all_dep_wqe(queue);

	return 0;
}
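
/* A matcher resize thus moves each rule in two queued steps:
 * mlx5dr_rule_move_hws_add() re-activates the match STE in the resize_dst
 * matcher from the ctrl/data segments saved in resize_info, and
 * mlx5dr_rule_move_hws_remove() deactivates the STE in the old RTCs kept
 * in resize_info. The state field (WRITING, then DELETING) marks the move
 * as in progress; the completion path is assumed to return it to IDLE.
 */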

int mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
		       uint8_t mt_idx,
		       const struct rte_flow_item items[],
		       uint8_t at_idx,
		       struct mlx5dr_rule_action rule_actions[],
		       struct mlx5dr_rule_attr *attr,
		       struct mlx5dr_rule *rule_handle)
{
	int ret;

	rule_handle->matcher = matcher;

	if (unlikely(mlx5dr_rule_enqueue_precheck_create(rule_handle, attr)))
		return -rte_errno;

	assert(matcher->num_of_mt > mt_idx);
	assert(matcher->num_of_at > at_idx);
	assert(items);

	if (unlikely(mlx5dr_table_is_root(matcher->tbl)))
		ret = mlx5dr_rule_create_root(rule_handle,
					      attr,
					      items,
					      matcher->at[at_idx].num_actions,
					      rule_actions);
	else
		ret = mlx5dr_rule_create_hws(rule_handle,
					     attr,
					     mt_idx,
					     items,
					     at_idx,
					     rule_actions);
	return -ret;
}
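
/* Minimal usage sketch (illustrative only; matcher/queue setup and error
 * handling omitted, local names are assumptions). user_data must be
 * non-NULL (see the precheck) and is echoed back in the completion:
 *
 *	struct mlx5dr_rule *rule = malloc(mlx5dr_rule_get_handle_size());
 *	struct mlx5dr_rule_attr attr = {
 *		.queue_id = 0,
 *		.user_data = rule,
 *		.burst = 0,
 *	};
 *
 *	if (mlx5dr_rule_create(matcher, 0, items, 0, rule_actions,
 *			       &attr, rule))
 *		return -rte_errno;
 *
 * The rule is usable only after a successful completion is polled from the
 * queue, e.g. with mlx5dr_send_queue_poll().
 */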

int mlx5dr_rule_destroy(struct mlx5dr_rule *rule,
			struct mlx5dr_rule_attr *attr)
{
	int ret;

	if (unlikely(mlx5dr_rule_enqueue_precheck(rule, attr)))
		return -rte_errno;

	if (unlikely(mlx5dr_table_is_root(rule->matcher->tbl)))
		ret = mlx5dr_rule_destroy_root(rule, attr);
	else
		ret = mlx5dr_rule_destroy_hws(rule, attr);

	return -ret;
}

int mlx5dr_rule_action_update(struct mlx5dr_rule *rule_handle,
			      uint8_t at_idx,
			      struct mlx5dr_rule_action rule_actions[],
			      struct mlx5dr_rule_attr *attr)
{
	int ret;

	if (unlikely(mlx5dr_rule_enqueue_precheck_update(rule_handle, attr)))
		return -rte_errno;

	ret = mlx5dr_rule_create_hws(rule_handle,
				     attr,
				     0,
				     NULL,
				     at_idx,
				     rule_actions);

	return -ret;
}

size_t mlx5dr_rule_get_handle_size(void)
{
	return sizeof(struct mlx5dr_rule);
}

int mlx5dr_rule_hash_calculate(struct mlx5dr_matcher *matcher,
			       const struct rte_flow_item items[],
			       uint8_t mt_idx,
			       enum mlx5dr_rule_hash_calc_mode mode,
			       uint32_t *ret_hash)
{
	uint8_t tag[MLX5DR_WQE_SZ_GTA_DATA] = {0};
	struct mlx5dr_match_template *mt;

	if (!matcher || !matcher->mt) {
		rte_errno = EINVAL;
		return -rte_errno;
	}

	mt = &matcher->mt[mt_idx];

	if (mlx5dr_matcher_req_fw_wqe(matcher) ||
	    mlx5dr_table_is_root(matcher->tbl) ||
	    matcher->tbl->ctx->caps->access_index_mode == MLX5DR_MATCHER_INSERT_BY_HASH ||
	    matcher->tbl->ctx->caps->flow_table_hash_type != MLX5_FLOW_TABLE_HASH_TYPE_CRC32) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}

	mlx5dr_definer_create_tag(items, mt->fc, mt->fc_sz, tag);
	if (mlx5dr_matcher_mt_is_jumbo(mt))
		*ret_hash = mlx5dr_crc32_calc(tag, MLX5DR_JUMBO_TAG_SZ);
	else
		*ret_hash = mlx5dr_crc32_calc(tag + MLX5DR_ACTIONS_SZ,
					      MLX5DR_MATCH_TAG_SZ);

	if (mode == MLX5DR_RULE_HASH_CALC_MODE_IDX)
		*ret_hash = *ret_hash & (BIT(matcher->attr.rule.num_log) - 1);

	return 0;
}
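
/* Example of the IDX-mode masking above (assumed matcher size): with
 * rule.num_log == 10 the matcher holds 1 << 10 == 1024 entries, so the
 * CRC32 is masked with 0x3FF and the result can serve directly as a
 * rule_idx for insert-by-index matchers.
 */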