xref: /dpdk/drivers/net/mlx5/hws/mlx5dr_rule.c (revision 486f9aac0cbe2598a76c853890c1d557747f71cf)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include "mlx5dr_internal.h"

static void mlx5dr_rule_skip(struct mlx5dr_matcher *matcher,
			     struct mlx5dr_match_template *mt,
			     const struct rte_flow_item *items,
			     bool *skip_rx, bool *skip_tx)
{
	const struct flow_hw_port_info *vport;
	const struct rte_flow_item_ethdev *v;

	/* Flow_src is the 1st priority */
	if (matcher->attr.optimize_flow_src) {
		*skip_tx = matcher->attr.optimize_flow_src == MLX5DR_MATCHER_FLOW_SRC_WIRE;
		*skip_rx = matcher->attr.optimize_flow_src == MLX5DR_MATCHER_FLOW_SRC_VPORT;
		return;
	}

	/* By default FDB rules are added to both RX and TX */
	*skip_rx = false;
	*skip_tx = false;

	if (unlikely(mlx5dr_matcher_is_insert_by_idx(matcher)))
		return;

	if (mt->item_flags & MLX5_FLOW_ITEM_REPRESENTED_PORT) {
		v = items[mt->vport_item_id].spec;
		vport = flow_hw_conv_port_id(matcher->tbl->ctx, v->port_id);
		if (unlikely(!vport)) {
			DR_LOG(ERR, "Failed to map port ID %d, ignoring", v->port_id);
			return;
		}

		if (!vport->is_wire)
			/* Match vport ID is not WIRE -> Skip RX */
			*skip_rx = true;
		else
			/* Match vport ID is WIRE -> Skip TX */
			*skip_tx = true;
	}
}

static void
mlx5dr_rule_update_copy_tag(struct mlx5dr_rule *rule,
			    struct mlx5dr_wqe_gta_data_seg_ste *wqe_data,
			    bool is_jumbo)
{
	if (is_jumbo)
		memcpy(wqe_data->jumbo, rule->tag.jumbo, MLX5DR_JUMBO_TAG_SZ);
	else
		memcpy(wqe_data->tag, rule->tag.match, MLX5DR_MATCH_TAG_SZ);
}

static void mlx5dr_rule_init_dep_wqe(struct mlx5dr_send_ring_dep_wqe *dep_wqe,
				     struct mlx5dr_rule *rule,
				     const struct rte_flow_item *items,
				     struct mlx5dr_match_template *mt,
				     struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_table *tbl = matcher->tbl;
	bool skip_rx, skip_tx;

	dep_wqe->rule = rule;
	dep_wqe->user_data = attr->user_data;
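	/* Insert-by-index matchers write the rule at a caller-supplied
	 * offset; hash-based matchers let HW derive the index from the tag.
	 */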
	dep_wqe->direct_index = mlx5dr_matcher_is_insert_by_idx(matcher) ?
		attr->rule_idx : 0;

	if (!items) { /* rule update */
		dep_wqe->rtc_0 = rule->rtc_0;
		dep_wqe->rtc_1 = rule->rtc_1;
		dep_wqe->retry_rtc_1 = 0;
		dep_wqe->retry_rtc_0 = 0;
		return;
	}

	switch (tbl->type) {
	case MLX5DR_TABLE_TYPE_NIC_RX:
	case MLX5DR_TABLE_TYPE_NIC_TX:
		dep_wqe->rtc_0 = matcher->match_ste.rtc_0->id;
		dep_wqe->retry_rtc_0 = matcher->col_matcher ?
				       matcher->col_matcher->match_ste.rtc_0->id : 0;
		dep_wqe->rtc_1 = 0;
		dep_wqe->retry_rtc_1 = 0;
		break;

	case MLX5DR_TABLE_TYPE_FDB:
		mlx5dr_rule_skip(matcher, mt, items, &skip_rx, &skip_tx);

		if (!skip_rx) {
			dep_wqe->rtc_0 = matcher->match_ste.rtc_0->id;
			dep_wqe->retry_rtc_0 = matcher->col_matcher ?
					       matcher->col_matcher->match_ste.rtc_0->id : 0;
		} else {
			dep_wqe->rtc_0 = 0;
			dep_wqe->retry_rtc_0 = 0;
		}

		if (!skip_tx) {
			dep_wqe->rtc_1 = matcher->match_ste.rtc_1->id;
			dep_wqe->retry_rtc_1 = matcher->col_matcher ?
					       matcher->col_matcher->match_ste.rtc_1->id : 0;
		} else {
			dep_wqe->rtc_1 = 0;
			dep_wqe->retry_rtc_1 = 0;
		}

		break;

	default:
		assert(false);
		break;
	}
}

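/* Redirect the STE attributes to the RTCs of the resize destination
 * matcher, preserving the RX/TX split saved in resize_info.
 */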
static void mlx5dr_rule_move_get_rtc(struct mlx5dr_rule *rule,
				     struct mlx5dr_send_ste_attr *ste_attr)
{
	struct mlx5dr_matcher *dst_matcher = rule->matcher->resize_dst;

	if (rule->resize_info->rtc_0) {
		ste_attr->rtc_0 = dst_matcher->match_ste.rtc_0->id;
		ste_attr->retry_rtc_0 = dst_matcher->col_matcher ?
					dst_matcher->col_matcher->match_ste.rtc_0->id : 0;
	}
	if (rule->resize_info->rtc_1) {
		ste_attr->rtc_1 = dst_matcher->match_ste.rtc_1->id;
		ste_attr->retry_rtc_1 = dst_matcher->col_matcher ?
					dst_matcher->col_matcher->match_ste.rtc_1->id : 0;
	}
}

static void mlx5dr_rule_gen_comp(struct mlx5dr_send_engine *queue,
				 struct mlx5dr_rule *rule,
				 bool err,
				 void *user_data,
				 enum mlx5dr_rule_status rule_status_on_succ)
{
	enum rte_flow_op_status comp_status;

	if (!err) {
		comp_status = RTE_FLOW_OP_SUCCESS;
		rule->status = rule_status_on_succ;
	} else {
		comp_status = RTE_FLOW_OP_ERROR;
		rule->status = MLX5DR_RULE_STATUS_FAILED;
	}

	mlx5dr_send_engine_inc_rule(queue);
	mlx5dr_send_engine_gen_comp(queue, user_data, comp_status);
}

static void
mlx5dr_rule_save_resize_info(struct mlx5dr_rule *rule,
			     struct mlx5dr_send_ste_attr *ste_attr)
{
	if (likely(!mlx5dr_matcher_is_resizable(rule->matcher)))
		return;

	rule->resize_info = simple_calloc(1, sizeof(*rule->resize_info));
	if (unlikely(!rule->resize_info)) {
		assert(rule->resize_info);
		rte_errno = ENOMEM;
		return;
	}

	memcpy(rule->resize_info->ctrl_seg, ste_attr->wqe_ctrl,
	       sizeof(rule->resize_info->ctrl_seg));
	memcpy(rule->resize_info->data_seg, ste_attr->wqe_data,
	       sizeof(rule->resize_info->data_seg));

	rule->resize_info->max_stes = rule->matcher->action_ste.max_stes;
	rule->resize_info->action_ste_pool = rule->matcher->action_ste.max_stes ?
					     rule->matcher->action_ste.pool :
					     NULL;
}

void mlx5dr_rule_clear_resize_info(struct mlx5dr_rule *rule)
{
	if (unlikely(mlx5dr_matcher_is_resizable(rule->matcher) &&
		     rule->resize_info)) {
		simple_free(rule->resize_info);
		rule->resize_info = NULL;
	}
}

static void
mlx5dr_rule_save_delete_info(struct mlx5dr_rule *rule,
			     struct mlx5dr_send_ste_attr *ste_attr)
{
	struct mlx5dr_match_template *mt = rule->matcher->mt;
	bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(mt);

	if (unlikely(mlx5dr_matcher_req_fw_wqe(rule->matcher))) {
		uint8_t *src_tag;

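		/* tag_ptr layout: [0] holds the match tag, [1].match holds
		 * the optional range tag, and [1].reserved stores the match
		 * and range definer IDs.
		 */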
		/* Save match definer id and tag for delete */
		rule->tag_ptr = simple_calloc(2, sizeof(*rule->tag_ptr));
		assert(rule->tag_ptr);
		if (unlikely(!rule->tag_ptr)) {
			rte_errno = ENOMEM;
			return;
		}

		if (is_jumbo)
			memcpy(rule->tag_ptr[0].jumbo, ste_attr->wqe_data->action,
			       MLX5DR_JUMBO_TAG_SZ);
		else
			memcpy(rule->tag_ptr[0].match, ste_attr->wqe_data->tag,
			       MLX5DR_MATCH_TAG_SZ);

		rule->tag_ptr[1].reserved[0] = ste_attr->send_attr.match_definer_id;

		/* Save range definer id and tag for delete */
		if (ste_attr->range_wqe_data) {
			src_tag = (uint8_t *)ste_attr->range_wqe_data->tag;
			memcpy(rule->tag_ptr[1].match, src_tag, MLX5DR_MATCH_TAG_SZ);
			rule->tag_ptr[1].reserved[1] = ste_attr->send_attr.range_definer_id;
		}
		return;
	}

	if (likely(!mlx5dr_matcher_is_resizable(rule->matcher))) {
		if (is_jumbo)
			memcpy(&rule->tag.jumbo, ste_attr->wqe_data->action, MLX5DR_JUMBO_TAG_SZ);
		else
			memcpy(&rule->tag.match, ste_attr->wqe_data->tag, MLX5DR_MATCH_TAG_SZ);
		return;
	}
}

static void
mlx5dr_rule_clear_delete_info(struct mlx5dr_rule *rule)
{
	if (unlikely(mlx5dr_matcher_req_fw_wqe(rule->matcher))) {
		simple_free(rule->tag_ptr);
		return;
	}
}

static void
mlx5dr_rule_load_delete_info(struct mlx5dr_rule *rule,
			     struct mlx5dr_send_ste_attr *ste_attr)
{
	if (unlikely(mlx5dr_matcher_req_fw_wqe(rule->matcher))) {
		/* Load match definer id and tag for delete */
		ste_attr->wqe_tag = &rule->tag_ptr[0];
		ste_attr->send_attr.match_definer_id = rule->tag_ptr[1].reserved[0];

		/* Load range definer id and tag for delete */
		if (rule->matcher->flags & MLX5DR_MATCHER_FLAGS_RANGE_DEFINER) {
			ste_attr->range_wqe_tag = &rule->tag_ptr[1];
			ste_attr->send_attr.range_definer_id = rule->tag_ptr[1].reserved[1];
		}
	} else if (likely(!mlx5dr_matcher_is_resizable(rule->matcher))) {
		ste_attr->wqe_tag = &rule->tag;
	} else {
		ste_attr->wqe_tag = (struct mlx5dr_rule_match_tag *)
			&rule->resize_info->data_seg[MLX5DR_STE_CTRL_SZ];
	}
}

static int mlx5dr_rule_alloc_action_ste(struct mlx5dr_rule *rule,
					struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	int ret;

	/* Use rule_idx for locking optimization, otherwise allocate from pool */
	if (matcher->attr.optimize_using_rule_idx ||
	    mlx5dr_matcher_is_insert_by_idx(matcher)) {
		rule->action_ste_idx = attr->rule_idx * matcher->action_ste.max_stes;
	} else {
		struct mlx5dr_pool_chunk ste = {0};

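		/* Pool chunks are powers of two; order is the log2 size of a
		 * chunk large enough for all action STEs of a single rule.
		 */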
		ste.order = rte_log2_u32(matcher->action_ste.max_stes);
		ret = mlx5dr_pool_chunk_alloc(matcher->action_ste.pool, &ste);
		if (ret) {
			DR_LOG(ERR, "Failed to allocate STE for rule actions");
			return ret;
		}
		rule->action_ste_idx = ste.offset;
	}
	return 0;
}

void mlx5dr_rule_free_action_ste_idx(struct mlx5dr_rule *rule)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_pool *pool;
	uint8_t max_stes;

	if (rule->action_ste_idx > -1 &&
	    !matcher->attr.optimize_using_rule_idx &&
	    !mlx5dr_matcher_is_insert_by_idx(matcher)) {
		struct mlx5dr_pool_chunk ste = {0};

		if (unlikely(mlx5dr_matcher_is_resizable(matcher))) {
			/* Free the original action pool if rule was resized */
			max_stes = rule->resize_info->max_stes;
			pool = rule->resize_info->action_ste_pool;
		} else {
			max_stes = matcher->action_ste.max_stes;
			pool = matcher->action_ste.pool;
		}

		/* This release is safe only when the rule match part was deleted */
		ste.order = rte_log2_u32(max_stes);
		ste.offset = rule->action_ste_idx;

		mlx5dr_pool_chunk_free(pool, &ste);
	}
}

static void mlx5dr_rule_create_init(struct mlx5dr_rule *rule,
				    struct mlx5dr_send_ste_attr *ste_attr,
				    struct mlx5dr_actions_apply_data *apply,
				    bool is_update)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_table *tbl = matcher->tbl;
	struct mlx5dr_context *ctx = tbl->ctx;

	/* Init rule before reuse */
	if (!is_update) {
		/* On update the existing RTC IDs are reused, so don't clear them */
		rule->rtc_0 = 0;
		rule->rtc_1 = 0;
	}

	rule->pending_wqes = 0;
	rule->action_ste_idx = -1;
	rule->status = MLX5DR_RULE_STATUS_CREATING;

	/* Init default send STE attributes */
	ste_attr->gta_opcode = MLX5DR_WQE_GTA_OP_ACTIVATE;
	ste_attr->send_attr.opmod = MLX5DR_WQE_GTA_OPMOD_STE;
	ste_attr->send_attr.opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;
	ste_attr->send_attr.len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;

	/* Init default action apply */
	apply->tbl_type = tbl->type;
	apply->common_res = &ctx->common_res[tbl->type];
	apply->jump_to_action_stc = matcher->action_ste.stc.offset;
	apply->require_dep = 0;
}

static void mlx5dr_rule_move_init(struct mlx5dr_rule *rule,
				  struct mlx5dr_rule_attr *attr)
{
	/* Save the old RTC IDs to be later used in match STE delete */
	rule->resize_info->rtc_0 = rule->rtc_0;
	rule->resize_info->rtc_1 = rule->rtc_1;
	rule->resize_info->rule_idx = attr->rule_idx;

	rule->rtc_0 = 0;
	rule->rtc_1 = 0;

	rule->pending_wqes = 0;
	rule->action_ste_idx = -1;
	rule->status = MLX5DR_RULE_STATUS_CREATING;
	rule->resize_info->state = MLX5DR_RULE_RESIZE_STATE_WRITING;
}

bool mlx5dr_rule_move_in_progress(struct mlx5dr_rule *rule)
{
	return mlx5dr_matcher_is_in_resize(rule->matcher) &&
	       rule->resize_info &&
	       rule->resize_info->state != MLX5DR_RULE_RESIZE_STATE_IDLE;
}

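/* Rule insertion through FW commands, used when the matcher cannot be
 * programmed with plain HW GTA WQEs, e.g. when matching on ranges.
 */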
static int mlx5dr_rule_create_hws_fw_wqe(struct mlx5dr_rule *rule,
					 struct mlx5dr_rule_attr *attr,
					 uint8_t mt_idx,
					 const struct rte_flow_item items[],
					 uint8_t at_idx,
					 struct mlx5dr_rule_action rule_actions[])
{
	struct mlx5dr_action_template *at = &rule->matcher->at[at_idx];
	struct mlx5dr_match_template *mt = &rule->matcher->mt[mt_idx];
	struct mlx5dr_send_ring_dep_wqe range_wqe = {{0}};
	struct mlx5dr_send_ring_dep_wqe match_wqe = {{0}};
	bool is_range = mlx5dr_matcher_mt_is_range(mt);
	bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(mt);
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_context *ctx = matcher->tbl->ctx;
	struct mlx5dr_send_ste_attr ste_attr = {0};
	struct mlx5dr_actions_apply_data apply;
	struct mlx5dr_send_engine *queue;

	queue = &ctx->send_queue[attr->queue_id];
	if (unlikely(mlx5dr_send_engine_err(queue))) {
		rte_errno = EIO;
		return rte_errno;
	}

	mlx5dr_rule_create_init(rule, &ste_attr, &apply, false);
	mlx5dr_rule_init_dep_wqe(&match_wqe, rule, items, mt, attr);
	mlx5dr_rule_init_dep_wqe(&range_wqe, rule, items, mt, attr);

	ste_attr.direct_index = 0;
	ste_attr.rtc_0 = match_wqe.rtc_0;
	ste_attr.rtc_1 = match_wqe.rtc_1;
	ste_attr.used_id_rtc_0 = &rule->rtc_0;
	ste_attr.used_id_rtc_1 = &rule->rtc_1;
	ste_attr.retry_rtc_0 = match_wqe.retry_rtc_0;
	ste_attr.retry_rtc_1 = match_wqe.retry_rtc_1;
	ste_attr.send_attr.rule = match_wqe.rule;
	ste_attr.send_attr.user_data = match_wqe.user_data;

	ste_attr.send_attr.fence = 1;
	ste_attr.send_attr.notify_hw = 1;
	ste_attr.wqe_tag_is_jumbo = is_jumbo;

	/* Prepare match STE TAG */
	ste_attr.wqe_ctrl = &match_wqe.wqe_ctrl;
	ste_attr.wqe_data = &match_wqe.wqe_data;
	ste_attr.send_attr.match_definer_id = mlx5dr_definer_get_id(mt->definer);

	mlx5dr_definer_create_tag(items,
				  mt->fc,
				  mt->fc_sz,
				  (uint8_t *)match_wqe.wqe_data.action);

	/* Prepare range STE TAG */
	if (is_range) {
		ste_attr.range_wqe_data = &range_wqe.wqe_data;
		ste_attr.send_attr.len += MLX5DR_WQE_SZ_GTA_DATA;
		ste_attr.send_attr.range_definer_id = mlx5dr_definer_get_id(mt->range_definer);

		mlx5dr_definer_create_tag_range(items,
						mt->fcr,
						mt->fcr_sz,
						(uint8_t *)range_wqe.wqe_data.action);
	}

	/* Apply the actions on the last STE */
	apply.queue = queue;
	apply.next_direct_idx = 0;
	apply.rule_action = rule_actions;
	apply.wqe_ctrl = &match_wqe.wqe_ctrl;
	apply.wqe_data = (uint32_t *)(is_range ?
				      &range_wqe.wqe_data :
				      &match_wqe.wqe_data);

	/* Skip setters[0], used for the jumbo STE, since it is not supported with FW WQE */
	mlx5dr_action_apply_setter(&apply, &at->setters[1], 0);

	/* Send WQEs to FW */
	mlx5dr_send_stes_fw(queue, &ste_attr);

	/* Backup TAG on the rule for deletion */
	mlx5dr_rule_save_delete_info(rule, &ste_attr);
	mlx5dr_send_engine_inc_rule(queue);

	/* Send dependent WQEs */
	if (!attr->burst)
		mlx5dr_send_all_dep_wqe(queue);

	return 0;
}

static int mlx5dr_rule_create_hws(struct mlx5dr_rule *rule,
				  struct mlx5dr_rule_attr *attr,
				  uint8_t mt_idx,
				  const struct rte_flow_item items[],
				  uint8_t at_idx,
				  struct mlx5dr_rule_action rule_actions[])
{
	struct mlx5dr_action_template *at = &rule->matcher->at[at_idx];
	struct mlx5dr_match_template *mt = &rule->matcher->mt[mt_idx];
	bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(mt);
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_context *ctx = matcher->tbl->ctx;
	struct mlx5dr_send_ste_attr ste_attr = {0};
	struct mlx5dr_send_ring_dep_wqe *dep_wqe;
	struct mlx5dr_actions_wqe_setter *setter;
	struct mlx5dr_actions_apply_data apply;
	struct mlx5dr_send_engine *queue;
	uint8_t total_stes, action_stes;
	bool is_update;
	int i, ret;

	is_update = (items == NULL);

	/* Insert rule using FW WQE if GTA WQE cannot be used */
	if (unlikely(mlx5dr_matcher_req_fw_wqe(matcher) && !is_update))
		return mlx5dr_rule_create_hws_fw_wqe(rule, attr, mt_idx, items,
						     at_idx, rule_actions);

	queue = &ctx->send_queue[attr->queue_id];
	if (unlikely(mlx5dr_send_engine_err(queue))) {
		rte_errno = EIO;
		return rte_errno;
	}

	mlx5dr_rule_create_init(rule, &ste_attr, &apply, is_update);

	/* Allocate dependent match WQE since rule might have dependent writes.
	 * The queued dependent WQE can be later aborted or kept as a dependency.
	 * dep_wqe buffers (ctrl, data) are also reused for all STE writes.
	 */
	dep_wqe = mlx5dr_send_add_new_dep_wqe(queue);
	mlx5dr_rule_init_dep_wqe(dep_wqe, rule, items, mt, attr);

	ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
	ste_attr.wqe_data = &dep_wqe->wqe_data;
	apply.wqe_ctrl = &dep_wqe->wqe_ctrl;
	apply.wqe_data = (uint32_t *)&dep_wqe->wqe_data;
	apply.rule_action = rule_actions;
	apply.queue = queue;

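	/* The last setter writes the match STE. A jumbo tag consumes the
	 * whole match STE, so unless the template is termination-only an
	 * extra STE is needed to carry its actions.
	 */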
	setter = &at->setters[at->num_of_action_stes];
	total_stes = at->num_of_action_stes + (is_jumbo && !at->only_term);
	action_stes = total_stes - 1;

	if (action_stes) {
		/* Allocate action STEs for complex rules */
		ret = mlx5dr_rule_alloc_action_ste(rule, attr);
		if (ret) {
			DR_LOG(ERR, "Failed to allocate action memory %d", ret);
			mlx5dr_send_abort_new_dep_wqe(queue);
			return ret;
		}
		/* Skip RX/TX based on the dep_wqe init */
		ste_attr.rtc_0 = dep_wqe->rtc_0 ? matcher->action_ste.rtc_0->id : 0;
		ste_attr.rtc_1 = dep_wqe->rtc_1 ? matcher->action_ste.rtc_1->id : 0;
		/* Action STEs are written to a specific index last to first */
		ste_attr.direct_index = rule->action_ste_idx + action_stes;
		apply.next_direct_idx = ste_attr.direct_index;
	} else {
		apply.next_direct_idx = 0;
	}

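	/* Write STEs from last to first: action STEs go to the action RTC
	 * at decreasing direct indices, and the final iteration (i == 0)
	 * writes the match STE itself.
	 */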
	for (i = total_stes; i-- > 0;) {
		mlx5dr_action_apply_setter(&apply, setter--, !i && is_jumbo);

		if (i == 0) {
			/* Handle last match STE.
			 * For hash split / linear lookup RTCs, packets reaching any STE
			 * will always match and perform the specified actions, which
			 * makes the tag irrelevant.
			 */
			if (likely(!mlx5dr_matcher_is_always_hit(matcher) && !is_update))
				mlx5dr_definer_create_tag(items, mt->fc, mt->fc_sz,
							  (uint8_t *)dep_wqe->wqe_data.action);
			else if (unlikely(is_update))
				mlx5dr_rule_update_copy_tag(rule, &dep_wqe->wqe_data, is_jumbo);

			/* Rule has dependent WQEs, match dep_wqe is queued */
			if (action_stes || apply.require_dep)
				break;

			/* Rule has no dependencies, abort dep_wqe and send WQE now */
			mlx5dr_send_abort_new_dep_wqe(queue);
			ste_attr.wqe_tag_is_jumbo = is_jumbo;
			ste_attr.send_attr.notify_hw = !attr->burst;
			ste_attr.send_attr.user_data = dep_wqe->user_data;
			ste_attr.send_attr.rule = dep_wqe->rule;
			ste_attr.rtc_0 = dep_wqe->rtc_0;
			ste_attr.rtc_1 = dep_wqe->rtc_1;
			ste_attr.used_id_rtc_0 = &rule->rtc_0;
			ste_attr.used_id_rtc_1 = &rule->rtc_1;
			ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
			ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
			ste_attr.direct_index = dep_wqe->direct_index;
		} else {
			apply.next_direct_idx = --ste_attr.direct_index;
		}

		mlx5dr_send_ste(queue, &ste_attr);
	}

	/* Backup TAG on the rule for deletion and resize info for
	 * moving rules to a new matcher, only after insertion.
	 */
	if (!is_update) {
		mlx5dr_rule_save_delete_info(rule, &ste_attr);
		mlx5dr_rule_save_resize_info(rule, &ste_attr);
	}

	mlx5dr_send_engine_inc_rule(queue);

	/* Send dependent WQEs */
	if (!attr->burst)
		mlx5dr_send_all_dep_wqe(queue);

	return 0;
}

static void mlx5dr_rule_destroy_failed_hws(struct mlx5dr_rule *rule,
					   struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	struct mlx5dr_send_engine *queue;

	queue = &ctx->send_queue[attr->queue_id];

	mlx5dr_rule_gen_comp(queue, rule, false,
			     attr->user_data, MLX5DR_RULE_STATUS_DELETED);

	/* Rule failed, now we can safely release action STEs */
	mlx5dr_rule_free_action_ste_idx(rule);

	/* Clear complex tag */
	mlx5dr_rule_clear_delete_info(rule);

	/* Clear info that was saved for resizing */
	mlx5dr_rule_clear_resize_info(rule);

	/* When the failed rule was not part of a burst, HW still expects to
	 * be notified, but nothing was written to the WQ so there is no new
	 * work to ring. In that case send the pending dependent WQEs and
	 * flush the queue to notify HW.
	 */
	if (attr->burst)
		return;

	mlx5dr_send_all_dep_wqe(queue);
	mlx5dr_send_engine_flush_queue(queue);
}

static int mlx5dr_rule_destroy_hws(struct mlx5dr_rule *rule,
				   struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	struct mlx5dr_matcher *matcher = rule->matcher;
	bool fw_wqe = mlx5dr_matcher_req_fw_wqe(matcher);
	bool is_range = mlx5dr_matcher_mt_is_range(matcher->mt);
	bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(matcher->mt);
	struct mlx5dr_wqe_gta_ctrl_seg wqe_ctrl = {0};
	struct mlx5dr_send_ste_attr ste_attr = {0};
	struct mlx5dr_send_engine *queue;

	queue = &ctx->send_queue[attr->queue_id];

	if (unlikely(mlx5dr_send_engine_err(queue))) {
		mlx5dr_rule_destroy_failed_hws(rule, attr);
		return 0;
	}

	/* Rule is not completed yet */
	if (rule->status == MLX5DR_RULE_STATUS_CREATING) {
		DR_LOG(NOTICE, "Cannot destroy, rule creation still in progress");
		rte_errno = EBUSY;
		return rte_errno;
	}

	/* Rule failed and doesn't require cleanup */
	if (rule->status == MLX5DR_RULE_STATUS_FAILED) {
		mlx5dr_rule_destroy_failed_hws(rule, attr);
		return 0;
	}

	mlx5dr_send_engine_inc_rule(queue);

	/* Send dependent WQE */
	if (!attr->burst)
		mlx5dr_send_all_dep_wqe(queue);

	rule->status = MLX5DR_RULE_STATUS_DELETING;

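	/* Deletion reuses the insertion path: the saved tag is sent with the
	 * DEACTIVATE GTA opcode so HW removes the matching STE.
	 */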
	ste_attr.send_attr.opmod = MLX5DR_WQE_GTA_OPMOD_STE;
	ste_attr.send_attr.opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;
	ste_attr.send_attr.len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;
	if (unlikely(is_range))
		ste_attr.send_attr.len += MLX5DR_WQE_SZ_GTA_DATA;

	ste_attr.send_attr.rule = rule;
	ste_attr.send_attr.notify_hw = !attr->burst;
	ste_attr.send_attr.user_data = attr->user_data;

	ste_attr.rtc_0 = rule->rtc_0;
	ste_attr.rtc_1 = rule->rtc_1;
	ste_attr.used_id_rtc_0 = &rule->rtc_0;
	ste_attr.used_id_rtc_1 = &rule->rtc_1;
	ste_attr.wqe_ctrl = &wqe_ctrl;
	ste_attr.wqe_tag_is_jumbo = is_jumbo;
	ste_attr.gta_opcode = MLX5DR_WQE_GTA_OP_DEACTIVATE;
	if (unlikely(mlx5dr_matcher_is_insert_by_idx(matcher)))
		ste_attr.direct_index = attr->rule_idx;

	mlx5dr_rule_load_delete_info(rule, &ste_attr);

	if (unlikely(fw_wqe))
		mlx5dr_send_stes_fw(queue, &ste_attr);
	else
		mlx5dr_send_ste(queue, &ste_attr);

	mlx5dr_rule_clear_delete_info(rule);

	return 0;
}

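/* Rules on a root table bypass HWS and are created through the legacy
 * Verbs/DV API after translating items and actions to their verbs form.
 */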
int mlx5dr_rule_create_root_no_comp(struct mlx5dr_rule *rule,
				    const struct rte_flow_item items[],
				    uint8_t num_actions,
				    struct mlx5dr_rule_action rule_actions[])
{
	struct mlx5dv_flow_matcher *dv_matcher = rule->matcher->dv_matcher;
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	struct mlx5dv_flow_match_parameters *value;
	struct mlx5_flow_attr flow_attr = {0};
	struct mlx5dv_flow_action_attr *attr;
	struct rte_flow_error error;
	uint8_t match_criteria;
	int ret;

	ret = flow_hw_get_port_id_from_ctx(ctx, &flow_attr.port_id);
	if (ret) {
		DR_LOG(ERR, "Failed to get port id for dev %s", ctx->ibv_ctx->device->name);
		rte_errno = EINVAL;
		return rte_errno;
	}

	attr = simple_calloc(num_actions, sizeof(*attr));
	if (!attr) {
		rte_errno = ENOMEM;
		return rte_errno;
	}

	value = simple_calloc(1, MLX5_ST_SZ_BYTES(fte_match_param) +
			      offsetof(struct mlx5dv_flow_match_parameters, match_buf));
	if (!value) {
		rte_errno = ENOMEM;
		goto free_attr;
	}

	flow_attr.tbl_type = rule->matcher->tbl->type;

	ret = flow_dv_translate_items_hws(items, &flow_attr, value->match_buf,
					  MLX5_SET_MATCHER_HS_V, NULL,
					  &match_criteria,
					  &error);
	if (ret) {
		DR_LOG(ERR, "Failed to convert items to PRM [%s]", error.message);
		goto free_value;
	}

	/* Convert actions to verbs action attr */
	ret = mlx5dr_action_root_build_attr(rule_actions, num_actions, attr);
	if (ret)
		goto free_value;

	/* Create verbs flow */
	value->match_sz = MLX5_ST_SZ_BYTES(fte_match_param);
	rule->flow = mlx5_glue->dv_create_flow_root(dv_matcher,
						    value,
						    num_actions,
						    attr);

	simple_free(value);
	simple_free(attr);

	return 0;

free_value:
	simple_free(value);
free_attr:
	simple_free(attr);

	return rte_errno;
}

static int mlx5dr_rule_create_root(struct mlx5dr_rule *rule,
				   struct mlx5dr_rule_attr *rule_attr,
				   const struct rte_flow_item items[],
				   uint8_t num_actions,
				   struct mlx5dr_rule_action rule_actions[])
{
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	int ret;

	ret = mlx5dr_rule_create_root_no_comp(rule, items,
					      num_actions, rule_actions);
	if (ret)
		return rte_errno;

	mlx5dr_rule_gen_comp(&ctx->send_queue[rule_attr->queue_id], rule, !rule->flow,
			     rule_attr->user_data, MLX5DR_RULE_STATUS_CREATED);

	return 0;
}

int mlx5dr_rule_destroy_root_no_comp(struct mlx5dr_rule *rule)
{
	if (rule->flow)
		return ibv_destroy_flow(rule->flow);

	return 0;
}

static int mlx5dr_rule_destroy_root(struct mlx5dr_rule *rule,
				    struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	int err;

	err = mlx5dr_rule_destroy_root_no_comp(rule);

	mlx5dr_rule_gen_comp(&ctx->send_queue[attr->queue_id], rule, err,
			     attr->user_data, MLX5DR_RULE_STATUS_DELETED);

	return 0;
}

static int mlx5dr_rule_enqueue_precheck(struct mlx5dr_rule *rule,
					struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;

	if (unlikely(!attr->user_data)) {
		DR_LOG(DEBUG, "User data must be provided for rule operations");
		rte_errno = EINVAL;
		return rte_errno;
	}

	/* Check if there is room in queue */
	if (unlikely(mlx5dr_send_engine_full(&ctx->send_queue[attr->queue_id]))) {
		DR_LOG(NOTICE, "No room in queue[%d]", attr->queue_id);
		rte_errno = EBUSY;
		return rte_errno;
	}

	return 0;
}

static int mlx5dr_rule_enqueue_precheck_move(struct mlx5dr_rule *rule,
					     struct mlx5dr_rule_attr *attr)
{
	if (unlikely(rule->status != MLX5DR_RULE_STATUS_CREATED)) {
		DR_LOG(DEBUG, "Cannot move, rule status is invalid");
		rte_errno = EINVAL;
		return rte_errno;
	}

	return mlx5dr_rule_enqueue_precheck(rule, attr);
}

static int mlx5dr_rule_enqueue_precheck_create(struct mlx5dr_rule *rule,
					       struct mlx5dr_rule_attr *attr)
{
	if (unlikely(mlx5dr_matcher_is_in_resize(rule->matcher))) {
		/* Matcher in resize - new rules are not allowed */
		DR_LOG(NOTICE, "Resizing in progress, cannot create rule");
		rte_errno = EAGAIN;
		return rte_errno;
	}

	return mlx5dr_rule_enqueue_precheck(rule, attr);
}

static int mlx5dr_rule_enqueue_precheck_update(struct mlx5dr_rule *rule,
					       struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_matcher *matcher = rule->matcher;

	if (unlikely((mlx5dr_table_is_root(matcher->tbl) ||
		     mlx5dr_matcher_req_fw_wqe(matcher)))) {
		DR_LOG(ERR, "Rule update is not supported on current matcher");
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	if (unlikely(!matcher->attr.optimize_using_rule_idx &&
		     !mlx5dr_matcher_is_insert_by_idx(matcher))) {
		DR_LOG(ERR, "Rule update requires a rule-index optimized or insert-by-index matcher");
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	if (unlikely(mlx5dr_matcher_is_resizable(rule->matcher))) {
		DR_LOG(ERR, "Rule update is not supported on resizable matcher");
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	if (unlikely(rule->status != MLX5DR_RULE_STATUS_CREATED)) {
		DR_LOG(ERR, "Current rule status does not allow update");
		rte_errno = EBUSY;
		return rte_errno;
	}

	return mlx5dr_rule_enqueue_precheck_create(rule, attr);
}

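/* Moving a rule during matcher resize is a two step process:
 * mlx5dr_rule_move_hws_add() rewrites the saved match STE in the resize
 * destination matcher, and mlx5dr_rule_move_hws_remove() then deletes the
 * STE from the source matcher using the RTC IDs saved in resize_info.
 */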
int mlx5dr_rule_move_hws_remove(struct mlx5dr_rule *rule,
				void *queue_ptr,
				void *user_data)
{
	bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(rule->matcher->mt);
	struct mlx5dr_wqe_gta_ctrl_seg empty_wqe_ctrl = {0};
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_send_engine *queue = queue_ptr;
	struct mlx5dr_send_ste_attr ste_attr = {0};

	/* Send dependent WQEs */
	mlx5dr_send_all_dep_wqe(queue);

	rule->resize_info->state = MLX5DR_RULE_RESIZE_STATE_DELETING;

	ste_attr.send_attr.fence = 0;
	ste_attr.send_attr.opmod = MLX5DR_WQE_GTA_OPMOD_STE;
	ste_attr.send_attr.opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;
	ste_attr.send_attr.len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;
	ste_attr.send_attr.rule = rule;
	ste_attr.send_attr.notify_hw = 1;
	ste_attr.send_attr.user_data = user_data;
	ste_attr.rtc_0 = rule->resize_info->rtc_0;
	ste_attr.rtc_1 = rule->resize_info->rtc_1;
	ste_attr.used_id_rtc_0 = &rule->resize_info->rtc_0;
	ste_attr.used_id_rtc_1 = &rule->resize_info->rtc_1;
	ste_attr.wqe_ctrl = &empty_wqe_ctrl;
	ste_attr.wqe_tag_is_jumbo = is_jumbo;
	ste_attr.gta_opcode = MLX5DR_WQE_GTA_OP_DEACTIVATE;

	if (unlikely(mlx5dr_matcher_is_insert_by_idx(matcher)))
		ste_attr.direct_index = rule->resize_info->rule_idx;

	mlx5dr_rule_load_delete_info(rule, &ste_attr);
	mlx5dr_send_ste(queue, &ste_attr);

	return 0;
}

int mlx5dr_rule_move_hws_add(struct mlx5dr_rule *rule,
			     struct mlx5dr_rule_attr *attr)
{
	bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(rule->matcher->mt);
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_send_ste_attr ste_attr = {0};
	struct mlx5dr_send_engine *queue;

	if (unlikely(mlx5dr_rule_enqueue_precheck_move(rule, attr)))
		return -rte_errno;

	queue = &ctx->send_queue[attr->queue_id];

	if (unlikely(mlx5dr_send_engine_err(queue))) {
		rte_errno = EIO;
		return rte_errno;
	}

	mlx5dr_rule_move_init(rule, attr);

	mlx5dr_rule_move_get_rtc(rule, &ste_attr);

	ste_attr.send_attr.opmod = MLX5DR_WQE_GTA_OPMOD_STE;
	ste_attr.send_attr.opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;
	ste_attr.send_attr.len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;
	ste_attr.gta_opcode = MLX5DR_WQE_GTA_OP_ACTIVATE;
	ste_attr.wqe_tag_is_jumbo = is_jumbo;

	ste_attr.send_attr.rule = rule;
	ste_attr.send_attr.fence = 0;
	ste_attr.send_attr.notify_hw = !attr->burst;
	ste_attr.send_attr.user_data = attr->user_data;

	ste_attr.used_id_rtc_0 = &rule->rtc_0;
	ste_attr.used_id_rtc_1 = &rule->rtc_1;
	ste_attr.wqe_ctrl = (struct mlx5dr_wqe_gta_ctrl_seg *)rule->resize_info->ctrl_seg;
	ste_attr.wqe_data = (struct mlx5dr_wqe_gta_data_seg_ste *)rule->resize_info->data_seg;
	ste_attr.direct_index = mlx5dr_matcher_is_insert_by_idx(matcher) ?
				attr->rule_idx : 0;

	mlx5dr_send_ste(queue, &ste_attr);
	mlx5dr_send_engine_inc_rule(queue);

	/* Send dependent WQEs */
	if (!attr->burst)
		mlx5dr_send_all_dep_wqe(queue);

	return 0;
}

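/* Enqueue a rule creation. A minimal usage sketch (caller-side names such
 * as "matcher", "items" and "rule_actions" are placeholders, and error
 * handling is omitted):
 *
 *	struct mlx5dr_rule *rule = simple_calloc(1, mlx5dr_rule_get_handle_size());
 *	struct mlx5dr_rule_attr attr = {
 *		.queue_id = 0,
 *		.user_data = rule,
 *		.burst = 0,
 *	};
 *
 *	mlx5dr_rule_create(matcher, 0, items, 0, rule_actions, &attr, rule);
 *
 * The operation completes asynchronously; the handle must stay valid until
 * a completion for it is polled via mlx5dr_send_queue_poll() on the same
 * queue.
 */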
int mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
		       uint8_t mt_idx,
		       const struct rte_flow_item items[],
		       uint8_t at_idx,
		       struct mlx5dr_rule_action rule_actions[],
		       struct mlx5dr_rule_attr *attr,
		       struct mlx5dr_rule *rule_handle)
{
	int ret;

	rule_handle->matcher = matcher;

	if (unlikely(mlx5dr_rule_enqueue_precheck_create(rule_handle, attr)))
		return -rte_errno;

	assert(matcher->num_of_mt > mt_idx);
	assert(matcher->num_of_at > at_idx);
	assert(items);

	if (unlikely(mlx5dr_table_is_root(matcher->tbl)))
		ret = mlx5dr_rule_create_root(rule_handle,
					      attr,
					      items,
					      matcher->at[at_idx].num_actions,
					      rule_actions);
	else
		ret = mlx5dr_rule_create_hws(rule_handle,
					     attr,
					     mt_idx,
					     items,
					     at_idx,
					     rule_actions);
	return -ret;
}

int mlx5dr_rule_destroy(struct mlx5dr_rule *rule,
			struct mlx5dr_rule_attr *attr)
{
	int ret;

	if (unlikely(mlx5dr_rule_enqueue_precheck(rule, attr)))
		return -rte_errno;

	if (unlikely(mlx5dr_table_is_root(rule->matcher->tbl)))
		ret = mlx5dr_rule_destroy_root(rule, attr);
	else
		ret = mlx5dr_rule_destroy_hws(rule, attr);

	return -ret;
}

int mlx5dr_rule_action_update(struct mlx5dr_rule *rule_handle,
			      uint8_t at_idx,
			      struct mlx5dr_rule_action rule_actions[],
			      struct mlx5dr_rule_attr *attr)
{
	int ret;

	if (unlikely(mlx5dr_rule_enqueue_precheck_update(rule_handle, attr)))
		return -rte_errno;

	if (rule_handle->status != MLX5DR_RULE_STATUS_CREATED) {
		DR_LOG(ERR, "Current rule status does not allow update");
		rte_errno = EBUSY;
		return -rte_errno;
	}

	ret = mlx5dr_rule_create_hws(rule_handle,
				     attr,
				     0,
				     NULL,
				     at_idx,
				     rule_actions);

	return -ret;
}

size_t mlx5dr_rule_get_handle_size(void)
{
	return sizeof(struct mlx5dr_rule);
}

int mlx5dr_rule_hash_calculate(struct mlx5dr_matcher *matcher,
			       const struct rte_flow_item items[],
			       uint8_t mt_idx,
			       enum mlx5dr_rule_hash_calc_mode mode,
			       uint32_t *ret_hash)
{
	uint8_t tag[MLX5DR_WQE_SZ_GTA_DATA] = {0};
	struct mlx5dr_match_template *mt;

	if (!matcher || !matcher->mt) {
		rte_errno = EINVAL;
		return -rte_errno;
	}

	mt = &matcher->mt[mt_idx];

	if (mlx5dr_matcher_req_fw_wqe(matcher) ||
	    mlx5dr_table_is_root(matcher->tbl) ||
	    matcher->tbl->ctx->caps->access_index_mode == MLX5DR_MATCHER_INSERT_BY_HASH ||
	    matcher->tbl->ctx->caps->flow_table_hash_type != MLX5_FLOW_TABLE_HASH_TYPE_CRC32) {
		DR_LOG(DEBUG, "Hash calculation is not supported on this matcher");
		rte_errno = ENOTSUP;
		return -rte_errno;
	}

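	/* Build the match tag exactly as insertion would, then hash it with
	 * the same CRC32 HW uses; non-jumbo tags skip the leading actions
	 * area of the STE data.
	 */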
	mlx5dr_definer_create_tag(items, mt->fc, mt->fc_sz, tag);
	if (mlx5dr_matcher_mt_is_jumbo(mt))
		*ret_hash = mlx5dr_crc32_calc(tag, MLX5DR_JUMBO_TAG_SZ);
	else
		*ret_hash = mlx5dr_crc32_calc(tag + MLX5DR_ACTIONS_SZ,
					      MLX5DR_MATCH_TAG_SZ);

	if (mode == MLX5DR_RULE_HASH_CALC_MODE_IDX)
		*ret_hash = *ret_hash & (BIT(matcher->attr.rule.num_log) - 1);

	return 0;
}
1093