xref: /dpdk/drivers/net/mlx5/hws/mlx5dr_bwc.c (revision 7917b0d38e92e8b9ec5a870415b791420e10f11a)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2023 NVIDIA Corporation & Affiliates
3  */
4 
5 #include "mlx5dr_internal.h"
6 
7 static uint16_t mlx5dr_bwc_queues(struct mlx5dr_context *ctx)
8 {
9 	return (ctx->queues - 1) / 2;
10 }
11 
12 static uint16_t
13 mlx5dr_bwc_gen_queue_idx(struct mlx5dr_context *ctx)
14 {
15 	/* assign random queue */
16 	return rand() % mlx5dr_bwc_queues(ctx);
17 }
18 
19 static uint16_t
20 mlx5dr_bwc_get_queue_id(struct mlx5dr_context *ctx, uint16_t idx)
21 {
22 	return idx + mlx5dr_bwc_queues(ctx);
23 }
24 
25 static uint16_t
26 mlx5dr_bwc_get_burst_th(struct mlx5dr_context *ctx, uint16_t queue_id)
27 {
28 	return RTE_MIN(ctx->send_queue[queue_id].num_entries / 2,
29 		       MLX5DR_BWC_MATCHER_REHASH_BURST_TH);
30 }
31 
32 static rte_spinlock_t *
33 mlx5dr_bwc_get_queue_lock(struct mlx5dr_context *ctx, uint16_t idx)
34 {
35 	return &ctx->bwc_send_queue_locks[idx];
36 }
37 
38 static void mlx5dr_bwc_lock_all_queues(struct mlx5dr_context *ctx)
39 {
40 	uint16_t bwc_queues = mlx5dr_bwc_queues(ctx);
41 	rte_spinlock_t *queue_lock;
42 	int i;
43 
44 	for (i = 0; i < bwc_queues; i++) {
45 		queue_lock = mlx5dr_bwc_get_queue_lock(ctx, i);
46 		rte_spinlock_lock(queue_lock);
47 	}
48 }
49 
50 static void mlx5dr_bwc_unlock_all_queues(struct mlx5dr_context *ctx)
51 {
52 	uint16_t bwc_queues = mlx5dr_bwc_queues(ctx);
53 	rte_spinlock_t *queue_lock;
54 	int i;
55 
56 	for (i = 0; i < bwc_queues; i++) {
57 		queue_lock = mlx5dr_bwc_get_queue_lock(ctx, i);
58 		rte_spinlock_unlock(queue_lock);
59 	}
60 }
61 
62 static void mlx5dr_bwc_matcher_init_attr(struct mlx5dr_matcher_attr *attr,
63 					 uint32_t priority,
64 					 uint8_t size_log,
65 					 bool is_root)
66 {
67 	memset(attr, 0, sizeof(*attr));
68 
69 	attr->priority = priority;
70 	attr->optimize_using_rule_idx = 0;
71 	attr->mode = MLX5DR_MATCHER_RESOURCE_MODE_RULE;
72 	attr->optimize_flow_src = MLX5DR_MATCHER_FLOW_SRC_ANY;
73 	attr->insert_mode = MLX5DR_MATCHER_INSERT_BY_HASH;
74 	attr->distribute_mode = MLX5DR_MATCHER_DISTRIBUTE_BY_HASH;
75 	attr->rule.num_log = size_log;
76 
77 	if (!is_root) {
78 		attr->resizable = true;
79 		attr->max_num_of_at_attach = MLX5DR_BWC_MATCHER_ATTACH_AT_NUM;
80 	}
81 }
82 
83 struct mlx5dr_bwc_matcher *
84 mlx5dr_bwc_matcher_create(struct mlx5dr_table *table,
85 			  uint32_t priority,
86 			  const struct rte_flow_item flow_items[])
87 {
88 	enum mlx5dr_action_type init_action_types[1] = { MLX5DR_ACTION_TYP_LAST };
89 	uint16_t bwc_queues = mlx5dr_bwc_queues(table->ctx);
90 	struct mlx5dr_bwc_matcher *bwc_matcher;
91 	struct mlx5dr_matcher_attr attr = {0};
92 	int i;
93 
94 	if (!mlx5dr_context_bwc_supported(table->ctx)) {
95 		rte_errno = EINVAL;
96 		DR_LOG(ERR, "BWC rule: Context created w/o BWC API compatibility");
97 		return NULL;
98 	}
99 
100 	bwc_matcher = simple_calloc(1, sizeof(*bwc_matcher));
101 	if (!bwc_matcher) {
102 		rte_errno = ENOMEM;
103 		return NULL;
104 	}
105 
106 	bwc_matcher->rules = simple_calloc(bwc_queues, sizeof(*bwc_matcher->rules));
107 	if (!bwc_matcher->rules) {
108 		rte_errno = ENOMEM;
109 		goto free_bwc_matcher;
110 	}
111 
112 	for (i = 0; i < bwc_queues; i++)
113 		LIST_INIT(&bwc_matcher->rules[i]);
114 
115 	mlx5dr_bwc_matcher_init_attr(&attr,
116 				     priority,
117 				     MLX5DR_BWC_MATCHER_INIT_SIZE_LOG,
118 				     mlx5dr_table_is_root(table));
119 
120 	bwc_matcher->mt = mlx5dr_match_template_create(flow_items,
121 						       MLX5DR_MATCH_TEMPLATE_FLAG_NONE);
122 	if (!bwc_matcher->mt) {
123 		rte_errno = EINVAL;
124 		goto free_bwc_matcher_rules;
125 	}
126 
127 	bwc_matcher->priority = priority;
128 	bwc_matcher->size_log = MLX5DR_BWC_MATCHER_INIT_SIZE_LOG;
129 
130 	/* create dummy action template */
131 	bwc_matcher->at[0] = mlx5dr_action_template_create(init_action_types, 0);
132 	bwc_matcher->num_of_at = 1;
133 
134 	bwc_matcher->matcher = mlx5dr_matcher_create(table,
135 						     &bwc_matcher->mt, 1,
136 						     &bwc_matcher->at[0],
137 						     bwc_matcher->num_of_at,
138 						     &attr);
139 	if (!bwc_matcher->matcher) {
140 		rte_errno = EINVAL;
141 		goto free_at;
142 	}
143 
144 	return bwc_matcher;
145 
146 free_at:
147 	mlx5dr_action_template_destroy(bwc_matcher->at[0]);
148 	mlx5dr_match_template_destroy(bwc_matcher->mt);
149 free_bwc_matcher_rules:
150 	simple_free(bwc_matcher->rules);
151 free_bwc_matcher:
152 	simple_free(bwc_matcher);
153 
154 	return NULL;
155 }
156 
157 int mlx5dr_bwc_matcher_destroy(struct mlx5dr_bwc_matcher *bwc_matcher)
158 {
159 	int i;
160 
161 	if (bwc_matcher->num_of_rules)
162 		DR_LOG(ERR, "BWC matcher destroy: matcher still has %d rules",
163 		       bwc_matcher->num_of_rules);
164 
165 	mlx5dr_matcher_destroy(bwc_matcher->matcher);
166 	bwc_matcher->matcher = NULL;
167 
168 	for (i = 0; i < bwc_matcher->num_of_at; i++)
169 		mlx5dr_action_template_destroy(bwc_matcher->at[i]);
170 
171 	mlx5dr_match_template_destroy(bwc_matcher->mt);
172 	simple_free(bwc_matcher->rules);
173 	simple_free(bwc_matcher);
174 
175 	return 0;
176 }
177 
/* Poll rule-operation completions on a BWC send queue.
 *
 * @ctx           - DR context
 * @queue_id      - send queue to poll
 * @pending_rules - in/out: number of operations still in flight on this
 *                  queue; decremented by the number of completions reaped
 * @drain         - true: keep polling until no operations are pending;
 *                  false: poll only when a burst's worth of completions is
 *                  expected, or the queue is completely full
 *
 * Return: 0 on success, -EINVAL if polling failed or any completion
 * carried an error status.
 */
static int
mlx5dr_bwc_queue_poll(struct mlx5dr_context *ctx,
		      uint16_t queue_id,
		      uint32_t *pending_rules,
		      bool drain)
{
	bool queue_full = *pending_rules == MLX5DR_BWC_MATCHER_REHASH_QUEUE_SZ;
	struct rte_flow_op_result comp[MLX5DR_BWC_MATCHER_REHASH_BURST_TH];
	uint16_t burst_th = mlx5dr_bwc_get_burst_th(ctx, queue_id);
	bool got_comp = *pending_rules >= burst_th;
	int err = 0;
	int ret;
	int i;

	/* Check if there are any completions at all */
	if (!got_comp && !drain)
		return 0;

	/* Keep polling while the queue is full, or while draining / a burst
	 * is expected and operations are still pending.
	 * NOTE(review): if drain is set and the HW persistently returns no
	 * completions (ret == 0) while *pending_rules > 0, this loop would
	 * spin - confirm completions are guaranteed to eventually arrive.
	 */
	while (queue_full || ((got_comp || drain) && *pending_rules)) {
		ret = mlx5dr_send_queue_poll(ctx, queue_id, comp, burst_th);
		if (unlikely(ret < 0)) {
			DR_LOG(ERR, "Rehash error: polling queue %d returned %d\n",
			       queue_id, ret);
			return -EINVAL;
		}

		if (ret) {
			(*pending_rules) -= ret;
			/* Record errors but keep reaping the rest of the
			 * completions so the pending count stays accurate.
			 */
			for (i = 0; i < ret; i++) {
				if (unlikely(comp[i].status != RTE_FLOW_OP_SUCCESS)) {
					DR_LOG(ERR,
					       "Rehash error: polling queue %d returned completion with error\n",
					       queue_id);
					err = -EINVAL;
				}
			}
			queue_full = false;
		}

		got_comp = !!ret;
	}

	return err;
}
222 
223 static void
224 mlx5dr_bwc_rule_fill_attr(struct mlx5dr_bwc_matcher *bwc_matcher,
225 			  uint16_t bwc_queue_idx,
226 			  struct mlx5dr_rule_attr *rule_attr)
227 {
228 	struct mlx5dr_context *ctx = bwc_matcher->matcher->tbl->ctx;
229 
230 	/* no use of INSERT_BY_INDEX in bwc rule */
231 	rule_attr->rule_idx = 0;
232 
233 	/* notify HW at each rule insertion/deletion */
234 	rule_attr->burst = 0;
235 
236 	/* We don't need user data, but the API requires it to exist */
237 	rule_attr->user_data = (void *)0xFACADE;
238 
239 	rule_attr->queue_id = mlx5dr_bwc_get_queue_id(ctx, bwc_queue_idx);
240 }
241 
242 static struct mlx5dr_bwc_rule *
243 mlx5dr_bwc_rule_alloc(void)
244 {
245 	struct mlx5dr_bwc_rule *bwc_rule;
246 
247 	bwc_rule = simple_calloc(1, sizeof(*bwc_rule));
248 	if (unlikely(!bwc_rule))
249 		goto out_err;
250 
251 	bwc_rule->rule = simple_calloc(1, sizeof(*bwc_rule->rule));
252 	if (unlikely(!bwc_rule->rule))
253 		goto free_rule;
254 
255 	return bwc_rule;
256 
257 free_rule:
258 	simple_free(bwc_rule);
259 out_err:
260 	rte_errno = ENOMEM;
261 	return NULL;
262 }
263 
264 static void
265 mlx5dr_bwc_rule_free(struct mlx5dr_bwc_rule *bwc_rule)
266 {
267 	if (likely(bwc_rule->rule))
268 		simple_free(bwc_rule->rule);
269 	simple_free(bwc_rule);
270 }
271 
272 static void
273 mlx5dr_bwc_rule_list_add(struct mlx5dr_bwc_rule *bwc_rule, uint16_t idx)
274 {
275 	struct mlx5dr_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
276 
277 	rte_atomic_fetch_add_explicit(&bwc_matcher->num_of_rules, 1, rte_memory_order_relaxed);
278 	bwc_rule->bwc_queue_idx = idx;
279 	LIST_INSERT_HEAD(&bwc_matcher->rules[idx], bwc_rule, next);
280 }
281 
282 static void mlx5dr_bwc_rule_list_remove(struct mlx5dr_bwc_rule *bwc_rule)
283 {
284 	struct mlx5dr_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
285 
286 	rte_atomic_fetch_sub_explicit(&bwc_matcher->num_of_rules, 1, rte_memory_order_relaxed);
287 	LIST_REMOVE(bwc_rule, next);
288 }
289 
290 static int
291 mlx5dr_bwc_rule_destroy_hws_async(struct mlx5dr_bwc_rule *bwc_rule,
292 				  struct mlx5dr_rule_attr *attr)
293 {
294 	return mlx5dr_rule_destroy(bwc_rule->rule, attr);
295 }
296 
/* Synchronously destroy a BWC rule on a HW-steering (non-root) table.
 *
 * Posts an async rule destroy, then busy-polls the rule's queue until its
 * single completion arrives. Caller (mlx5dr_bwc_rule_destroy_hws) holds
 * the corresponding queue lock.
 *
 * Return: 0 on success; non-zero otherwise (rte_errno set to EINVAL on a
 * bad completion or unexpected rule status).
 *
 * NOTE(review): the poll loop exits only when ret == 1; a persistently
 * negative return from mlx5dr_send_queue_poll would spin forever -
 * confirm poll failure cannot occur here.
 */
static int
mlx5dr_bwc_rule_destroy_hws_sync(struct mlx5dr_bwc_rule *bwc_rule,
				 struct mlx5dr_rule_attr *rule_attr)
{
	struct mlx5dr_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
	struct rte_flow_op_result completion;
	int ret;

	ret = mlx5dr_bwc_rule_destroy_hws_async(bwc_rule, rule_attr);
	if (unlikely(ret))
		return ret;

	/* Wait for the single completion of the destroy operation */
	do {
		ret = mlx5dr_send_queue_poll(ctx, rule_attr->queue_id, &completion, 1);
	} while (ret != 1);

	/* DELETING is also accepted - presumably deletion may still be
	 * finalizing when the completion is seen; confirm.
	 */
	if (unlikely(completion.status != RTE_FLOW_OP_SUCCESS ||
		     (bwc_rule->rule->status != MLX5DR_RULE_STATUS_DELETED &&
		      bwc_rule->rule->status != MLX5DR_RULE_STATUS_DELETING))) {
		DR_LOG(ERR, "Failed destroying BWC rule: completion %d, rule status %d",
		       completion.status, bwc_rule->rule->status);
		rte_errno = EINVAL;
		return rte_errno;
	}

	return 0;
}
324 
325 static int mlx5dr_bwc_rule_destroy_hws(struct mlx5dr_bwc_rule *bwc_rule)
326 {
327 	struct mlx5dr_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
328 	struct mlx5dr_context *ctx = bwc_matcher->matcher->tbl->ctx;
329 	uint16_t idx = bwc_rule->bwc_queue_idx;
330 	struct mlx5dr_rule_attr attr;
331 	rte_spinlock_t *queue_lock;
332 	int ret;
333 
334 	mlx5dr_bwc_rule_fill_attr(bwc_matcher, idx, &attr);
335 
336 	queue_lock = mlx5dr_bwc_get_queue_lock(ctx, idx);
337 
338 	rte_spinlock_lock(queue_lock);
339 
340 	ret = mlx5dr_bwc_rule_destroy_hws_sync(bwc_rule, &attr);
341 	mlx5dr_bwc_rule_list_remove(bwc_rule);
342 
343 	rte_spinlock_unlock(queue_lock);
344 
345 	mlx5dr_bwc_rule_free(bwc_rule);
346 
347 	return ret;
348 }
349 
350 static int mlx5dr_bwc_rule_destroy_root(struct mlx5dr_bwc_rule *bwc_rule)
351 {
352 	int ret;
353 
354 	ret = mlx5dr_rule_destroy_root_no_comp(bwc_rule->rule);
355 
356 	mlx5dr_bwc_rule_free(bwc_rule);
357 
358 	return ret;
359 }
360 
361 int mlx5dr_bwc_rule_destroy(struct mlx5dr_bwc_rule *bwc_rule)
362 {
363 	if (unlikely(mlx5dr_table_is_root(bwc_rule->bwc_matcher->matcher->tbl)))
364 		return mlx5dr_bwc_rule_destroy_root(bwc_rule);
365 
366 	return mlx5dr_bwc_rule_destroy_hws(bwc_rule);
367 }
368 
369 static struct mlx5dr_bwc_rule *
370 mlx5dr_bwc_rule_create_hws_async(struct mlx5dr_bwc_matcher *bwc_matcher,
371 				 const struct rte_flow_item flow_items[],
372 				 uint8_t at_idx,
373 				 struct mlx5dr_rule_action rule_actions[],
374 				 struct mlx5dr_rule_attr *rule_attr)
375 {
376 	struct mlx5dr_bwc_rule *bwc_rule;
377 	int ret;
378 
379 	bwc_rule = mlx5dr_bwc_rule_alloc();
380 	if (unlikely(!bwc_rule))
381 		return NULL;
382 
383 	bwc_rule->bwc_matcher = bwc_matcher;
384 
385 	ret = mlx5dr_rule_create(bwc_matcher->matcher,
386 				 0, /* only one match template supported */
387 				 flow_items,
388 				 at_idx,
389 				 rule_actions,
390 				 rule_attr,
391 				 bwc_rule->rule);
392 
393 	if (unlikely(ret)) {
394 		mlx5dr_bwc_rule_free(bwc_rule);
395 		rte_errno = EINVAL;
396 		return NULL;
397 	}
398 
399 	return bwc_rule;
400 }
401 
402 static struct mlx5dr_bwc_rule *
403 mlx5dr_bwc_rule_create_root_sync(struct mlx5dr_bwc_matcher *bwc_matcher,
404 				 const struct rte_flow_item flow_items[],
405 				 uint8_t num_actions,
406 				 struct mlx5dr_rule_action rule_actions[])
407 {
408 	struct mlx5dr_bwc_rule *bwc_rule;
409 	int ret;
410 
411 	bwc_rule = mlx5dr_bwc_rule_alloc();
412 	if (unlikely(!bwc_rule)) {
413 		rte_errno = ENOMEM;
414 		return NULL;
415 	}
416 
417 	bwc_rule->bwc_matcher = bwc_matcher;
418 	bwc_rule->rule->matcher = bwc_matcher->matcher;
419 
420 	ret = mlx5dr_rule_create_root_no_comp(bwc_rule->rule,
421 					      flow_items,
422 					      num_actions,
423 					      rule_actions);
424 	if (unlikely(ret)) {
425 		mlx5dr_bwc_rule_free(bwc_rule);
426 		rte_errno = EINVAL;
427 		return NULL;
428 	}
429 
430 	return bwc_rule;
431 }
432 
/* Synchronously create a BWC rule on a HW-steering (non-root) table.
 *
 * Posts an async rule create on the rule's BWC queue, then busy-polls that
 * queue until the single completion arrives. Caller
 * (mlx5dr_bwc_rule_create_hws) holds the lock of the queue that
 * rule_attr->queue_id maps to.
 *
 * Return: the new rule, or NULL on failure (bad completion or unexpected
 * rule status).
 *
 * NOTE(review): the poll loop exits only when ret == 1; a persistently
 * negative return from mlx5dr_send_queue_poll would spin forever -
 * confirm poll failure cannot occur here.
 */
static struct mlx5dr_bwc_rule *
mlx5dr_bwc_rule_create_hws_sync(struct mlx5dr_bwc_matcher *bwc_matcher,
				const struct rte_flow_item flow_items[],
				uint8_t at_idx,
				struct mlx5dr_rule_action rule_actions[],
				struct mlx5dr_rule_attr *rule_attr)

{
	struct mlx5dr_context *ctx = bwc_matcher->matcher->tbl->ctx;
	struct rte_flow_op_result completion;
	struct mlx5dr_bwc_rule *bwc_rule;
	int ret;

	bwc_rule = mlx5dr_bwc_rule_create_hws_async(bwc_matcher, flow_items,
						    at_idx, rule_actions,
						    rule_attr);
	if (unlikely(!bwc_rule))
		return NULL;

	/* Wait for the single completion of the create operation */
	do {
		ret = mlx5dr_send_queue_poll(ctx, rule_attr->queue_id, &completion, 1);
	} while (ret != 1);

	/* CREATING is also accepted - presumably creation may still be
	 * finalizing when the completion is seen; confirm.
	 */
	if (unlikely(completion.status != RTE_FLOW_OP_SUCCESS ||
		     (bwc_rule->rule->status != MLX5DR_RULE_STATUS_CREATED &&
		      bwc_rule->rule->status != MLX5DR_RULE_STATUS_CREATING))) {
		DR_LOG(ERR, "Failed creating BWC rule: completion %d, rule status %d",
		       completion.status, bwc_rule->rule->status);
		mlx5dr_bwc_rule_free(bwc_rule);
		return NULL;
	}

	return bwc_rule;
}
467 
468 static bool
469 mlx5dr_bwc_matcher_size_maxed_out(struct mlx5dr_bwc_matcher *bwc_matcher)
470 {
471 	struct mlx5dr_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps;
472 
473 	return bwc_matcher->size_log + MLX5DR_MATCHER_ASSURED_MAIN_TBL_DEPTH >=
474 	       caps->ste_alloc_log_max - 1;
475 }
476 
477 static bool
478 mlx5dr_bwc_matcher_rehash_size_needed(struct mlx5dr_bwc_matcher *bwc_matcher,
479 				      uint32_t num_of_rules)
480 {
481 	/* size-based rehash for root table is kernel's responsibility */
482 	if (unlikely(mlx5dr_table_is_root(bwc_matcher->matcher->tbl)))
483 		return false;
484 
485 	if (unlikely(mlx5dr_bwc_matcher_size_maxed_out(bwc_matcher)))
486 		return false;
487 
488 	if (unlikely((num_of_rules * 100 / MLX5DR_BWC_MATCHER_REHASH_PERCENT_TH) >=
489 		     (1UL << bwc_matcher->size_log)))
490 		return true;
491 
492 	return false;
493 }
494 
495 static void
496 mlx5dr_bwc_rule_actions_to_action_types(struct mlx5dr_rule_action rule_actions[],
497 					enum mlx5dr_action_type action_types[])
498 {
499 	int i = 0;
500 
501 	for (i = 0;
502 	     rule_actions[i].action && (rule_actions[i].action->type != MLX5DR_ACTION_TYP_LAST);
503 	     i++) {
504 		action_types[i] = (enum mlx5dr_action_type)rule_actions[i].action->type;
505 	}
506 
507 	action_types[i] = MLX5DR_ACTION_TYP_LAST;
508 }
509 
510 static int
511 mlx5dr_bwc_rule_actions_num(struct mlx5dr_rule_action rule_actions[])
512 {
513 	int i = 0;
514 
515 	while (rule_actions[i].action &&
516 	       (rule_actions[i].action->type != MLX5DR_ACTION_TYP_LAST))
517 		i++;
518 
519 	return i;
520 }
521 
/* Create a new action template matching the given rule actions and append
 * it to the BWC matcher's AT array (incrementing num_of_at).
 * Called with all BWC queue locks held (see mlx5dr_bwc_rule_create_hws).
 *
 * Return: 0 on success, rte_errno (ENOMEM) on AT creation failure.
 *
 * NOTE(review): there is no bounds check of num_of_at against the at[]
 * array capacity (presumably tied to MLX5DR_BWC_MATCHER_ATTACH_AT_NUM) -
 * confirm callers cannot overflow it.
 */
static int
mlx5dr_bwc_matcher_extend_at(struct mlx5dr_bwc_matcher *bwc_matcher,
			     struct mlx5dr_rule_action rule_actions[])
{
	enum mlx5dr_action_type action_types[MLX5_HW_MAX_ACTS];

	mlx5dr_bwc_rule_actions_to_action_types(rule_actions, action_types);

	bwc_matcher->at[bwc_matcher->num_of_at] =
		mlx5dr_action_template_create(action_types, 0);

	if (unlikely(!bwc_matcher->at[bwc_matcher->num_of_at])) {
		rte_errno = ENOMEM;
		return rte_errno;
	}

	/* Only bump the count once the new AT is successfully created */
	bwc_matcher->num_of_at++;
	return 0;
}
541 
542 static int
543 mlx5dr_bwc_matcher_extend_size(struct mlx5dr_bwc_matcher *bwc_matcher)
544 {
545 	struct mlx5dr_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps;
546 
547 	if (unlikely(mlx5dr_bwc_matcher_size_maxed_out(bwc_matcher))) {
548 		DR_LOG(ERR, "Can't resize matcher: depth exceeds limit %d",
549 		       caps->rtc_log_depth_max);
550 		return -ENOMEM;
551 	}
552 
553 	bwc_matcher->size_log =
554 		RTE_MIN(bwc_matcher->size_log + MLX5DR_BWC_MATCHER_SIZE_LOG_STEP,
555 			caps->ste_alloc_log_max - MLX5DR_MATCHER_ASSURED_MAIN_TBL_DEPTH);
556 
557 	return 0;
558 }
559 
/* Find an existing action template that matches the given rule actions.
 *
 * Compares the rule's action-type sequence against each of the matcher's
 * action templates, skipping index 0 which is the dummy AT created at
 * matcher init.
 *
 * Return: matching AT index, or -1 if none matches (caller is expected to
 * extend the AT array).
 */
static int
mlx5dr_bwc_matcher_find_at(struct mlx5dr_bwc_matcher *bwc_matcher,
			   struct mlx5dr_rule_action rule_actions[])
{
	enum mlx5dr_action_type *action_type_arr;
	int i, j;

	/* start from index 1 - first action template is a dummy */
	for (i = 1; i < bwc_matcher->num_of_at; i++) {
		j = 0;
		action_type_arr = bwc_matcher->at[i]->action_type_arr;

		/* Walk both sequences while they agree */
		while (rule_actions[j].action &&
		       rule_actions[j].action->type != MLX5DR_ACTION_TYP_LAST) {
			if (action_type_arr[j] != rule_actions[j].action->type)
				break;
			j++;
		}

		/* Match only if BOTH sequences terminated at position j:
		 * the AT via TYP_LAST, the rule via NULL action or TYP_LAST.
		 */
		if (action_type_arr[j] == MLX5DR_ACTION_TYP_LAST &&
		    (!rule_actions[j].action ||
		     rule_actions[j].action->type == MLX5DR_ACTION_TYP_LAST))
			return i;
	}

	return -1;
}
587 
/* Move all the rules of a BWC matcher to its resize-target matcher.
 *
 * Iterates the per-queue rule lists round-robin: each pass posts up to a
 * burst of rule-move operations per queue, polling completions as it goes,
 * then finally drains all the queues.
 *
 * Called during rehash with all BWC queue locks held (see
 * mlx5dr_bwc_rule_create_hws), so the rule lists cannot change underneath.
 *
 * Return: 0 on success, negative rte_errno value on failure.
 */
static int
mlx5dr_bwc_matcher_move_all(struct mlx5dr_bwc_matcher *bwc_matcher)
{
	struct mlx5dr_context *ctx = bwc_matcher->matcher->tbl->ctx;
	uint16_t bwc_queues = mlx5dr_bwc_queues(ctx);
	struct mlx5dr_bwc_rule **bwc_rules;
	struct mlx5dr_rule_attr rule_attr;
	uint32_t *pending_rules;
	uint16_t burst_th;
	bool all_done;
	int i, j, ret;

	/* Root matchers are not resizable - moving rules is HWS-only */
	if (mlx5dr_table_is_root(bwc_matcher->matcher->tbl)) {
		rte_errno = EINVAL;
		return -rte_errno;
	}

	mlx5dr_bwc_rule_fill_attr(bwc_matcher, 0, &rule_attr);

	/* Per-queue count of move operations still in flight */
	pending_rules = simple_calloc(bwc_queues, sizeof(*pending_rules));
	if (!pending_rules) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}

	/* Per-queue cursor into that queue's rule list */
	bwc_rules = simple_calloc(bwc_queues, sizeof(*bwc_rules));
	if (!bwc_rules) {
		rte_errno = ENOMEM;
		goto free_pending_rules;
	}

	for (i = 0; i < bwc_queues; i++) {
		if (LIST_EMPTY(&bwc_matcher->rules[i]))
			bwc_rules[i] = NULL;
		else
			bwc_rules[i] = LIST_FIRST(&bwc_matcher->rules[i]);
	}

	do {
		all_done = true;

		for (i = 0; i < bwc_queues; i++) {
			rule_attr.queue_id = mlx5dr_bwc_get_queue_id(ctx, i);
			burst_th = mlx5dr_bwc_get_burst_th(ctx, rule_attr.queue_id);

			for (j = 0; j < burst_th && bwc_rules[i]; j++) {
				/* Ring the doorbell only on the last op of a burst */
				rule_attr.burst = !!((j + 1) % burst_th);
				ret = mlx5dr_matcher_resize_rule_move(bwc_matcher->matcher,
								      bwc_rules[i]->rule,
								      &rule_attr);
				if (unlikely(ret)) {
					DR_LOG(ERR, "Moving BWC rule failed during rehash - %d",
					       ret);
					rte_errno = ENOMEM;
					goto free_bwc_rules;
				}

				all_done = false;
				pending_rules[i]++;
				bwc_rules[i] = LIST_NEXT(bwc_rules[i], next);

				/* Opportunistically reap completions so the
				 * queue never overflows.
				 */
				ret = mlx5dr_bwc_queue_poll(ctx, rule_attr.queue_id,
							    &pending_rules[i], false);
				if (unlikely(ret)) {
					rte_errno = EINVAL;
					goto free_bwc_rules;
				}
			}
		}
	} while (!all_done);

	/* drain all the bwc queues */
	for (i = 0; i < bwc_queues; i++) {
		if (pending_rules[i]) {
			uint16_t queue_id = mlx5dr_bwc_get_queue_id(ctx, i);
			mlx5dr_send_engine_flush_queue(&ctx->send_queue[queue_id]);
			ret = mlx5dr_bwc_queue_poll(ctx, queue_id,
						    &pending_rules[i], true);
			if (unlikely(ret)) {
				rte_errno = EINVAL;
				goto free_bwc_rules;
			}
		}
	}

	/* Success path clears rte_errno so the shared cleanup below
	 * returns 0; error paths arrive here with rte_errno set.
	 */
	rte_errno = 0;

free_bwc_rules:
	simple_free(bwc_rules);
free_pending_rules:
	simple_free(pending_rules);

	return -rte_errno;
}
682 
683 static int
684 mlx5dr_bwc_matcher_move(struct mlx5dr_bwc_matcher *bwc_matcher)
685 {
686 	struct mlx5dr_matcher_attr matcher_attr = {0};
687 	struct mlx5dr_matcher *old_matcher;
688 	struct mlx5dr_matcher *new_matcher;
689 	int ret;
690 
691 	mlx5dr_bwc_matcher_init_attr(&matcher_attr,
692 				     bwc_matcher->priority,
693 				     bwc_matcher->size_log,
694 				     mlx5dr_table_is_root(bwc_matcher->matcher->tbl));
695 
696 	old_matcher = bwc_matcher->matcher;
697 	new_matcher = mlx5dr_matcher_create(old_matcher->tbl,
698 					    &bwc_matcher->mt, 1,
699 					    bwc_matcher->at,
700 					    bwc_matcher->num_of_at,
701 					    &matcher_attr);
702 	if (!new_matcher) {
703 		DR_LOG(ERR, "Rehash error: matcher creation failed");
704 		return -ENOMEM;
705 	}
706 
707 	ret = mlx5dr_matcher_resize_set_target(old_matcher, new_matcher);
708 	if (ret) {
709 		DR_LOG(ERR, "Rehash error: failed setting resize target");
710 		return ret;
711 	}
712 
713 	ret = mlx5dr_bwc_matcher_move_all(bwc_matcher);
714 	if (ret) {
715 		DR_LOG(ERR, "Rehash error: moving rules failed");
716 		return -ENOMEM;
717 	}
718 
719 	bwc_matcher->matcher = new_matcher;
720 	mlx5dr_matcher_destroy(old_matcher);
721 
722 	return 0;
723 }
724 
725 static int
726 mlx5dr_bwc_matcher_rehash_size(struct mlx5dr_bwc_matcher *bwc_matcher)
727 {
728 	uint32_t num_of_rules;
729 	int ret;
730 
731 	/* If the current matcher size is already at its max size, we can't
732 	 * do the rehash. Skip it and try adding the rule again - perhaps
733 	 * there was some change.
734 	 */
735 	if (mlx5dr_bwc_matcher_size_maxed_out(bwc_matcher))
736 		return 0;
737 
738 	/* It is possible that other rule has already performed rehash.
739 	 * Need to check again if we really need rehash.
740 	 * If the reason for rehash was size, but not any more - skip rehash.
741 	 */
742 	num_of_rules = rte_atomic_load_explicit(&bwc_matcher->num_of_rules,
743 						rte_memory_order_relaxed);
744 	if (!mlx5dr_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))
745 		return 0;
746 
747 	/* Now we're done all the checking - do the rehash:
748 	 *  - extend match RTC size
749 	 *  - create new matcher
750 	 *  - move all the rules to the new matcher
751 	 *  - destroy the old matcher
752 	 */
753 
754 	ret = mlx5dr_bwc_matcher_extend_size(bwc_matcher);
755 	if (ret)
756 		return ret;
757 
758 	return mlx5dr_bwc_matcher_move(bwc_matcher);
759 }
760 
/* Rehash after an action-template extension.
 * The new AT is already in the bwc_matcher's AT array, so no further
 * checks are needed: create a new matcher with the full AT list, move
 * all rules there, and drop the old matcher.
 */
static int
mlx5dr_bwc_matcher_rehash_at(struct mlx5dr_bwc_matcher *bwc_matcher)
{
	return mlx5dr_bwc_matcher_move(bwc_matcher);
}
773 
774 static struct mlx5dr_bwc_rule *
775 mlx5dr_bwc_rule_create_root(struct mlx5dr_bwc_matcher *bwc_matcher,
776 			    const struct rte_flow_item flow_items[],
777 			    struct mlx5dr_rule_action rule_actions[])
778 {
779 	struct mlx5dr_bwc_rule *bwc_rule;
780 
781 	bwc_rule = mlx5dr_bwc_rule_create_root_sync(bwc_matcher,
782 						    flow_items,
783 						    mlx5dr_bwc_rule_actions_num(rule_actions),
784 						    rule_actions);
785 
786 	if (unlikely(!bwc_rule))
787 		DR_LOG(ERR, "BWC rule: failed creating rule on root tbl");
788 
789 	return bwc_rule;
790 }
791 
/* Create a BWC rule on a HW-steering (non-root) table.
 *
 * The rule is assigned a random BWC queue and inserted under that queue's
 * lock. Before insertion, two conditions may force a full rehash (done
 * under ALL queue locks, with this queue's lock temporarily dropped):
 *   1. no existing action template matches the rule's actions;
 *   2. the matcher's rule count crossed the size threshold.
 * If the final insertion still fails, one last size rehash is attempted
 * and the insertion retried.
 *
 * Return: the new rule, or NULL on failure.
 *
 * NOTE(review): while this queue's lock is dropped to take all locks,
 * other threads may mutate the matcher; the re-checks below rely on that
 * being handled by the all-queues lock - confirm.
 */
static struct mlx5dr_bwc_rule *
mlx5dr_bwc_rule_create_hws(struct mlx5dr_bwc_matcher *bwc_matcher,
			   const struct rte_flow_item flow_items[],
			   struct mlx5dr_rule_action rule_actions[])
{
	struct mlx5dr_context *ctx = bwc_matcher->matcher->tbl->ctx;
	struct mlx5dr_bwc_rule *bwc_rule = NULL;
	struct mlx5dr_rule_attr rule_attr;
	rte_spinlock_t *queue_lock;
	uint32_t num_of_rules;
	uint16_t idx;
	int at_idx;
	int ret;

	idx = mlx5dr_bwc_gen_queue_idx(ctx);

	mlx5dr_bwc_rule_fill_attr(bwc_matcher, idx, &rule_attr);

	queue_lock = mlx5dr_bwc_get_queue_lock(ctx, idx);

	rte_spinlock_lock(queue_lock);

	/* check if rehash needed due to missing action template */
	at_idx = mlx5dr_bwc_matcher_find_at(bwc_matcher, rule_actions);
	if (unlikely(at_idx < 0)) {
		/* we need to extend BWC matcher action templates array */
		rte_spinlock_unlock(queue_lock);
		mlx5dr_bwc_lock_all_queues(ctx);

		ret = mlx5dr_bwc_matcher_extend_at(bwc_matcher, rule_actions);
		if (unlikely(ret)) {
			mlx5dr_bwc_unlock_all_queues(ctx);
			rte_errno = EINVAL;
			DR_LOG(ERR, "BWC rule: failed extending action template - %d", ret);
			return NULL;
		}

		/* action templates array was extended, we need the last idx */
		at_idx = bwc_matcher->num_of_at - 1;

		ret = mlx5dr_matcher_attach_at(bwc_matcher->matcher,
					       bwc_matcher->at[at_idx]);
		if (unlikely(ret)) {
			/* Action template attach failed, possibly due to
			 * requiring more action STEs.
			 * Need to attempt creating new matcher with all
			 * the action templates, including the new one.
			 */
			ret = mlx5dr_bwc_matcher_rehash_at(bwc_matcher);
			if (unlikely(ret)) {
				/* Roll back the AT extension done above */
				mlx5dr_action_template_destroy(bwc_matcher->at[at_idx]);
				bwc_matcher->at[at_idx] = NULL;
				bwc_matcher->num_of_at--;

				mlx5dr_bwc_unlock_all_queues(ctx);

				DR_LOG(ERR, "BWC rule insertion: rehash AT failed - %d", ret);
				return NULL;
			}
		}

		mlx5dr_bwc_unlock_all_queues(ctx);
		rte_spinlock_lock(queue_lock);
	}

	/* check if number of rules require rehash */
	num_of_rules = rte_atomic_load_explicit(&bwc_matcher->num_of_rules,
						rte_memory_order_relaxed);
	if (unlikely(mlx5dr_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))) {
		rte_spinlock_unlock(queue_lock);

		/* Rehash requires exclusive access to all the queues */
		mlx5dr_bwc_lock_all_queues(ctx);
		ret = mlx5dr_bwc_matcher_rehash_size(bwc_matcher);
		mlx5dr_bwc_unlock_all_queues(ctx);

		if (ret) {
			DR_LOG(ERR, "BWC rule insertion: rehash size [%d -> %d] failed - %d",
			       bwc_matcher->size_log - MLX5DR_BWC_MATCHER_SIZE_LOG_STEP,
			       bwc_matcher->size_log,
			       ret);
			return NULL;
		}

		rte_spinlock_lock(queue_lock);
	}

	bwc_rule = mlx5dr_bwc_rule_create_hws_sync(bwc_matcher,
						   flow_items,
						   at_idx,
						   rule_actions,
						   &rule_attr);

	if (likely(bwc_rule)) {
		mlx5dr_bwc_rule_list_add(bwc_rule, idx);
		rte_spinlock_unlock(queue_lock);
		return bwc_rule; /* rule inserted successfully */
	}

	/* At this point the rule wasn't added.
	 * It could be because there was collision, or some other problem.
	 * If we don't dive deeper than API, the only thing we know is that
	 * the status of completion is RTE_FLOW_OP_ERROR.
	 * Try rehash by size and insert rule again - last chance.
	 */

	rte_spinlock_unlock(queue_lock);

	mlx5dr_bwc_lock_all_queues(ctx);
	ret = mlx5dr_bwc_matcher_rehash_size(bwc_matcher);
	mlx5dr_bwc_unlock_all_queues(ctx);

	if (ret) {
		DR_LOG(ERR, "BWC rule insertion: rehash failed - %d", ret);
		return NULL;
	}

	/* Rehash done, but we still have that pesky rule to add */
	rte_spinlock_lock(queue_lock);

	bwc_rule = mlx5dr_bwc_rule_create_hws_sync(bwc_matcher,
						   flow_items,
						   at_idx,
						   rule_actions,
						   &rule_attr);

	if (unlikely(!bwc_rule)) {
		rte_spinlock_unlock(queue_lock);
		DR_LOG(ERR, "BWC rule insertion failed");
		return NULL;
	}

	mlx5dr_bwc_rule_list_add(bwc_rule, idx);
	rte_spinlock_unlock(queue_lock);

	return bwc_rule;
}
928 
929 struct mlx5dr_bwc_rule *
930 mlx5dr_bwc_rule_create(struct mlx5dr_bwc_matcher *bwc_matcher,
931 		       const struct rte_flow_item flow_items[],
932 		       struct mlx5dr_rule_action rule_actions[])
933 {
934 	struct mlx5dr_context *ctx = bwc_matcher->matcher->tbl->ctx;
935 
936 	if (unlikely(!mlx5dr_context_bwc_supported(ctx))) {
937 		rte_errno = EINVAL;
938 		DR_LOG(ERR, "BWC rule: Context created w/o BWC API compatibility");
939 		return NULL;
940 	}
941 
942 	if (unlikely(mlx5dr_table_is_root(bwc_matcher->matcher->tbl)))
943 		return mlx5dr_bwc_rule_create_root(bwc_matcher,
944 						   flow_items,
945 						   rule_actions);
946 
947 	return mlx5dr_bwc_rule_create_hws(bwc_matcher,
948 					  flow_items,
949 					  rule_actions);
950 }
951