xref: /dpdk/drivers/net/mlx5/hws/mlx5dr_bwc.c (revision b56ba2139f4dc04b97f69f0d0ece1f28725a100b)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2023 NVIDIA Corporation & Affiliates
3  */
4 
5 #include "mlx5dr_internal.h"
6 
7 static uint16_t mlx5dr_bwc_queues(struct mlx5dr_context *ctx)
8 {
9 	return (ctx->queues - 1) / 2;
10 }
11 
12 static uint16_t
13 mlx5dr_bwc_gen_queue_idx(struct mlx5dr_context *ctx)
14 {
15 	/* assign random queue */
16 	return rand() % mlx5dr_bwc_queues(ctx);
17 }
18 
19 static uint16_t
20 mlx5dr_bwc_get_queue_id(struct mlx5dr_context *ctx, uint16_t idx)
21 {
22 	return idx + mlx5dr_bwc_queues(ctx);
23 }
24 
25 static uint16_t
26 mlx5dr_bwc_get_burst_th(struct mlx5dr_context *ctx, uint16_t queue_id)
27 {
28 	return RTE_MIN(ctx->send_queue[queue_id].num_entries / 2,
29 		       MLX5DR_BWC_MATCHER_REHASH_BURST_TH);
30 }
31 
32 static rte_spinlock_t *
33 mlx5dr_bwc_get_queue_lock(struct mlx5dr_context *ctx, uint16_t idx)
34 {
35 	return &ctx->bwc_send_queue_locks[idx];
36 }
37 
38 static void mlx5dr_bwc_lock_all_queues(struct mlx5dr_context *ctx)
39 {
40 	uint16_t bwc_queues = mlx5dr_bwc_queues(ctx);
41 	rte_spinlock_t *queue_lock;
42 	int i;
43 
44 	for (i = 0; i < bwc_queues; i++) {
45 		queue_lock = mlx5dr_bwc_get_queue_lock(ctx, i);
46 		rte_spinlock_lock(queue_lock);
47 	}
48 }
49 
50 static void mlx5dr_bwc_unlock_all_queues(struct mlx5dr_context *ctx)
51 {
52 	uint16_t bwc_queues = mlx5dr_bwc_queues(ctx);
53 	rte_spinlock_t *queue_lock;
54 	int i;
55 
56 	for (i = 0; i < bwc_queues; i++) {
57 		queue_lock = mlx5dr_bwc_get_queue_lock(ctx, i);
58 		rte_spinlock_unlock(queue_lock);
59 	}
60 }
61 
62 static void mlx5dr_bwc_matcher_init_attr(struct mlx5dr_matcher_attr *attr,
63 					 uint32_t priority,
64 					 uint8_t size_log,
65 					 bool is_root)
66 {
67 	memset(attr, 0, sizeof(*attr));
68 
69 	attr->priority = priority;
70 	attr->optimize_using_rule_idx = 0;
71 	attr->mode = MLX5DR_MATCHER_RESOURCE_MODE_RULE;
72 	attr->optimize_flow_src = MLX5DR_MATCHER_FLOW_SRC_ANY;
73 	attr->insert_mode = MLX5DR_MATCHER_INSERT_BY_HASH;
74 	attr->distribute_mode = MLX5DR_MATCHER_DISTRIBUTE_BY_HASH;
75 	attr->rule.num_log = size_log;
76 
77 	if (!is_root) {
78 		attr->resizable = true;
79 		attr->max_num_of_at_attach = MLX5DR_BWC_MATCHER_ATTACH_AT_NUM;
80 	}
81 }
82 
83 struct mlx5dr_bwc_matcher *
84 mlx5dr_bwc_matcher_create(struct mlx5dr_table *table,
85 			  uint32_t priority,
86 			  const struct rte_flow_item flow_items[])
87 {
88 	enum mlx5dr_action_type init_action_types[1] = { MLX5DR_ACTION_TYP_LAST };
89 	uint16_t bwc_queues = mlx5dr_bwc_queues(table->ctx);
90 	struct mlx5dr_bwc_matcher *bwc_matcher;
91 	struct mlx5dr_matcher_attr attr = {0};
92 	int i;
93 
94 	if (!mlx5dr_context_bwc_supported(table->ctx)) {
95 		rte_errno = EINVAL;
96 		DR_LOG(ERR, "BWC rule: Context created w/o BWC API compatibility");
97 		return NULL;
98 	}
99 
100 	bwc_matcher = simple_calloc(1, sizeof(*bwc_matcher));
101 	if (!bwc_matcher) {
102 		rte_errno = ENOMEM;
103 		return NULL;
104 	}
105 
106 	bwc_matcher->rules = simple_calloc(bwc_queues, sizeof(*bwc_matcher->rules));
107 	if (!bwc_matcher->rules) {
108 		rte_errno = ENOMEM;
109 		goto free_bwc_matcher;
110 	}
111 
112 	for (i = 0; i < bwc_queues; i++)
113 		LIST_INIT(&bwc_matcher->rules[i]);
114 
115 	mlx5dr_bwc_matcher_init_attr(&attr,
116 				     priority,
117 				     MLX5DR_BWC_MATCHER_INIT_SIZE_LOG,
118 				     mlx5dr_table_is_root(table));
119 
120 	bwc_matcher->mt = mlx5dr_match_template_create(flow_items,
121 						       MLX5DR_MATCH_TEMPLATE_FLAG_NONE);
122 	if (!bwc_matcher->mt) {
123 		rte_errno = EINVAL;
124 		goto free_bwc_matcher_rules;
125 	}
126 
127 	bwc_matcher->priority = priority;
128 	bwc_matcher->size_log = MLX5DR_BWC_MATCHER_INIT_SIZE_LOG;
129 
130 	/* create dummy action template */
131 	bwc_matcher->at[0] = mlx5dr_action_template_create(init_action_types, 0);
132 	bwc_matcher->num_of_at = 1;
133 
134 	bwc_matcher->matcher = mlx5dr_matcher_create(table,
135 						     &bwc_matcher->mt, 1,
136 						     &bwc_matcher->at[0],
137 						     bwc_matcher->num_of_at,
138 						     &attr);
139 	if (!bwc_matcher->matcher) {
140 		rte_errno = EINVAL;
141 		goto free_at;
142 	}
143 
144 	return bwc_matcher;
145 
146 free_at:
147 	mlx5dr_action_template_destroy(bwc_matcher->at[0]);
148 	mlx5dr_match_template_destroy(bwc_matcher->mt);
149 free_bwc_matcher_rules:
150 	simple_free(bwc_matcher->rules);
151 free_bwc_matcher:
152 	simple_free(bwc_matcher);
153 
154 	return NULL;
155 }
156 
157 int mlx5dr_bwc_matcher_destroy(struct mlx5dr_bwc_matcher *bwc_matcher)
158 {
159 	int i;
160 
161 	if (bwc_matcher->num_of_rules)
162 		DR_LOG(ERR, "BWC matcher destroy: matcher still has %d rules",
163 		       bwc_matcher->num_of_rules);
164 
165 	mlx5dr_matcher_destroy(bwc_matcher->matcher);
166 	bwc_matcher->matcher = NULL;
167 
168 	for (i = 0; i < bwc_matcher->num_of_at; i++)
169 		mlx5dr_action_template_destroy(bwc_matcher->at[i]);
170 
171 	mlx5dr_match_template_destroy(bwc_matcher->mt);
172 	simple_free(bwc_matcher->rules);
173 	simple_free(bwc_matcher);
174 
175 	return 0;
176 }
177 
/* Poll completions from a BWC send queue.
 * Decrements *pending_rules by the number of completions received.
 * When 'drain' is set, keeps polling until all pending rules complete;
 * otherwise polls only if enough rules are pending (burst threshold)
 * or the send queue is full.
 * Returns 0 on success, -EINVAL on a polling error or a failed completion.
 */
static int
mlx5dr_bwc_queue_poll(struct mlx5dr_context *ctx,
		      uint16_t queue_id,
		      uint32_t *pending_rules,
		      bool drain)
{
	struct rte_flow_op_result comp[MLX5DR_BWC_MATCHER_REHASH_BURST_TH];
	uint16_t burst_th = mlx5dr_bwc_get_burst_th(ctx, queue_id);
	bool got_comp = *pending_rules >= burst_th;
	bool queue_full;
	int err = 0;
	int ret;
	int i;

	/* Check if there are any completions at all */
	if (!got_comp && !drain)
		return 0;

	/* The FULL state of a SQ is always a subcondition of the original 'got_comp'. */
	queue_full = mlx5dr_send_engine_full(&ctx->send_queue[queue_id]);
	while (queue_full || ((got_comp || drain) && *pending_rules)) {
		ret = mlx5dr_send_queue_poll(ctx, queue_id, comp, burst_th);
		if (unlikely(ret < 0)) {
			DR_LOG(ERR, "Rehash error: polling queue %d returned %d\n",
			       queue_id, ret);
			return -EINVAL;
		}

		if (ret) {
			(*pending_rules) -= ret;
			/* A failed completion is recorded but does not stop the
			 * loop - the remaining completions are still consumed.
			 */
			for (i = 0; i < ret; i++) {
				if (unlikely(comp[i].status != RTE_FLOW_OP_SUCCESS)) {
					DR_LOG(ERR,
					       "Rehash error: polling queue %d returned completion with error\n",
					       queue_id);
					err = -EINVAL;
				}
			}
			/* Progress was made, so the queue can't still be full. */
			queue_full = false;
		}

		got_comp = !!ret;
	}

	return err;
}
224 
225 static void
226 mlx5dr_bwc_rule_fill_attr(struct mlx5dr_bwc_matcher *bwc_matcher,
227 			  uint16_t bwc_queue_idx,
228 			  struct mlx5dr_rule_attr *rule_attr)
229 {
230 	struct mlx5dr_context *ctx = bwc_matcher->matcher->tbl->ctx;
231 
232 	/* no use of INSERT_BY_INDEX in bwc rule */
233 	rule_attr->rule_idx = 0;
234 
235 	/* notify HW at each rule insertion/deletion */
236 	rule_attr->burst = 0;
237 
238 	/* We don't need user data, but the API requires it to exist */
239 	rule_attr->user_data = (void *)0xFACADE;
240 
241 	rule_attr->queue_id = mlx5dr_bwc_get_queue_id(ctx, bwc_queue_idx);
242 }
243 
244 static struct mlx5dr_bwc_rule *
245 mlx5dr_bwc_rule_alloc(void)
246 {
247 	struct mlx5dr_bwc_rule *bwc_rule;
248 
249 	bwc_rule = simple_calloc(1, sizeof(*bwc_rule));
250 	if (unlikely(!bwc_rule))
251 		goto out_err;
252 
253 	bwc_rule->rule = simple_calloc(1, sizeof(*bwc_rule->rule));
254 	if (unlikely(!bwc_rule->rule))
255 		goto free_rule;
256 
257 	return bwc_rule;
258 
259 free_rule:
260 	simple_free(bwc_rule);
261 out_err:
262 	rte_errno = ENOMEM;
263 	return NULL;
264 }
265 
266 static void
267 mlx5dr_bwc_rule_free(struct mlx5dr_bwc_rule *bwc_rule)
268 {
269 	if (likely(bwc_rule->rule))
270 		simple_free(bwc_rule->rule);
271 	simple_free(bwc_rule);
272 }
273 
274 static void
275 mlx5dr_bwc_rule_list_add(struct mlx5dr_bwc_rule *bwc_rule, uint16_t idx)
276 {
277 	struct mlx5dr_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
278 
279 	rte_atomic_fetch_add_explicit(&bwc_matcher->num_of_rules, 1, rte_memory_order_relaxed);
280 	bwc_rule->bwc_queue_idx = idx;
281 	LIST_INSERT_HEAD(&bwc_matcher->rules[idx], bwc_rule, next);
282 }
283 
284 static void mlx5dr_bwc_rule_list_remove(struct mlx5dr_bwc_rule *bwc_rule)
285 {
286 	struct mlx5dr_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
287 
288 	rte_atomic_fetch_sub_explicit(&bwc_matcher->num_of_rules, 1, rte_memory_order_relaxed);
289 	LIST_REMOVE(bwc_rule, next);
290 }
291 
292 static int
293 mlx5dr_bwc_rule_destroy_hws_async(struct mlx5dr_bwc_rule *bwc_rule,
294 				  struct mlx5dr_rule_attr *attr)
295 {
296 	return mlx5dr_rule_destroy(bwc_rule->rule, attr);
297 }
298 
/* Destroy a BWC rule and busy-wait for its completion.
 * Returns 0 on success; otherwise the async destroy error, or EINVAL
 * (also set in rte_errno) on a failed completion.
 * NOTE(review): the poll loop spins until exactly one completion is
 * returned; a persistent polling error (negative return) would loop
 * forever - confirm mlx5dr_send_queue_poll can't keep failing here.
 */
static int
mlx5dr_bwc_rule_destroy_hws_sync(struct mlx5dr_bwc_rule *bwc_rule,
				 struct mlx5dr_rule_attr *rule_attr)
{
	struct mlx5dr_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
	struct rte_flow_op_result completion;
	int ret;

	ret = mlx5dr_bwc_rule_destroy_hws_async(bwc_rule, rule_attr);
	if (unlikely(ret))
		return ret;

	/* Busy-wait for the single destroy completion. */
	do {
		ret = mlx5dr_send_queue_poll(ctx, rule_attr->queue_id, &completion, 1);
	} while (ret != 1);

	/* Both the completion status and the rule's own state must
	 * indicate a finished (or in-flight) deletion.
	 */
	if (unlikely(completion.status != RTE_FLOW_OP_SUCCESS ||
		     (bwc_rule->rule->status != MLX5DR_RULE_STATUS_DELETED &&
		      bwc_rule->rule->status != MLX5DR_RULE_STATUS_DELETING))) {
		DR_LOG(ERR, "Failed destroying BWC rule: completion %d, rule status %d",
		       completion.status, bwc_rule->rule->status);
		rte_errno = EINVAL;
		return rte_errno;
	}

	return 0;
}
326 
327 static int mlx5dr_bwc_rule_destroy_hws(struct mlx5dr_bwc_rule *bwc_rule)
328 {
329 	struct mlx5dr_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
330 	struct mlx5dr_context *ctx = bwc_matcher->matcher->tbl->ctx;
331 	uint16_t idx = bwc_rule->bwc_queue_idx;
332 	struct mlx5dr_rule_attr attr;
333 	rte_spinlock_t *queue_lock;
334 	int ret;
335 
336 	mlx5dr_bwc_rule_fill_attr(bwc_matcher, idx, &attr);
337 
338 	queue_lock = mlx5dr_bwc_get_queue_lock(ctx, idx);
339 
340 	rte_spinlock_lock(queue_lock);
341 
342 	ret = mlx5dr_bwc_rule_destroy_hws_sync(bwc_rule, &attr);
343 	mlx5dr_bwc_rule_list_remove(bwc_rule);
344 
345 	rte_spinlock_unlock(queue_lock);
346 
347 	mlx5dr_bwc_rule_free(bwc_rule);
348 
349 	return ret;
350 }
351 
352 static int mlx5dr_bwc_rule_destroy_root(struct mlx5dr_bwc_rule *bwc_rule)
353 {
354 	int ret;
355 
356 	ret = mlx5dr_rule_destroy_root_no_comp(bwc_rule->rule);
357 
358 	mlx5dr_bwc_rule_free(bwc_rule);
359 
360 	return ret;
361 }
362 
363 int mlx5dr_bwc_rule_destroy(struct mlx5dr_bwc_rule *bwc_rule)
364 {
365 	if (unlikely(mlx5dr_table_is_root(bwc_rule->bwc_matcher->matcher->tbl)))
366 		return mlx5dr_bwc_rule_destroy_root(bwc_rule);
367 
368 	return mlx5dr_bwc_rule_destroy_hws(bwc_rule);
369 }
370 
371 static struct mlx5dr_bwc_rule *
372 mlx5dr_bwc_rule_create_hws_async(struct mlx5dr_bwc_matcher *bwc_matcher,
373 				 const struct rte_flow_item flow_items[],
374 				 uint8_t at_idx,
375 				 struct mlx5dr_rule_action rule_actions[],
376 				 struct mlx5dr_rule_attr *rule_attr)
377 {
378 	struct mlx5dr_bwc_rule *bwc_rule;
379 	int ret;
380 
381 	bwc_rule = mlx5dr_bwc_rule_alloc();
382 	if (unlikely(!bwc_rule))
383 		return NULL;
384 
385 	bwc_rule->bwc_matcher = bwc_matcher;
386 
387 	ret = mlx5dr_rule_create(bwc_matcher->matcher,
388 				 0, /* only one match template supported */
389 				 flow_items,
390 				 at_idx,
391 				 rule_actions,
392 				 rule_attr,
393 				 bwc_rule->rule);
394 
395 	if (unlikely(ret)) {
396 		mlx5dr_bwc_rule_free(bwc_rule);
397 		rte_errno = EINVAL;
398 		return NULL;
399 	}
400 
401 	return bwc_rule;
402 }
403 
404 static struct mlx5dr_bwc_rule *
405 mlx5dr_bwc_rule_create_root_sync(struct mlx5dr_bwc_matcher *bwc_matcher,
406 				 const struct rte_flow_item flow_items[],
407 				 uint8_t num_actions,
408 				 struct mlx5dr_rule_action rule_actions[])
409 {
410 	struct mlx5dr_bwc_rule *bwc_rule;
411 	int ret;
412 
413 	bwc_rule = mlx5dr_bwc_rule_alloc();
414 	if (unlikely(!bwc_rule)) {
415 		rte_errno = ENOMEM;
416 		return NULL;
417 	}
418 
419 	bwc_rule->bwc_matcher = bwc_matcher;
420 	bwc_rule->rule->matcher = bwc_matcher->matcher;
421 
422 	ret = mlx5dr_rule_create_root_no_comp(bwc_rule->rule,
423 					      flow_items,
424 					      num_actions,
425 					      rule_actions);
426 	if (unlikely(ret)) {
427 		mlx5dr_bwc_rule_free(bwc_rule);
428 		rte_errno = EINVAL;
429 		return NULL;
430 	}
431 
432 	return bwc_rule;
433 }
434 
/* Create a BWC rule via the async API and busy-wait for its completion.
 * Returns the rule on success, NULL on failure (rule freed).
 * NOTE(review): like the sync destroy path, the poll loop spins until
 * exactly one completion arrives - confirm the poll can't persistently
 * return an error here.
 */
static struct mlx5dr_bwc_rule *
mlx5dr_bwc_rule_create_hws_sync(struct mlx5dr_bwc_matcher *bwc_matcher,
				const struct rte_flow_item flow_items[],
				uint8_t at_idx,
				struct mlx5dr_rule_action rule_actions[],
				struct mlx5dr_rule_attr *rule_attr)

{
	struct mlx5dr_context *ctx = bwc_matcher->matcher->tbl->ctx;
	struct rte_flow_op_result completion;
	struct mlx5dr_bwc_rule *bwc_rule;
	int ret;

	bwc_rule = mlx5dr_bwc_rule_create_hws_async(bwc_matcher, flow_items,
						    at_idx, rule_actions,
						    rule_attr);
	if (unlikely(!bwc_rule))
		return NULL;

	/* Busy-wait for the single create completion. */
	do {
		ret = mlx5dr_send_queue_poll(ctx, rule_attr->queue_id, &completion, 1);
	} while (ret != 1);

	/* Both the completion status and the rule's own state must
	 * indicate a finished (or in-flight) creation.
	 */
	if (unlikely(completion.status != RTE_FLOW_OP_SUCCESS ||
		     (bwc_rule->rule->status != MLX5DR_RULE_STATUS_CREATED &&
		      bwc_rule->rule->status != MLX5DR_RULE_STATUS_CREATING))) {
		DR_LOG(ERR, "Failed creating BWC rule: completion %d, rule status %d",
		       completion.status, bwc_rule->rule->status);
		mlx5dr_bwc_rule_free(bwc_rule);
		return NULL;
	}

	return bwc_rule;
}
469 
470 static bool
471 mlx5dr_bwc_matcher_size_maxed_out(struct mlx5dr_bwc_matcher *bwc_matcher)
472 {
473 	struct mlx5dr_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps;
474 
475 	return bwc_matcher->size_log + MLX5DR_MATCHER_ASSURED_MAIN_TBL_DEPTH >=
476 	       caps->ste_alloc_log_max - 1;
477 }
478 
479 static bool
480 mlx5dr_bwc_matcher_rehash_size_needed(struct mlx5dr_bwc_matcher *bwc_matcher,
481 				      uint32_t num_of_rules)
482 {
483 	/* size-based rehash for root table is kernel's responsibility */
484 	if (unlikely(mlx5dr_table_is_root(bwc_matcher->matcher->tbl)))
485 		return false;
486 
487 	if (unlikely(mlx5dr_bwc_matcher_size_maxed_out(bwc_matcher)))
488 		return false;
489 
490 	if (unlikely((num_of_rules * 100 / MLX5DR_BWC_MATCHER_REHASH_PERCENT_TH) >=
491 		     (1UL << bwc_matcher->size_log)))
492 		return true;
493 
494 	return false;
495 }
496 
497 static void
498 mlx5dr_bwc_rule_actions_to_action_types(struct mlx5dr_rule_action rule_actions[],
499 					enum mlx5dr_action_type action_types[])
500 {
501 	int i = 0;
502 
503 	for (i = 0;
504 	     rule_actions[i].action && (rule_actions[i].action->type != MLX5DR_ACTION_TYP_LAST);
505 	     i++) {
506 		action_types[i] = (enum mlx5dr_action_type)rule_actions[i].action->type;
507 	}
508 
509 	action_types[i] = MLX5DR_ACTION_TYP_LAST;
510 }
511 
512 static int
513 mlx5dr_bwc_rule_actions_num(struct mlx5dr_rule_action rule_actions[])
514 {
515 	int i = 0;
516 
517 	while (rule_actions[i].action &&
518 	       (rule_actions[i].action->type != MLX5DR_ACTION_TYP_LAST))
519 		i++;
520 
521 	return i;
522 }
523 
524 static int
525 mlx5dr_bwc_matcher_extend_at(struct mlx5dr_bwc_matcher *bwc_matcher,
526 			     struct mlx5dr_rule_action rule_actions[])
527 {
528 	enum mlx5dr_action_type action_types[MLX5_HW_MAX_ACTS];
529 
530 	mlx5dr_bwc_rule_actions_to_action_types(rule_actions, action_types);
531 
532 	bwc_matcher->at[bwc_matcher->num_of_at] =
533 		mlx5dr_action_template_create(action_types, 0);
534 
535 	if (unlikely(!bwc_matcher->at[bwc_matcher->num_of_at])) {
536 		rte_errno = ENOMEM;
537 		return rte_errno;
538 	}
539 
540 	bwc_matcher->num_of_at++;
541 	return 0;
542 }
543 
/* Grow the matcher's size_log by the configured step, clamped to the
 * device's STE allocation limit.
 * Returns 0 on success, -ENOMEM if the matcher is already at max size.
 * NOTE(review): the error message prints caps->rtc_log_depth_max, but
 * the maxed-out check is based on caps->ste_alloc_log_max - confirm
 * which cap is the actual limit being exceeded here.
 */
static int
mlx5dr_bwc_matcher_extend_size(struct mlx5dr_bwc_matcher *bwc_matcher)
{
	struct mlx5dr_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps;

	if (unlikely(mlx5dr_bwc_matcher_size_maxed_out(bwc_matcher))) {
		DR_LOG(ERR, "Can't resize matcher: depth exceeds limit %d",
		       caps->rtc_log_depth_max);
		return -ENOMEM;
	}

	bwc_matcher->size_log =
		RTE_MIN(bwc_matcher->size_log + MLX5DR_BWC_MATCHER_SIZE_LOG_STEP,
			caps->ste_alloc_log_max - MLX5DR_MATCHER_ASSURED_MAIN_TBL_DEPTH);

	return 0;
}
561 
/* Find an existing action template that matches the rule's action
 * sequence exactly (same types, same order, same length).
 * Returns the template index, or -1 if none matches.
 */
static int
mlx5dr_bwc_matcher_find_at(struct mlx5dr_bwc_matcher *bwc_matcher,
			   struct mlx5dr_rule_action rule_actions[])
{
	enum mlx5dr_action_type *action_type_arr;
	int i, j;

	/* start from index 1 - first action template is a dummy */
	for (i = 1; i < bwc_matcher->num_of_at; i++) {
		j = 0;
		action_type_arr = bwc_matcher->at[i]->action_type_arr;

		/* Compare element-wise until a mismatch or the rule's
		 * action list terminates (NULL or LAST).
		 */
		while (rule_actions[j].action &&
		       rule_actions[j].action->type != MLX5DR_ACTION_TYP_LAST) {
			if (action_type_arr[j] != rule_actions[j].action->type)
				break;
			j++;
		}

		/* A match requires both sequences to end at the same position. */
		if (action_type_arr[j] == MLX5DR_ACTION_TYP_LAST &&
		    (!rule_actions[j].action ||
		     rule_actions[j].action->type == MLX5DR_ACTION_TYP_LAST))
			return i;
	}

	return -1;
}
589 
/* Move all rules from the current matcher to its resize target.
 * Rules are moved queue by queue in bursts, polling completions along
 * the way, then all queues are drained.
 * Must be called with all BWC queue locks held.
 * Returns 0 on success, negative rte_errno value on failure.
 */
static int
mlx5dr_bwc_matcher_move_all(struct mlx5dr_bwc_matcher *bwc_matcher)
{
	struct mlx5dr_context *ctx = bwc_matcher->matcher->tbl->ctx;
	uint16_t bwc_queues = mlx5dr_bwc_queues(ctx);
	struct mlx5dr_bwc_rule **bwc_rules;
	struct mlx5dr_rule_attr rule_attr;
	uint32_t *pending_rules;
	uint16_t burst_th;
	bool all_done;
	int i, j, ret;

	/* Root matchers are not resizable - moving rules is HWS-only. */
	if (mlx5dr_table_is_root(bwc_matcher->matcher->tbl)) {
		rte_errno = EINVAL;
		return -rte_errno;
	}

	mlx5dr_bwc_rule_fill_attr(bwc_matcher, 0, &rule_attr);

	/* Per-queue count of move operations posted but not yet completed. */
	pending_rules = simple_calloc(bwc_queues, sizeof(*pending_rules));
	if (!pending_rules) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}

	/* Per-queue cursor into that queue's rule list. */
	bwc_rules = simple_calloc(bwc_queues, sizeof(*bwc_rules));
	if (!bwc_rules) {
		rte_errno = ENOMEM;
		goto free_pending_rules;
	}

	for (i = 0; i < bwc_queues; i++) {
		if (LIST_EMPTY(&bwc_matcher->rules[i]))
			bwc_rules[i] = NULL;
		else
			bwc_rules[i] = LIST_FIRST(&bwc_matcher->rules[i]);
	}

	/* Round-robin over the queues, posting up to one burst of moves per
	 * queue per pass, until every queue's rule list is exhausted.
	 */
	do {
		all_done = true;

		for (i = 0; i < bwc_queues; i++) {
			rule_attr.queue_id = mlx5dr_bwc_get_queue_id(ctx, i);
			burst_th = mlx5dr_bwc_get_burst_th(ctx, rule_attr.queue_id);

			for (j = 0; j < burst_th && bwc_rules[i]; j++) {
				/* Ring the doorbell only on the last move of a burst. */
				rule_attr.burst = !!((j + 1) % burst_th);
				ret = mlx5dr_matcher_resize_rule_move(bwc_matcher->matcher,
								      bwc_rules[i]->rule,
								      &rule_attr);
				if (unlikely(ret)) {
					DR_LOG(ERR, "Moving BWC rule failed during rehash - %d",
					       ret);
					rte_errno = ENOMEM;
					goto free_bwc_rules;
				}

				all_done = false;
				pending_rules[i]++;
				bwc_rules[i] = LIST_NEXT(bwc_rules[i], next);

				ret = mlx5dr_bwc_queue_poll(ctx, rule_attr.queue_id,
							    &pending_rules[i], false);
				if (unlikely(ret)) {
					rte_errno = EINVAL;
					goto free_bwc_rules;
				}
			}
		}
	} while (!all_done);

	/* drain all the bwc queues */
	for (i = 0; i < bwc_queues; i++) {
		if (pending_rules[i]) {
			uint16_t queue_id = mlx5dr_bwc_get_queue_id(ctx, i);
			mlx5dr_send_engine_flush_queue(&ctx->send_queue[queue_id]);
			ret = mlx5dr_bwc_queue_poll(ctx, queue_id,
						    &pending_rules[i], true);
			if (unlikely(ret)) {
				rte_errno = EINVAL;
				goto free_bwc_rules;
			}
		}
	}

	/* Success path: clear rte_errno so the shared -rte_errno return is 0. */
	rte_errno = 0;

free_bwc_rules:
	simple_free(bwc_rules);
free_pending_rules:
	simple_free(pending_rules);

	return -rte_errno;
}
684 
685 static int
686 mlx5dr_bwc_matcher_move(struct mlx5dr_bwc_matcher *bwc_matcher)
687 {
688 	struct mlx5dr_matcher_attr matcher_attr = {0};
689 	struct mlx5dr_matcher *old_matcher;
690 	struct mlx5dr_matcher *new_matcher;
691 	int ret;
692 
693 	mlx5dr_bwc_matcher_init_attr(&matcher_attr,
694 				     bwc_matcher->priority,
695 				     bwc_matcher->size_log,
696 				     mlx5dr_table_is_root(bwc_matcher->matcher->tbl));
697 
698 	old_matcher = bwc_matcher->matcher;
699 	new_matcher = mlx5dr_matcher_create(old_matcher->tbl,
700 					    &bwc_matcher->mt, 1,
701 					    bwc_matcher->at,
702 					    bwc_matcher->num_of_at,
703 					    &matcher_attr);
704 	if (!new_matcher) {
705 		DR_LOG(ERR, "Rehash error: matcher creation failed");
706 		return -ENOMEM;
707 	}
708 
709 	ret = mlx5dr_matcher_resize_set_target(old_matcher, new_matcher);
710 	if (ret) {
711 		DR_LOG(ERR, "Rehash error: failed setting resize target");
712 		return ret;
713 	}
714 
715 	ret = mlx5dr_bwc_matcher_move_all(bwc_matcher);
716 	if (ret) {
717 		DR_LOG(ERR, "Rehash error: moving rules failed");
718 		return -ENOMEM;
719 	}
720 
721 	bwc_matcher->matcher = new_matcher;
722 	mlx5dr_matcher_destroy(old_matcher);
723 
724 	return 0;
725 }
726 
727 static int
728 mlx5dr_bwc_matcher_rehash_size(struct mlx5dr_bwc_matcher *bwc_matcher)
729 {
730 	uint32_t num_of_rules;
731 	int ret;
732 
733 	/* If the current matcher size is already at its max size, we can't
734 	 * do the rehash. Skip it and try adding the rule again - perhaps
735 	 * there was some change.
736 	 */
737 	if (mlx5dr_bwc_matcher_size_maxed_out(bwc_matcher))
738 		return 0;
739 
740 	/* It is possible that other rule has already performed rehash.
741 	 * Need to check again if we really need rehash.
742 	 * If the reason for rehash was size, but not any more - skip rehash.
743 	 */
744 	num_of_rules = rte_atomic_load_explicit(&bwc_matcher->num_of_rules,
745 						rte_memory_order_relaxed);
746 	if (!mlx5dr_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))
747 		return 0;
748 
749 	/* Now we're done all the checking - do the rehash:
750 	 *  - extend match RTC size
751 	 *  - create new matcher
752 	 *  - move all the rules to the new matcher
753 	 *  - destroy the old matcher
754 	 */
755 
756 	ret = mlx5dr_bwc_matcher_extend_size(bwc_matcher);
757 	if (ret)
758 		return ret;
759 
760 	return mlx5dr_bwc_matcher_move(bwc_matcher);
761 }
762 
static int
mlx5dr_bwc_matcher_rehash_at(struct mlx5dr_bwc_matcher *bwc_matcher)
{
	/* The new action template is already stored in the bwc_matcher, so
	 * no extra validation is needed here - just recreate the matcher
	 * with the full template set, migrate all rules to it, and drop
	 * the old matcher.
	 */
	return mlx5dr_bwc_matcher_move(bwc_matcher);
}
775 
776 static struct mlx5dr_bwc_rule *
777 mlx5dr_bwc_rule_create_root(struct mlx5dr_bwc_matcher *bwc_matcher,
778 			    const struct rte_flow_item flow_items[],
779 			    struct mlx5dr_rule_action rule_actions[])
780 {
781 	struct mlx5dr_bwc_rule *bwc_rule;
782 
783 	bwc_rule = mlx5dr_bwc_rule_create_root_sync(bwc_matcher,
784 						    flow_items,
785 						    mlx5dr_bwc_rule_actions_num(rule_actions),
786 						    rule_actions);
787 
788 	if (unlikely(!bwc_rule))
789 		DR_LOG(ERR, "BWC rule: failed creating rule on root tbl");
790 
791 	return bwc_rule;
792 }
793 
/* Create a BWC rule on a non-root (HWS) table.
 * Picks a random BWC queue, ensures an action template matching the
 * rule exists (extending the matcher or rehashing if needed), rehashes
 * by size when the matcher approaches its utilization threshold, and
 * finally inserts the rule - with one rehash-and-retry attempt if the
 * first insertion fails.
 * Returns the new rule, or NULL on failure.
 */
static struct mlx5dr_bwc_rule *
mlx5dr_bwc_rule_create_hws(struct mlx5dr_bwc_matcher *bwc_matcher,
			   const struct rte_flow_item flow_items[],
			   struct mlx5dr_rule_action rule_actions[])
{
	struct mlx5dr_context *ctx = bwc_matcher->matcher->tbl->ctx;
	struct mlx5dr_bwc_rule *bwc_rule = NULL;
	struct mlx5dr_rule_attr rule_attr;
	rte_spinlock_t *queue_lock;
	uint32_t num_of_rules;
	uint16_t idx;
	int at_idx;
	int ret;

	idx = mlx5dr_bwc_gen_queue_idx(ctx);

	mlx5dr_bwc_rule_fill_attr(bwc_matcher, idx, &rule_attr);

	queue_lock = mlx5dr_bwc_get_queue_lock(ctx, idx);

	rte_spinlock_lock(queue_lock);

	/* check if rehash needed due to missing action template */
	at_idx = mlx5dr_bwc_matcher_find_at(bwc_matcher, rule_actions);
	if (unlikely(at_idx < 0)) {
		/* we need to extend BWC matcher action templates array */
		/* Matcher-wide change: drop our queue lock and take all of
		 * them to exclude every concurrent rule operation.
		 */
		rte_spinlock_unlock(queue_lock);
		mlx5dr_bwc_lock_all_queues(ctx);

		ret = mlx5dr_bwc_matcher_extend_at(bwc_matcher, rule_actions);
		if (unlikely(ret)) {
			mlx5dr_bwc_unlock_all_queues(ctx);
			rte_errno = EINVAL;
			DR_LOG(ERR, "BWC rule: failed extending action template - %d", ret);
			return NULL;
		}

		/* action templates array was extended, we need the last idx */
		at_idx = bwc_matcher->num_of_at - 1;

		ret = mlx5dr_matcher_attach_at(bwc_matcher->matcher,
					       bwc_matcher->at[at_idx]);
		if (unlikely(ret)) {
			/* Action template attach failed, possibly due to
			 * requiring more action STEs.
			 * Need to attempt creating new matcher with all
			 * the action templates, including the new one.
			 */
			ret = mlx5dr_bwc_matcher_rehash_at(bwc_matcher);
			if (unlikely(ret)) {
				/* Roll back the action template added above. */
				mlx5dr_action_template_destroy(bwc_matcher->at[at_idx]);
				bwc_matcher->at[at_idx] = NULL;
				bwc_matcher->num_of_at--;

				mlx5dr_bwc_unlock_all_queues(ctx);

				DR_LOG(ERR, "BWC rule insertion: rehash AT failed - %d", ret);
				return NULL;
			}
		}

		mlx5dr_bwc_unlock_all_queues(ctx);
		rte_spinlock_lock(queue_lock);
	}

	/* check if number of rules require rehash */
	num_of_rules = rte_atomic_load_explicit(&bwc_matcher->num_of_rules,
						rte_memory_order_relaxed);
	if (unlikely(mlx5dr_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))) {
		/* Size rehash is also matcher-wide - switch to all-queue locks. */
		rte_spinlock_unlock(queue_lock);

		mlx5dr_bwc_lock_all_queues(ctx);
		ret = mlx5dr_bwc_matcher_rehash_size(bwc_matcher);
		mlx5dr_bwc_unlock_all_queues(ctx);

		if (ret) {
			DR_LOG(ERR, "BWC rule insertion: rehash size [%d -> %d] failed - %d",
			       bwc_matcher->size_log - MLX5DR_BWC_MATCHER_SIZE_LOG_STEP,
			       bwc_matcher->size_log,
			       ret);
			return NULL;
		}

		rte_spinlock_lock(queue_lock);
	}

	bwc_rule = mlx5dr_bwc_rule_create_hws_sync(bwc_matcher,
						   flow_items,
						   at_idx,
						   rule_actions,
						   &rule_attr);

	if (likely(bwc_rule)) {
		mlx5dr_bwc_rule_list_add(bwc_rule, idx);
		rte_spinlock_unlock(queue_lock);
		return bwc_rule; /* rule inserted successfully */
	}

	/* At this point the rule wasn't added.
	 * It could be because there was collision, or some other problem.
	 * If we don't dive deeper than API, the only thing we know is that
	 * the status of completion is RTE_FLOW_OP_ERROR.
	 * Try rehash by size and insert rule again - last chance.
	 */

	rte_spinlock_unlock(queue_lock);

	mlx5dr_bwc_lock_all_queues(ctx);
	ret = mlx5dr_bwc_matcher_rehash_size(bwc_matcher);
	mlx5dr_bwc_unlock_all_queues(ctx);

	if (ret) {
		DR_LOG(ERR, "BWC rule insertion: rehash failed - %d", ret);
		return NULL;
	}

	/* Rehash done, but we still have that pesky rule to add */
	rte_spinlock_lock(queue_lock);

	bwc_rule = mlx5dr_bwc_rule_create_hws_sync(bwc_matcher,
						   flow_items,
						   at_idx,
						   rule_actions,
						   &rule_attr);

	if (unlikely(!bwc_rule)) {
		rte_spinlock_unlock(queue_lock);
		DR_LOG(ERR, "BWC rule insertion failed");
		return NULL;
	}

	mlx5dr_bwc_rule_list_add(bwc_rule, idx);
	rte_spinlock_unlock(queue_lock);

	return bwc_rule;
}
930 
931 struct mlx5dr_bwc_rule *
932 mlx5dr_bwc_rule_create(struct mlx5dr_bwc_matcher *bwc_matcher,
933 		       const struct rte_flow_item flow_items[],
934 		       struct mlx5dr_rule_action rule_actions[])
935 {
936 	struct mlx5dr_context *ctx = bwc_matcher->matcher->tbl->ctx;
937 
938 	if (unlikely(!mlx5dr_context_bwc_supported(ctx))) {
939 		rte_errno = EINVAL;
940 		DR_LOG(ERR, "BWC rule: Context created w/o BWC API compatibility");
941 		return NULL;
942 	}
943 
944 	if (unlikely(mlx5dr_table_is_root(bwc_matcher->matcher->tbl)))
945 		return mlx5dr_bwc_rule_create_root(bwc_matcher,
946 						   flow_items,
947 						   rule_actions);
948 
949 	return mlx5dr_bwc_rule_create_hws(bwc_matcher,
950 					  flow_items,
951 					  rule_actions);
952 }
953