xref: /dpdk/drivers/net/mlx5/hws/mlx5dr_rule.c (revision 4b53e9802b6b6040ad5622b1414aaa93d9581d0c)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include "mlx5dr_internal.h"

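/* Decide whether an FDB rule can skip the RX or TX RTC.
 * The matcher flow-source hint takes precedence; otherwise the decision is
 * based on the REPRESENTED_PORT item: a wire port skips TX, a vport skips RX.
 */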
static void mlx5dr_rule_skip(struct mlx5dr_matcher *matcher,
			     const struct rte_flow_item *items,
			     bool *skip_rx, bool *skip_tx)
{
	struct mlx5dr_match_template *mt = matcher->mt[0];
	const struct flow_hw_port_info *vport;
	const struct rte_flow_item_ethdev *v;

	/* Flow_src is the 1st priority */
	if (matcher->attr.optimize_flow_src) {
		*skip_tx = matcher->attr.optimize_flow_src == MLX5DR_MATCHER_FLOW_SRC_WIRE;
		*skip_rx = matcher->attr.optimize_flow_src == MLX5DR_MATCHER_FLOW_SRC_VPORT;
		return;
	}

	/* By default FDB rules are added to both RX and TX */
	*skip_rx = false;
	*skip_tx = false;

	if (mt->item_flags & MLX5_FLOW_ITEM_REPRESENTED_PORT) {
		v = items[mt->vport_item_id].spec;
		vport = flow_hw_conv_port_id(v->port_id);
		if (unlikely(!vport)) {
			DR_LOG(ERR, "Fail to map port ID %d, ignoring", v->port_id);
			return;
		}

		if (!vport->is_wire)
			/* Match vport ID is not WIRE -> Skip RX */
			*skip_rx = true;
		else
			/* Match vport ID is WIRE -> Skip TX */
			*skip_tx = true;
	}
}

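/* Fill a dependent WQE with the rule context and the RTC IDs it should be
 * written to, according to the table type. For FDB the RX and/or TX RTCs
 * may be skipped based on mlx5dr_rule_skip().
 */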
static void mlx5dr_rule_init_dep_wqe(struct mlx5dr_send_ring_dep_wqe *dep_wqe,
				     struct mlx5dr_rule *rule,
				     const struct rte_flow_item *items,
				     void *user_data)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_table *tbl = matcher->tbl;
	bool skip_rx, skip_tx;

	dep_wqe->rule = rule;
	dep_wqe->user_data = user_data;

	switch (tbl->type) {
	case MLX5DR_TABLE_TYPE_NIC_RX:
	case MLX5DR_TABLE_TYPE_NIC_TX:
		dep_wqe->rtc_0 = matcher->match_ste.rtc_0->id;
		dep_wqe->retry_rtc_0 = matcher->col_matcher ?
				       matcher->col_matcher->match_ste.rtc_0->id : 0;
		dep_wqe->rtc_1 = 0;
		dep_wqe->retry_rtc_1 = 0;
		break;

	case MLX5DR_TABLE_TYPE_FDB:
		mlx5dr_rule_skip(matcher, items, &skip_rx, &skip_tx);

		if (!skip_rx) {
			dep_wqe->rtc_0 = matcher->match_ste.rtc_0->id;
			dep_wqe->retry_rtc_0 = matcher->col_matcher ?
					       matcher->col_matcher->match_ste.rtc_0->id : 0;
		} else {
			dep_wqe->rtc_0 = 0;
			dep_wqe->retry_rtc_0 = 0;
		}

		if (!skip_tx) {
			dep_wqe->rtc_1 = matcher->match_ste.rtc_1->id;
			dep_wqe->retry_rtc_1 = matcher->col_matcher ?
					       matcher->col_matcher->match_ste.rtc_1->id : 0;
		} else {
			dep_wqe->rtc_1 = 0;
			dep_wqe->retry_rtc_1 = 0;
		}

		break;

	default:
		assert(false);
		break;
	}
}

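/* Report a completion on the send engine without posting any WQE.
 * Used by the root table path and when destroying rules that already failed.
 */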
static void mlx5dr_rule_gen_comp(struct mlx5dr_send_engine *queue,
				 struct mlx5dr_rule *rule,
				 bool err,
				 void *user_data,
				 enum mlx5dr_rule_status rule_status_on_succ)
{
	enum rte_flow_op_status comp_status;

	if (!err) {
		comp_status = RTE_FLOW_OP_SUCCESS;
		rule->status = rule_status_on_succ;
	} else {
		comp_status = RTE_FLOW_OP_ERROR;
		rule->status = MLX5DR_RULE_STATUS_FAILED;
	}

	mlx5dr_send_engine_inc_rule(queue);
	mlx5dr_send_engine_gen_comp(queue, user_data, comp_status);
}

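/* Reserve action STEs for the rule: either derive the index directly from
 * rule_idx (locking optimization) or allocate a chunk from the matcher pool.
 */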
static int mlx5dr_rule_alloc_action_ste(struct mlx5dr_rule *rule,
					struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	int ret;

	/* Use rule_idx for locking optimization, otherwise allocate from pool */
	if (matcher->attr.optimize_using_rule_idx) {
		rule->action_ste_idx = attr->rule_idx * matcher->action_ste.max_stes;
	} else {
		struct mlx5dr_pool_chunk ste = {0};

		ste.order = rte_log2_u32(matcher->action_ste.max_stes);
		ret = mlx5dr_pool_chunk_alloc(matcher->action_ste.pool, &ste);
		if (ret) {
			DR_LOG(ERR, "Failed to allocate STE for rule actions");
			return ret;
		}
		rule->action_ste_idx = ste.offset;
	}
	return 0;
}

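/* Return the rule action STEs to the matcher pool. This is a no-op when no
 * action STEs were allocated or when the index was derived from rule_idx.
 */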
void mlx5dr_rule_free_action_ste_idx(struct mlx5dr_rule *rule)
{
	struct mlx5dr_matcher *matcher = rule->matcher;

	if (rule->action_ste_idx > -1 && !matcher->attr.optimize_using_rule_idx) {
		struct mlx5dr_pool_chunk ste = {0};

		/* This release is safe only when the rule match part was deleted */
		ste.order = rte_log2_u32(matcher->action_ste.max_stes);
		ste.offset = rule->action_ste_idx;
		mlx5dr_pool_chunk_free(matcher->action_ste.pool, &ste);
	}
}

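/* Reset the rule state and set the default send STE attributes and
 * action-apply data shared by all WQEs of this rule.
 */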
static void mlx5dr_rule_create_init(struct mlx5dr_rule *rule,
				    struct mlx5dr_send_ste_attr *ste_attr,
				    struct mlx5dr_actions_apply_data *apply)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_table *tbl = matcher->tbl;
	struct mlx5dr_context *ctx = tbl->ctx;

	/* Init rule before reuse */
	rule->rtc_0 = 0;
	rule->rtc_1 = 0;
	rule->pending_wqes = 0;
	rule->action_ste_idx = -1;
	rule->status = MLX5DR_RULE_STATUS_CREATING;

	/* Init default send STE attributes */
	ste_attr->gta_opcode = MLX5DR_WQE_GTA_OP_ACTIVATE;
	ste_attr->send_attr.opmod = MLX5DR_WQE_GTA_OPMOD_STE;
	ste_attr->send_attr.opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;
	ste_attr->send_attr.len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;

	/* Init default action apply */
	apply->tbl_type = tbl->type;
	apply->common_res = &ctx->common_res[tbl->type];
	apply->jump_to_action_stc = matcher->action_ste.stc.offset;
	apply->require_dep = 0;
}

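/* Insert a rule through the HW steering path: queue a dependent match WQE,
 * build the action STEs (if any) and the match STE, and post them on the
 * send engine of the requested queue.
 */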
static int mlx5dr_rule_create_hws(struct mlx5dr_rule *rule,
				  struct mlx5dr_rule_attr *attr,
				  uint8_t mt_idx,
				  const struct rte_flow_item items[],
				  uint8_t at_idx,
				  struct mlx5dr_rule_action rule_actions[])
{
	struct mlx5dr_action_template *at = rule->matcher->at[at_idx];
	struct mlx5dr_match_template *mt = rule->matcher->mt[mt_idx];
	bool is_jumbo = mlx5dr_definer_is_jumbo(mt->definer);
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_context *ctx = matcher->tbl->ctx;
	struct mlx5dr_send_ste_attr ste_attr = {0};
	struct mlx5dr_send_ring_dep_wqe *dep_wqe;
	struct mlx5dr_actions_wqe_setter *setter;
	struct mlx5dr_actions_apply_data apply;
	struct mlx5dr_send_engine *queue;
	uint8_t total_stes, action_stes;
	int i, ret;

	queue = &ctx->send_queue[attr->queue_id];
	if (unlikely(mlx5dr_send_engine_err(queue))) {
		rte_errno = EIO;
		return rte_errno;
	}

	mlx5dr_rule_create_init(rule, &ste_attr, &apply);

	/* Allocate dependent match WQE since rule might have dependent writes.
	 * The queued dependent WQE can be later aborted or kept as a dependency.
	 * dep_wqe buffers (ctrl, data) are also reused for all STE writes.
	 */
	dep_wqe = mlx5dr_send_add_new_dep_wqe(queue);
	mlx5dr_rule_init_dep_wqe(dep_wqe, rule, items, attr->user_data);

	ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
	ste_attr.wqe_data = &dep_wqe->wqe_data;
	apply.wqe_ctrl = &dep_wqe->wqe_ctrl;
	apply.wqe_data = (uint32_t *)&dep_wqe->wqe_data;
	apply.rule_action = rule_actions;
	apply.queue = queue;

	setter = &at->setters[at->num_of_action_stes];
	total_stes = at->num_of_action_stes + (is_jumbo && !at->only_term);
	action_stes = total_stes - 1;

	if (action_stes) {
		/* Allocate action STEs for complex rules */
		ret = mlx5dr_rule_alloc_action_ste(rule, attr);
		if (ret) {
			DR_LOG(ERR, "Failed to allocate action memory %d", ret);
			mlx5dr_send_abort_new_dep_wqe(queue);
			return ret;
		}
		/* Skip RX/TX based on the dep_wqe init */
		ste_attr.rtc_0 = dep_wqe->rtc_0 ? matcher->action_ste.rtc_0->id : 0;
		ste_attr.rtc_1 = dep_wqe->rtc_1 ? matcher->action_ste.rtc_1->id : 0;
		/* Action STEs are written to a specific index last to first */
		ste_attr.direct_index = rule->action_ste_idx + action_stes;
		apply.next_direct_idx = ste_attr.direct_index;
	} else {
		apply.next_direct_idx = 0;
	}

	for (i = total_stes; i-- > 0;) {
		mlx5dr_action_apply_setter(&apply, setter--, !i && is_jumbo);

		if (i == 0) {
			/* Handle last match STE */
			mlx5dr_definer_create_tag(items, mt->fc, mt->fc_sz,
						  (uint8_t *)dep_wqe->wqe_data.action);

			/* Rule has dependent WQEs, match dep_wqe is queued */
			if (action_stes || apply.require_dep)
				break;

			/* Rule has no dependencies, abort dep_wqe and send WQE now */
			mlx5dr_send_abort_new_dep_wqe(queue);
			ste_attr.wqe_tag_is_jumbo = is_jumbo;
			ste_attr.send_attr.notify_hw = !attr->burst;
			ste_attr.send_attr.user_data = dep_wqe->user_data;
			ste_attr.send_attr.rule = dep_wqe->rule;
			ste_attr.direct_index = 0;
			ste_attr.rtc_0 = dep_wqe->rtc_0;
			ste_attr.rtc_1 = dep_wqe->rtc_1;
			ste_attr.used_id_rtc_0 = &rule->rtc_0;
			ste_attr.used_id_rtc_1 = &rule->rtc_1;
			ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
			ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
		} else {
			apply.next_direct_idx = --ste_attr.direct_index;
		}

		mlx5dr_send_ste(queue, &ste_attr);
	}

	/* Backup TAG on the rule for deletion */
	if (is_jumbo)
		memcpy(rule->tag.jumbo, dep_wqe->wqe_data.action, MLX5DR_JUMBO_TAG_SZ);
	else
		memcpy(rule->tag.match, dep_wqe->wqe_data.tag, MLX5DR_MATCH_TAG_SZ);

	mlx5dr_send_engine_inc_rule(queue);

	/* Send dependent WQEs */
	if (!attr->burst)
		mlx5dr_send_all_dep_wqe(queue);

	return 0;
}

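/* Complete the deletion of a rule that failed insertion: only a SW completion
 * is generated and the action STEs are released; no deletion WQE is posted
 * for the rule itself.
 */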
static void mlx5dr_rule_destroy_failed_hws(struct mlx5dr_rule *rule,
					   struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	struct mlx5dr_send_engine *queue;

	queue = &ctx->send_queue[attr->queue_id];

	mlx5dr_rule_gen_comp(queue, rule, false,
			     attr->user_data, MLX5DR_RULE_STATUS_DELETED);

	/* Rule failed, so it is safe to release its action STEs */
	mlx5dr_rule_free_action_ste_idx(rule);

	/* The failed insertion wrote nothing to the WQ, so there is no new WQE
	 * to ring the HW with. For non-burst requests, which expect the HW to
	 * be triggered, send the pending dependent WQEs and flush the queue.
	 */
	if (attr->burst)
		return;

	mlx5dr_send_all_dep_wqe(queue);
	mlx5dr_send_engine_flush_queue(queue);
}

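/* Delete a rule through the HW steering path by sending a GTA DEACTIVATE WQE
 * on the rule RTCs. Rules still being created cannot be deleted (EBUSY);
 * failed rules and a broken send engine fall back to a SW-only completion.
 */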
static int mlx5dr_rule_destroy_hws(struct mlx5dr_rule *rule,
				   struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_wqe_gta_ctrl_seg wqe_ctrl = {0};
	struct mlx5dr_send_ste_attr ste_attr = {0};
	struct mlx5dr_send_engine *queue;

	queue = &ctx->send_queue[attr->queue_id];

	/* Rule is not completed yet */
	if (rule->status == MLX5DR_RULE_STATUS_CREATING) {
		rte_errno = EBUSY;
		return rte_errno;
	}

	/* Rule failed and doesn't require HW cleanup */
	if (rule->status == MLX5DR_RULE_STATUS_FAILED) {
		mlx5dr_rule_destroy_failed_hws(rule, attr);
		return 0;
	}

	if (unlikely(mlx5dr_send_engine_err(queue))) {
		mlx5dr_rule_destroy_failed_hws(rule, attr);
		return 0;
	}

	mlx5dr_send_engine_inc_rule(queue);

	/* Send dependent WQEs */
	if (!attr->burst)
		mlx5dr_send_all_dep_wqe(queue);

	rule->status = MLX5DR_RULE_STATUS_DELETING;

	ste_attr.send_attr.opmod = MLX5DR_WQE_GTA_OPMOD_STE;
	ste_attr.send_attr.opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;
	ste_attr.send_attr.len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;

	ste_attr.send_attr.rule = rule;
	ste_attr.send_attr.notify_hw = !attr->burst;
	ste_attr.send_attr.user_data = attr->user_data;

	ste_attr.rtc_0 = rule->rtc_0;
	ste_attr.rtc_1 = rule->rtc_1;
	ste_attr.used_id_rtc_0 = &rule->rtc_0;
	ste_attr.used_id_rtc_1 = &rule->rtc_1;
	ste_attr.wqe_ctrl = &wqe_ctrl;
	ste_attr.wqe_tag = &rule->tag;
	ste_attr.wqe_tag_is_jumbo = mlx5dr_definer_is_jumbo(matcher->mt[0]->definer);
	ste_attr.gta_opcode = MLX5DR_WQE_GTA_OP_DEACTIVATE;

	mlx5dr_send_ste(queue, &ste_attr);

	return 0;
}

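/* Insert a rule on the root table through the rdma-core (Verbs) API:
 * items are translated to a PRM match buffer, actions to DV action attrs,
 * and the completion is generated in SW.
 */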
static int mlx5dr_rule_create_root(struct mlx5dr_rule *rule,
				   struct mlx5dr_rule_attr *rule_attr,
				   const struct rte_flow_item items[],
				   uint8_t at_idx,
				   struct mlx5dr_rule_action rule_actions[])
{
	struct mlx5dv_flow_matcher *dv_matcher = rule->matcher->dv_matcher;
	uint8_t num_actions = rule->matcher->at[at_idx]->num_actions;
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	struct mlx5dv_flow_match_parameters *value;
	struct mlx5_flow_attr flow_attr = {0};
	struct mlx5dv_flow_action_attr *attr;
	struct rte_flow_error error;
	uint8_t match_criteria;
	int ret;

	attr = simple_calloc(num_actions, sizeof(*attr));
	if (!attr) {
		rte_errno = ENOMEM;
		return rte_errno;
	}

	value = simple_calloc(1, MLX5_ST_SZ_BYTES(fte_match_param) +
			      offsetof(struct mlx5dv_flow_match_parameters, match_buf));
	if (!value) {
		rte_errno = ENOMEM;
		goto free_attr;
	}

	flow_attr.tbl_type = rule->matcher->tbl->type;

	ret = flow_dv_translate_items_hws(items, &flow_attr, value->match_buf,
					  MLX5_SET_MATCHER_HS_V, NULL,
					  &match_criteria,
					  &error);
	if (ret) {
		DR_LOG(ERR, "Failed to convert items to PRM [%s]", error.message);
		goto free_value;
	}

	/* Convert actions to Verbs action attr */
	ret = mlx5dr_action_root_build_attr(rule_actions, num_actions, attr);
	if (ret)
		goto free_value;

	/* Create Verbs flow */
	value->match_sz = MLX5_ST_SZ_BYTES(fte_match_param);
	rule->flow = mlx5_glue->dv_create_flow_root(dv_matcher,
						    value,
						    num_actions,
						    attr);

	mlx5dr_rule_gen_comp(&ctx->send_queue[rule_attr->queue_id], rule, !rule->flow,
			     rule_attr->user_data, MLX5DR_RULE_STATUS_CREATED);

	simple_free(value);
	simple_free(attr);

	return 0;

free_value:
	simple_free(value);
free_attr:
	simple_free(attr);

	return -rte_errno;
}

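/* Delete a root table rule by destroying the Verbs flow and generating
 * a SW completion on the requested queue.
 */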
static int mlx5dr_rule_destroy_root(struct mlx5dr_rule *rule,
				    struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	int err = 0;

	if (rule->flow)
		err = ibv_destroy_flow(rule->flow);

	mlx5dr_rule_gen_comp(&ctx->send_queue[attr->queue_id], rule, err,
			     attr->user_data, MLX5DR_RULE_STATUS_DELETED);

	return 0;
}

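/* Enqueue rule creation on the given queue. The table type selects between
 * the root (Verbs) path and the HW steering path.
 */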
int mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
		       uint8_t mt_idx,
		       const struct rte_flow_item items[],
		       uint8_t at_idx,
		       struct mlx5dr_rule_action rule_actions[],
		       struct mlx5dr_rule_attr *attr,
		       struct mlx5dr_rule *rule_handle)
{
	struct mlx5dr_context *ctx;
	int ret;

	rule_handle->matcher = matcher;
	ctx = matcher->tbl->ctx;

	if (unlikely(!attr->user_data)) {
		rte_errno = EINVAL;
		return -rte_errno;
	}

	/* Check if there is room in queue */
	if (unlikely(mlx5dr_send_engine_full(&ctx->send_queue[attr->queue_id]))) {
		rte_errno = EBUSY;
		return -rte_errno;
	}

	assert(matcher->num_of_mt >= mt_idx);
	assert(matcher->num_of_at >= at_idx);

	if (unlikely(mlx5dr_table_is_root(matcher->tbl)))
		ret = mlx5dr_rule_create_root(rule_handle,
					      attr,
					      items,
					      at_idx,
					      rule_actions);
	else
		ret = mlx5dr_rule_create_hws(rule_handle,
					     attr,
					     mt_idx,
					     items,
					     at_idx,
					     rule_actions);
	return -ret;
}

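/* Enqueue rule deletion on the given queue, using the same root/HWS split
 * as rule creation.
 */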
int mlx5dr_rule_destroy(struct mlx5dr_rule *rule,
			struct mlx5dr_rule_attr *attr)
{
	struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
	int ret;

	if (unlikely(!attr->user_data)) {
		rte_errno = EINVAL;
		return -rte_errno;
	}

	/* Check if there is room in queue */
	if (unlikely(mlx5dr_send_engine_full(&ctx->send_queue[attr->queue_id]))) {
		rte_errno = EBUSY;
		return -rte_errno;
	}

	if (unlikely(mlx5dr_table_is_root(rule->matcher->tbl)))
		ret = mlx5dr_rule_destroy_root(rule, attr);
	else
		ret = mlx5dr_rule_destroy_hws(rule, attr);

	return -ret;
}

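/* Size of the rule handle, allowing callers to preallocate rule storage. */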
size_t mlx5dr_rule_get_handle_size(void)
{
	return sizeof(struct mlx5dr_rule);
}