xref: /dpdk/drivers/net/mlx5/hws/mlx5dr_pat_arg.c (revision 665b49c51639a10c553433bc2bcd85c7331c631e)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include "mlx5dr_internal.h"

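/* Argument (ARG) object sizing helpers.
 * ARG objects are allocated in power-of-two multiples of MLX5DR_ARG_DATA_SIZE
 * bytes (the amount of data one arg WQE data segment carries, see
 * mlx5dr_arg_write() below). The helpers below map a data size, or a number
 * of modify-header actions (MLX5DR_MODIFY_ACTION_SIZE bytes each), to the
 * log2 of the required allocation. For example, assuming the 64B chunk noted
 * in mlx5dr_arg_write(), nine 8B modify actions (72B of data) exceed a single
 * chunk and round up to MLX5DR_ARG_CHUNK_SIZE_2.
 */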
enum mlx5dr_arg_chunk_size
mlx5dr_arg_data_size_to_arg_log_size(uint16_t data_size)
{
	/* Return the roundup of log2(data_size) */
	if (data_size <= MLX5DR_ARG_DATA_SIZE)
		return MLX5DR_ARG_CHUNK_SIZE_1;
	if (data_size <= MLX5DR_ARG_DATA_SIZE * 2)
		return MLX5DR_ARG_CHUNK_SIZE_2;
	if (data_size <= MLX5DR_ARG_DATA_SIZE * 4)
		return MLX5DR_ARG_CHUNK_SIZE_3;
	if (data_size <= MLX5DR_ARG_DATA_SIZE * 8)
		return MLX5DR_ARG_CHUNK_SIZE_4;

	return MLX5DR_ARG_CHUNK_SIZE_MAX;
}

uint32_t mlx5dr_arg_data_size_to_arg_size(uint16_t data_size)
{
	return BIT(mlx5dr_arg_data_size_to_arg_log_size(data_size));
}

enum mlx5dr_arg_chunk_size
mlx5dr_arg_get_arg_log_size(uint16_t num_of_actions)
{
	return mlx5dr_arg_data_size_to_arg_log_size(num_of_actions *
						    MLX5DR_MODIFY_ACTION_SIZE);
}

uint32_t mlx5dr_arg_get_arg_size(uint16_t num_of_actions)
{
	return BIT(mlx5dr_arg_get_arg_log_size(num_of_actions));
}

/* Cache and cache element handling */
int mlx5dr_pat_init_pattern_cache(struct mlx5dr_pattern_cache **cache)
{
	struct mlx5dr_pattern_cache *new_cache;

	new_cache = simple_calloc(1, sizeof(*new_cache));
	if (!new_cache) {
		rte_errno = ENOMEM;
		return rte_errno;
	}
	LIST_INIT(&new_cache->head);
	pthread_spin_init(&new_cache->lock, PTHREAD_PROCESS_PRIVATE);

	*cache = new_cache;

	return 0;
}

void mlx5dr_pat_uninit_pattern_cache(struct mlx5dr_pattern_cache *cache)
{
	simple_free(cache);
}

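/* Compare a requested pattern against a cached one.
 * Patterns match when the action type and the number of actions are equal
 * and, per action, the relevant part matches: COPY actions are compared in
 * full, while for other action types only the control part is compared (see
 * the inline comment), since the per-rule values are supplied separately
 * through the ARG object. Decap-L3 patterns are treated as identical once the
 * type and action count match.
 */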
static bool mlx5dr_pat_compare_pattern(enum mlx5dr_action_type cur_type,
				       int cur_num_of_actions,
				       __be64 cur_actions[],
				       enum mlx5dr_action_type type,
				       int num_of_actions,
				       __be64 actions[])
{
	int i;

	if (cur_num_of_actions != num_of_actions || cur_type != type)
		return false;

	/* All decap-l3 patterns look the same, only the number of actions changes */
	if (type == MLX5DR_ACTION_TYP_TNL_L3_TO_L2)
		return true;

	for (i = 0; i < num_of_actions; i++) {
		u8 action_id =
			MLX5_GET(set_action_in, &actions[i], action_type);

		if (action_id == MLX5_MODIFICATION_TYPE_COPY) {
			if (actions[i] != cur_actions[i])
				return false;
		} else {
			/* Compare just the control, not the values */
			if ((__be32)actions[i] !=
			    (__be32)cur_actions[i])
				return false;
		}
	}

	return true;
}

static struct mlx5dr_pattern_cache_item *
mlx5dr_pat_find_cached_pattern(struct mlx5dr_pattern_cache *cache,
			       struct mlx5dr_action *action,
			       uint16_t num_of_actions,
			       __be64 *actions)
{
	struct mlx5dr_pattern_cache_item *cached_pat;

	LIST_FOREACH(cached_pat, &cache->head, next) {
		if (mlx5dr_pat_compare_pattern(cached_pat->type,
					       cached_pat->mh_data.num_of_actions,
					       (__be64 *)cached_pat->mh_data.data,
					       action->type,
					       num_of_actions,
					       actions))
			return cached_pat;
	}

	return NULL;
}

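/* Look up a matching cached pattern and take a reference on it.
 * On a hit the element is moved to the front of the list (LRU-style) and its
 * refcount is incremented. Must be called with the pattern cache lock held.
 */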
static struct mlx5dr_pattern_cache_item *
mlx5dr_pat_get_existing_cached_pattern(struct mlx5dr_pattern_cache *cache,
				       struct mlx5dr_action *action,
				       uint16_t num_of_actions,
				       __be64 *actions)
{
	struct mlx5dr_pattern_cache_item *cached_pattern;

	cached_pattern = mlx5dr_pat_find_cached_pattern(cache, action, num_of_actions, actions);
	if (cached_pattern) {
		/* LRU: move it to be first in the list */
		LIST_REMOVE(cached_pattern, next);
		LIST_INSERT_HEAD(&cache->head, cached_pattern, next);
		cached_pattern->refcount++;
	}

	return cached_pattern;
}

static struct mlx5dr_pattern_cache_item *
mlx5dr_pat_get_cached_pattern_by_action(struct mlx5dr_pattern_cache *cache,
					struct mlx5dr_action *action)
{
	struct mlx5dr_pattern_cache_item *cached_pattern;

	LIST_FOREACH(cached_pattern, &cache->head, next) {
		if (cached_pattern->mh_data.pattern_obj->id == action->modify_header.pattern_obj->id)
			return cached_pattern;
	}

	return NULL;
}

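/* Insert a newly created pattern FW object into the cache.
 * A private copy of the action data is kept so that later actions can be
 * compared against it. The new element starts with refcount 1 and is placed
 * at the front of the list. Called with the pattern cache lock held.
 */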
static struct mlx5dr_pattern_cache_item *
mlx5dr_pat_add_pattern_to_cache(struct mlx5dr_pattern_cache *cache,
				struct mlx5dr_devx_obj *pattern_obj,
				enum mlx5dr_action_type type,
				uint16_t num_of_actions,
				__be64 *actions)
{
	struct mlx5dr_pattern_cache_item *cached_pattern;

	cached_pattern = simple_calloc(1, sizeof(*cached_pattern));
	if (!cached_pattern) {
		DR_LOG(ERR, "Failed to allocate cached_pattern");
		rte_errno = ENOMEM;
		return NULL;
	}

	cached_pattern->type = type;
	cached_pattern->mh_data.num_of_actions = num_of_actions;
	cached_pattern->mh_data.pattern_obj = pattern_obj;
	cached_pattern->mh_data.data =
		simple_malloc(num_of_actions * MLX5DR_MODIFY_ACTION_SIZE);
	if (!cached_pattern->mh_data.data) {
		DR_LOG(ERR, "Failed to allocate mh_data.data");
		rte_errno = ENOMEM;
		goto free_cached_obj;
	}

	memcpy(cached_pattern->mh_data.data, actions,
	       num_of_actions * MLX5DR_MODIFY_ACTION_SIZE);

	LIST_INSERT_HEAD(&cache->head, cached_pattern, next);
	cached_pattern->refcount = 1;

	return cached_pattern;

free_cached_obj:
	simple_free(cached_pattern);
	return NULL;
}

static void
mlx5dr_pat_remove_pattern(struct mlx5dr_pattern_cache_item *cached_pattern)
{
	LIST_REMOVE(cached_pattern, next);
	simple_free(cached_pattern->mh_data.data);
	simple_free(cached_pattern);
}

static void
mlx5dr_pat_put_pattern(struct mlx5dr_pattern_cache *cache,
		       struct mlx5dr_action *action)
{
	struct mlx5dr_pattern_cache_item *cached_pattern;

	pthread_spin_lock(&cache->lock);
	cached_pattern = mlx5dr_pat_get_cached_pattern_by_action(cache, action);
	if (!cached_pattern) {
		DR_LOG(ERR, "Failed to find cached pattern for the given action");
		assert(false);
		goto out;
	}

	if (--cached_pattern->refcount)
		goto out;

	mlx5dr_pat_remove_pattern(cached_pattern);

out:
	pthread_spin_unlock(&cache->lock);
}

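/* Get a pattern FW object for the action, reusing a cached one when possible.
 * Under the cache lock: if an identical pattern is already cached, its FW
 * object is reused and its refcount is bumped; otherwise a new pattern object
 * is created through FW and added to the cache. If caching the new object
 * fails, the freshly created FW object is destroyed.
 */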
static int mlx5dr_pat_get_pattern(struct mlx5dr_context *ctx,
				  struct mlx5dr_action *action,
				  uint16_t num_of_actions,
				  size_t pattern_sz,
				  __be64 *pattern)
{
	struct mlx5dr_pattern_cache_item *cached_pattern;
	int ret = 0;

	pthread_spin_lock(&ctx->pattern_cache->lock);

	cached_pattern = mlx5dr_pat_get_existing_cached_pattern(ctx->pattern_cache,
								action,
								num_of_actions,
								pattern);
	if (cached_pattern) {
		action->modify_header.pattern_obj = cached_pattern->mh_data.pattern_obj;
		goto out_unlock;
	}

	action->modify_header.pattern_obj =
		mlx5dr_cmd_header_modify_pattern_create(ctx->ibv_ctx,
							pattern_sz,
							(uint8_t *)pattern);
	if (!action->modify_header.pattern_obj) {
		DR_LOG(ERR, "Failed to create pattern FW object");

		ret = rte_errno;
		goto out_unlock;
	}

	cached_pattern =
		mlx5dr_pat_add_pattern_to_cache(ctx->pattern_cache,
						action->modify_header.pattern_obj,
						action->type,
						num_of_actions,
						pattern);
	if (!cached_pattern) {
		DR_LOG(ERR, "Failed to add pattern to cache");
		ret = rte_errno;
		goto clean_pattern;
	}

out_unlock:
	pthread_spin_unlock(&ctx->pattern_cache->lock);
	return ret;

clean_pattern:
	mlx5dr_cmd_destroy_obj(action->modify_header.pattern_obj);
	pthread_spin_unlock(&ctx->pattern_cache->lock);
	return ret;
}

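/* Prepare the post attributes for an ARG modification WQE: a TBL_ACCESS WQE
 * with the MOD_ARG opmod, carrying one control segment and one data segment,
 * targeting the ARG object at arg_idx.
 */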
static void
mlx5d_arg_init_send_attr(struct mlx5dr_send_engine_post_attr *send_attr,
			 void *comp_data,
			 uint32_t arg_idx)
{
	send_attr->opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;
	send_attr->opmod = MLX5DR_WQE_GTA_OPMOD_MOD_ARG;
	send_attr->len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;
	send_attr->id = arg_idx;
	send_attr->user_data = comp_data;
}

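/* Write decap-L3 argument data to the ARG object at arg_idx.
 * The raw header data is reformatted for the data segment by
 * mlx5dr_action_prepare_decap_l3_data() and posted as a single WQE on the
 * given send engine. No completion user data is attached.
 */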
void mlx5dr_arg_decapl3_write(struct mlx5dr_send_engine *queue,
			      uint32_t arg_idx,
			      uint8_t *arg_data,
			      uint16_t num_of_actions)
{
	struct mlx5dr_send_engine_post_attr send_attr = {0};
	struct mlx5dr_wqe_gta_data_seg_arg *wqe_arg;
	struct mlx5dr_send_engine_post_ctrl ctrl;
	struct mlx5dr_wqe_gta_ctrl_seg *wqe_ctrl;
	size_t wqe_len;

	mlx5d_arg_init_send_attr(&send_attr, NULL, arg_idx);

	ctrl = mlx5dr_send_engine_post_start(queue);
	mlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
	memset(wqe_ctrl, 0, wqe_len);
	mlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
	mlx5dr_action_prepare_decap_l3_data(arg_data, (uint8_t *)wqe_arg,
					    num_of_actions);
	mlx5dr_send_engine_post_end(&ctrl, &send_attr);
}

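/* Write data_size bytes of argument data to the ARG object starting at
 * arg_idx. Each WQE carries up to MLX5DR_ARG_DATA_SIZE bytes of data, so the
 * write is split into full-size WQEs plus one final WQE for any leftover
 * bytes, advancing arg_idx by one for each full chunk.
 */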
void mlx5dr_arg_write(struct mlx5dr_send_engine *queue,
		      void *comp_data,
		      uint32_t arg_idx,
		      uint8_t *arg_data,
		      size_t data_size)
{
	struct mlx5dr_send_engine_post_attr send_attr = {0};
	struct mlx5dr_wqe_gta_data_seg_arg *wqe_arg;
	struct mlx5dr_send_engine_post_ctrl ctrl;
	struct mlx5dr_wqe_gta_ctrl_seg *wqe_ctrl;
	int i, full_iter, leftover;
	size_t wqe_len;

	mlx5d_arg_init_send_attr(&send_attr, comp_data, arg_idx);

	/* Each WQE can hold 64B of data, so the write might require multiple iterations */
	full_iter = data_size / MLX5DR_ARG_DATA_SIZE;
	leftover = data_size & (MLX5DR_ARG_DATA_SIZE - 1);

	for (i = 0; i < full_iter; i++) {
		ctrl = mlx5dr_send_engine_post_start(queue);
		mlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
		memset(wqe_ctrl, 0, wqe_len);
		mlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
		memcpy(wqe_arg, arg_data, wqe_len);
		send_attr.id = arg_idx++;
		mlx5dr_send_engine_post_end(&ctrl, &send_attr);

		/* Move to the next chunk of argument data */
		arg_data += MLX5DR_ARG_DATA_SIZE;
	}

	if (leftover) {
		ctrl = mlx5dr_send_engine_post_start(queue);
		mlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
		memset(wqe_ctrl, 0, wqe_len);
		mlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
		memcpy(wqe_arg, arg_data, leftover);
		send_attr.id = arg_idx;
		mlx5dr_send_engine_post_end(&ctrl, &send_attr);
	}
}

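/* Synchronously write argument data using the control queue (the last send
 * queue of the context). The queue is flushed and drained before returning,
 * so the data has been consumed by the time the call succeeds. Used, for
 * example, for shared modify-header actions whose data is known at action
 * creation time (see mlx5dr_arg_create_modify_header_arg() below).
 */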
int mlx5dr_arg_write_inline_arg_data(struct mlx5dr_context *ctx,
				     uint32_t arg_idx,
				     uint8_t *arg_data,
				     size_t data_size)
{
	struct mlx5dr_send_engine *queue;
	int ret;

	pthread_spin_lock(&ctx->ctrl_lock);

	/* Get the control queue */
	queue = &ctx->send_queue[ctx->queues - 1];

	mlx5dr_arg_write(queue, arg_data, arg_idx, arg_data, data_size);

	mlx5dr_send_engine_flush_queue(queue);

	/* Poll for completion */
	ret = mlx5dr_send_queue_action(ctx, ctx->queues - 1,
				       MLX5DR_SEND_QUEUE_ACTION_DRAIN_SYNC);

	if (ret)
		DR_LOG(ERR, "Failed to drain arg queue");

	pthread_spin_unlock(&ctx->ctrl_lock);

	return ret;
}

bool mlx5dr_arg_is_valid_arg_request_size(struct mlx5dr_context *ctx,
					  uint32_t arg_size)
{
	if (arg_size < ctx->caps->log_header_modify_argument_granularity ||
	    arg_size > ctx->caps->log_header_modify_argument_max_alloc) {
		return false;
	}
	return true;
}

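/* Allocate the ARG object backing a modify-header action.
 * The allocation order is the per-rule argument log size plus bulk_size (a
 * log2 bulk factor supplied by the caller), validated against the FW
 * capabilities. For shared actions (MLX5DR_ACTION_FLAG_SHARED) the argument
 * data is the pattern itself and is written inline immediately.
 */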
static int
mlx5dr_arg_create_modify_header_arg(struct mlx5dr_context *ctx,
				    struct mlx5dr_action *action,
				    uint16_t num_of_actions,
				    __be64 *pattern,
				    uint32_t bulk_size)
{
	uint32_t flags = action->flags;
	uint16_t args_log_size;
	int ret = 0;

	/* Alloc bulk of args */
	args_log_size = mlx5dr_arg_get_arg_log_size(num_of_actions);
	if (args_log_size >= MLX5DR_ARG_CHUNK_SIZE_MAX) {
		DR_LOG(ERR, "Exceeded the allowed number of actions %u",
			num_of_actions);
		rte_errno = EINVAL;
		return rte_errno;
	}

	if (!mlx5dr_arg_is_valid_arg_request_size(ctx, args_log_size + bulk_size)) {
		DR_LOG(ERR, "Arg size %d does not fit FW capability",
		       args_log_size + bulk_size);
		rte_errno = EINVAL;
		return rte_errno;
	}

	action->modify_header.arg_obj =
		mlx5dr_cmd_arg_create(ctx->ibv_ctx, args_log_size + bulk_size,
				      ctx->pd_num);
	if (!action->modify_header.arg_obj) {
		DR_LOG(ERR, "Failed allocating arg in order: %d",
			args_log_size + bulk_size);
		return rte_errno;
	}

	/* For shared actions the argument data is written inline here */
	if (flags & MLX5DR_ACTION_FLAG_SHARED)
		ret = mlx5dr_arg_write_inline_arg_data(ctx,
						       action->modify_header.arg_obj->id,
						       (uint8_t *)pattern,
						       num_of_actions *
						       MLX5DR_MODIFY_ACTION_SIZE);
	if (ret) {
		DR_LOG(ERR, "Failed writing INLINE arg in order: %d",
			args_log_size + bulk_size);
		mlx5dr_cmd_destroy_obj(action->modify_header.arg_obj);
		return rte_errno;
	}

	return 0;
}

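/* Create the pattern and argument resources for a modify-header action.
 * pattern_sz is given in bytes and converted to a number of
 * MLX5DR_MODIFY_ACTION_SIZE actions, which must be non-zero. The ARG object
 * is created first, then the pattern FW object is taken from (or added to)
 * the pattern cache; if the pattern step fails, the ARG object is destroyed.
 */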
int mlx5dr_pat_arg_create_modify_header(struct mlx5dr_context *ctx,
					struct mlx5dr_action *action,
					size_t pattern_sz,
					__be64 pattern[],
					uint32_t bulk_size)
{
	uint16_t num_of_actions;
	int ret;

	num_of_actions = pattern_sz / MLX5DR_MODIFY_ACTION_SIZE;
	if (num_of_actions == 0) {
		DR_LOG(ERR, "Invalid number of actions %u", num_of_actions);
		rte_errno = EINVAL;
		return rte_errno;
	}

	action->modify_header.num_of_actions = num_of_actions;

	ret = mlx5dr_arg_create_modify_header_arg(ctx, action,
						  num_of_actions,
						  pattern,
						  bulk_size);
	if (ret) {
		DR_LOG(ERR, "Failed to allocate arg");
		return ret;
	}

	ret = mlx5dr_pat_get_pattern(ctx, action, num_of_actions, pattern_sz,
				     pattern);
	if (ret) {
		DR_LOG(ERR, "Failed to allocate pattern");
		goto free_arg;
	}

	return 0;

free_arg:
	mlx5dr_cmd_destroy_obj(action->modify_header.arg_obj);
	return rte_errno;
}

void mlx5dr_pat_arg_destroy_modify_header(struct mlx5dr_context *ctx,
					  struct mlx5dr_action *action)
{
	mlx5dr_cmd_destroy_obj(action->modify_header.arg_obj);
	mlx5dr_pat_put_pattern(ctx->pattern_cache, action);
}