/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include "mlx5dr_internal.h"

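/* Convert a data size in bytes to the log (base 2) of the number of
 * single-argument chunks needed to hold it. Assuming MLX5DR_ARG_DATA_SIZE
 * is the 64B single-argument size referenced in mlx5dr_arg_write() below,
 * a 1-64B pattern maps to MLX5DR_ARG_CHUNK_SIZE_1, 65-128B to
 * MLX5DR_ARG_CHUNK_SIZE_2, and so on up to MLX5DR_ARG_CHUNK_SIZE_MAX.
 */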
enum mlx5dr_arg_chunk_size
mlx5dr_arg_data_size_to_arg_log_size(uint16_t data_size)
{
	/* Return the roundup of log2(data_size) in units of single-arg chunks */
	if (data_size <= MLX5DR_ARG_DATA_SIZE)
		return MLX5DR_ARG_CHUNK_SIZE_1;
	if (data_size <= MLX5DR_ARG_DATA_SIZE * 2)
		return MLX5DR_ARG_CHUNK_SIZE_2;
	if (data_size <= MLX5DR_ARG_DATA_SIZE * 4)
		return MLX5DR_ARG_CHUNK_SIZE_3;
	if (data_size <= MLX5DR_ARG_DATA_SIZE * 8)
		return MLX5DR_ARG_CHUNK_SIZE_4;

	return MLX5DR_ARG_CHUNK_SIZE_MAX;
}

uint32_t mlx5dr_arg_data_size_to_arg_size(uint16_t data_size)
{
	return BIT(mlx5dr_arg_data_size_to_arg_log_size(data_size));
}

enum mlx5dr_arg_chunk_size
mlx5dr_arg_get_arg_log_size(uint16_t num_of_actions)
{
	return mlx5dr_arg_data_size_to_arg_log_size(num_of_actions *
						    MLX5DR_MODIFY_ACTION_SIZE);
}

uint32_t mlx5dr_arg_get_arg_size(uint16_t num_of_actions)
{
	return BIT(mlx5dr_arg_get_arg_log_size(num_of_actions));
}

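/* Check whether a modify-header pattern forces the HW to reparse the packet.
 * NOP, SET, ADD, COPY and ADD_FIELD actions are safe unless they touch a
 * field that changes the packet structure (ethertype or IP protocol);
 * any other action type (insert/remove/unknown) always requires a reparse.
 */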
bool mlx5dr_pat_require_reparse(__be64 *actions, uint16_t num_of_actions)
{
	uint16_t i, field;
	uint8_t action_id;

	for (i = 0; i < num_of_actions; i++) {
		action_id = MLX5_GET(set_action_in, &actions[i], action_type);

		switch (action_id) {
		case MLX5_MODIFICATION_TYPE_NOP:
			field = MLX5_MODI_OUT_NONE;
			break;

		case MLX5_MODIFICATION_TYPE_SET:
		case MLX5_MODIFICATION_TYPE_ADD:
			field = MLX5_GET(set_action_in, &actions[i], field);
			break;

		case MLX5_MODIFICATION_TYPE_COPY:
		case MLX5_MODIFICATION_TYPE_ADD_FIELD:
			field = MLX5_GET(copy_action_in, &actions[i], dst_field);
			break;

		default:
			/* Insert/Remove/Unknown actions require reparse */
			return true;
		}

		/* The fields below can change the packet structure and require a reparse */
		if (field == MLX5_MODI_OUT_ETHERTYPE ||
		    field == MLX5_MODI_OUT_IP_PROTOCOL)
			return true;
	}

	return false;
}

/* Cache and cache element handling */
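/* The pattern cache deduplicates header-modify pattern FW objects:
 * identical patterns share one refcounted devx object. The cache is a
 * spinlock-protected list kept in LRU order by the get/put helpers below.
 */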
int mlx5dr_pat_init_pattern_cache(struct mlx5dr_pattern_cache **cache)
{
	struct mlx5dr_pattern_cache *new_cache;

	new_cache = simple_calloc(1, sizeof(*new_cache));
	if (!new_cache) {
		rte_errno = ENOMEM;
		return rte_errno;
	}
	LIST_INIT(&new_cache->head);
	pthread_spin_init(&new_cache->lock, PTHREAD_PROCESS_PRIVATE);

	*cache = new_cache;

	return 0;
}

void mlx5dr_pat_uninit_pattern_cache(struct mlx5dr_pattern_cache *cache)
{
	simple_free(cache);
}

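/* Two patterns match when they have the same number of actions and each
 * action agrees. COPY/ADD_FIELD actions are compared in full; for the
 * other action types the cast to __be32 keeps only one 32-bit half of
 * the action - the control word (type/field/offset) on little-endian
 * hosts - so the immediate data held in the argument is ignored.
 */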
static bool mlx5dr_pat_compare_pattern(int cur_num_of_actions,
				       __be64 cur_actions[],
				       int num_of_actions,
				       __be64 actions[])
{
	int i;

	if (cur_num_of_actions != num_of_actions)
		return false;

	for (i = 0; i < num_of_actions; i++) {
		u8 action_id =
			MLX5_GET(set_action_in, &actions[i], action_type);

		if (action_id == MLX5_MODIFICATION_TYPE_COPY ||
		    action_id == MLX5_MODIFICATION_TYPE_ADD_FIELD) {
			if (actions[i] != cur_actions[i])
				return false;
		} else {
			/* Compare just the control, not the values */
			if ((__be32)actions[i] !=
			    (__be32)cur_actions[i])
				return false;
		}
	}

	return true;
}

static struct mlx5dr_pattern_cache_item *
mlx5dr_pat_find_cached_pattern(struct mlx5dr_pattern_cache *cache,
			       uint16_t num_of_actions,
			       __be64 *actions)
{
	struct mlx5dr_pattern_cache_item *cached_pat;

	LIST_FOREACH(cached_pat, &cache->head, next) {
		if (mlx5dr_pat_compare_pattern(cached_pat->mh_data.num_of_actions,
					       (__be64 *)cached_pat->mh_data.data,
					       num_of_actions,
					       actions))
			return cached_pat;
	}

	return NULL;
}

static struct mlx5dr_pattern_cache_item *
mlx5dr_pat_get_existing_cached_pattern(struct mlx5dr_pattern_cache *cache,
				       uint16_t num_of_actions,
				       __be64 *actions)
{
	struct mlx5dr_pattern_cache_item *cached_pattern;

	cached_pattern = mlx5dr_pat_find_cached_pattern(cache, num_of_actions, actions);
	if (cached_pattern) {
		/* LRU: move it to be first in the list */
		LIST_REMOVE(cached_pattern, next);
		LIST_INSERT_HEAD(&cache->head, cached_pattern, next);
		cached_pattern->refcount++;
	}

	return cached_pattern;
}

static struct mlx5dr_pattern_cache_item *
mlx5dr_pat_add_pattern_to_cache(struct mlx5dr_pattern_cache *cache,
				struct mlx5dr_devx_obj *pattern_obj,
				uint16_t num_of_actions,
				__be64 *actions)
{
	struct mlx5dr_pattern_cache_item *cached_pattern;

	cached_pattern = simple_calloc(1, sizeof(*cached_pattern));
	if (!cached_pattern) {
		DR_LOG(ERR, "Failed to allocate cached_pattern");
		rte_errno = ENOMEM;
		return NULL;
	}

	cached_pattern->mh_data.num_of_actions = num_of_actions;
	cached_pattern->mh_data.pattern_obj = pattern_obj;
	cached_pattern->mh_data.data =
		simple_malloc(num_of_actions * MLX5DR_MODIFY_ACTION_SIZE);
	if (!cached_pattern->mh_data.data) {
		DR_LOG(ERR, "Failed to allocate mh_data.data");
		rte_errno = ENOMEM;
		goto free_cached_obj;
	}

	memcpy(cached_pattern->mh_data.data, actions,
	       num_of_actions * MLX5DR_MODIFY_ACTION_SIZE);

	LIST_INSERT_HEAD(&cache->head, cached_pattern, next);
	cached_pattern->refcount = 1;

	return cached_pattern;

free_cached_obj:
	simple_free(cached_pattern);
	return NULL;
}

static struct mlx5dr_pattern_cache_item *
mlx5dr_pat_find_cached_pattern_by_obj(struct mlx5dr_pattern_cache *cache,
				      struct mlx5dr_devx_obj *pat_obj)
{
	struct mlx5dr_pattern_cache_item *cached_pattern;

	LIST_FOREACH(cached_pattern, &cache->head, next) {
		if (cached_pattern->mh_data.pattern_obj->id == pat_obj->id)
			return cached_pattern;
	}

	return NULL;
}

static void
mlx5dr_pat_remove_pattern(struct mlx5dr_pattern_cache_item *cached_pattern)
{
	LIST_REMOVE(cached_pattern, next);
	simple_free(cached_pattern->mh_data.data);
	simple_free(cached_pattern);
}

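/* Drop one reference taken by mlx5dr_pat_get_pattern(). When the last
 * reference goes away the cache entry is removed and the pattern FW
 * object is destroyed.
 */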
void mlx5dr_pat_put_pattern(struct mlx5dr_context *ctx,
			    struct mlx5dr_devx_obj *pat_obj)
{
	struct mlx5dr_pattern_cache *cache = ctx->pattern_cache;
	struct mlx5dr_pattern_cache_item *cached_pattern;

	pthread_spin_lock(&cache->lock);
	cached_pattern = mlx5dr_pat_find_cached_pattern_by_obj(cache, pat_obj);
	if (!cached_pattern) {
		DR_LOG(ERR, "Failed to find cached pattern for pattern object");
		assert(false);
		goto out;
	}

	if (--cached_pattern->refcount)
		goto out;

	mlx5dr_pat_remove_pattern(cached_pattern);
	mlx5dr_cmd_destroy_obj(pat_obj);

out:
	pthread_spin_unlock(&cache->lock);
}

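/* Get a pattern FW object for the given actions: reuse a cached object
 * when an identical pattern already exists, otherwise create a new FW
 * object and cache it. A hypothetical caller pairs it with the put above:
 *
 *	pat_obj = mlx5dr_pat_get_pattern(ctx, pattern, pattern_sz);
 *	if (!pat_obj)
 *		return rte_errno;
 *	...
 *	mlx5dr_pat_put_pattern(ctx, pat_obj);
 */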
struct mlx5dr_devx_obj *
mlx5dr_pat_get_pattern(struct mlx5dr_context *ctx,
		       __be64 *pattern, size_t pattern_sz)
{
	uint16_t num_of_actions = pattern_sz / MLX5DR_MODIFY_ACTION_SIZE;
	struct mlx5dr_pattern_cache_item *cached_pattern;
	struct mlx5dr_devx_obj *pat_obj = NULL;

	pthread_spin_lock(&ctx->pattern_cache->lock);

	cached_pattern = mlx5dr_pat_get_existing_cached_pattern(ctx->pattern_cache,
								num_of_actions,
								pattern);
	if (cached_pattern) {
		pat_obj = cached_pattern->mh_data.pattern_obj;
		goto out_unlock;
	}

	pat_obj = mlx5dr_cmd_header_modify_pattern_create(ctx->ibv_ctx,
							  pattern_sz,
							  (uint8_t *)pattern);
	if (!pat_obj) {
		DR_LOG(ERR, "Failed to create pattern FW object");
		goto out_unlock;
	}

	cached_pattern = mlx5dr_pat_add_pattern_to_cache(ctx->pattern_cache,
							 pat_obj,
							 num_of_actions,
							 pattern);
	if (!cached_pattern) {
		DR_LOG(ERR, "Failed to add pattern to cache");
		goto clean_pattern;
	}

	pthread_spin_unlock(&ctx->pattern_cache->lock);
	return pat_obj;

clean_pattern:
	mlx5dr_cmd_destroy_obj(pat_obj);
	pat_obj = NULL;
out_unlock:
	pthread_spin_unlock(&ctx->pattern_cache->lock);
	return pat_obj;
}

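/* Fill the common send attributes for an argument-modify WQE: a generic
 * table-access WQE (one control segment plus one data segment) whose id
 * selects the argument object to write.
 */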
static void
mlx5d_arg_init_send_attr(struct mlx5dr_send_engine_post_attr *send_attr,
			 void *comp_data,
			 uint32_t arg_idx)
{
	send_attr->opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;
	send_attr->opmod = MLX5DR_WQE_GTA_OPMOD_MOD_ARG;
	send_attr->len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;
	send_attr->id = arg_idx;
	send_attr->user_data = comp_data;
}

void mlx5dr_arg_decapl3_write(struct mlx5dr_send_engine *queue,
			      uint32_t arg_idx,
			      uint8_t *arg_data,
			      uint16_t num_of_actions)
{
	struct mlx5dr_send_engine_post_attr send_attr = {0};
	struct mlx5dr_wqe_gta_data_seg_arg *wqe_arg;
	struct mlx5dr_send_engine_post_ctrl ctrl;
	struct mlx5dr_wqe_gta_ctrl_seg *wqe_ctrl;
	size_t wqe_len;

	mlx5d_arg_init_send_attr(&send_attr, NULL, arg_idx);

	ctrl = mlx5dr_send_engine_post_start(queue);
	mlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
	memset(wqe_ctrl, 0, wqe_len);
	mlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
	mlx5dr_action_prepare_decap_l3_data(arg_data, (uint8_t *)wqe_arg,
					    num_of_actions);
	mlx5dr_send_engine_post_end(&ctrl, &send_attr);
}

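/* Write argument data to the argument object via the send engine.
 * Each WQE carries MLX5DR_ARG_DATA_SIZE (64B) of data, so a larger
 * buffer is split across consecutive argument indices; e.g. with
 * data_size = 96 one full 64B WQE is posted to arg_idx and the 32B
 * remainder to arg_idx + 1.
 */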
void mlx5dr_arg_write(struct mlx5dr_send_engine *queue,
		      void *comp_data,
		      uint32_t arg_idx,
		      uint8_t *arg_data,
		      size_t data_size)
{
	struct mlx5dr_send_engine_post_attr send_attr = {0};
	struct mlx5dr_wqe_gta_data_seg_arg *wqe_arg;
	struct mlx5dr_send_engine_post_ctrl ctrl;
	struct mlx5dr_wqe_gta_ctrl_seg *wqe_ctrl;
	int i, full_iter, leftover;
	size_t wqe_len;

	mlx5d_arg_init_send_attr(&send_attr, comp_data, arg_idx);

	/* Each WQE can hold 64B of data; bigger writes require multiple iterations */
	full_iter = data_size / MLX5DR_ARG_DATA_SIZE;
	leftover = data_size & (MLX5DR_ARG_DATA_SIZE - 1);

	for (i = 0; i < full_iter; i++) {
		ctrl = mlx5dr_send_engine_post_start(queue);
		mlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
		memset(wqe_ctrl, 0, wqe_len);
		mlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
		memcpy(wqe_arg, arg_data, wqe_len);
		send_attr.id = arg_idx++;
		mlx5dr_send_engine_post_end(&ctrl, &send_attr);

		/* Move to next argument data */
		arg_data += MLX5DR_ARG_DATA_SIZE;
	}

	if (leftover) {
		ctrl = mlx5dr_send_engine_post_start(queue);
		mlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
		memset(wqe_ctrl, 0, wqe_len);
		mlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
		memcpy(wqe_arg, arg_data, leftover);
		send_attr.id = arg_idx;
		mlx5dr_send_engine_post_end(&ctrl, &send_attr);
	}
}

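/* Synchronously write argument data using the last send queue, which is
 * reserved as the control queue: post the WQEs, flush, then drain until
 * completion while holding the context control lock.
 */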
int mlx5dr_arg_write_inline_arg_data(struct mlx5dr_context *ctx,
				     uint32_t arg_idx,
				     uint8_t *arg_data,
				     size_t data_size)
{
	struct mlx5dr_send_engine *queue;
	int ret;

	pthread_spin_lock(&ctx->ctrl_lock);

	/* Get the control queue */
	queue = &ctx->send_queue[ctx->queues - 1];

	mlx5dr_arg_write(queue, arg_data, arg_idx, arg_data, data_size);

	mlx5dr_send_engine_flush_queue(queue);

	/* Poll for completion */
	ret = mlx5dr_send_queue_action(ctx, ctx->queues - 1,
				       MLX5DR_SEND_QUEUE_ACTION_DRAIN_SYNC);

	if (ret)
		DR_LOG(ERR, "Failed to drain arg queue");

	pthread_spin_unlock(&ctx->ctrl_lock);

	return ret;
}

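/* Check that the requested argument log size is within the range the FW
 * reports: at least the allocation granularity and at most the maximum
 * allocation size, both given as log values in the queried caps.
 */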
bool mlx5dr_arg_is_valid_arg_request_size(struct mlx5dr_context *ctx,
					  uint32_t arg_size)
{
	if (arg_size < ctx->caps->log_header_modify_argument_granularity ||
	    arg_size > ctx->caps->log_header_modify_argument_max_alloc) {
		return false;
	}
	return true;
}

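/* Create (and optionally pre-write) an argument object sized for data_sz
 * bytes times 2^log_bulk_sz rules. Assuming MLX5DR_ARG_CHUNK_SIZE_1 is
 * log size 0 (one 64B chunk), e.g. data_sz = 128 gives a single-arg log
 * size of 1, so log_bulk_sz = 4 requests a bulk of log size 5
 * (32 x 64B chunks) from the FW.
 */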
struct mlx5dr_devx_obj *
mlx5dr_arg_create(struct mlx5dr_context *ctx,
		  uint8_t *data,
		  size_t data_sz,
		  uint32_t log_bulk_sz,
		  bool write_data)
{
	struct mlx5dr_devx_obj *arg_obj;
	uint16_t single_arg_log_sz;
	uint16_t multi_arg_log_sz;
	int ret;

	single_arg_log_sz = mlx5dr_arg_data_size_to_arg_log_size(data_sz);
	multi_arg_log_sz = single_arg_log_sz + log_bulk_sz;

	if (single_arg_log_sz >= MLX5DR_ARG_CHUNK_SIZE_MAX) {
		DR_LOG(ERR, "Requested single arg %u not supported", single_arg_log_sz);
		rte_errno = ENOTSUP;
		return NULL;
	}

	if (!mlx5dr_arg_is_valid_arg_request_size(ctx, multi_arg_log_sz)) {
		DR_LOG(ERR, "Argument log size %d not supported by FW", multi_arg_log_sz);
		rte_errno = ENOTSUP;
		return NULL;
	}

	/* Alloc bulk of args */
	arg_obj = mlx5dr_cmd_arg_create(ctx->ibv_ctx, multi_arg_log_sz, ctx->pd_num);
	if (!arg_obj) {
		DR_LOG(ERR, "Failed allocating arg in order: %d", multi_arg_log_sz);
		return NULL;
	}

	if (write_data) {
		ret = mlx5dr_arg_write_inline_arg_data(ctx,
						       arg_obj->id,
						       data, data_sz);
		if (ret) {
			DR_LOG(ERR, "Failed writing arg data");
			mlx5dr_cmd_destroy_obj(arg_obj);
			return NULL;
		}
	}

	return arg_obj;
}

struct mlx5dr_devx_obj *
mlx5dr_arg_create_modify_header_arg(struct mlx5dr_context *ctx,
				    __be64 *data,
				    uint8_t num_of_actions,
				    uint32_t log_bulk_sz,
				    bool write_data)
{
	size_t data_sz = num_of_actions * MLX5DR_MODIFY_ACTION_SIZE;
	struct mlx5dr_devx_obj *arg_obj;

	arg_obj = mlx5dr_arg_create(ctx,
				    (uint8_t *)data,
				    data_sz,
				    log_bulk_sz,
				    write_data);
	if (!arg_obj)
		DR_LOG(ERR, "Failed creating modify header arg");

	return arg_obj;
}

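/* Validate that every action in the pattern carries a known action type;
 * sz is the pattern size in bytes, MLX5DR_MODIFY_ACTION_SIZE per action.
 */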
bool mlx5dr_pat_verify_actions(__be64 pattern[], size_t sz)
{
	size_t i;

	for (i = 0; i < sz / MLX5DR_MODIFY_ACTION_SIZE; i++) {
		u8 action_id =
			MLX5_GET(set_action_in, &pattern[i], action_type);
		if (action_id >= MLX5_MODIFICATION_TYPE_MAX) {
			DR_LOG(ERR, "Invalid action %u", action_id);
			return false;
		}
	}

	return true;
}