/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include "mlx5_flow_os.h"
#include "mlx5_win_ext.h"

#include <rte_thread.h>

/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attributes
 *   Pointer to the flow attributes.
 * @param[in] external
 *   This flow rule is created by a request external to the PMD.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   - 0 on success for a non-root table (not a valid option on Windows yet).
 *   - 1 on success for the root table.
 *   - a negative errno value otherwise, with rte_errno set.
 */
int
mlx5_flow_os_validate_flow_attributes(struct rte_eth_dev *dev,
				      const struct rte_flow_attr *attributes,
				      bool external,
				      struct rte_flow_error *error)
{
	int ret = 1;

	RTE_SET_USED(dev);
	RTE_SET_USED(external);
	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL,
					  "groups are not supported");
	if (attributes->priority)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL,
					  "priorities are not supported");
	if (attributes->transfer)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  NULL,
					  "transfer not supported");
	if (!(attributes->ingress))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL, "must specify ingress only");
	return ret;
}

/**
 * Create flow matcher in a flow table.
 *
 * @param[in] ctx
 *   Pointer to the relevant device context.
 * @param[in] attr
 *   Pointer to the relevant attributes.
 * @param[in] table
 *   Pointer to the table object.
 * @param[out] matcher
 *   Pointer to a valid flow matcher object on success, NULL otherwise.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
int
mlx5_flow_os_create_flow_matcher(void *ctx,
				 void *attr,
				 void *table,
				 void **matcher)
{
	struct mlx5dv_flow_matcher_attr *mattr;

	RTE_SET_USED(table);
	*matcher = NULL;
	mattr = attr;
	if (mattr->type != IBV_FLOW_ATTR_NORMAL) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
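	/*
	 * Cache the match mask inside the matcher object: the Windows DevX
	 * path consumes mask and value together later, in the single
	 * devx_fs_rule_add() call issued by mlx5_flow_os_create_flow().
	 */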
	struct mlx5_matcher *mlx5_matcher =
		mlx5_malloc(MLX5_MEM_ZERO,
			    sizeof(struct mlx5_matcher) +
			    MLX5_ST_SZ_BYTES(fte_match_param),
			    0, SOCKET_ID_ANY);

	if (!mlx5_matcher) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	mlx5_matcher->ctx = ctx;
	memcpy(&mlx5_matcher->attr, attr, sizeof(mlx5_matcher->attr));
	memcpy(&mlx5_matcher->match_buf,
	       mattr->match_mask->match_buf,
	       MLX5_ST_SZ_BYTES(fte_match_param));
	*matcher = mlx5_matcher;
	return 0;
}

/**
 * Destroy flow matcher.
 *
 * @param[in] matcher
 *   Pointer to the matcher object to destroy.
 *
 * @return
 *   0 on success, or the value of errno on failure.
 */
int
mlx5_flow_os_destroy_flow_matcher(void *matcher)
{
	mlx5_free(matcher);
	return 0;
}

/**
 * Create flow action: dest_devx_tir.
 *
 * @param[in] tir
 *   Pointer to the DevX TIR object.
 * @param[out] action
 *   Pointer to a valid action on success, NULL otherwise.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
int
mlx5_flow_os_create_flow_action_dest_devx_tir(struct mlx5_devx_obj *tir,
					      void **action)
{
	struct mlx5_action *mlx5_action =
		mlx5_malloc(MLX5_MEM_ZERO,
			    sizeof(struct mlx5_action),
			    0, SOCKET_ID_ANY);

	if (!mlx5_action) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}
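	/* Only the TIR object id is kept: the DevX rule translation in
	 * mlx5_flow_os_create_flow() needs just the destination id.
	 */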
	mlx5_action->type = MLX5_FLOW_CONTEXT_DEST_TYPE_TIR;
	mlx5_action->dest_tir.id = tir->id;
	*action = mlx5_action;
	return 0;
}

/**
 * Destroy flow action.
 *
 * @param[in] action
 *   Pointer to the action object to destroy.
 *
 * @return
 *   0 on success, or the value of errno on failure.
 */
int
mlx5_flow_os_destroy_flow_action(void *action)
{
	mlx5_free(action);
	return 0;
}

/**
 * Create flow rule.
 *
 * @param[in] matcher
 *   Pointer to the matcher object holding the match mask.
 * @param[in] match_value
 *   Pointer to the match value structure.
 * @param[in] num_actions
 *   Number of actions in the flow rule.
 * @param[in] actions
 *   Pointer to the array of flow rule actions.
 * @param[out] flow
 *   Pointer to a valid flow rule object on success, NULL otherwise.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
int
mlx5_flow_os_create_flow(void *matcher, void *match_value,
			 size_t num_actions,
			 void *actions[], void **flow)
{
	struct mlx5_action *action;
	size_t i;
	struct mlx5_matcher *mlx5_matcher = matcher;
	struct mlx5_flow_dv_match_params *mlx5_match_value = match_value;
	uint32_t in[MLX5_ST_SZ_DW(devx_fs_rule_add_in)] = {0};
	void *matcher_c = MLX5_ADDR_OF(devx_fs_rule_add_in, in,
				       match_criteria);
	void *matcher_v = MLX5_ADDR_OF(devx_fs_rule_add_in, in,
				       match_value);

	MLX5_ASSERT(mlx5_matcher->ctx);
	/* Both the match criteria (mask) and the match value are copied
	 * with the size reported by the match value structure.
	 */
	memcpy(matcher_c, mlx5_matcher->match_buf,
	       mlx5_match_value->size);
	memcpy(matcher_v, mlx5_match_value->buf,
	       mlx5_match_value->size);
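	/* Translate each PMD action into the DevX rule destination fields. */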
	for (i = 0; i < num_actions; i++) {
		action = actions[i];
		switch (action->type) {
		case MLX5_FLOW_CONTEXT_DEST_TYPE_TIR:
			MLX5_SET(devx_fs_rule_add_in, in,
				 dest.destination_type,
				 MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);
			MLX5_SET(devx_fs_rule_add_in, in,
				 dest.destination_id,
				 action->dest_tir.id);
			break;
		default:
			break;
		}
	}
	/* Enable outer-header match criteria once for the whole rule. */
	MLX5_SET(devx_fs_rule_add_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	*flow = mlx5_glue->devx_fs_rule_add(mlx5_matcher->ctx, in, sizeof(in));
	return (*flow) ? 0 : -1;
}

/**
 * Destroy flow rule.
 *
 * @param[in] drv_flow_ptr
 *   Pointer to the flow rule object.
 *
 * @return
 *   0 on success, errno on failure.
 */
int
mlx5_flow_os_destroy_flow(void *drv_flow_ptr)
{
	return mlx5_glue->devx_fs_rule_del(drv_flow_ptr);
}

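/**
 * Node of the linked list tracking each thread that owns a flow workspace,
 * keyed by a duplicated Win32 thread handle.
 */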
struct mlx5_workspace_thread {
	HANDLE thread_handle;
	struct mlx5_flow_workspace *mlx5_ws;
	struct mlx5_workspace_thread *next;
};

/**
 * Linked list of per-thread flow workspaces (multi-thread support).
 */
static struct mlx5_workspace_thread *curr;
static struct mlx5_workspace_thread *first;
rte_thread_key ws_tls_index;
static pthread_mutex_t lock_thread_list;

static bool
mlx5_is_thread_alive(HANDLE thread_handle)
{
	DWORD result = WaitForSingleObject(thread_handle, 0);

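	/* A zero-timeout wait is a poll: WAIT_OBJECT_0 means the thread
	 * handle is signaled, i.e. the thread has already terminated.
	 */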
	if (result == WAIT_OBJECT_0)
		return false;
	return true;
}

static int
mlx5_get_current_thread(HANDLE *p_handle)
{
	BOOL ret = DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
				   GetCurrentProcess(), p_handle, 0, 0,
				   DUPLICATE_SAME_ACCESS);

	if (!ret) {
		RTE_LOG_WIN32_ERR("DuplicateHandle()");
		return -1;
	}
	return 0;
}

static void
mlx5_clear_thread_list(void)
{
	struct mlx5_workspace_thread *temp = first;
	struct mlx5_workspace_thread *next, *prev = NULL;
	HANDLE curr_thread;

	if (!temp)
		return;
	if (mlx5_get_current_thread(&curr_thread)) {
		DRV_LOG(ERR, "Failed to get current thread handle.");
		return;
	}
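	/*
	 * Walk the list and reclaim entries whose threads have exited,
	 * skipping the calling thread. Removed nodes release their flow
	 * workspace and close the duplicated thread handle.
	 */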
	while (temp) {
		next = temp->next;
		if (temp->thread_handle != curr_thread &&
		    !mlx5_is_thread_alive(temp->thread_handle)) {
			if (temp == first) {
				if (curr == temp)
					curr = temp->next;
				first = temp->next;
			} else if (temp == curr) {
				/* Tail is removed, step the tail back. */
				curr = prev;
			}
			flow_release_workspace(temp->mlx5_ws);
			CloseHandle(temp->thread_handle);
			free(temp);
			if (prev)
				prev->next = next;
			temp = next;
			continue;
		}
		prev = temp;
		temp = temp->next;
	}
	CloseHandle(curr_thread);
}

/**
 * Release workspaces before exit.
 */
void
mlx5_flow_os_release_workspace(void)
{
	mlx5_clear_thread_list();
	if (first) {
		MLX5_ASSERT(!first->next);
		flow_release_workspace(first->mlx5_ws);
		free(first);
	}
	rte_thread_key_delete(ws_tls_index);
	pthread_mutex_destroy(&lock_thread_list);
}

static int
mlx5_add_workspace_to_list(struct mlx5_flow_workspace *data)
{
	HANDLE curr_thread;
	struct mlx5_workspace_thread *temp = calloc(1, sizeof(*temp));

	if (!temp) {
		DRV_LOG(ERR, "Failed to allocate thread workspace memory.");
		return -1;
	}
	if (mlx5_get_current_thread(&curr_thread)) {
		DRV_LOG(ERR, "Failed to get current thread handle.");
		free(temp);
		return -1;
	}
	temp->mlx5_ws = data;
	temp->thread_handle = curr_thread;
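	/*
	 * Append under the list lock and piggyback a cleanup pass so entries
	 * of already dead threads do not accumulate.
	 */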
	pthread_mutex_lock(&lock_thread_list);
	mlx5_clear_thread_list();
	if (!first) {
		first = temp;
		curr = temp;
	} else {
		curr->next = temp;
		curr = curr->next;
	}
	pthread_mutex_unlock(&lock_thread_list);
	return 0;
}

int
mlx5_flow_os_init_workspace_once(void)
{
	int err = rte_thread_key_create(&ws_tls_index, NULL);

	if (err) {
		DRV_LOG(ERR, "Can't create flow workspace data thread key.");
		return -rte_errno;
	}
	pthread_mutex_init(&lock_thread_list, NULL);
	return 0;
}

void *
mlx5_flow_os_get_specific_workspace(void)
{
	return rte_thread_value_get(ws_tls_index);
}

int
mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data)
{
	int err = 0;
	int old_err = rte_errno;

	rte_errno = 0;
	if (!rte_thread_value_get(ws_tls_index)) {
		if (rte_errno) {
			DRV_LOG(ERR, "Failed checking specific workspace.");
			rte_errno = old_err;
			return -1;
		}
		/*
		 * set_specific_workspace when the current value is NULL
		 * can happen only once per thread, mark this thread in
		 * the linked list to be able to release resources later on.
		 */
		err = mlx5_add_workspace_to_list(data);
		if (err) {
			DRV_LOG(ERR, "Failed adding workspace to list.");
			rte_errno = old_err;
			return -1;
		}
	}
	if (rte_thread_value_set(ws_tls_index, data)) {
		DRV_LOG(ERR, "Failed setting specific workspace.");
		err = -1;
	}
	rte_errno = old_err;
	return err;
}
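
/*
 * Illustrative per-thread usage sketch (not part of the driver;
 * alloc_flow_workspace() below is a hypothetical allocator):
 *
 *	struct mlx5_flow_workspace *ws =
 *		mlx5_flow_os_get_specific_workspace();
 *	if (!ws) {
 *		ws = alloc_flow_workspace();
 *		if (!ws || mlx5_flow_os_set_specific_workspace(ws))
 *			return -1;
 *	}
 */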

void
mlx5_flow_os_workspace_gc_add(struct mlx5_flow_workspace *ws)
{
	/* No-op on Windows: dead-thread workspaces are reclaimed
	 * through the thread list instead of a GC list.
	 */
	RTE_SET_USED(ws);
}

/**
 * Validate the ESP item.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_os_validate_item_esp(const struct rte_eth_dev *dev,
			       const struct rte_flow_item *item,
			       uint64_t item_flags,
			       uint8_t target_protocol,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item_esp *mask = item->mask;
	const struct rte_flow_item_esp *spec = item->spec;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 is mandatory to filter on L4");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (target_protocol != 0xff && target_protocol != IPPROTO_ESP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ESP layer");
	if (!mask)
		mask = &rte_flow_item_esp_mask;
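	/* A non-zero SPI under the mask means the application requests
	 * matching on it, which the Windows DevX path cannot offload.
	 */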
	if (spec && (spec->hdr.spi & mask->hdr.spi))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "matching on spi field in esp is not"
					  " supported on Windows");
	ret = mlx5_flow_item_acceptable
		(dev, item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_esp_mask,
		 sizeof(struct rte_flow_item_esp),
		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret < 0)
		return ret;
	return 0;
}