/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include "mlx5_flow_os.h"

#include <rte_thread.h>

/* Key of thread specific flow workspace data. */
static rte_thread_key key_workspace;
/* Flow workspace global list head for garbage collector. */
static struct mlx5_flow_workspace *gc_head;
/* Spinlock for operating flow workspace list. */
static rte_spinlock_t mlx5_flow_workspace_lock = RTE_SPINLOCK_INITIALIZER;

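/**
 * Validate ESP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */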
int
mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
			       uint64_t item_flags,
			       uint8_t target_protocol,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item_esp *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 is mandatory to filter on L4");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (target_protocol != 0xff && target_protocol != IPPROTO_ESP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ESP layer");
	if (!mask)
		mask = &rte_flow_item_esp_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_esp_mask,
		 sizeof(struct rte_flow_item_esp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
		 error);
	if (ret < 0)
		return ret;
	return 0;
}

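/**
 * Add a flow workspace to the global garbage collector list.
 *
 * The workspace is pushed onto the list head under the workspace
 * spinlock, so registration is safe from multiple threads.
 *
 * @param[in] ws
 *   Pointer to the flow workspace to register for deferred release.
 */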
void
mlx5_flow_os_workspace_gc_add(struct mlx5_flow_workspace *ws)
{
	rte_spinlock_lock(&mlx5_flow_workspace_lock);
	ws->gc = gc_head;
	gc_head = ws;
	rte_spinlock_unlock(&mlx5_flow_workspace_lock);
}

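/**
 * Release all flow workspaces accumulated on the garbage collector list.
 *
 * The list is walked without taking the spinlock; presumably this is
 * only reached on teardown, when no other thread can register entries.
 */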
static void
mlx5_flow_os_workspace_gc_release(void)
{
	while (gc_head) {
		struct mlx5_flow_workspace *wks = gc_head;

		gc_head = wks->gc;
		flow_release_workspace(wks);
	}
}

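/**
 * Create the thread-specific key used to look up the per-thread flow
 * workspace. Intended to be called once during initialization.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */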
int
mlx5_flow_os_init_workspace_once(void)
{
	if (rte_thread_key_create(&key_workspace, NULL)) {
		DRV_LOG(ERR, "Can't create flow workspace data thread key.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	return 0;
}

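/**
 * Get the flow workspace bound to the calling thread.
 *
 * @return
 *   Pointer to the thread-specific flow workspace, or NULL if none
 *   has been set.
 */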
void *
mlx5_flow_os_get_specific_workspace(void)
{
	return rte_thread_value_get(key_workspace);
}

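/**
 * Bind a flow workspace to the calling thread.
 *
 * @param[in] data
 *   Pointer to the flow workspace to associate with the current thread.
 *
 * @return
 *   0 on success, a negative number otherwise.
 */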
int
mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data)
{
	return rte_thread_value_set(key_workspace, data);
}

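/**
 * Delete the thread-specific workspace key and release every workspace
 * registered on the garbage collector list.
 */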
void
mlx5_flow_os_release_workspace(void)
{
	rte_thread_key_delete(key_workspace);
	mlx5_flow_os_workspace_gc_release();
}