/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include "mlx5_flow_os.h"

#include <rte_thread.h>

/* Key of thread specific flow workspace data. */
static rte_thread_key key_workspace;
/* Flow workspace global list head for garbage collector. */
static struct mlx5_flow_workspace *gc_head;
/* Spinlock for operating flow workspace list. */
static rte_spinlock_t mlx5_flow_workspace_lock = RTE_SPINLOCK_INITIALIZER;

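/**
 * Validate the ESP item.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   Next-protocol value from the preceding layer item, 0xff if not specified.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */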
int
mlx5_flow_os_validate_item_esp(const struct rte_eth_dev *dev,
			       const struct rte_flow_item *item,
			       uint64_t item_flags,
			       uint8_t target_protocol,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item_esp *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (!mlx5_hws_active(dev)) {
		if (!(item_flags & l3m))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item, "L3 is mandatory to filter on L4");
	}
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (target_protocol != 0xff && target_protocol != IPPROTO_ESP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ESP layer");
	if (!mask)
		mask = &rte_flow_item_esp_mask;
	ret = mlx5_flow_item_acceptable
		(dev, item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_esp_mask,
		 sizeof(struct rte_flow_item_esp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
		 error);
	if (ret < 0)
		return ret;
	return 0;
}

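/**
 * Add a flow workspace to the global garbage collector list.
 *
 * @param[in] ws
 *   Pointer to the flow workspace to register.
 */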
void
mlx5_flow_os_workspace_gc_add(struct mlx5_flow_workspace *ws)
{
	rte_spinlock_lock(&mlx5_flow_workspace_lock);
	ws->gc = gc_head;
	gc_head = ws;
	rte_spinlock_unlock(&mlx5_flow_workspace_lock);
}

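/**
 * Release all flow workspaces queued on the garbage collector list.
 */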
static void
mlx5_flow_os_workspace_gc_release(void)
{
	while (gc_head) {
		struct mlx5_flow_workspace *wks = gc_head;

		gc_head = wks->gc;
		flow_release_workspace(wks);
	}
}

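/**
 * Create the thread-specific key used to store the per-thread flow workspace.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */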
int
mlx5_flow_os_init_workspace_once(void)
{
	if (rte_thread_key_create(&key_workspace, NULL)) {
		DRV_LOG(ERR, "Can't create flow workspace data thread key.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	return 0;
}

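/**
 * Get the flow workspace of the calling thread.
 *
 * @return
 *   Pointer to the thread-specific flow workspace, or NULL if none is set.
 */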
void *
mlx5_flow_os_get_specific_workspace(void)
{
	return rte_thread_value_get(key_workspace);
}

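/**
 * Set the flow workspace of the calling thread.
 *
 * @param[in] data
 *   Pointer to the flow workspace to bind to the current thread.
 *
 * @return
 *   0 on success, a negative value otherwise and rte_errno is set.
 */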
int
mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data)
{
	return rte_thread_value_set(key_workspace, data);
}

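/**
 * Delete the flow workspace thread key and release all workspaces
 * queued on the garbage collector list.
 */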
void
mlx5_flow_os_release_workspace(void)
{
	rte_thread_key_delete(key_workspace);
	mlx5_flow_os_workspace_gc_release();
}