/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include "mlx5_flow_os.h"

#include <rte_thread.h>

/* Key of thread specific flow workspace data. */
static rte_thread_key key_workspace;
/* Flow workspace global list head for garbage collector. */
static struct mlx5_flow_workspace *gc_head;
/* Spinlock for operating flow workspace list. */
static rte_spinlock_t mlx5_flow_workspace_lock = RTE_SPINLOCK_INITIALIZER;

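/**
 * Validate ESP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */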
int
mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
			    uint64_t item_flags,
			    uint8_t target_protocol,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_esp *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 is mandatory to filter on L4");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (target_protocol != 0xff && target_protocol != IPPROTO_ESP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ESP layer");
	if (!mask)
		mask = &rte_flow_item_esp_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_esp_mask,
		 sizeof(struct rte_flow_item_esp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
		 error);
	if (ret < 0)
		return ret;
	return 0;
}

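/**
 * Add a flow workspace to the global garbage collection list.
 *
 * @param[in] ws
 *   Pointer to the flow workspace to be queued for release.
 */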
void
mlx5_flow_os_workspace_gc_add(struct mlx5_flow_workspace *ws)
{
	rte_spinlock_lock(&mlx5_flow_workspace_lock);
	ws->gc = gc_head;
	gc_head = ws;
	rte_spinlock_unlock(&mlx5_flow_workspace_lock);
}

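/**
 * Release all flow workspaces queued on the garbage collection list.
 */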
static void
mlx5_flow_os_workspace_gc_release(void)
{
	while (gc_head) {
		struct mlx5_flow_workspace *wks = gc_head;

		gc_head = wks->gc;
		flow_release_workspace(wks);
	}
}

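/**
 * Create the key used to store the per-thread flow workspace data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */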
int
mlx5_flow_os_init_workspace_once(void)
{
	if (rte_thread_key_create(&key_workspace, NULL)) {
		DRV_LOG(ERR, "Can't create flow workspace data thread key.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	return 0;
}

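/**
 * Get the flow workspace of the current thread.
 *
 * @return
 *   Pointer to the thread-specific flow workspace, NULL if none is set.
 */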
void *
mlx5_flow_os_get_specific_workspace(void)
{
	return rte_thread_value_get(key_workspace);
}

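/**
 * Set the flow workspace of the current thread.
 *
 * @param[in] data
 *   Pointer to the flow workspace.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */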
int
mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data)
{
	return rte_thread_value_set(key_workspace, data);
}

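/**
 * Delete the flow workspace key and release the workspaces on the
 * garbage collection list.
 */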
void
mlx5_flow_os_release_workspace(void)
{
	rte_thread_key_delete(key_workspace);
	mlx5_flow_os_workspace_gc_release();
}