/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include "mlx5_flow_os.h"

#include <rte_thread.h>

/* Key of thread specific flow workspace data. */
static rte_thread_key key_workspace;
/* Flow workspace global list head for garbage collector. */
static struct mlx5_flow_workspace *gc_head;
/* Spinlock for operating flow workspace list. */
static rte_spinlock_t mlx5_flow_workspace_lock = RTE_SPINLOCK_INITIALIZER;

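/**
 * Validate ESP item.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */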
int
mlx5_flow_os_validate_item_esp(const struct rte_eth_dev *dev,
			       const struct rte_flow_item *item,
			       uint64_t item_flags,
			       uint8_t target_protocol,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item_esp *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (!mlx5_hws_active(dev)) {
		if (!(item_flags & l3m))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item, "L3 is mandatory to filter on L4");
	}
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (target_protocol != 0xff && target_protocol != IPPROTO_ESP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ESP layer");
	if (!mask)
		mask = &rte_flow_item_esp_mask;
	ret = mlx5_flow_item_acceptable
		(dev, item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_esp_mask,
		 sizeof(struct rte_flow_item_esp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
		 error);
	if (ret < 0)
		return ret;
	return 0;
}

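/* Push a flow workspace onto the global garbage collector list. */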
void
mlx5_flow_os_workspace_gc_add(struct mlx5_flow_workspace *ws)
{
	rte_spinlock_lock(&mlx5_flow_workspace_lock);
	ws->gc = gc_head;
	gc_head = ws;
	rte_spinlock_unlock(&mlx5_flow_workspace_lock);
}

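/* Release all flow workspaces queued on the garbage collector list. */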
static void
mlx5_flow_os_workspace_gc_release(void)
{
	while (gc_head) {
		struct mlx5_flow_workspace *wks = gc_head;

		gc_head = wks->gc;
		flow_release_workspace(wks);
	}
}

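/*
 * Create the thread-specific key used to store the per-thread flow workspace.
 * Returns 0 on success, a negative errno value otherwise and rte_errno is set.
 */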
int
mlx5_flow_os_init_workspace_once(void)
{
	if (rte_thread_key_create(&key_workspace, NULL)) {
		DRV_LOG(ERR, "Can't create flow workspace data thread key.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	return 0;
}

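/* Get the flow workspace bound to the calling thread, or NULL if none. */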
void *
mlx5_flow_os_get_specific_workspace(void)
{
	return rte_thread_value_get(key_workspace);
}

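/* Bind a flow workspace to the calling thread. */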
int
mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data)
{
	return rte_thread_value_set(key_workspace, data);
}

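/*
 * Delete the thread-specific workspace key and release all workspaces
 * registered for garbage collection.
 */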
void
mlx5_flow_os_release_workspace(void)
{
	rte_thread_key_delete(key_workspace);
	mlx5_flow_os_workspace_gc_release();
}