/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include "mlx5_flow_os.h"
#include "mlx5_win_ext.h"

#include <rte_thread.h>

/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to dev struct.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[in] external
 *   This flow rule is created by a request external to the PMD.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   - 0 on success and non-root table (not a valid option for Windows yet).
 *   - 1 on success and root table.
 *   - a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_os_validate_flow_attributes(struct rte_eth_dev *dev,
				      const struct rte_flow_attr *attributes,
				      bool external,
				      struct rte_flow_error *error)
{
	int ret = 1;

	RTE_SET_USED(dev);
	RTE_SET_USED(external);
	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL,
					  "groups are not supported");
	if (attributes->priority)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL,
					  "priorities are not supported");
	if (attributes->transfer)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  NULL,
					  "transfer not supported");
	if (!(attributes->ingress))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL, "must specify ingress only");
	return ret;
}
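
/*
 * Illustrative usage sketch: callers treat the return value as tri-state.
 * "dev" and "attr" below are hypothetical, already initialized objects.
 *
 * @code
 *	struct rte_flow_error err;
 *	int ret = mlx5_flow_os_validate_flow_attributes(dev, attr,
 *							false, &err);
 *	if (ret < 0)
 *		return ret;
 *	bool root_table = (ret == 1);
 * @endcode
 */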

/**
 * Create flow matcher in a flow table.
 *
 * @param[in] ctx
 *   Pointer to relevant device context.
 * @param[in] attr
 *   Pointer to relevant attributes.
 * @param[in] table
 *   Pointer to table object.
 * @param[out] matcher
 *   Pointer to a valid flow matcher object on success, NULL otherwise.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
int
mlx5_flow_os_create_flow_matcher(void *ctx,
				 void *attr,
				 void *table,
				 void **matcher)
{
	struct mlx5dv_flow_matcher_attr *mattr;

	RTE_SET_USED(table);
	*matcher = NULL;
	mattr = attr;
	if (mattr->type != IBV_FLOW_ATTR_NORMAL) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	struct mlx5_matcher *mlx5_matcher =
		mlx5_malloc(MLX5_MEM_ZERO,
		       sizeof(struct mlx5_matcher) +
		       MLX5_ST_SZ_BYTES(fte_match_param),
		       0, SOCKET_ID_ANY);
	if (!mlx5_matcher) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	mlx5_matcher->ctx = ctx;
	memcpy(&mlx5_matcher->attr, attr, sizeof(mlx5_matcher->attr));
	memcpy(&mlx5_matcher->match_buf,
	       mattr->match_mask->match_buf,
	       MLX5_ST_SZ_BYTES(fte_match_param));
	*matcher = mlx5_matcher;
	return 0;
}
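
/*
 * Note: the matcher created above is a host-side container only. The mask
 * bytes saved in match_buf are not programmed to the device at this point;
 * they are merged into the DevX rule later, by mlx5_flow_os_create_flow().
 */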

/**
 * Destroy flow matcher.
 *
 * @param[in] matcher
 *   Pointer to matcher object to destroy.
 *
 * @return
 *   0 on success, or the value of errno on failure.
 */
int
mlx5_flow_os_destroy_flow_matcher(void *matcher)
{
	mlx5_free(matcher);
	return 0;
}

/**
 * Create flow action: dest_devx_tir
 *
 * @param[in] tir
 *   Pointer to DevX tir object.
 * @param[out] action
 *   Pointer to a valid action on success, NULL otherwise.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
int
mlx5_flow_os_create_flow_action_dest_devx_tir(struct mlx5_devx_obj *tir,
					      void **action)
{
	struct mlx5_action *mlx5_action =
		mlx5_malloc(MLX5_MEM_ZERO,
		       sizeof(struct mlx5_action),
		       0, SOCKET_ID_ANY);

	if (!mlx5_action) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	mlx5_action->type = MLX5_FLOW_CONTEXT_DEST_TYPE_TIR;
	mlx5_action->dest_tir.id = tir->id;
	*action = mlx5_action;
	return 0;
}
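
/*
 * Illustrative usage sketch: the action is paired with
 * mlx5_flow_os_destroy_flow_action() once the rule that uses it is gone.
 * "tir" is a hypothetical, already created DevX TIR object.
 *
 * @code
 *	void *action = NULL;
 *
 *	if (mlx5_flow_os_create_flow_action_dest_devx_tir(tir, &action))
 *		return -rte_errno;
 *	...
 *	mlx5_flow_os_destroy_flow_action(action);
 * @endcode
 */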

/**
 * Destroy flow action.
 *
 * @param[in] action
 *   Pointer to action object to destroy.
 *
 * @return
 *   0 on success, or the value of errno on failure.
 */
int
mlx5_flow_os_destroy_flow_action(void *action)
{
	mlx5_free(action);
	return 0;
}

/**
 * Create flow rule.
 *
 * @param[in] matcher
 *   Pointer to match mask structure.
 * @param[in] match_value
 *   Pointer to match value structure.
 * @param[in] num_actions
 *   Number of actions in flow rule.
 * @param[in] actions
 *   Pointer to array of flow rule actions.
 * @param[out] flow
 *   Pointer to a valid flow rule object on success, NULL otherwise.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
int
mlx5_flow_os_create_flow(void *matcher, void *match_value,
			 size_t num_actions,
			 void *actions[], void **flow)
{
	struct mlx5_action *action;
	size_t i;
	struct mlx5_matcher *mlx5_matcher = matcher;
	struct mlx5_flow_dv_match_params *mlx5_match_value = match_value;
	uint32_t in[MLX5_ST_SZ_DW(devx_fs_rule_add_in)] = {0};
	void *matcher_c = MLX5_ADDR_OF(devx_fs_rule_add_in, in,
				       match_criteria);
	void *matcher_v = MLX5_ADDR_OF(devx_fs_rule_add_in, in,
				       match_value);

	MLX5_ASSERT(mlx5_matcher->ctx);
	/* Use mlx5_match_value->size for the match criteria as well. */
	memcpy(matcher_c, mlx5_matcher->match_buf,
	       mlx5_match_value->size);
	memcpy(matcher_v, mlx5_match_value->buf,
	       mlx5_match_value->size);
	for (i = 0; i < num_actions; i++) {
		action = actions[i];
		switch (action->type) {
		case MLX5_FLOW_CONTEXT_DEST_TYPE_TIR:
			MLX5_SET(devx_fs_rule_add_in, in,
				 dest.destination_type,
				 MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);
			MLX5_SET(devx_fs_rule_add_in, in,
				 dest.destination_id,
				 action->dest_tir.id);
			break;
		default:
			break;
		}
		MLX5_SET(devx_fs_rule_add_in, in, match_criteria_enable,
			 MLX5_MATCH_OUTER_HEADERS);
	}
	*flow = mlx5_glue->devx_fs_rule_add(mlx5_matcher->ctx, in, sizeof(in));
	return (*flow) ? 0 : -1;
}
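
/*
 * Illustrative sketch of the expected call sequence for the primitives
 * above. "ctx", "mattr", "tir" and "match_value" are hypothetical,
 * already initialized objects.
 *
 * @code
 *	void *matcher = NULL, *action = NULL, *flow = NULL;
 *	void *actions[1];
 *
 *	if (mlx5_flow_os_create_flow_matcher(ctx, mattr, NULL, &matcher))
 *		goto error;
 *	if (mlx5_flow_os_create_flow_action_dest_devx_tir(tir, &action))
 *		goto error;
 *	actions[0] = action;
 *	if (mlx5_flow_os_create_flow(matcher, &match_value, 1, actions,
 *				     &flow))
 *		goto error;
 * @endcode
 */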

/**
 * Destroy flow rule.
 *
 * @param[in] drv_flow_ptr
 *   Pointer to flow rule object.
 *
 * @return
 *   0 on success, errno on failure.
 */
int
mlx5_flow_os_destroy_flow(void *drv_flow_ptr)
{
	return mlx5_glue->devx_fs_rule_del(drv_flow_ptr);
}

struct mlx5_workspace_thread {
	HANDLE	thread_handle;
	struct mlx5_flow_workspace *mlx5_ws;
	struct mlx5_workspace_thread *next;
};

/**
 * Linked list of per-thread mlx5_flow_workspace objects, for multi-thread
 * support.
 */
static struct mlx5_workspace_thread *curr;
static struct mlx5_workspace_thread *first;
rte_thread_key ws_tls_index;
static pthread_mutex_t lock_thread_list;
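
/*
 * The workspace TLS key is created without a destructor callback, so an
 * exiting thread does not free its workspace itself. Each thread is
 * instead recorded in the list above, and its workspace is reaped lazily
 * by mlx5_clear_thread_list() once the thread is no longer alive.
 */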

static bool
mlx5_is_thread_alive(HANDLE thread_handle)
{
	DWORD result = WaitForSingleObject(thread_handle, 0);

	if (result == WAIT_OBJECT_0)
		return false;
	return true;
}

static int
mlx5_get_current_thread(HANDLE *p_handle)
{
	BOOL ret = DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
		GetCurrentProcess(), p_handle, 0, 0, DUPLICATE_SAME_ACCESS);

	if (!ret) {
		RTE_LOG_WIN32_ERR("DuplicateHandle()");
		return -1;
	}
	return 0;
}

static void
mlx5_clear_thread_list(void)
{
	struct mlx5_workspace_thread *temp = first;
	struct mlx5_workspace_thread *next, *prev = NULL;
	HANDLE curr_thread;

	if (!temp)
		return;
	if (mlx5_get_current_thread(&curr_thread)) {
		DRV_LOG(ERR, "Failed to get current thread handle.");
		return;
	}
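	/*
	 * Walk the list and unlink every entry whose thread has exited,
	 * releasing its workspace, thread handle and list node. The entry
	 * of the calling thread is always kept.
	 */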
	while (temp) {
		next = temp->next;
		if (temp->thread_handle != curr_thread &&
		    !mlx5_is_thread_alive(temp->thread_handle)) {
			if (temp == first) {
				if (curr == temp)
					curr = temp->next;
				first = temp->next;
			} else if (temp == curr) {
				curr = prev;
			}
			flow_release_workspace(temp->mlx5_ws);
			CloseHandle(temp->thread_handle);
			free(temp);
			if (prev)
				prev->next = next;
			temp = next;
			continue;
		}
		prev = temp;
		temp = temp->next;
	}
	CloseHandle(curr_thread);
}

/**
 * Release workspaces before exit.
 */
void
mlx5_flow_os_release_workspace(void)
{
	mlx5_clear_thread_list();
	if (first) {
		MLX5_ASSERT(!first->next);
		flow_release_workspace(first->mlx5_ws);
		free(first);
	}
	rte_thread_key_delete(ws_tls_index);
	pthread_mutex_destroy(&lock_thread_list);
}

static int
mlx5_add_workspace_to_list(struct mlx5_flow_workspace *data)
{
	HANDLE curr_thread;
	struct mlx5_workspace_thread *temp = calloc(1, sizeof(*temp));

	if (!temp) {
		DRV_LOG(ERR, "Failed to allocate thread workspace memory.");
		return -1;
	}
	if (mlx5_get_current_thread(&curr_thread)) {
		DRV_LOG(ERR, "Failed to get current thread handle.");
		free(temp);
		return -1;
	}
	temp->mlx5_ws = data;
	temp->thread_handle = curr_thread;
	pthread_mutex_lock(&lock_thread_list);
	mlx5_clear_thread_list();
	if (!first) {
		first = temp;
		curr = temp;
	} else {
		curr->next = temp;
		curr = curr->next;
	}
	pthread_mutex_unlock(&lock_thread_list);
	return 0;
}

int
mlx5_flow_os_init_workspace_once(void)
{
	int err = rte_thread_key_create(&ws_tls_index, NULL);

	if (err) {
		DRV_LOG(ERR, "Can't create flow workspace data thread key.");
		return -rte_errno;
	}
	pthread_mutex_init(&lock_thread_list, NULL);
	return 0;
}

void *
mlx5_flow_os_get_specific_workspace(void)
{
	return rte_thread_value_get(ws_tls_index);
}

int
mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data)
{
	int err = 0;
	int old_err = rte_errno;

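	/*
	 * rte_thread_value_get() returns NULL both when no value was set
	 * and on failure, so clear rte_errno first to tell the two cases
	 * apart, and restore the caller's rte_errno before returning.
	 */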
	rte_errno = 0;
	if (!rte_thread_value_get(ws_tls_index)) {
		if (rte_errno) {
			DRV_LOG(ERR, "Failed checking specific workspace.");
			rte_errno = old_err;
			return -1;
		}
		/*
		 * Setting a workspace when the current value is NULL can
		 * happen only once per thread; record this thread in the
		 * linked list so its resources can be released later on.
		 */
		err = mlx5_add_workspace_to_list(data);
		if (err) {
			DRV_LOG(ERR, "Failed adding workspace to list.");
			rte_errno = old_err;
			return -1;
		}
	}
	if (rte_thread_value_set(ws_tls_index, data)) {
		DRV_LOG(ERR, "Failed setting specific workspace.");
		err = -1;
	}
	rte_errno = old_err;
	return err;
}
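
/*
 * Illustrative sketch of the expected workspace API usage: initialization
 * happens once per process, then each worker thread binds its own
 * workspace. "ws" is a hypothetical workspace allocated by the caller.
 *
 * @code
 *	if (mlx5_flow_os_init_workspace_once())
 *		return -1;
 *	...
 *	if (mlx5_flow_os_set_specific_workspace(ws))
 *		return -1;
 *	struct mlx5_flow_workspace *cur =
 *		mlx5_flow_os_get_specific_workspace();
 * @endcode
 */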

void
mlx5_flow_os_workspace_gc_add(struct mlx5_flow_workspace *ws)
{
	RTE_SET_USED(ws);
}

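/**
 * Validate the ESP item.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */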
int
mlx5_flow_os_validate_item_esp(const struct rte_eth_dev *dev,
			       const struct rte_flow_item *item,
			       uint64_t item_flags,
			       uint8_t target_protocol,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item_esp *mask = item->mask;
	const struct rte_flow_item_esp *spec = item->spec;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 is mandatory to filter on L4");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (target_protocol != 0xff && target_protocol != IPPROTO_ESP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ESP layer");
	if (!mask)
		mask = &rte_flow_item_esp_mask;
	if (spec && (spec->hdr.spi & mask->hdr.spi))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "matching on spi field in esp is not"
					  " supported on Windows");
	ret = mlx5_flow_item_acceptable
		(dev, item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_esp_mask,
		 sizeof(struct rte_flow_item_esp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
		 error);
	if (ret < 0)
		return ret;
	return 0;
}
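
/*
 * Illustrative sketch: an ESP item that passes the validation above must
 * leave the SPI field unmasked, since matching on it is rejected on
 * Windows. The initializers below are hypothetical.
 *
 * @code
 *	struct rte_flow_item_esp esp_mask = { .hdr = { .spi = 0 } };
 *	struct rte_flow_item esp_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_ESP,
 *		.spec = NULL,
 *		.mask = &esp_mask,
 *	};
 * @endcode
 */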