/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include "mlx5dr_internal.h"

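/* Check whether the device supports dynamic reparse, i.e. reparse can be
 * triggered per action through the STC instead of on every STE.
 */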
bool mlx5dr_context_cap_dynamic_reparse(struct mlx5dr_context *ctx)
{
	return IS_BIT_SET(ctx->caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_BY_STC);
}

uint8_t mlx5dr_context_get_reparse_mode(struct mlx5dr_context *ctx)
{
	/* Prefer dynamic reparse, which reparses only on specific actions */
	if (mlx5dr_context_cap_dynamic_reparse(ctx))
		return MLX5_IFC_RTC_REPARSE_NEVER;

	/* Otherwise use the less efficient static reparse */
	return MLX5_IFC_RTC_REPARSE_ALWAYS;
}

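/* Initialize the context level caches and pools: the pattern cache, the
 * definer cache and one STC pool per table type.
 */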
static int mlx5dr_context_pools_init(struct mlx5dr_context *ctx,
				     struct mlx5dr_context_attr *attr)
{
	struct mlx5dr_pool_attr pool_attr = {0};
	uint8_t max_log_sz;
	int i;

	if (mlx5dr_pat_init_pattern_cache(&ctx->pattern_cache))
		return rte_errno;

	if (mlx5dr_definer_init_cache(&ctx->definer_cache))
		goto uninit_pat_cache;

	/* Create an STC pool per FT type */
	pool_attr.pool_type = MLX5DR_POOL_TYPE_STC;
	pool_attr.flags = MLX5DR_POOL_FLAGS_FOR_STC_POOL;
	if (!attr->initial_log_stc_memory)
		attr->initial_log_stc_memory = MLX5DR_POOL_STC_LOG_SZ;
	max_log_sz = RTE_MIN(attr->initial_log_stc_memory, ctx->caps->stc_alloc_log_max);
	pool_attr.alloc_log_sz = RTE_MAX(max_log_sz, ctx->caps->stc_alloc_log_gran);

	for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) {
		pool_attr.table_type = i;
		ctx->stc_pool[i] = mlx5dr_pool_create(ctx, &pool_attr);
		if (!ctx->stc_pool[i]) {
			DR_LOG(ERR, "Failed to allocate STC pool [%d]", i);
			goto free_stc_pools;
		}
	}

	return 0;

free_stc_pools:
	for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++)
		if (ctx->stc_pool[i])
			mlx5dr_pool_destroy(ctx->stc_pool[i]);

	mlx5dr_definer_uninit_cache(ctx->definer_cache);

uninit_pat_cache:
	mlx5dr_pat_uninit_pattern_cache(ctx->pattern_cache);
	return rte_errno;
}

static void mlx5dr_context_pools_uninit(struct mlx5dr_context *ctx)
{
	int i;

	for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) {
		if (ctx->stc_pool[i])
			mlx5dr_pool_destroy(ctx->stc_pool[i]);
	}

	mlx5dr_definer_uninit_cache(ctx->definer_cache);
	mlx5dr_pat_uninit_pattern_cache(ctx->pattern_cache);
}

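/* Use the PD provided by the caller if given, otherwise allocate a private
 * one, and resolve the PD number (pdn) through mlx5dv.
 */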
static int mlx5dr_context_init_pd(struct mlx5dr_context *ctx,
				  struct ibv_pd *pd)
{
	struct mlx5dv_pd mlx5_pd = {0};
	struct mlx5dv_obj obj;
	int ret;

	if (pd) {
		ctx->pd = pd;
	} else {
		ctx->pd = mlx5_glue->alloc_pd(ctx->ibv_ctx);
		if (!ctx->pd) {
			DR_LOG(ERR, "Failed to allocate PD");
			rte_errno = errno;
			return rte_errno;
		}
		ctx->flags |= MLX5DR_CONTEXT_FLAG_PRIVATE_PD;
	}

	obj.pd.in = ctx->pd;
	obj.pd.out = &mlx5_pd;

	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
	if (ret)
		goto free_private_pd;

	ctx->pd_num = mlx5_pd.pdn;

	return 0;

free_private_pd:
	if (ctx->flags & MLX5DR_CONTEXT_FLAG_PRIVATE_PD)
		mlx5_glue->dealloc_pd(ctx->pd);

	return ret;
}

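/* Release the PD only if it was allocated privately by the context;
 * a caller provided PD is left untouched.
 */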
static int mlx5dr_context_uninit_pd(struct mlx5dr_context *ctx)
{
	if (ctx->flags & MLX5DR_CONTEXT_FLAG_PRIVATE_PD)
		return mlx5_glue->dealloc_pd(ctx->pd);

	return 0;
}

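/* Set MLX5DR_CONTEXT_FLAG_HWS_SUPPORT only when every mandatory HWS
 * capability is reported by the device/FW; otherwise the HWS specific
 * initialization is skipped.
 */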
static void mlx5dr_context_check_hws_supp(struct mlx5dr_context *ctx)
{
	struct mlx5dr_cmd_query_caps *caps = ctx->caps;

	/* HWS not supported on device / FW */
	if (!caps->wqe_based_update) {
		DR_LOG(INFO, "Required HWS WQE based insertion cap not supported");
		return;
	}

	/* The current solution requires all rules to set the reparse bit */
	if ((!caps->nic_ft.reparse ||
	     (!caps->fdb_ft.reparse && caps->eswitch_manager)) ||
	    !IS_BIT_SET(caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS)) {
		DR_LOG(INFO, "Required HWS reparse cap not supported");
		return;
	}

	/* FW/HW must support 8DW STE */
	if (!IS_BIT_SET(caps->ste_format, MLX5_IFC_RTC_STE_FORMAT_8DW)) {
		DR_LOG(INFO, "Required HWS STE format not supported");
		return;
	}

	/* Adding rules both by hash and by offset is required */
	if (!IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH) ||
	    !IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET)) {
		DR_LOG(INFO, "Required HWS RTC update mode not supported");
		return;
	}

	/* Support for SELECT definer ID is required */
	if (!IS_BIT_SET(caps->definer_format_sup, MLX5_IFC_DEFINER_FORMAT_ID_SELECT)) {
		DR_LOG(INFO, "Required HWS Dynamic definer not supported");
		return;
	}

	ctx->flags |= MLX5DR_CONTEXT_FLAG_HWS_SUPPORT;
}

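/* Initialize the HWS part of the context: PD, pools and send queues.
 * This is a no-op when the device does not support HWS.
 */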
static int mlx5dr_context_init_hws(struct mlx5dr_context *ctx,
				   struct mlx5dr_context_attr *attr)
{
	int ret;

	mlx5dr_context_check_hws_supp(ctx);

	if (!(ctx->flags & MLX5DR_CONTEXT_FLAG_HWS_SUPPORT))
		return 0;

	ret = mlx5dr_context_init_pd(ctx, attr->pd);
	if (ret)
		return ret;

	ret = mlx5dr_context_pools_init(ctx, attr);
	if (ret)
		goto uninit_pd;

	if (attr->bwc)
		ctx->flags |= MLX5DR_CONTEXT_FLAG_BWC_SUPPORT;

	ret = mlx5dr_send_queues_open(ctx, attr->queues, attr->queue_size);
	if (ret)
		goto pools_uninit;

	return 0;

pools_uninit:
	mlx5dr_context_pools_uninit(ctx);
uninit_pd:
	mlx5dr_context_uninit_pd(ctx);
	return ret;
}

static void mlx5dr_context_uninit_hws(struct mlx5dr_context *ctx)
{
	if (!(ctx->flags & MLX5DR_CONTEXT_FLAG_HWS_SUPPORT))
		return;

	mlx5dr_send_queues_close(ctx);
	mlx5dr_context_pools_uninit(ctx);
	mlx5dr_context_uninit_pd(ctx);
}

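/* When a shared ibv context is provided, resources are created on it and
 * the local ibv context is kept aside; the cross_vhca_resources capability
 * is required on both contexts.
 */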
static int mlx5dr_context_init_shared_ctx(struct mlx5dr_context *ctx,
					  struct ibv_context *ibv_ctx,
					  struct mlx5dr_context_attr *attr)
{
	struct mlx5dr_cmd_query_caps shared_caps = {0};
	int ret;

	if (!attr->shared_ibv_ctx) {
		ctx->ibv_ctx = ibv_ctx;
	} else {
		ctx->ibv_ctx = attr->shared_ibv_ctx;
		ctx->local_ibv_ctx = ibv_ctx;
		ret = mlx5dr_cmd_query_caps(attr->shared_ibv_ctx, &shared_caps);
		if (ret || !shared_caps.cross_vhca_resources) {
			DR_LOG(INFO, "No cross_vhca_resources cap for shared ibv");
			rte_errno = ENOTSUP;
			return rte_errno;
		}
		ctx->caps->shared_vhca_id = shared_caps.vhca_id;
	}

	if (ctx->local_ibv_ctx && !ctx->caps->cross_vhca_resources) {
		DR_LOG(INFO, "No cross_vhca_resources cap for local ibv");
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	return 0;
}

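/* Open a new mlx5dr context on the given ibv device, or return NULL on
 * failure.
 *
 * Illustrative usage sketch (hypothetical caller, queue values are
 * placeholders and error handling is trimmed):
 *
 *	struct mlx5dr_context_attr attr = {
 *		.queues = 16,
 *		.queue_size = 256,
 *	};
 *	struct mlx5dr_context *ctx;
 *
 *	ctx = mlx5dr_context_open(ibv_ctx, &attr);
 *	if (!ctx)
 *		return -rte_errno;
 *	...
 *	mlx5dr_context_close(ctx);
 */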
struct mlx5dr_context *mlx5dr_context_open(struct ibv_context *ibv_ctx,
					   struct mlx5dr_context_attr *attr)
{
	struct mlx5dr_context *ctx;
	int ret;

	ctx = simple_calloc(1, sizeof(*ctx));
	if (!ctx) {
		rte_errno = ENOMEM;
		return NULL;
	}

	pthread_spin_init(&ctx->ctrl_lock, PTHREAD_PROCESS_PRIVATE);

	ctx->caps = simple_calloc(1, sizeof(*ctx->caps));
	if (!ctx->caps)
		goto free_ctx;

	ret = mlx5dr_cmd_query_caps(ibv_ctx, ctx->caps);
	if (ret)
		goto free_caps;

	if (mlx5dr_context_init_shared_ctx(ctx, ibv_ctx, attr))
		goto free_caps;

	ret = mlx5dr_context_init_hws(ctx, attr);
	if (ret)
		goto free_caps;

	return ctx;

free_caps:
	simple_free(ctx->caps);
free_ctx:
	pthread_spin_destroy(&ctx->ctrl_lock);
	simple_free(ctx);
	return NULL;
}

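/* Tear down the HWS resources (if initialized) and free the context. */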
int mlx5dr_context_close(struct mlx5dr_context *ctx)
{
	mlx5dr_context_uninit_hws(ctx);
	simple_free(ctx->caps);
	pthread_spin_destroy(&ctx->ctrl_lock);
	simple_free(ctx);
	return 0;
}