xref: /dpdk/drivers/net/mlx5/hws/mlx5dr_context.c (revision 7917b0d38e92e8b9ec5a870415b791420e10f11a)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include "mlx5dr_internal.h"

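/* Reparse determines whether HW re-parses the packet after actions that can
 * modify its headers. Dynamic (per-STC) reparse lets only such actions request
 * a reparse; static reparse applies to every rule.
 */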
bool mlx5dr_context_cap_dynamic_reparse(struct mlx5dr_context *ctx)
{
	return IS_BIT_SET(ctx->caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_BY_STC);
}

uint8_t mlx5dr_context_get_reparse_mode(struct mlx5dr_context *ctx)
{
	/* Prefer dynamic reparse: reparse only for specific actions */
	if (mlx5dr_context_cap_dynamic_reparse(ctx))
		return MLX5_IFC_RTC_REPARSE_NEVER;

	/* Otherwise fall back to the less efficient static reparse */
	return MLX5_IFC_RTC_REPARSE_ALWAYS;
}

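/* Context-level pools and caches: the pattern cache, the definer cache and one
 * STC pool per table type. The STC pool log size is clamped to the device
 * limits: max(min(MLX5DR_POOL_STC_LOG_SZ, stc_alloc_log_max),
 * stc_alloc_log_gran).
 */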
static int mlx5dr_context_pools_init(struct mlx5dr_context *ctx)
{
	struct mlx5dr_pool_attr pool_attr = {0};
	uint8_t max_log_sz;
	int i;

	if (mlx5dr_pat_init_pattern_cache(&ctx->pattern_cache))
		return rte_errno;

	if (mlx5dr_definer_init_cache(&ctx->definer_cache))
		goto uninit_pat_cache;

	/* Create an STC pool per FT type */
	pool_attr.pool_type = MLX5DR_POOL_TYPE_STC;
	pool_attr.flags = MLX5DR_POOL_FLAGS_FOR_STC_POOL;
	max_log_sz = RTE_MIN(MLX5DR_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max);
	pool_attr.alloc_log_sz = RTE_MAX(max_log_sz, ctx->caps->stc_alloc_log_gran);

	for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) {
		pool_attr.table_type = i;
		ctx->stc_pool[i] = mlx5dr_pool_create(ctx, &pool_attr);
		if (!ctx->stc_pool[i]) {
			DR_LOG(ERR, "Failed to allocate STC pool [%d]", i);
			goto free_stc_pools;
		}
	}

	return 0;

free_stc_pools:
	for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++)
		if (ctx->stc_pool[i])
			mlx5dr_pool_destroy(ctx->stc_pool[i]);

	mlx5dr_definer_uninit_cache(ctx->definer_cache);

uninit_pat_cache:
	mlx5dr_pat_uninit_pattern_cache(ctx->pattern_cache);
	return rte_errno;
}

static void mlx5dr_context_pools_uninit(struct mlx5dr_context *ctx)
{
	int i;

	for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) {
		if (ctx->stc_pool[i])
			mlx5dr_pool_destroy(ctx->stc_pool[i]);
	}

	mlx5dr_definer_uninit_cache(ctx->definer_cache);
	mlx5dr_pat_uninit_pattern_cache(ctx->pattern_cache);
}

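/* The PD is either supplied by the caller through attr->pd or allocated here,
 * in which case MLX5DR_CONTEXT_FLAG_PRIVATE_PD marks it for release on uninit.
 * mlx5dv object init extracts the PD number (pdn) into ctx->pd_num, which the
 * rest of the driver is expected to use when creating device resources for
 * this context.
 */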
static int mlx5dr_context_init_pd(struct mlx5dr_context *ctx,
				  struct ibv_pd *pd)
{
	struct mlx5dv_pd mlx5_pd = {0};
	struct mlx5dv_obj obj;
	int ret;

	if (pd) {
		ctx->pd = pd;
	} else {
		ctx->pd = mlx5_glue->alloc_pd(ctx->ibv_ctx);
		if (!ctx->pd) {
			DR_LOG(ERR, "Failed to allocate PD");
			rte_errno = errno;
			return rte_errno;
		}
		ctx->flags |= MLX5DR_CONTEXT_FLAG_PRIVATE_PD;
	}

	obj.pd.in = ctx->pd;
	obj.pd.out = &mlx5_pd;

	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
	if (ret)
		goto free_private_pd;

	ctx->pd_num = mlx5_pd.pdn;

	return 0;

free_private_pd:
	if (ctx->flags & MLX5DR_CONTEXT_FLAG_PRIVATE_PD)
		mlx5_glue->dealloc_pd(ctx->pd);

	return ret;
}

static int mlx5dr_context_uninit_pd(struct mlx5dr_context *ctx)
{
	if (ctx->flags & MLX5DR_CONTEXT_FLAG_PRIVATE_PD)
		return mlx5_glue->dealloc_pd(ctx->pd);

	return 0;
}

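/* HWS requires the full set of capabilities checked below: WQE-based rule
 * insertion, reparse support on the NIC (and on the FDB when the device is the
 * eswitch manager), the 8DW STE format, RTC insertion both by hash and by
 * offset, and the SELECT definer format. If any is missing,
 * MLX5DR_CONTEXT_FLAG_HWS_SUPPORT stays cleared and HWS resources are not
 * set up.
 */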
static void mlx5dr_context_check_hws_supp(struct mlx5dr_context *ctx)
{
	struct mlx5dr_cmd_query_caps *caps = ctx->caps;

	/* HWS not supported on device / FW */
	if (!caps->wqe_based_update) {
		DR_LOG(INFO, "Required HWS WQE based insertion cap not supported");
		return;
	}

	/* The current solution requires all rules to set the reparse bit */
	if ((!caps->nic_ft.reparse ||
	     (!caps->fdb_ft.reparse && caps->eswitch_manager)) ||
	    !IS_BIT_SET(caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS)) {
		DR_LOG(INFO, "Required HWS reparse cap not supported");
		return;
	}

	/* FW/HW must support the 8DW STE format */
	if (!IS_BIT_SET(caps->ste_format, MLX5_IFC_RTC_STE_FORMAT_8DW)) {
		DR_LOG(INFO, "Required HWS STE format not supported");
		return;
	}

	/* Adding rules both by hash and by offset is required */
	if (!IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH) ||
	    !IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET)) {
		DR_LOG(INFO, "Required HWS RTC update mode not supported");
		return;
	}

	/* Support for the SELECT definer ID is required */
	if (!IS_BIT_SET(caps->definer_format_sup, MLX5_IFC_DEFINER_FORMAT_ID_SELECT)) {
		DR_LOG(INFO, "Required HWS Dynamic definer not supported");
		return;
	}

	ctx->flags |= MLX5DR_CONTEXT_FLAG_HWS_SUPPORT;
}

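/* HWS bring-up order: PD, then pools and caches, then send queues; each step
 * unwinds the previous ones on failure. On devices without HWS support this
 * returns success while leaving MLX5DR_CONTEXT_FLAG_HWS_SUPPORT cleared.
 */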
static int mlx5dr_context_init_hws(struct mlx5dr_context *ctx,
				   struct mlx5dr_context_attr *attr)
{
	int ret;

	mlx5dr_context_check_hws_supp(ctx);

	if (!(ctx->flags & MLX5DR_CONTEXT_FLAG_HWS_SUPPORT))
		return 0;

	ret = mlx5dr_context_init_pd(ctx, attr->pd);
	if (ret)
		return ret;

	ret = mlx5dr_context_pools_init(ctx);
	if (ret)
		goto uninit_pd;

	if (attr->bwc)
		ctx->flags |= MLX5DR_CONTEXT_FLAG_BWC_SUPPORT;

	ret = mlx5dr_send_queues_open(ctx, attr->queues, attr->queue_size);
	if (ret)
		goto pools_uninit;

	return 0;

pools_uninit:
	mlx5dr_context_pools_uninit(ctx);
uninit_pd:
	mlx5dr_context_uninit_pd(ctx);
	return ret;
}

static void mlx5dr_context_uninit_hws(struct mlx5dr_context *ctx)
{
	if (!(ctx->flags & MLX5DR_CONTEXT_FLAG_HWS_SUPPORT))
		return;

	mlx5dr_send_queues_close(ctx);
	mlx5dr_context_pools_uninit(ctx);
	mlx5dr_context_uninit_pd(ctx);
}

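/* With attr->shared_ibv_ctx set, ctx->ibv_ctx points to the shared device and
 * the device that opened the context is kept as local_ibv_ctx. Both sides must
 * report cross_vhca_resources, and the shared device's vhca_id is saved in
 * caps->shared_vhca_id.
 */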
static int mlx5dr_context_init_shared_ctx(struct mlx5dr_context *ctx,
					  struct ibv_context *ibv_ctx,
					  struct mlx5dr_context_attr *attr)
{
	struct mlx5dr_cmd_query_caps shared_caps = {0};
	int ret;

	if (!attr->shared_ibv_ctx) {
		ctx->ibv_ctx = ibv_ctx;
	} else {
		ctx->ibv_ctx = attr->shared_ibv_ctx;
		ctx->local_ibv_ctx = ibv_ctx;
		ret = mlx5dr_cmd_query_caps(attr->shared_ibv_ctx, &shared_caps);
		if (ret || !shared_caps.cross_vhca_resources) {
			DR_LOG(INFO, "No cross_vhca_resources cap for shared ibv");
			rte_errno = ENOTSUP;
			return rte_errno;
		}
		ctx->caps->shared_vhca_id = shared_caps.vhca_id;
	}

	if (ctx->local_ibv_ctx && !ctx->caps->cross_vhca_resources) {
		DR_LOG(INFO, "No cross_vhca_resources cap for local ibv");
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	return 0;
}

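/* Open a context over @ibv_ctx; returns NULL and sets rte_errno on failure.
 * A minimal usage sketch (the queue count and queue size below are arbitrary
 * illustrative values, not recommendations from this driver):
 *
 *	struct mlx5dr_context_attr attr = {
 *		.queues = 16,
 *		.queue_size = 256,
 *	};
 *	struct mlx5dr_context *ctx;
 *
 *	ctx = mlx5dr_context_open(ibv_ctx, &attr);
 *	if (!ctx)
 *		return -rte_errno;
 *	...
 *	mlx5dr_context_close(ctx);
 */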
struct mlx5dr_context *mlx5dr_context_open(struct ibv_context *ibv_ctx,
					   struct mlx5dr_context_attr *attr)
{
	struct mlx5dr_context *ctx;
	int ret;

	ctx = simple_calloc(1, sizeof(*ctx));
	if (!ctx) {
		rte_errno = ENOMEM;
		return NULL;
	}

	pthread_spin_init(&ctx->ctrl_lock, PTHREAD_PROCESS_PRIVATE);

	ctx->caps = simple_calloc(1, sizeof(*ctx->caps));
	if (!ctx->caps)
		goto free_ctx;

	ret = mlx5dr_cmd_query_caps(ibv_ctx, ctx->caps);
	if (ret)
		goto free_caps;

	if (mlx5dr_context_init_shared_ctx(ctx, ibv_ctx, attr))
		goto free_caps;

	ret = mlx5dr_context_init_hws(ctx, attr);
	if (ret)
		goto free_caps;

	return ctx;

free_caps:
	simple_free(ctx->caps);
free_ctx:
	pthread_spin_destroy(&ctx->ctrl_lock);
	simple_free(ctx);
	return NULL;
}

int mlx5dr_context_close(struct mlx5dr_context *ctx)
{
	mlx5dr_context_uninit_hws(ctx);
	simple_free(ctx->caps);
	pthread_spin_destroy(&ctx->ctrl_lock);
	simple_free(ctx);
	return 0;
}