/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include "mlx5dr_internal.h"

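/* Initialize the pattern and definer caches and create one STC pool per table type. */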
static int mlx5dr_context_pools_init(struct mlx5dr_context *ctx)
{
	struct mlx5dr_pool_attr pool_attr = {0};
	uint8_t max_log_sz;
	int i;

	if (mlx5dr_pat_init_pattern_cache(&ctx->pattern_cache))
		return rte_errno;

	if (mlx5dr_definer_init_cache(&ctx->definer_cache))
		goto uninit_pat_cache;

	/* Create an STC pool per FT type */
	pool_attr.pool_type = MLX5DR_POOL_TYPE_STC;
	pool_attr.flags = MLX5DR_POOL_FLAGS_FOR_STC_POOL;
	max_log_sz = RTE_MIN(MLX5DR_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max);
	pool_attr.alloc_log_sz = RTE_MAX(max_log_sz, ctx->caps->stc_alloc_log_gran);

	for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) {
		pool_attr.table_type = i;
		ctx->stc_pool[i] = mlx5dr_pool_create(ctx, &pool_attr);
		if (!ctx->stc_pool[i]) {
			DR_LOG(ERR, "Failed to allocate STC pool [%d]", i);
			goto free_stc_pools;
		}
	}

	return 0;

free_stc_pools:
	for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++)
		if (ctx->stc_pool[i])
			mlx5dr_pool_destroy(ctx->stc_pool[i]);

	mlx5dr_definer_uninit_cache(ctx->definer_cache);

uninit_pat_cache:
	mlx5dr_pat_uninit_pattern_cache(ctx->pattern_cache);
	return rte_errno;
}

static void mlx5dr_context_pools_uninit(struct mlx5dr_context *ctx)
{
	int i;

	for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) {
		if (ctx->stc_pool[i])
			mlx5dr_pool_destroy(ctx->stc_pool[i]);
	}

	mlx5dr_definer_uninit_cache(ctx->definer_cache);
	mlx5dr_pat_uninit_pattern_cache(ctx->pattern_cache);
}

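/* Use the caller-provided PD or allocate a private one, then query its PD number. */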
static int mlx5dr_context_init_pd(struct mlx5dr_context *ctx,
				  struct ibv_pd *pd)
{
	struct mlx5dv_pd mlx5_pd = {0};
	struct mlx5dv_obj obj;
	int ret;

	if (pd) {
		ctx->pd = pd;
	} else {
		ctx->pd = mlx5_glue->alloc_pd(ctx->ibv_ctx);
		if (!ctx->pd) {
			DR_LOG(ERR, "Failed to allocate PD");
			rte_errno = errno;
			return rte_errno;
		}
		ctx->flags |= MLX5DR_CONTEXT_FLAG_PRIVATE_PD;
	}

	obj.pd.in = ctx->pd;
	obj.pd.out = &mlx5_pd;

	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
	if (ret)
		goto free_private_pd;

	ctx->pd_num = mlx5_pd.pdn;

	return 0;

free_private_pd:
	if (ctx->flags & MLX5DR_CONTEXT_FLAG_PRIVATE_PD)
		mlx5_glue->dealloc_pd(ctx->pd);

	return ret;
}

static int mlx5dr_context_uninit_pd(struct mlx5dr_context *ctx)
{
	if (ctx->flags & MLX5DR_CONTEXT_FLAG_PRIVATE_PD)
		return mlx5_glue->dealloc_pd(ctx->pd);

	return 0;
}

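/* Set MLX5DR_CONTEXT_FLAG_HWS_SUPPORT only when all FW/HW caps required for HWS are present. */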
static void mlx5dr_context_check_hws_supp(struct mlx5dr_context *ctx)
{
	struct mlx5dr_cmd_query_caps *caps = ctx->caps;

	/* HWS not supported on device / FW */
	if (!caps->wqe_based_update) {
		DR_LOG(INFO, "Required HWS WQE based insertion cap not supported");
		return;
	}

	/* Current solution requires all rules to set reparse bit */
	if ((!caps->nic_ft.reparse ||
	     (!caps->fdb_ft.reparse && caps->eswitch_manager)) ||
	    !IS_BIT_SET(caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS)) {
		DR_LOG(INFO, "Required HWS reparse cap not supported");
		return;
	}

	/* FW/HW must support 8DW STE */
	if (!IS_BIT_SET(caps->ste_format, MLX5_IFC_RTC_STE_FORMAT_8DW)) {
		DR_LOG(INFO, "Required HWS STE format not supported");
		return;
	}

	/* Adding rules by hash and by offset are requirements */
	if (!IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH) ||
	    !IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET)) {
		DR_LOG(INFO, "Required HWS RTC update mode not supported");
		return;
	}

	/* Support for SELECT definer ID is required */
	if (!IS_BIT_SET(caps->definer_format_sup, MLX5_IFC_DEFINER_FORMAT_ID_SELECT)) {
		DR_LOG(INFO, "Required HWS Dynamic definer not supported");
		return;
	}

	ctx->flags |= MLX5DR_CONTEXT_FLAG_HWS_SUPPORT;
}

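/* Set up HWS resources (PD, STC pools, send queues); a no-op when HWS is unsupported. */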
static int mlx5dr_context_init_hws(struct mlx5dr_context *ctx,
				   struct mlx5dr_context_attr *attr)
{
	int ret;

	mlx5dr_context_check_hws_supp(ctx);

	if (!(ctx->flags & MLX5DR_CONTEXT_FLAG_HWS_SUPPORT))
		return 0;

	ret = mlx5dr_context_init_pd(ctx, attr->pd);
	if (ret)
		return ret;

	ret = mlx5dr_context_pools_init(ctx);
	if (ret)
		goto uninit_pd;

	ret = mlx5dr_send_queues_open(ctx, attr->queues, attr->queue_size);
	if (ret)
		goto pools_uninit;

	return 0;

pools_uninit:
	mlx5dr_context_pools_uninit(ctx);
uninit_pd:
	mlx5dr_context_uninit_pd(ctx);
	return ret;
}

static void mlx5dr_context_uninit_hws(struct mlx5dr_context *ctx)
{
	if (!(ctx->flags & MLX5DR_CONTEXT_FLAG_HWS_SUPPORT))
		return;

	mlx5dr_send_queues_close(ctx);
	mlx5dr_context_pools_uninit(ctx);
	mlx5dr_context_uninit_pd(ctx);
}

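/* Select the ibv context to work on: the shared one (cross-vHCA) if provided, otherwise the local one. */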
static int mlx5dr_context_init_shared_ctx(struct mlx5dr_context *ctx,
					  struct ibv_context *ibv_ctx,
					  struct mlx5dr_context_attr *attr)
{
	struct mlx5dr_cmd_query_caps shared_caps = {0};
	int ret;

	if (!attr->shared_ibv_ctx) {
		ctx->ibv_ctx = ibv_ctx;
	} else {
		ctx->ibv_ctx = attr->shared_ibv_ctx;
		ctx->local_ibv_ctx = ibv_ctx;
		ret = mlx5dr_cmd_query_caps(attr->shared_ibv_ctx, &shared_caps);
		if (ret || !shared_caps.cross_vhca_resources) {
			DR_LOG(INFO, "No cross_vhca_resources cap for shared ibv");
			rte_errno = ENOTSUP;
			return rte_errno;
		}
		ctx->caps->shared_vhca_id = shared_caps.vhca_id;
	}

	if (ctx->local_ibv_ctx && !ctx->caps->cross_vhca_resources) {
		DR_LOG(INFO, "No cross_vhca_resources cap for local ibv");
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	return 0;
}

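/* Allocate and initialize an mlx5dr context over the given ibv context.
 * Returns NULL and sets rte_errno on failure.
 *
 * Illustrative usage sketch (the queue count and size below are example
 * values, not taken from this file):
 *
 *	struct mlx5dr_context_attr attr = {
 *		.queues = 16,
 *		.queue_size = 256,
 *	};
 *	struct mlx5dr_context *ctx = mlx5dr_context_open(ibv_ctx, &attr);
 *	if (!ctx)
 *		return -rte_errno;
 *	...
 *	mlx5dr_context_close(ctx);
 */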
struct mlx5dr_context *mlx5dr_context_open(struct ibv_context *ibv_ctx,
					   struct mlx5dr_context_attr *attr)
{
	struct mlx5dr_context *ctx;
	int ret;

	ctx = simple_calloc(1, sizeof(*ctx));
	if (!ctx) {
		rte_errno = ENOMEM;
		return NULL;
	}

	pthread_spin_init(&ctx->ctrl_lock, PTHREAD_PROCESS_PRIVATE);

	ctx->caps = simple_calloc(1, sizeof(*ctx->caps));
	if (!ctx->caps)
		goto free_ctx;

	ret = mlx5dr_cmd_query_caps(ibv_ctx, ctx->caps);
	if (ret)
		goto free_caps;

	if (mlx5dr_context_init_shared_ctx(ctx, ibv_ctx, attr))
		goto free_caps;

	ret = mlx5dr_context_init_hws(ctx, attr);
	if (ret)
		goto free_caps;

	return ctx;

free_caps:
	simple_free(ctx->caps);
free_ctx:
	simple_free(ctx);
	return NULL;
}

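/* Release all HWS resources held by the context and free it. */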
int mlx5dr_context_close(struct mlx5dr_context *ctx)
{
	mlx5dr_context_uninit_hws(ctx);
	simple_free(ctx->caps);
	pthread_spin_destroy(&ctx->ctrl_lock);
	simple_free(ctx);
	return 0;
}