/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include "mlx5dr_internal.h"

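/* Create the context-level resource pools: initialize the pattern cache
 * and allocate one STC pool per flow table type, sized according to the
 * device STC allocation capabilities.
 */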
static int mlx5dr_context_pools_init(struct mlx5dr_context *ctx)
{
	struct mlx5dr_pool_attr pool_attr = {0};
	uint8_t max_log_sz;
	int i;

	if (mlx5dr_pat_init_pattern_cache(&ctx->pattern_cache))
		return rte_errno;

	/* Create an STC pool per FT type */
	pool_attr.pool_type = MLX5DR_POOL_TYPE_STC;
	pool_attr.flags = MLX5DR_POOL_FLAGS_FOR_STC_POOL;
	max_log_sz = RTE_MIN(MLX5DR_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max);
	pool_attr.alloc_log_sz = RTE_MAX(max_log_sz, ctx->caps->stc_alloc_log_gran);

	for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) {
		pool_attr.table_type = i;
		ctx->stc_pool[i] = mlx5dr_pool_create(ctx, &pool_attr);
		if (!ctx->stc_pool[i]) {
			DR_LOG(ERR, "Failed to allocate STC pool [%d]", i);
			goto free_stc_pools;
		}
	}

	return 0;

free_stc_pools:
	for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++)
		if (ctx->stc_pool[i])
			mlx5dr_pool_destroy(ctx->stc_pool[i]);

	mlx5dr_pat_uninit_pattern_cache(ctx->pattern_cache);

	return rte_errno;
}

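/* Release the pattern cache and the per-table-type STC pools created by
 * mlx5dr_context_pools_init().
 */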
static void mlx5dr_context_pools_uninit(struct mlx5dr_context *ctx)
{
	int i;

	mlx5dr_pat_uninit_pattern_cache(ctx->pattern_cache);

	for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) {
		if (ctx->stc_pool[i])
			mlx5dr_pool_destroy(ctx->stc_pool[i]);
	}
}

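/* Use the protection domain supplied by the caller or, when none is given,
 * allocate a private one, then resolve the PD number (pdn) through the
 * mlx5dv object initializer.
 */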
static int mlx5dr_context_init_pd(struct mlx5dr_context *ctx,
				  struct ibv_pd *pd)
{
	struct mlx5dv_pd mlx5_pd = {0};
	struct mlx5dv_obj obj;
	int ret;

	if (pd) {
		ctx->pd = pd;
	} else {
		ctx->pd = mlx5_glue->alloc_pd(ctx->ibv_ctx);
		if (!ctx->pd) {
			DR_LOG(ERR, "Failed to allocate PD");
			rte_errno = errno;
			return rte_errno;
		}
		ctx->flags |= MLX5DR_CONTEXT_FLAG_PRIVATE_PD;
	}

	obj.pd.in = ctx->pd;
	obj.pd.out = &mlx5_pd;

	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
	if (ret)
		goto free_private_pd;

	ctx->pd_num = mlx5_pd.pdn;

	return 0;

free_private_pd:
	if (ctx->flags & MLX5DR_CONTEXT_FLAG_PRIVATE_PD)
		mlx5_glue->dealloc_pd(ctx->pd);

	return ret;
}

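/* Release the PD only when it was allocated privately by the context */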
static int mlx5dr_context_uninit_pd(struct mlx5dr_context *ctx)
{
	if (ctx->flags & MLX5DR_CONTEXT_FLAG_PRIVATE_PD)
		return mlx5_glue->dealloc_pd(ctx->pd);

	return 0;
}

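/* Set MLX5DR_CONTEXT_FLAG_HWS_SUPPORT only when the device and FW expose
 * every capability required for WQE-based (hardware steering) rule
 * insertion; otherwise the flag stays cleared and HWS setup is skipped.
 */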
static void mlx5dr_context_check_hws_supp(struct mlx5dr_context *ctx)
{
	struct mlx5dr_cmd_query_caps *caps = ctx->caps;

	/* HWS not supported on device / FW */
	if (!caps->wqe_based_update) {
		DR_LOG(INFO, "Required HWS WQE based insertion cap not supported");
		return;
	}

	/* Current solution requires all rules to set the reparse bit */
	if ((!caps->nic_ft.reparse ||
	     (!caps->fdb_ft.reparse && caps->eswitch_manager)) ||
	    !IS_BIT_SET(caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS)) {
		DR_LOG(INFO, "Required HWS reparse cap not supported");
		return;
	}

	/* FW/HW must support 8DW STE */
	if (!IS_BIT_SET(caps->ste_format, MLX5_IFC_RTC_STE_FORMAT_8DW)) {
		DR_LOG(INFO, "Required HWS STE format not supported");
		return;
	}

	/* Adding rules both by hash and by offset is required */
	if (!IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH) ||
	    !IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET)) {
		DR_LOG(INFO, "Required HWS RTC update mode not supported");
		return;
	}

	/* Support for SELECT definer ID is required */
	if (!IS_BIT_SET(caps->definer_format_sup, MLX5_IFC_DEFINER_FORMAT_ID_SELECT)) {
		DR_LOG(INFO, "Required HWS Dynamic definer not supported");
		return;
	}

	ctx->flags |= MLX5DR_CONTEXT_FLAG_HWS_SUPPORT;
}

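/* Bring up the HWS resources: PD, context pools and send queues.
 * Returns 0 without doing anything when HWS is not supported.
 */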
static int mlx5dr_context_init_hws(struct mlx5dr_context *ctx,
				   struct mlx5dr_context_attr *attr)
{
	int ret;

	mlx5dr_context_check_hws_supp(ctx);

	if (!(ctx->flags & MLX5DR_CONTEXT_FLAG_HWS_SUPPORT))
		return 0;

	ret = mlx5dr_context_init_pd(ctx, attr->pd);
	if (ret)
		return ret;

	ret = mlx5dr_context_pools_init(ctx);
	if (ret)
		goto uninit_pd;

	ret = mlx5dr_send_queues_open(ctx, attr->queues, attr->queue_size);
	if (ret)
		goto pools_uninit;

	return 0;

pools_uninit:
	mlx5dr_context_pools_uninit(ctx);
uninit_pd:
	mlx5dr_context_uninit_pd(ctx);
	return ret;
}

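/* Tear down the HWS resources in reverse order of their creation */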
static void mlx5dr_context_uninit_hws(struct mlx5dr_context *ctx)
{
	if (!(ctx->flags & MLX5DR_CONTEXT_FLAG_HWS_SUPPORT))
		return;

	mlx5dr_send_queues_close(ctx);
	mlx5dr_context_pools_uninit(ctx);
	mlx5dr_context_uninit_pd(ctx);
}

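/* Open a steering context on the given ibv device context.
 *
 * A minimal usage sketch (the queue values below are illustrative only);
 * leaving attr.pd as NULL makes the context allocate a private PD:
 *
 *	struct mlx5dr_context_attr attr = {
 *		.queues = 16,
 *		.queue_size = 256,
 *		.pd = NULL,
 *	};
 *	struct mlx5dr_context *ctx;
 *
 *	ctx = mlx5dr_context_open(ibv_ctx, &attr);
 *	if (!ctx)
 *		return -rte_errno;
 *	...
 *	mlx5dr_context_close(ctx);
 */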
struct mlx5dr_context *mlx5dr_context_open(struct ibv_context *ibv_ctx,
					   struct mlx5dr_context_attr *attr)
{
	struct mlx5dr_context *ctx;
	int ret;

	ctx = simple_calloc(1, sizeof(*ctx));
	if (!ctx) {
		rte_errno = ENOMEM;
		return NULL;
	}

	ctx->ibv_ctx = ibv_ctx;
	pthread_spin_init(&ctx->ctrl_lock, PTHREAD_PROCESS_PRIVATE);

	ctx->caps = simple_calloc(1, sizeof(*ctx->caps));
	if (!ctx->caps) {
		rte_errno = ENOMEM;
		goto free_ctx;
	}

	ret = mlx5dr_cmd_query_caps(ibv_ctx, ctx->caps);
	if (ret)
		goto free_caps;

	ret = mlx5dr_context_init_hws(ctx, attr);
	if (ret)
		goto free_caps;

	return ctx;

free_caps:
	simple_free(ctx->caps);
free_ctx:
	simple_free(ctx);
	return NULL;
}

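/* Close the context and free everything allocated by mlx5dr_context_open() */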
int mlx5dr_context_close(struct mlx5dr_context *ctx)
{
	mlx5dr_context_uninit_hws(ctx);
	simple_free(ctx->caps);
	pthread_spin_destroy(&ctx->ctrl_lock);
	simple_free(ctx);
	return 0;
}