/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include "mlx5dr_internal.h"

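/* Initialize the pattern cache and create one STC pool per flow table type.
 * The STC pool size is capped by the device STC allocation limit and rounded
 * up to the device allocation granularity.
 */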
static int mlx5dr_context_pools_init(struct mlx5dr_context *ctx)
{
	struct mlx5dr_pool_attr pool_attr = {0};
	uint8_t max_log_sz;
	int i;

	if (mlx5dr_pat_init_pattern_cache(&ctx->pattern_cache))
		return rte_errno;

	/* Create an STC pool per FT type */
	pool_attr.pool_type = MLX5DR_POOL_TYPE_STC;
	pool_attr.flags = MLX5DR_POOL_FLAGS_FOR_STC_POOL;
	max_log_sz = RTE_MIN(MLX5DR_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max);
	pool_attr.alloc_log_sz = RTE_MAX(max_log_sz, ctx->caps->stc_alloc_log_gran);

	for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) {
		pool_attr.table_type = i;
		ctx->stc_pool[i] = mlx5dr_pool_create(ctx, &pool_attr);
		if (!ctx->stc_pool[i]) {
			DR_LOG(ERR, "Failed to allocate STC pool [%d]", i);
			goto free_stc_pools;
		}
	}

	return 0;

free_stc_pools:
	for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++)
		if (ctx->stc_pool[i])
			mlx5dr_pool_destroy(ctx->stc_pool[i]);

	mlx5dr_pat_uninit_pattern_cache(ctx->pattern_cache);

	return rte_errno;
}

static void mlx5dr_context_pools_uninit(struct mlx5dr_context *ctx)
{
	int i;

	mlx5dr_pat_uninit_pattern_cache(ctx->pattern_cache);

	for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) {
		if (ctx->stc_pool[i])
			mlx5dr_pool_destroy(ctx->stc_pool[i]);
	}
}

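/* Use the caller provided PD when given, otherwise allocate a private PD,
 * then query the PD number (pdn) through Direct Verbs and keep it on the
 * context.
 */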
static int mlx5dr_context_init_pd(struct mlx5dr_context *ctx,
				  struct ibv_pd *pd)
{
	struct mlx5dv_pd mlx5_pd = {0};
	struct mlx5dv_obj obj;
	int ret;

	if (pd) {
		ctx->pd = pd;
	} else {
		ctx->pd = mlx5_glue->alloc_pd(ctx->ibv_ctx);
		if (!ctx->pd) {
			DR_LOG(ERR, "Failed to allocate PD");
			rte_errno = errno;
			return rte_errno;
		}
		ctx->flags |= MLX5DR_CONTEXT_FLAG_PRIVATE_PD;
	}

	obj.pd.in = ctx->pd;
	obj.pd.out = &mlx5_pd;

	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
	if (ret)
		goto free_private_pd;

	ctx->pd_num = mlx5_pd.pdn;

	return 0;

free_private_pd:
	if (ctx->flags & MLX5DR_CONTEXT_FLAG_PRIVATE_PD)
		mlx5_glue->dealloc_pd(ctx->pd);

	return ret;
}

static int mlx5dr_context_uninit_pd(struct mlx5dr_context *ctx)
{
	if (ctx->flags & MLX5DR_CONTEXT_FLAG_PRIVATE_PD)
		return mlx5_glue->dealloc_pd(ctx->pd);

	return 0;
}

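/* Check that the device and FW expose every capability required for HW
 * steering: WQE based rule insertion, always-reparse RTC mode, 8DW STE
 * format, RTC update by hash and by offset, and the SELECT definer format.
 * Set MLX5DR_CONTEXT_FLAG_HWS_SUPPORT only when all of them are present.
 */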
static void mlx5dr_context_check_hws_supp(struct mlx5dr_context *ctx)
{
	struct mlx5dr_cmd_query_caps *caps = ctx->caps;

	/* HWS not supported on device / FW */
	if (!caps->wqe_based_update) {
		DR_LOG(INFO, "Required HWS WQE based insertion cap not supported");
		return;
	}

	/* Current solution requires all rules to set reparse bit */
	if ((!caps->nic_ft.reparse || !caps->fdb_ft.reparse) ||
	    !IS_BIT_SET(caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS)) {
		DR_LOG(INFO, "Required HWS reparse cap not supported");
		return;
	}

	/* FW/HW must support 8DW STE */
	if (!IS_BIT_SET(caps->ste_format, MLX5_IFC_RTC_STE_FORMAT_8DW)) {
		DR_LOG(INFO, "Required HWS STE format not supported");
		return;
	}

	/* Adding rules by hash and by offset are requirements */
	if (!IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH) ||
	    !IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET)) {
		DR_LOG(INFO, "Required HWS RTC update mode not supported");
		return;
	}

	/* Support for SELECT definer ID is required */
	if (!IS_BIT_SET(caps->definer_format_sup, MLX5_IFC_DEFINER_FORMAT_ID_SELECT)) {
		DR_LOG(INFO, "Required HWS Dynamic definer not supported");
		return;
	}

	ctx->flags |= MLX5DR_CONTEXT_FLAG_HWS_SUPPORT;
}

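/* Bring up the HWS resources: PD, STC pools and send queues.
 * When the device lacks HWS support the function returns successfully
 * without creating any resources.
 */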
static int mlx5dr_context_init_hws(struct mlx5dr_context *ctx,
				   struct mlx5dr_context_attr *attr)
{
	int ret;

	mlx5dr_context_check_hws_supp(ctx);

	if (!(ctx->flags & MLX5DR_CONTEXT_FLAG_HWS_SUPPORT))
		return 0;

	ret = mlx5dr_context_init_pd(ctx, attr->pd);
	if (ret)
		return ret;

	ret = mlx5dr_context_pools_init(ctx);
	if (ret)
		goto uninit_pd;

	ret = mlx5dr_send_queues_open(ctx, attr->queues, attr->queue_size);
	if (ret)
		goto pools_uninit;

	return 0;

pools_uninit:
	mlx5dr_context_pools_uninit(ctx);
uninit_pd:
	mlx5dr_context_uninit_pd(ctx);
	return ret;
}

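/* Release the HWS resources in reverse order of creation:
 * send queues, pools and finally the PD. No-op without HWS support.
 */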
static void mlx5dr_context_uninit_hws(struct mlx5dr_context *ctx)
{
	if (!(ctx->flags & MLX5DR_CONTEXT_FLAG_HWS_SUPPORT))
		return;

	mlx5dr_send_queues_close(ctx);
	mlx5dr_context_pools_uninit(ctx);
	mlx5dr_context_uninit_pd(ctx);
}

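/* Create a steering context on top of the given ibv_context:
 * query the device capabilities and, when HWS is supported,
 * initialize the PD, pools and send queues.
 */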
struct mlx5dr_context *mlx5dr_context_open(struct ibv_context *ibv_ctx,
					   struct mlx5dr_context_attr *attr)
{
	struct mlx5dr_context *ctx;
	int ret;

	ctx = simple_calloc(1, sizeof(*ctx));
	if (!ctx) {
		rte_errno = ENOMEM;
		return NULL;
	}

	ctx->ibv_ctx = ibv_ctx;
	pthread_spin_init(&ctx->ctrl_lock, PTHREAD_PROCESS_PRIVATE);

	ctx->caps = simple_calloc(1, sizeof(*ctx->caps));
	if (!ctx->caps)
		goto free_ctx;

	ret = mlx5dr_cmd_query_caps(ibv_ctx, ctx->caps);
	if (ret)
		goto free_caps;

	ret = mlx5dr_context_init_hws(ctx, attr);
	if (ret)
		goto free_caps;

	return ctx;

free_caps:
	simple_free(ctx->caps);
free_ctx:
	simple_free(ctx);
	return NULL;
}

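/* Destroy a context created by mlx5dr_context_open and release all
 * resources associated with it.
 */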
int mlx5dr_context_close(struct mlx5dr_context *ctx)
{
	mlx5dr_context_uninit_hws(ctx);
	simple_free(ctx->caps);
	pthread_spin_destroy(&ctx->ctrl_lock);
	simple_free(ctx);
	return 0;
}