xref: /dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c (revision e1df15783bdfffc79de60d8db3bc9a19f74b479f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2022 NVIDIA Corporation & Affiliates
3  */
4 
5 #include "mlx5dr_internal.h"
6 
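/* Destroy a DevX object created by one of the helpers below and free its
 * mlx5dr_devx_obj wrapper; returns the status of the glue destroy call.
 */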
7 int mlx5dr_cmd_destroy_obj(struct mlx5dr_devx_obj *devx_obj)
8 {
9 	int ret;
10 
11 	ret = mlx5_glue->devx_obj_destroy(devx_obj->obj);
12 	simple_free(devx_obj);
13 
14 	return ret;
15 }
16 
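/* Build a CREATE_FLOW_TABLE DevX command from ft_attr (type, level,
 * rtc_valid) and return a wrapper holding the firmware table ID.
 */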
17 struct mlx5dr_devx_obj *
18 mlx5dr_cmd_flow_table_create(struct ibv_context *ctx,
19 			     struct mlx5dr_cmd_ft_create_attr *ft_attr)
20 {
21 	uint32_t out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
22 	uint32_t in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
23 	struct mlx5dr_devx_obj *devx_obj;
24 	void *ft_ctx;
25 
26 	devx_obj = simple_malloc(sizeof(*devx_obj));
27 	if (!devx_obj) {
28 		DR_LOG(ERR, "Failed to allocate memory for flow table object");
29 		rte_errno = ENOMEM;
30 		return NULL;
31 	}
32 
33 	MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
34 	MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type);
35 
36 	ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
37 	MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
38 	MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid);
39 
40 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
41 	if (!devx_obj->obj) {
42 		DR_LOG(ERR, "Failed to create FT");
43 		simple_free(devx_obj);
44 		rte_errno = errno;
45 		return NULL;
46 	}
47 
48 	devx_obj->id = MLX5_GET(create_flow_table_out, out, table_id);
49 
50 	return devx_obj;
51 }
52 
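/* Issue MODIFY_FLOW_TABLE on an existing table, typically to update its
 * miss action/miss table or the RTC IDs connected to it.
 */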
53 int
54 mlx5dr_cmd_flow_table_modify(struct mlx5dr_devx_obj *devx_obj,
55 			     struct mlx5dr_cmd_ft_modify_attr *ft_attr)
56 {
57 	uint32_t out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
58 	uint32_t in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
59 	void *ft_ctx;
60 	int ret;
61 
62 	MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE);
63 	MLX5_SET(modify_flow_table_in, in, table_type, ft_attr->type);
64 	MLX5_SET(modify_flow_table_in, in, modify_field_select, ft_attr->modify_fs);
65 	MLX5_SET(modify_flow_table_in, in, table_id, devx_obj->id);
66 
67 	ft_ctx = MLX5_ADDR_OF(modify_flow_table_in, in, flow_table_context);
68 
69 	MLX5_SET(flow_table_context, ft_ctx, table_miss_action, ft_attr->table_miss_action);
70 	MLX5_SET(flow_table_context, ft_ctx, table_miss_id, ft_attr->table_miss_id);
71 	MLX5_SET(flow_table_context, ft_ctx, rtc_id_0, ft_attr->rtc_id_0);
72 	MLX5_SET(flow_table_context, ft_ctx, rtc_id_1, ft_attr->rtc_id_1);
73 
74 	ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out));
75 	if (ret) {
76 		DR_LOG(ERR, "Failed to modify FT");
77 		rte_errno = errno;
78 	}
79 
80 	return ret;
81 }
82 
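/* Query a flow table and return the two SW-owner ICM root addresses
 * reported in its flow table context.
 */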
83 int
84 mlx5dr_cmd_flow_table_query(struct mlx5dr_devx_obj *devx_obj,
85 			    struct mlx5dr_cmd_ft_query_attr *ft_attr,
86 			    uint64_t *icm_addr_0, uint64_t *icm_addr_1)
87 {
88 	uint32_t out[MLX5_ST_SZ_DW(query_flow_table_out)] = {0};
89 	uint32_t in[MLX5_ST_SZ_DW(query_flow_table_in)] = {0};
90 	void *ft_ctx;
91 	int ret;
92 
93 	MLX5_SET(query_flow_table_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_TABLE);
94 	MLX5_SET(query_flow_table_in, in, table_type, ft_attr->type);
95 	MLX5_SET(query_flow_table_in, in, table_id, devx_obj->id);
96 
97 	ret = mlx5_glue->devx_obj_query(devx_obj->obj, in, sizeof(in), out, sizeof(out));
98 	if (ret) {
99 		DR_LOG(ERR, "Failed to query FT");
100 		rte_errno = errno;
101 		return ret;
102 	}
103 
104 	ft_ctx = MLX5_ADDR_OF(query_flow_table_out, out, flow_table_context);
105 	*icm_addr_0 = MLX5_GET64(flow_table_context, ft_ctx, sw_owner_icm_root_0);
106 	*icm_addr_1 = MLX5_GET64(flow_table_context, ft_ctx, sw_owner_icm_root_1);
107 
108 	return ret;
109 }
110 
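/* Create a flow group with no match criteria on the given table; used
 * internally by the default forward table helper below.
 */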
111 static struct mlx5dr_devx_obj *
112 mlx5dr_cmd_flow_group_create(struct ibv_context *ctx,
113 			     struct mlx5dr_cmd_fg_attr *fg_attr)
114 {
115 	uint32_t out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
116 	uint32_t in[MLX5_ST_SZ_DW(create_flow_group_in)] = {0};
117 	struct mlx5dr_devx_obj *devx_obj;
118 
119 	devx_obj = simple_malloc(sizeof(*devx_obj));
120 	if (!devx_obj) {
121 		DR_LOG(ERR, "Failed to allocate memory for flow group object");
122 		rte_errno = ENOMEM;
123 		return NULL;
124 	}
125 
126 	MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
127 	MLX5_SET(create_flow_group_in, in, table_type, fg_attr->table_type);
128 	MLX5_SET(create_flow_group_in, in, table_id, fg_attr->table_id);
129 
130 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
131 	if (!devx_obj->obj) {
132 		DR_LOG(ERR, "Failed to create Flow group");
133 		simple_free(devx_obj);
134 		rte_errno = errno;
135 		return NULL;
136 	}
137 
138 	devx_obj->id = MLX5_GET(create_flow_group_out, out, group_id);
139 
140 	return devx_obj;
141 }
142 
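/* Create a single flow table entry (FTE) in the given table and group.
 * At most one forward destination is supported, taken from fte_attr.
 */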
143 struct mlx5dr_devx_obj *
144 mlx5dr_cmd_set_fte(struct ibv_context *ctx,
145 		   uint32_t table_type,
146 		   uint32_t table_id,
147 		   uint32_t group_id,
148 		   struct mlx5dr_cmd_set_fte_attr *fte_attr)
149 {
150 	uint32_t in[MLX5_ST_SZ_DW(set_fte_in) + MLX5_ST_SZ_DW(dest_format)] = {0};
151 	uint32_t out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
152 	struct mlx5dr_devx_obj *devx_obj;
153 	void *in_flow_context;
154 	uint32_t action_flags;
155 	void *in_dests;
156 
157 	devx_obj = simple_malloc(sizeof(*devx_obj));
158 	if (!devx_obj) {
159 		DR_LOG(ERR, "Failed to allocate memory for fte object");
160 		rte_errno = ENOMEM;
161 		return NULL;
162 	}
163 
164 	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
165 	MLX5_SET(set_fte_in, in, table_type, table_type);
166 	MLX5_SET(set_fte_in, in, table_id, table_id);
167 
168 	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
169 	MLX5_SET(flow_context, in_flow_context, group_id, group_id);
170 	MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source);
171 
172 	action_flags = fte_attr->action_flags;
173 	MLX5_SET(flow_context, in_flow_context, action, action_flags);
174 
175 	if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
176 		/* Only a single destination is supported (destination_list_size = 1) */
177 		MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
178 		in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
179 		MLX5_SET(dest_format, in_dests, destination_type, fte_attr->destination_type);
180 		MLX5_SET(dest_format, in_dests, destination_id, fte_attr->destination_id);
181 	}
182 
183 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
184 	if (!devx_obj->obj) {
185 		DR_LOG(ERR, "Failed to create FTE");
186 		rte_errno = errno;
187 		goto free_devx;
188 	}
189 
190 	return devx_obj;
191 
192 free_devx:
193 	simple_free(devx_obj);
194 	return NULL;
195 }
196 
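/* Create a one-entry "forward" table: a flow table, a single flow group
 * and one FTE forwarding according to fte_attr. Used e.g. as a default
 * miss target. On any failure the partially created objects are freed.
 */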
197 struct mlx5dr_cmd_forward_tbl *
198 mlx5dr_cmd_forward_tbl_create(struct ibv_context *ctx,
199 			      struct mlx5dr_cmd_ft_create_attr *ft_attr,
200 			      struct mlx5dr_cmd_set_fte_attr *fte_attr)
201 {
202 	struct mlx5dr_cmd_fg_attr fg_attr = {0};
203 	struct mlx5dr_cmd_forward_tbl *tbl;
204 
205 	tbl = simple_calloc(1, sizeof(*tbl));
206 	if (!tbl) {
207 		DR_LOG(ERR, "Failed to allocate memory");
208 		rte_errno = ENOMEM;
209 		return NULL;
210 	}
211 
212 	tbl->ft = mlx5dr_cmd_flow_table_create(ctx, ft_attr);
213 	if (!tbl->ft) {
214 		DR_LOG(ERR, "Failed to create FT");
215 		goto free_tbl;
216 	}
217 
218 	fg_attr.table_id = tbl->ft->id;
219 	fg_attr.table_type = ft_attr->type;
220 
221 	tbl->fg = mlx5dr_cmd_flow_group_create(ctx, &fg_attr);
222 	if (!tbl->fg) {
223 		DR_LOG(ERR, "Failed to create FG");
224 		goto free_ft;
225 	}
226 
227 	tbl->fte = mlx5dr_cmd_set_fte(ctx, ft_attr->type, tbl->ft->id, tbl->fg->id, fte_attr);
228 	if (!tbl->fte) {
229 		DR_LOG(ERR, "Failed to create FTE");
230 		goto free_fg;
231 	}
232 	return tbl;
233 
234 free_fg:
235 	mlx5dr_cmd_destroy_obj(tbl->fg);
236 free_ft:
237 	mlx5dr_cmd_destroy_obj(tbl->ft);
238 free_tbl:
239 	simple_free(tbl);
240 	return NULL;
241 }
242 
243 void mlx5dr_cmd_forward_tbl_destroy(struct mlx5dr_cmd_forward_tbl *tbl)
244 {
245 	mlx5dr_cmd_destroy_obj(tbl->fte);
246 	mlx5dr_cmd_destroy_obj(tbl->fg);
247 	mlx5dr_cmd_destroy_obj(tbl->ft);
248 	simple_free(tbl);
249 }
250 
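/* Fill ft_attr so a table's miss action jumps to the per-domain default
 * miss table (FDB case) or to the aliased end FT of a shared-GVMI
 * context. No-op for non-FDB tables when shared GVMI is not in use.
 */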
251 void mlx5dr_cmd_set_attr_connect_miss_tbl(struct mlx5dr_context *ctx,
252 					  uint32_t fw_ft_type,
253 					  enum mlx5dr_table_type type,
254 					  struct mlx5dr_cmd_ft_modify_attr *ft_attr)
255 {
256 	struct mlx5dr_devx_obj *default_miss_tbl;
257 
258 	if (type != MLX5DR_TABLE_TYPE_FDB && !mlx5dr_context_shared_gvmi_used(ctx))
259 		return;
260 
261 	ft_attr->modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
262 	ft_attr->type = fw_ft_type;
263 	ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;
264 
265 	if (type == MLX5DR_TABLE_TYPE_FDB) {
266 		default_miss_tbl = ctx->common_res[type].default_miss->ft;
267 		if (!default_miss_tbl) {
268 			assert(false);
269 			return;
270 		}
271 		ft_attr->table_miss_id = default_miss_tbl->id;
272 	} else {
273 		ft_attr->table_miss_id = ctx->gvmi_res[type].aliased_end_ft->id;
274 	}
275 }
276 
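/* Create an RTC general object. The primary STE format is chosen by
 * is_frst_jumbo (11DW vs 8DW), an optional second range STE can be
 * enabled, and the hash/index modes, definers, STC/STE bases and miss FT
 * all come from rtc_attr. Reparse is always enabled.
 */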
277 struct mlx5dr_devx_obj *
278 mlx5dr_cmd_rtc_create(struct ibv_context *ctx,
279 		      struct mlx5dr_cmd_rtc_create_attr *rtc_attr)
280 {
281 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
282 	uint32_t in[MLX5_ST_SZ_DW(create_rtc_in)] = {0};
283 	struct mlx5dr_devx_obj *devx_obj;
284 	void *attr;
285 
286 	devx_obj = simple_malloc(sizeof(*devx_obj));
287 	if (!devx_obj) {
288 		DR_LOG(ERR, "Failed to allocate memory for RTC object");
289 		rte_errno = ENOMEM;
290 		return NULL;
291 	}
292 
293 	attr = MLX5_ADDR_OF(create_rtc_in, in, hdr);
294 	MLX5_SET(general_obj_in_cmd_hdr,
295 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
296 	MLX5_SET(general_obj_in_cmd_hdr,
297 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_RTC);
298 
299 	attr = MLX5_ADDR_OF(create_rtc_in, in, rtc);
300 	MLX5_SET(rtc, attr, ste_format_0, rtc_attr->is_frst_jumbo ?
301 		MLX5_IFC_RTC_STE_FORMAT_11DW :
302 		MLX5_IFC_RTC_STE_FORMAT_8DW);
303 
304 	if (rtc_attr->is_scnd_range) {
305 		MLX5_SET(rtc, attr, ste_format_1, MLX5_IFC_RTC_STE_FORMAT_RANGE);
306 		MLX5_SET(rtc, attr, num_match_ste, 2);
307 	}
308 
309 	MLX5_SET(rtc, attr, pd, rtc_attr->pd);
310 	MLX5_SET(rtc, attr, update_method, rtc_attr->fw_gen_wqe);
311 	MLX5_SET(rtc, attr, update_index_mode, rtc_attr->update_index_mode);
312 	MLX5_SET(rtc, attr, access_index_mode, rtc_attr->access_index_mode);
313 	MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer);
314 	MLX5_SET(rtc, attr, log_depth, rtc_attr->log_depth);
315 	MLX5_SET(rtc, attr, log_hash_size, rtc_attr->log_size);
316 	MLX5_SET(rtc, attr, table_type, rtc_attr->table_type);
318 	MLX5_SET(rtc, attr, match_definer_0, rtc_attr->match_definer_0);
319 	MLX5_SET(rtc, attr, match_definer_1, rtc_attr->match_definer_1);
320 	MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base);
321 	MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base);
322 	MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset);
323 	MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id);
324 	MLX5_SET(rtc, attr, reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS);
325 
326 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
327 	if (!devx_obj->obj) {
328 		DR_LOG(ERR, "Failed to create RTC");
329 		simple_free(devx_obj);
330 		rte_errno = errno;
331 		return NULL;
332 	}
333 
334 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
335 
336 	return devx_obj;
337 }
338 
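/* Create an STC general object: a range of 2^log_obj_range action
 * entries for the given table type. Individual entries are programmed
 * later via mlx5dr_cmd_stc_modify().
 */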
339 struct mlx5dr_devx_obj *
340 mlx5dr_cmd_stc_create(struct ibv_context *ctx,
341 		      struct mlx5dr_cmd_stc_create_attr *stc_attr)
342 {
343 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
344 	uint32_t in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
345 	struct mlx5dr_devx_obj *devx_obj;
346 	void *attr;
347 
348 	devx_obj = simple_malloc(sizeof(*devx_obj));
349 	if (!devx_obj) {
350 		DR_LOG(ERR, "Failed to allocate memory for STC object");
351 		rte_errno = ENOMEM;
352 		return NULL;
353 	}
354 
355 	attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
356 	MLX5_SET(general_obj_in_cmd_hdr,
357 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
358 	MLX5_SET(general_obj_in_cmd_hdr,
359 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STC);
360 	MLX5_SET(general_obj_in_cmd_hdr,
361 		 attr, log_obj_range, stc_attr->log_obj_range);
362 
363 	attr = MLX5_ADDR_OF(create_stc_in, in, stc);
364 	MLX5_SET(stc, attr, table_type, stc_attr->table_type);
365 
366 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
367 	if (!devx_obj->obj) {
368 		DR_LOG(ERR, "Failed to create STC");
369 		simple_free(devx_obj);
370 		rte_errno = errno;
371 		return NULL;
372 	}
373 
374 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
375 
376 	return devx_obj;
377 }
378 
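/* Translate the action-specific part of stc_attr into the STC parameter
 * layout expected by the firmware for the given action type.
 */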
379 static int
380 mlx5dr_cmd_stc_modify_set_stc_param(struct mlx5dr_cmd_stc_modify_attr *stc_attr,
381 				    void *stc_parm)
382 {
383 	switch (stc_attr->action_type) {
384 	case MLX5_IFC_STC_ACTION_TYPE_COUNTER:
385 		MLX5_SET(stc_ste_param_flow_counter, stc_parm, flow_counter_id, stc_attr->id);
386 		break;
387 	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR:
388 		MLX5_SET(stc_ste_param_tir, stc_parm, tirn, stc_attr->dest_tir_num);
389 		break;
390 	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT:
391 		MLX5_SET(stc_ste_param_table, stc_parm, table_id, stc_attr->dest_table_id);
392 		break;
393 	case MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST:
394 		MLX5_SET(stc_ste_param_header_modify_list, stc_parm,
395 			 header_modify_pattern_id, stc_attr->modify_header.pattern_id);
396 		MLX5_SET(stc_ste_param_header_modify_list, stc_parm,
397 			 header_modify_argument_id, stc_attr->modify_header.arg_id);
398 		break;
399 	case MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE:
400 		MLX5_SET(stc_ste_param_remove, stc_parm, action_type,
401 			 MLX5_MODIFICATION_TYPE_REMOVE);
402 		MLX5_SET(stc_ste_param_remove, stc_parm, decap,
403 			 stc_attr->remove_header.decap);
404 		MLX5_SET(stc_ste_param_remove, stc_parm, remove_start_anchor,
405 			 stc_attr->remove_header.start_anchor);
406 		MLX5_SET(stc_ste_param_remove, stc_parm, remove_end_anchor,
407 			 stc_attr->remove_header.end_anchor);
408 		break;
409 	case MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT:
410 		MLX5_SET(stc_ste_param_insert, stc_parm, action_type,
411 			 MLX5_MODIFICATION_TYPE_INSERT);
412 		MLX5_SET(stc_ste_param_insert, stc_parm, encap,
413 			 stc_attr->insert_header.encap);
414 		MLX5_SET(stc_ste_param_insert, stc_parm, inline_data,
415 			 stc_attr->insert_header.is_inline);
416 		MLX5_SET(stc_ste_param_insert, stc_parm, insert_anchor,
417 			 stc_attr->insert_header.insert_anchor);
418 		/* HW expects insert_size and insert_offset in 2-byte words, hence the division by 2 */
419 		MLX5_SET(stc_ste_param_insert, stc_parm, insert_size,
420 			 stc_attr->insert_header.header_size / 2);
421 		MLX5_SET(stc_ste_param_insert, stc_parm, insert_offset,
422 			 stc_attr->insert_header.insert_offset / 2);
423 		MLX5_SET(stc_ste_param_insert, stc_parm, insert_argument,
424 			 stc_attr->insert_header.arg_id);
425 		break;
426 	case MLX5_IFC_STC_ACTION_TYPE_COPY:
427 	case MLX5_IFC_STC_ACTION_TYPE_SET:
428 	case MLX5_IFC_STC_ACTION_TYPE_ADD:
429 		*(__be64 *)stc_parm = stc_attr->modify_action.data;
430 		break;
431 	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT:
432 	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK:
433 		MLX5_SET(stc_ste_param_vport, stc_parm, vport_number,
434 			 stc_attr->vport.vport_num);
435 		MLX5_SET(stc_ste_param_vport, stc_parm, eswitch_owner_vhca_id,
436 			 stc_attr->vport.esw_owner_vhca_id);
437 		MLX5_SET(stc_ste_param_vport, stc_parm, eswitch_owner_vhca_id_valid, 1);
438 		break;
439 	case MLX5_IFC_STC_ACTION_TYPE_DROP:
440 	case MLX5_IFC_STC_ACTION_TYPE_NOP:
441 	case MLX5_IFC_STC_ACTION_TYPE_TAG:
442 	case MLX5_IFC_STC_ACTION_TYPE_ALLOW:
443 		break;
444 	case MLX5_IFC_STC_ACTION_TYPE_ASO:
445 		MLX5_SET(stc_ste_param_execute_aso, stc_parm, aso_object_id,
446 			 stc_attr->aso.devx_obj_id);
447 		MLX5_SET(stc_ste_param_execute_aso, stc_parm, return_reg_id,
448 			 stc_attr->aso.return_reg_id);
449 		MLX5_SET(stc_ste_param_execute_aso, stc_parm, aso_type,
450 			 stc_attr->aso.aso_type);
451 		break;
452 	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE:
453 		MLX5_SET(stc_ste_param_ste_table, stc_parm, ste_obj_id,
454 			 stc_attr->ste_table.ste_obj_id);
455 		MLX5_SET(stc_ste_param_ste_table, stc_parm, match_definer_id,
456 			 stc_attr->ste_table.match_definer_id);
457 		MLX5_SET(stc_ste_param_ste_table, stc_parm, log_hash_size,
458 			 stc_attr->ste_table.log_hash_size);
459 		break;
460 	case MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS:
461 		MLX5_SET(stc_ste_param_remove_words, stc_parm, action_type,
462 			 MLX5_MODIFICATION_TYPE_REMOVE_WORDS);
463 		MLX5_SET(stc_ste_param_remove_words, stc_parm, remove_start_anchor,
464 			 stc_attr->remove_words.start_anchor);
465 		MLX5_SET(stc_ste_param_remove_words, stc_parm,
466 			 remove_size, stc_attr->remove_words.num_of_words);
467 		break;
468 	default:
469 		DR_LOG(ERR, "Not supported type %d", stc_attr->action_type);
470 		rte_errno = EINVAL;
471 		return rte_errno;
472 	}
473 	return 0;
474 }
475 
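/* Program one STC entry (object devx_obj->id at stc_attr->stc_offset)
 * with the requested action type, action offset and action parameters.
 */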
476 int
477 mlx5dr_cmd_stc_modify(struct mlx5dr_devx_obj *devx_obj,
478 		      struct mlx5dr_cmd_stc_modify_attr *stc_attr)
479 {
480 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
481 	uint32_t in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
482 	void *stc_parm;
483 	void *attr;
484 	int ret;
485 
486 	attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
487 	MLX5_SET(general_obj_in_cmd_hdr,
488 		 attr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
489 	MLX5_SET(general_obj_in_cmd_hdr,
490 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STC);
491 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, devx_obj->id);
492 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_offset, stc_attr->stc_offset);
493 
494 	attr = MLX5_ADDR_OF(create_stc_in, in, stc);
495 	MLX5_SET(stc, attr, ste_action_offset, stc_attr->action_offset);
496 	MLX5_SET(stc, attr, action_type, stc_attr->action_type);
497 	MLX5_SET64(stc, attr, modify_field_select,
498 		   MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC);
499 
500 	/* Set destination TIRN, TAG, FT ID, STE ID */
501 	stc_parm = MLX5_ADDR_OF(stc, attr, stc_param);
502 	ret = mlx5dr_cmd_stc_modify_set_stc_param(stc_attr, stc_parm);
503 	if (ret)
504 		return ret;
505 
506 	ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out));
507 	if (ret) {
508 		DR_LOG(ERR, "Failed to modify STC FW action_type %d", stc_attr->action_type);
509 		rte_errno = errno;
510 	}
511 
512 	return ret;
513 }
514 
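/* Create a modify-header argument (ARG) general object: a range of
 * 2^log_obj_range arguments accessed through the given PD.
 */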
515 struct mlx5dr_devx_obj *
516 mlx5dr_cmd_arg_create(struct ibv_context *ctx,
517 		      uint16_t log_obj_range,
518 		      uint32_t pd)
519 {
520 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
521 	uint32_t in[MLX5_ST_SZ_DW(create_arg_in)] = {0};
522 	struct mlx5dr_devx_obj *devx_obj;
523 	void *attr;
524 
525 	devx_obj = simple_malloc(sizeof(*devx_obj));
526 	if (!devx_obj) {
527 		DR_LOG(ERR, "Failed to allocate memory for ARG object");
528 		rte_errno = ENOMEM;
529 		return NULL;
530 	}
531 
532 	attr = MLX5_ADDR_OF(create_arg_in, in, hdr);
533 	MLX5_SET(general_obj_in_cmd_hdr,
534 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
535 	MLX5_SET(general_obj_in_cmd_hdr,
536 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_ARG);
537 	MLX5_SET(general_obj_in_cmd_hdr,
538 		 attr, log_obj_range, log_obj_range);
539 
540 	attr = MLX5_ADDR_OF(create_arg_in, in, arg);
541 	MLX5_SET(arg, attr, access_pd, pd);
542 
543 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
544 	if (!devx_obj->obj) {
545 		DR_LOG(ERR, "Failed to create ARG");
546 		simple_free(devx_obj);
547 		rte_errno = errno;
548 		return NULL;
549 	}
550 
551 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
552 
553 	return devx_obj;
554 }
555 
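/* Create a modify-header pattern object from a list of modify actions.
 * The immediate data of non-copy actions is cleared, since the pattern
 * only carries the action controls; the per-rule data is expected to be
 * supplied separately (through an ARG object).
 */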
556 struct mlx5dr_devx_obj *
557 mlx5dr_cmd_header_modify_pattern_create(struct ibv_context *ctx,
558 					uint32_t pattern_length,
559 					uint8_t *actions)
560 {
561 	uint32_t in[MLX5_ST_SZ_DW(create_header_modify_pattern_in)] = {0};
562 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
563 	struct mlx5dr_devx_obj *devx_obj;
564 	uint64_t *pattern_data;
565 	int num_of_actions;
566 	void *pattern;
567 	void *attr;
568 	int i;
569 
570 	if (pattern_length > MAX_ACTIONS_DATA_IN_HEADER_MODIFY) {
571 		DR_LOG(ERR, "Pattern length %d exceeds limit %d",
572 			pattern_length, MAX_ACTIONS_DATA_IN_HEADER_MODIFY);
573 		rte_errno = EINVAL;
574 		return NULL;
575 	}
576 
577 	devx_obj = simple_malloc(sizeof(*devx_obj));
578 	if (!devx_obj) {
579 		DR_LOG(ERR, "Failed to allocate memory for header_modify_pattern object");
580 		rte_errno = ENOMEM;
581 		return NULL;
582 	}
583 
584 	attr = MLX5_ADDR_OF(create_header_modify_pattern_in, in, hdr);
585 	MLX5_SET(general_obj_in_cmd_hdr,
586 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
587 	MLX5_SET(general_obj_in_cmd_hdr,
588 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_MODIFY_HEADER_PATTERN);
589 
590 	pattern = MLX5_ADDR_OF(create_header_modify_pattern_in, in, pattern);
591 	/* The PRM pattern_length field is in ddwords (8 bytes); the input length is in bytes */
592 	MLX5_SET(header_modify_pattern_in, pattern, pattern_length, pattern_length / (2 * DW_SIZE));
593 
594 	pattern_data = (uint64_t *)MLX5_ADDR_OF(header_modify_pattern_in, pattern, pattern_data);
595 	memcpy(pattern_data, actions, pattern_length);
596 
597 	num_of_actions = pattern_length / MLX5DR_MODIFY_ACTION_SIZE;
598 	for (i = 0; i < num_of_actions; i++) {
599 		int type;
600 
601 		type = MLX5_GET(set_action_in, &pattern_data[i], action_type);
602 		if (type != MLX5_MODIFICATION_TYPE_COPY)
603 			/* A copy action uses all bytes for control, so don't clear its data */
604 			MLX5_SET(set_action_in, &pattern_data[i], data, 0);
605 	}
606 
607 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
608 	if (!devx_obj->obj) {
609 		DR_LOG(ERR, "Failed to create header_modify_pattern");
610 		rte_errno = errno;
611 		goto free_obj;
612 	}
613 
614 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
615 
616 	return devx_obj;
617 
618 free_obj:
619 	simple_free(devx_obj);
620 	return NULL;
621 }
622 
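/* Create a range of 2^log_obj_range STEs for the given table type. */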
623 struct mlx5dr_devx_obj *
624 mlx5dr_cmd_ste_create(struct ibv_context *ctx,
625 		      struct mlx5dr_cmd_ste_create_attr *ste_attr)
626 {
627 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
628 	uint32_t in[MLX5_ST_SZ_DW(create_ste_in)] = {0};
629 	struct mlx5dr_devx_obj *devx_obj;
630 	void *attr;
631 
632 	devx_obj = simple_malloc(sizeof(*devx_obj));
633 	if (!devx_obj) {
634 		DR_LOG(ERR, "Failed to allocate memory for STE object");
635 		rte_errno = ENOMEM;
636 		return NULL;
637 	}
638 
639 	attr = MLX5_ADDR_OF(create_ste_in, in, hdr);
640 	MLX5_SET(general_obj_in_cmd_hdr,
641 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
642 	MLX5_SET(general_obj_in_cmd_hdr,
643 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STE);
644 	MLX5_SET(general_obj_in_cmd_hdr,
645 		 attr, log_obj_range, ste_attr->log_obj_range);
646 
647 	attr = MLX5_ADDR_OF(create_ste_in, in, ste);
648 	MLX5_SET(ste, attr, table_type, ste_attr->table_type);
649 
650 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
651 	if (!devx_obj->obj) {
652 		DR_LOG(ERR, "Failed to create STE");
653 		simple_free(devx_obj);
654 		rte_errno = errno;
655 		return NULL;
656 	}
657 
658 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
659 
660 	return devx_obj;
661 }
662 
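/* Create a match definer in SELECT format: the DW and byte selectors
 * choose which packet fields are matched and match_mask masks them.
 */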
663 struct mlx5dr_devx_obj *
664 mlx5dr_cmd_definer_create(struct ibv_context *ctx,
665 			  struct mlx5dr_cmd_definer_create_attr *def_attr)
666 {
667 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
668 	uint32_t in[MLX5_ST_SZ_DW(create_definer_in)] = {0};
669 	struct mlx5dr_devx_obj *devx_obj;
670 	void *ptr;
671 
672 	devx_obj = simple_malloc(sizeof(*devx_obj));
673 	if (!devx_obj) {
674 		DR_LOG(ERR, "Failed to allocate memory for definer object");
675 		rte_errno = ENOMEM;
676 		return NULL;
677 	}
678 
679 	MLX5_SET(general_obj_in_cmd_hdr,
680 		 in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
681 	MLX5_SET(general_obj_in_cmd_hdr,
682 		 in, obj_type, MLX5_GENERAL_OBJ_TYPE_DEFINER);
683 
684 	ptr = MLX5_ADDR_OF(create_definer_in, in, definer);
685 	MLX5_SET(definer, ptr, format_id, MLX5_IFC_DEFINER_FORMAT_ID_SELECT);
686 
687 	MLX5_SET(definer, ptr, format_select_dw0, def_attr->dw_selector[0]);
688 	MLX5_SET(definer, ptr, format_select_dw1, def_attr->dw_selector[1]);
689 	MLX5_SET(definer, ptr, format_select_dw2, def_attr->dw_selector[2]);
690 	MLX5_SET(definer, ptr, format_select_dw3, def_attr->dw_selector[3]);
691 	MLX5_SET(definer, ptr, format_select_dw4, def_attr->dw_selector[4]);
692 	MLX5_SET(definer, ptr, format_select_dw5, def_attr->dw_selector[5]);
693 	MLX5_SET(definer, ptr, format_select_dw6, def_attr->dw_selector[6]);
694 	MLX5_SET(definer, ptr, format_select_dw7, def_attr->dw_selector[7]);
695 	MLX5_SET(definer, ptr, format_select_dw8, def_attr->dw_selector[8]);
696 
697 	MLX5_SET(definer, ptr, format_select_byte0, def_attr->byte_selector[0]);
698 	MLX5_SET(definer, ptr, format_select_byte1, def_attr->byte_selector[1]);
699 	MLX5_SET(definer, ptr, format_select_byte2, def_attr->byte_selector[2]);
700 	MLX5_SET(definer, ptr, format_select_byte3, def_attr->byte_selector[3]);
701 	MLX5_SET(definer, ptr, format_select_byte4, def_attr->byte_selector[4]);
702 	MLX5_SET(definer, ptr, format_select_byte5, def_attr->byte_selector[5]);
703 	MLX5_SET(definer, ptr, format_select_byte6, def_attr->byte_selector[6]);
704 	MLX5_SET(definer, ptr, format_select_byte7, def_attr->byte_selector[7]);
705 
706 	ptr = MLX5_ADDR_OF(definer, ptr, match_mask);
707 	memcpy(ptr, def_attr->match_mask, MLX5_FLD_SZ_BYTES(definer, match_mask));
708 
709 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
710 	if (!devx_obj->obj) {
711 		DR_LOG(ERR, "Failed to create Definer");
712 		simple_free(devx_obj);
713 		rte_errno = errno;
714 		return NULL;
715 	}
716 
717 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
718 
719 	return devx_obj;
720 }
721 
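/* Create a non-wire SQ used to post WQEs; the CQ, PD, UAR page,
 * doorbell and WQ buffer umems are all supplied by the caller.
 */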
722 struct mlx5dr_devx_obj *
723 mlx5dr_cmd_sq_create(struct ibv_context *ctx,
724 		     struct mlx5dr_cmd_sq_create_attr *attr)
725 {
726 	uint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
727 	uint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
728 	void *sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
729 	void *wqc = MLX5_ADDR_OF(sqc, sqc, wq);
730 	struct mlx5dr_devx_obj *devx_obj;
731 
732 	devx_obj = simple_malloc(sizeof(*devx_obj));
733 	if (!devx_obj) {
734 		DR_LOG(ERR, "Failed to allocate memory for SQ object");
735 		rte_errno = ENOMEM;
736 		return NULL;
737 	}
738 
739 	MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
740 	MLX5_SET(sqc, sqc, cqn, attr->cqn);
741 	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
742 	MLX5_SET(sqc, sqc, non_wire, 1);
743 	MLX5_SET(sqc, sqc, ts_format, attr->ts_format);
744 	MLX5_SET(wq, wqc, wq_type, MLX5_WQ_TYPE_CYCLIC);
745 	MLX5_SET(wq, wqc, pd, attr->pdn);
746 	MLX5_SET(wq, wqc, uar_page, attr->page_id);
747 	MLX5_SET(wq, wqc, log_wq_stride, log2above(MLX5_SEND_WQE_BB));
748 	MLX5_SET(wq, wqc, log_wq_sz, attr->log_wq_sz);
749 	MLX5_SET(wq, wqc, dbr_umem_id, attr->dbr_id);
750 	MLX5_SET(wq, wqc, wq_umem_id, attr->wq_id);
751 
752 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
753 	if (!devx_obj->obj) {
754 		simple_free(devx_obj);
755 		rte_errno = errno;
756 		return NULL;
757 	}
758 
759 	devx_obj->id = MLX5_GET(create_sq_out, out, sqn);
760 
761 	return devx_obj;
762 }
763 
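/* Move an SQ from RST to RDY state so WQEs can be posted to it. */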
764 int mlx5dr_cmd_sq_modify_rdy(struct mlx5dr_devx_obj *devx_obj)
765 {
766 	uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
767 	uint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
768 	void *sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
769 	int ret;
770 
771 	MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
772 	MLX5_SET(modify_sq_in, in, sqn, devx_obj->id);
773 	MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
774 	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
775 
776 	ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out));
777 	if (ret) {
778 		DR_LOG(ERR, "Failed to modify SQ");
779 		rte_errno = errno;
780 	}
781 
782 	return ret;
783 }
784 
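/* Allow another VHCA to access the given object, protected by an access
 * key; the accessing side then creates an alias object for it (see
 * mlx5dr_cmd_alias_obj_create() below).
 */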
785 int mlx5dr_cmd_allow_other_vhca_access(struct ibv_context *ctx,
786 				       struct mlx5dr_cmd_allow_other_vhca_access_attr *attr)
787 {
788 	uint32_t out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {0};
789 	uint32_t in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {0};
790 	void *key;
791 	int ret;
792 
793 	MLX5_SET(allow_other_vhca_access_in,
794 		 in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
795 	MLX5_SET(allow_other_vhca_access_in,
796 		 in, object_type_to_be_accessed, attr->obj_type);
797 	MLX5_SET(allow_other_vhca_access_in,
798 		 in, object_id_to_be_accessed, attr->obj_id);
799 
800 	key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
801 	memcpy(key, attr->access_key, sizeof(attr->access_key));
802 
803 	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
804 	if (ret) {
805 		DR_LOG(ERR, "Failed to execute ALLOW_OTHER_VHCA_ACCESS command");
806 		rte_errno = errno;
807 		return rte_errno;
808 	}
809 
810 	return 0;
811 }
812 
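/* Create an alias object referencing an object owned by another VHCA,
 * identified by vhca_id/obj_id and authenticated with the access key.
 */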
813 struct mlx5dr_devx_obj *
814 mlx5dr_cmd_alias_obj_create(struct ibv_context *ctx,
815 			    struct mlx5dr_cmd_alias_obj_create_attr *alias_attr)
816 {
817 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
818 	uint32_t in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {0};
819 	struct mlx5dr_devx_obj *devx_obj;
820 	void *attr;
821 	void *key;
822 
823 	devx_obj = simple_malloc(sizeof(*devx_obj));
824 	if (!devx_obj) {
825 		DR_LOG(ERR, "Failed to allocate memory for ALIAS general object");
826 		rte_errno = ENOMEM;
827 		return NULL;
828 	}
829 
830 	attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
831 	MLX5_SET(general_obj_in_cmd_hdr,
832 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
833 	MLX5_SET(general_obj_in_cmd_hdr,
834 		 attr, obj_type, alias_attr->obj_type);
835 	MLX5_SET(general_obj_in_cmd_hdr, attr, alias_object, 1);
836 
837 	attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
838 	MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
839 	MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);
840 
841 	key = MLX5_ADDR_OF(alias_context, attr, access_key);
842 	memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));
843 
844 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
845 	if (!devx_obj->obj) {
846 		DR_LOG(ERR, "Failed to create ALIAS OBJ");
847 		simple_free(devx_obj);
848 		rte_errno = errno;
849 		return NULL;
850 	}
851 
852 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
853 
854 	return devx_obj;
855 }
856 
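/* Ask the firmware to execute a single GTA WQE built from attr (WQE
 * control, GTA control and data segments) and copy the resulting CQE
 * into ret_cqe so the caller can inspect the completion.
 */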
857 int mlx5dr_cmd_generate_wqe(struct ibv_context *ctx,
858 			    struct mlx5dr_cmd_generate_wqe_attr *attr,
859 			    struct mlx5_cqe64 *ret_cqe)
860 {
861 	uint32_t out[MLX5_ST_SZ_DW(generate_wqe_out)] = {0};
862 	uint32_t in[MLX5_ST_SZ_DW(generate_wqe_in)] = {0};
863 	uint8_t status;
864 	void *ptr;
865 	int ret;
866 
867 	MLX5_SET(generate_wqe_in, in, opcode, MLX5_CMD_OP_GENERATE_WQE);
868 	MLX5_SET(generate_wqe_in, in, pdn, attr->pdn);
869 
870 	ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_ctrl);
871 	memcpy(ptr, attr->wqe_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_ctrl));
872 
873 	ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_ctrl);
874 	memcpy(ptr, attr->gta_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_ctrl));
875 
876 	ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_0);
877 	memcpy(ptr, attr->gta_data_0, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_0));
878 
879 	if (attr->gta_data_1) {
880 		ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_1);
881 		memcpy(ptr, attr->gta_data_1, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_1));
882 	}
883 
884 	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
885 	if (ret) {
886 		DR_LOG(ERR, "Failed to write GTA WQE using FW");
887 		rte_errno = errno;
888 		return rte_errno;
889 	}
890 
891 	status = MLX5_GET(generate_wqe_out, out, status);
892 	if (status) {
893 		DR_LOG(ERR, "Invalid FW CQE status %d", status);
894 		rte_errno = EINVAL;
895 		return rte_errno;
896 	}
897 
898 	ptr = MLX5_ADDR_OF(generate_wqe_out, out, cqe_data);
899 	memcpy(ret_cqe, ptr, sizeof(*ret_cqe));
900 
901 	return 0;
902 }
903 
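/* Query the HCA capability sections used by HWS (general device, cap2,
 * NIC flow table and, when applicable, WQE-based flow table and E-Switch
 * caps), plus the device FW version and wire port metadata, and cache
 * the results in caps.
 */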
904 int mlx5dr_cmd_query_caps(struct ibv_context *ctx,
905 			  struct mlx5dr_cmd_query_caps *caps)
906 {
907 	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
908 	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
909 	const struct flow_hw_port_info *port_info;
910 	struct ibv_device_attr_ex attr_ex;
911 	uint32_t res;
912 	int ret;
913 
914 	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
915 	MLX5_SET(query_hca_cap_in, in, op_mod,
916 		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
917 		 MLX5_HCA_CAP_OPMOD_GET_CUR);
918 
919 	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
920 	if (ret) {
921 		DR_LOG(ERR, "Failed to query device caps");
922 		rte_errno = errno;
923 		return rte_errno;
924 	}
925 
926 	caps->wqe_based_update =
927 		MLX5_GET(query_hca_cap_out, out,
928 			 capability.cmd_hca_cap.wqe_based_flow_table_update_cap);
929 
930 	caps->eswitch_manager = MLX5_GET(query_hca_cap_out, out,
931 					 capability.cmd_hca_cap.eswitch_manager);
932 
933 	caps->flex_protocols = MLX5_GET(query_hca_cap_out, out,
934 					capability.cmd_hca_cap.flex_parser_protocols);
935 
936 	caps->log_header_modify_argument_granularity =
937 		MLX5_GET(query_hca_cap_out, out,
938 			 capability.cmd_hca_cap.log_header_modify_argument_granularity);
939 
940 	caps->log_header_modify_argument_granularity -=
941 			MLX5_GET(query_hca_cap_out, out,
942 				 capability.cmd_hca_cap.
943 				 log_header_modify_argument_granularity_offset);
944 
945 	caps->log_header_modify_argument_max_alloc =
946 		MLX5_GET(query_hca_cap_out, out,
947 			 capability.cmd_hca_cap.log_header_modify_argument_max_alloc);
948 
949 	caps->definer_format_sup =
950 		MLX5_GET64(query_hca_cap_out, out,
951 			   capability.cmd_hca_cap.match_definer_format_supported);
952 
953 	caps->vhca_id = MLX5_GET(query_hca_cap_out, out,
954 				 capability.cmd_hca_cap.vhca_id);
955 
956 	caps->sq_ts_format = MLX5_GET(query_hca_cap_out, out,
957 				      capability.cmd_hca_cap.sq_ts_format);
958 
959 	caps->ipsec_offload = MLX5_GET(query_hca_cap_out, out,
960 				      capability.cmd_hca_cap.ipsec_offload);
961 
962 	MLX5_SET(query_hca_cap_in, in, op_mod,
963 		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 |
964 		 MLX5_HCA_CAP_OPMOD_GET_CUR);
965 
966 	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
967 	if (ret) {
968 		DR_LOG(ERR, "Failed to query device caps");
969 		rte_errno = errno;
970 		return rte_errno;
971 	}
972 
973 	caps->full_dw_jumbo_support = MLX5_GET(query_hca_cap_out, out,
974 					       capability.cmd_hca_cap_2.
975 					       format_select_dw_8_6_ext);
976 
977 	caps->format_select_gtpu_dw_0 = MLX5_GET(query_hca_cap_out, out,
978 						 capability.cmd_hca_cap_2.
979 						 format_select_dw_gtpu_dw_0);
980 
981 	caps->format_select_gtpu_dw_1 = MLX5_GET(query_hca_cap_out, out,
982 						 capability.cmd_hca_cap_2.
983 						 format_select_dw_gtpu_dw_1);
984 
985 	caps->format_select_gtpu_dw_2 = MLX5_GET(query_hca_cap_out, out,
986 						 capability.cmd_hca_cap_2.
987 						 format_select_dw_gtpu_dw_2);
988 
989 	caps->format_select_gtpu_ext_dw_0 = MLX5_GET(query_hca_cap_out, out,
990 						     capability.cmd_hca_cap_2.
991 						     format_select_dw_gtpu_first_ext_dw_0);
992 
993 	caps->supp_type_gen_wqe = MLX5_GET(query_hca_cap_out, out,
994 					   capability.cmd_hca_cap_2.
995 					   generate_wqe_type);
996 
997 	/* check cross-VHCA support in cap2 */
998 	res =
999 	MLX5_GET(query_hca_cap_out, out,
1000 		capability.cmd_hca_cap_2.cross_vhca_object_to_object_supported);
1001 
1002 	caps->cross_vhca_resources = (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_STC_TO_TIR) &&
1003 				     (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_STC_TO_FT) &&
1004 				     (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_FT_TO_RTC);
1005 
1006 	res =
1007 	MLX5_GET(query_hca_cap_out, out,
1008 		capability.cmd_hca_cap_2.allowed_object_for_other_vhca_access);
1009 
1010 	caps->cross_vhca_resources &= (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_TIR) &&
1011 				      (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_FT) &&
1012 				      (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_RTC);
1013 
1014 	MLX5_SET(query_hca_cap_in, in, op_mod,
1015 		 MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE |
1016 		 MLX5_HCA_CAP_OPMOD_GET_CUR);
1017 
1018 	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
1019 	if (ret) {
1020 		DR_LOG(ERR, "Failed to query flow table caps");
1021 		rte_errno = errno;
1022 		return rte_errno;
1023 	}
1024 
1025 	caps->nic_ft.max_level = MLX5_GET(query_hca_cap_out, out,
1026 					  capability.flow_table_nic_cap.
1027 					  flow_table_properties_nic_receive.max_ft_level);
1028 
1029 	caps->nic_ft.reparse = MLX5_GET(query_hca_cap_out, out,
1030 					capability.flow_table_nic_cap.
1031 					flow_table_properties_nic_receive.reparse);
1032 
1033 	/* check cross-VHCA support in flow table properties */
1034 	res =
1035 	MLX5_GET(query_hca_cap_out, out,
1036 		capability.flow_table_nic_cap.flow_table_properties_nic_receive.cross_vhca_object);
1037 	caps->cross_vhca_resources &= res;
1038 
1039 	if (caps->wqe_based_update) {
1040 		MLX5_SET(query_hca_cap_in, in, op_mod,
1041 			 MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE |
1042 			 MLX5_HCA_CAP_OPMOD_GET_CUR);
1043 
1044 		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
1045 		if (ret) {
1046 			DR_LOG(ERR, "Failed to query WQE based FT caps");
1047 			rte_errno = errno;
1048 			return rte_errno;
1049 		}
1050 
1051 		caps->rtc_reparse_mode = MLX5_GET(query_hca_cap_out, out,
1052 						  capability.wqe_based_flow_table_cap.
1053 						  rtc_reparse_mode);
1054 
1055 		caps->ste_format = MLX5_GET(query_hca_cap_out, out,
1056 					    capability.wqe_based_flow_table_cap.
1057 					    ste_format);
1058 
1059 		caps->rtc_index_mode = MLX5_GET(query_hca_cap_out, out,
1060 						capability.wqe_based_flow_table_cap.
1061 						rtc_index_mode);
1062 
1063 		caps->rtc_log_depth_max = MLX5_GET(query_hca_cap_out, out,
1064 						   capability.wqe_based_flow_table_cap.
1065 						   rtc_log_depth_max);
1066 
1067 		caps->ste_alloc_log_max = MLX5_GET(query_hca_cap_out, out,
1068 						   capability.wqe_based_flow_table_cap.
1069 						   ste_alloc_log_max);
1070 
1071 		caps->ste_alloc_log_gran = MLX5_GET(query_hca_cap_out, out,
1072 						    capability.wqe_based_flow_table_cap.
1073 						    ste_alloc_log_granularity);
1074 
1075 		caps->trivial_match_definer = MLX5_GET(query_hca_cap_out, out,
1076 						       capability.wqe_based_flow_table_cap.
1077 						       trivial_match_definer);
1078 
1079 		caps->stc_alloc_log_max = MLX5_GET(query_hca_cap_out, out,
1080 						   capability.wqe_based_flow_table_cap.
1081 						   stc_alloc_log_max);
1082 
1083 		caps->stc_alloc_log_gran = MLX5_GET(query_hca_cap_out, out,
1084 						    capability.wqe_based_flow_table_cap.
1085 						    stc_alloc_log_granularity);
1086 
1087 		caps->rtc_hash_split_table = MLX5_GET(query_hca_cap_out, out,
1088 						      capability.wqe_based_flow_table_cap.
1089 						      rtc_hash_split_table);
1090 
1091 		caps->rtc_linear_lookup_table = MLX5_GET(query_hca_cap_out, out,
1092 							 capability.wqe_based_flow_table_cap.
1093 							 rtc_linear_lookup_table);
1094 
1095 		caps->access_index_mode = MLX5_GET(query_hca_cap_out, out,
1096 						   capability.wqe_based_flow_table_cap.
1097 						   access_index_mode);
1098 
1099 		caps->linear_match_definer = MLX5_GET(query_hca_cap_out, out,
1100 						      capability.wqe_based_flow_table_cap.
1101 						      linear_match_definer_reg_c3);
1102 
1103 		caps->rtc_max_hash_def_gen_wqe = MLX5_GET(query_hca_cap_out, out,
1104 							  capability.wqe_based_flow_table_cap.
1105 							  rtc_max_num_hash_definer_gen_wqe);
1106 
1107 		caps->supp_ste_format_gen_wqe = MLX5_GET(query_hca_cap_out, out,
1108 							 capability.wqe_based_flow_table_cap.
1109 							 ste_format_gen_wqe);
1110 	}
1111 
1112 	if (caps->eswitch_manager) {
1113 		MLX5_SET(query_hca_cap_in, in, op_mod,
1114 			 MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE |
1115 			 MLX5_HCA_CAP_OPMOD_GET_CUR);
1116 
1117 		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
1118 		if (ret) {
1119 			DR_LOG(ERR, "Failed to query flow table esw caps");
1120 			rte_errno = errno;
1121 			return rte_errno;
1122 		}
1123 
1124 		caps->fdb_ft.max_level = MLX5_GET(query_hca_cap_out, out,
1125 						  capability.flow_table_nic_cap.
1126 						  flow_table_properties_nic_receive.max_ft_level);
1127 
1128 		caps->fdb_ft.reparse = MLX5_GET(query_hca_cap_out, out,
1129 						capability.flow_table_nic_cap.
1130 						flow_table_properties_nic_receive.reparse);
1131 
1132 		MLX5_SET(query_hca_cap_in, in, op_mod,
1133 			 MLX5_SET_HCA_CAP_OP_MOD_ESW | MLX5_HCA_CAP_OPMOD_GET_CUR);
1134 
1135 		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
1136 		if (ret) {
1137 			DR_LOG(ERR, "Failed to query eswitch capabilities %d", ret);
1138 			rte_errno = errno;
1139 			return rte_errno;
1140 		}
1141 
1142 		if (MLX5_GET(query_hca_cap_out, out,
1143 			     capability.esw_cap.esw_manager_vport_number_valid))
1144 			caps->eswitch_manager_vport_number =
1145 			MLX5_GET(query_hca_cap_out, out,
1146 				 capability.esw_cap.esw_manager_vport_number);
1147 	}
1148 
1149 	ret = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
1150 	if (ret) {
1151 		DR_LOG(ERR, "Failed to query device attributes");
1152 		rte_errno = ret;
1153 		return rte_errno;
1154 	}
1155 
1156 	strlcpy(caps->fw_ver, attr_ex.orig_attr.fw_ver, sizeof(caps->fw_ver));
1157 
1158 	port_info = flow_hw_get_wire_port(ctx);
1159 	if (port_info) {
1160 		caps->wire_regc = port_info->regc_value;
1161 		caps->wire_regc_mask = port_info->regc_mask;
1162 	} else {
1163 		DR_LOG(INFO, "Failed to query wire port regc value");
1164 	}
1165 
1166 	return ret;
1167 }
1168 
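/* Query an IB port for its vport number, E-Switch owner VHCA ID and,
 * when available, the REG_C0 metadata tag and mask used for matching.
 */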
1169 int mlx5dr_cmd_query_ib_port(struct ibv_context *ctx,
1170 			     struct mlx5dr_cmd_query_vport_caps *vport_caps,
1171 			     uint32_t port_num)
1172 {
1173 	struct mlx5_port_info port_info = {0};
1174 	uint32_t flags;
1175 	int ret;
1176 
1177 	flags = MLX5_PORT_QUERY_VPORT | MLX5_PORT_QUERY_ESW_OWNER_VHCA_ID;
1178 
1179 	ret = mlx5_glue->devx_port_query(ctx, port_num, &port_info);
1180 	/* Check that the query succeeded and the vport is enabled */
1181 	if (ret || (port_info.query_flags & flags) != flags) {
1182 		rte_errno = ENOTSUP;
1183 		return rte_errno;
1184 	}
1185 
1186 	vport_caps->vport_num = port_info.vport_id;
1187 	vport_caps->esw_owner_vhca_id = port_info.esw_owner_vhca_id;
1188 
1189 	if (port_info.query_flags & MLX5_PORT_QUERY_REG_C0) {
1190 		vport_caps->metadata_c = port_info.vport_meta_tag;
1191 		vport_caps->metadata_c_mask = port_info.vport_meta_mask;
1192 	}
1193 
1194 	return 0;
1195 }
1196