xref: /dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c (revision e4f0e2158b8e210065e91f45fd83aee118cbbd96)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2022 NVIDIA Corporation & Affiliates
3  */
4 
5 #include "mlx5dr_internal.h"
6 
7 static uint32_t mlx5dr_cmd_get_syndrome(uint32_t *out)
8 {
9 	/* Assumption: syndrome is always the second u32 */
10 	return be32toh(out[1]);
11 }
12 
13 int mlx5dr_cmd_destroy_obj(struct mlx5dr_devx_obj *devx_obj)
14 {
15 	int ret;
16 
17 	ret = mlx5_glue->devx_obj_destroy(devx_obj->obj);
18 	simple_free(devx_obj);
19 
20 	return ret;
21 }
22 
23 struct mlx5dr_devx_obj *
24 mlx5dr_cmd_flow_table_create(struct ibv_context *ctx,
25 			     struct mlx5dr_cmd_ft_create_attr *ft_attr)
26 {
27 	uint32_t out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
28 	uint32_t in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
29 	struct mlx5dr_devx_obj *devx_obj;
30 	void *ft_ctx;
31 
32 	devx_obj = simple_malloc(sizeof(*devx_obj));
33 	if (!devx_obj) {
34 		DR_LOG(ERR, "Failed to allocate memory for flow table object");
35 		rte_errno = ENOMEM;
36 		return NULL;
37 	}
38 
39 	MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
40 	MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type);
41 
42 	ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
43 	MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
44 	MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid);
45 
46 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
47 	if (!devx_obj->obj) {
48 		DR_LOG(ERR, "Failed to create FT (syndrome: %#x)",
49 		       mlx5dr_cmd_get_syndrome(out));
50 		simple_free(devx_obj);
51 		rte_errno = errno;
52 		return NULL;
53 	}
54 
55 	devx_obj->id = MLX5_GET(create_flow_table_out, out, table_id);
56 
57 	return devx_obj;
58 }
59 
60 int
61 mlx5dr_cmd_flow_table_modify(struct mlx5dr_devx_obj *devx_obj,
62 			     struct mlx5dr_cmd_ft_modify_attr *ft_attr)
63 {
64 	uint32_t out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
65 	uint32_t in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
66 	void *ft_ctx;
67 	int ret;
68 
69 	MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE);
70 	MLX5_SET(modify_flow_table_in, in, table_type, ft_attr->type);
71 	MLX5_SET(modify_flow_table_in, in, modify_field_select, ft_attr->modify_fs);
72 	MLX5_SET(modify_flow_table_in, in, table_id, devx_obj->id);
73 
74 	ft_ctx = MLX5_ADDR_OF(modify_flow_table_in, in, flow_table_context);
75 
76 	MLX5_SET(flow_table_context, ft_ctx, table_miss_action, ft_attr->table_miss_action);
77 	MLX5_SET(flow_table_context, ft_ctx, table_miss_id, ft_attr->table_miss_id);
78 	MLX5_SET(flow_table_context, ft_ctx, rtc_id_0, ft_attr->rtc_id_0);
79 	MLX5_SET(flow_table_context, ft_ctx, rtc_id_1, ft_attr->rtc_id_1);
80 
81 	ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out));
82 	if (ret) {
83 		DR_LOG(ERR, "Failed to modify FT (syndrome: %#x)",
84 		       mlx5dr_cmd_get_syndrome(out));
85 		rte_errno = errno;
86 	}
87 
88 	return ret;
89 }
90 
91 int
92 mlx5dr_cmd_flow_table_query(struct mlx5dr_devx_obj *devx_obj,
93 			    struct mlx5dr_cmd_ft_query_attr *ft_attr,
94 			    uint64_t *icm_addr_0, uint64_t *icm_addr_1)
95 {
96 	uint32_t out[MLX5_ST_SZ_DW(query_flow_table_out)] = {0};
97 	uint32_t in[MLX5_ST_SZ_DW(query_flow_table_in)] = {0};
98 	void *ft_ctx;
99 	int ret;
100 
101 	MLX5_SET(query_flow_table_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_TABLE);
102 	MLX5_SET(query_flow_table_in, in, table_type, ft_attr->type);
103 	MLX5_SET(query_flow_table_in, in, table_id, devx_obj->id);
104 
105 	ret = mlx5_glue->devx_obj_query(devx_obj->obj, in, sizeof(in), out, sizeof(out));
106 	if (ret) {
107 		DR_LOG(ERR, "Failed to query FT (syndrome: %#x)",
108 		       mlx5dr_cmd_get_syndrome(out));
109 		rte_errno = errno;
110 		return ret;
111 	}
112 
113 	ft_ctx = MLX5_ADDR_OF(query_flow_table_out, out, flow_table_context);
114 	*icm_addr_0 = MLX5_GET64(flow_table_context, ft_ctx, sw_owner_icm_root_0);
115 	*icm_addr_1 = MLX5_GET64(flow_table_context, ft_ctx, sw_owner_icm_root_1);
116 
117 	return ret;
118 }
119 
120 static struct mlx5dr_devx_obj *
121 mlx5dr_cmd_flow_group_create(struct ibv_context *ctx,
122 			     struct mlx5dr_cmd_fg_attr *fg_attr)
123 {
124 	uint32_t out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
125 	uint32_t in[MLX5_ST_SZ_DW(create_flow_group_in)] = {0};
126 	struct mlx5dr_devx_obj *devx_obj;
127 
128 	devx_obj = simple_malloc(sizeof(*devx_obj));
129 	if (!devx_obj) {
130 		DR_LOG(ERR, "Failed to allocate memory for flow group object");
131 		rte_errno = ENOMEM;
132 		return NULL;
133 	}
134 
135 	MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
136 	MLX5_SET(create_flow_group_in, in, table_type, fg_attr->table_type);
137 	MLX5_SET(create_flow_group_in, in, table_id, fg_attr->table_id);
138 
139 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
140 	if (!devx_obj->obj) {
141 		DR_LOG(ERR, "Failed to create Flow group(syndrome: %#x)",
142 		       mlx5dr_cmd_get_syndrome(out));
143 		simple_free(devx_obj);
144 		rte_errno = errno;
145 		return NULL;
146 	}
147 
148 	devx_obj->id = MLX5_GET(create_flow_group_out, out, group_id);
149 
150 	return devx_obj;
151 }
152 
153 struct mlx5dr_devx_obj *
154 mlx5dr_cmd_set_fte(struct ibv_context *ctx,
155 		   uint32_t table_type,
156 		   uint32_t table_id,
157 		   uint32_t group_id,
158 		   struct mlx5dr_cmd_set_fte_attr *fte_attr)
159 {
160 	uint32_t in[MLX5_ST_SZ_DW(set_fte_in) + MLX5_ST_SZ_DW(dest_format)] = {0};
161 	uint32_t out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
162 	struct mlx5dr_devx_obj *devx_obj;
163 	void *in_flow_context;
164 	uint32_t action_flags;
165 	void *in_dests;
166 
167 	devx_obj = simple_malloc(sizeof(*devx_obj));
168 	if (!devx_obj) {
169 		DR_LOG(ERR, "Failed to allocate memory for fte object");
170 		rte_errno = ENOMEM;
171 		return NULL;
172 	}
173 
174 	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
175 	MLX5_SET(set_fte_in, in, table_type, table_type);
176 	MLX5_SET(set_fte_in, in, table_id, table_id);
177 
178 	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
179 	MLX5_SET(flow_context, in_flow_context, group_id, group_id);
180 	MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source);
181 
182 	action_flags = fte_attr->action_flags;
183 	MLX5_SET(flow_context, in_flow_context, action, action_flags);
184 
185 	if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
186 		/* Only destination_list_size of size 1 is supported */
187 		MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
188 		in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
189 		MLX5_SET(dest_format, in_dests, destination_type, fte_attr->destination_type);
190 		MLX5_SET(dest_format, in_dests, destination_id, fte_attr->destination_id);
191 	}
192 
193 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
194 	if (!devx_obj->obj) {
195 		DR_LOG(ERR, "Failed to create FTE (syndrome: %#x)",
196 		       mlx5dr_cmd_get_syndrome(out));
197 		rte_errno = errno;
198 		goto free_devx;
199 	}
200 
201 	return devx_obj;
202 
203 free_devx:
204 	simple_free(devx_obj);
205 	return NULL;
206 }
207 
208 struct mlx5dr_cmd_forward_tbl *
209 mlx5dr_cmd_forward_tbl_create(struct ibv_context *ctx,
210 			      struct mlx5dr_cmd_ft_create_attr *ft_attr,
211 			      struct mlx5dr_cmd_set_fte_attr *fte_attr)
212 {
213 	struct mlx5dr_cmd_fg_attr fg_attr = {0};
214 	struct mlx5dr_cmd_forward_tbl *tbl;
215 
216 	tbl = simple_calloc(1, sizeof(*tbl));
217 	if (!tbl) {
218 		DR_LOG(ERR, "Failed to allocate memory");
219 		rte_errno = ENOMEM;
220 		return NULL;
221 	}
222 
223 	tbl->ft = mlx5dr_cmd_flow_table_create(ctx, ft_attr);
224 	if (!tbl->ft) {
225 		DR_LOG(ERR, "Failed to create FT");
226 		goto free_tbl;
227 	}
228 
229 	fg_attr.table_id = tbl->ft->id;
230 	fg_attr.table_type = ft_attr->type;
231 
232 	tbl->fg = mlx5dr_cmd_flow_group_create(ctx, &fg_attr);
233 	if (!tbl->fg) {
234 		DR_LOG(ERR, "Failed to create FG");
235 		goto free_ft;
236 	}
237 
238 	tbl->fte = mlx5dr_cmd_set_fte(ctx, ft_attr->type, tbl->ft->id, tbl->fg->id, fte_attr);
239 	if (!tbl->fte) {
240 		DR_LOG(ERR, "Failed to create FTE");
241 		goto free_fg;
242 	}
243 	return tbl;
244 
245 free_fg:
246 	mlx5dr_cmd_destroy_obj(tbl->fg);
247 free_ft:
248 	mlx5dr_cmd_destroy_obj(tbl->ft);
249 free_tbl:
250 	simple_free(tbl);
251 	return NULL;
252 }
253 
/* Tear down a forwarding bundle created by mlx5dr_cmd_forward_tbl_create:
 * destroy the objects in reverse creation order (FTE, FG, FT) and free
 * the container itself.
 */
void mlx5dr_cmd_forward_tbl_destroy(struct mlx5dr_cmd_forward_tbl *tbl)
{
	mlx5dr_cmd_destroy_obj(tbl->fte);
	mlx5dr_cmd_destroy_obj(tbl->fg);
	mlx5dr_cmd_destroy_obj(tbl->ft);
	simple_free(tbl);
}
261 
/* Fill a flow-table modify attribute that points a table's miss path
 * at the context's default miss table (FDB) or, otherwise, at the
 * aliased end flow table of a shared-GVMI context.
 * Does nothing unless the table is FDB or shared GVMI is in use.
 */
void mlx5dr_cmd_set_attr_connect_miss_tbl(struct mlx5dr_context *ctx,
					  uint32_t fw_ft_type,
					  enum mlx5dr_table_type type,
					  struct mlx5dr_cmd_ft_modify_attr *ft_attr)
{
	struct mlx5dr_devx_obj *default_miss_tbl;

	if (type != MLX5DR_TABLE_TYPE_FDB && !mlx5dr_context_shared_gvmi_used(ctx))
		return;

	/* Ask FW to change only the miss action, redirecting to a table */
	ft_attr->modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
	ft_attr->type = fw_ft_type;
	ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;

	if (type == MLX5DR_TABLE_TYPE_FDB) {
		default_miss_tbl = ctx->common_res[type].default_miss->ft;
		if (!default_miss_tbl) {
			/* NOTE(review): presumably created during context init —
			 * reaching here indicates a setup-order bug; confirm.
			 */
			assert(false);
			return;
		}
		ft_attr->table_miss_id = default_miss_tbl->id;
	} else {
		/* Shared GVMI: miss goes to the aliased end flow table */
		ft_attr->table_miss_id = ctx->gvmi_res[type].aliased_end_ft->id;
	}
}
287 
288 struct mlx5dr_devx_obj *
289 mlx5dr_cmd_rtc_create(struct ibv_context *ctx,
290 		      struct mlx5dr_cmd_rtc_create_attr *rtc_attr)
291 {
292 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
293 	uint32_t in[MLX5_ST_SZ_DW(create_rtc_in)] = {0};
294 	struct mlx5dr_devx_obj *devx_obj;
295 	void *attr;
296 
297 	devx_obj = simple_malloc(sizeof(*devx_obj));
298 	if (!devx_obj) {
299 		DR_LOG(ERR, "Failed to allocate memory for RTC object");
300 		rte_errno = ENOMEM;
301 		return NULL;
302 	}
303 
304 	attr = MLX5_ADDR_OF(create_rtc_in, in, hdr);
305 	MLX5_SET(general_obj_in_cmd_hdr,
306 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
307 	MLX5_SET(general_obj_in_cmd_hdr,
308 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_RTC);
309 
310 	attr = MLX5_ADDR_OF(create_rtc_in, in, rtc);
311 	MLX5_SET(rtc, attr, ste_format_0, rtc_attr->is_frst_jumbo ?
312 		MLX5_IFC_RTC_STE_FORMAT_11DW :
313 		MLX5_IFC_RTC_STE_FORMAT_8DW);
314 
315 	if (rtc_attr->is_scnd_range) {
316 		MLX5_SET(rtc, attr, ste_format_1, MLX5_IFC_RTC_STE_FORMAT_RANGE);
317 		MLX5_SET(rtc, attr, num_match_ste, 2);
318 	}
319 
320 	MLX5_SET(rtc, attr, pd, rtc_attr->pd);
321 	MLX5_SET(rtc, attr, update_method, rtc_attr->fw_gen_wqe);
322 	MLX5_SET(rtc, attr, update_index_mode, rtc_attr->update_index_mode);
323 	MLX5_SET(rtc, attr, access_index_mode, rtc_attr->access_index_mode);
324 	MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer);
325 	MLX5_SET(rtc, attr, log_depth, rtc_attr->log_depth);
326 	MLX5_SET(rtc, attr, log_hash_size, rtc_attr->log_size);
327 	MLX5_SET(rtc, attr, table_type, rtc_attr->table_type);
328 	MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer);
329 	MLX5_SET(rtc, attr, match_definer_0, rtc_attr->match_definer_0);
330 	MLX5_SET(rtc, attr, match_definer_1, rtc_attr->match_definer_1);
331 	MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base);
332 	MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base);
333 	MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset);
334 	MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id);
335 	MLX5_SET(rtc, attr, reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS);
336 
337 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
338 	if (!devx_obj->obj) {
339 		DR_LOG(ERR, "Failed to create RTC (syndrome: %#x)",
340 		       mlx5dr_cmd_get_syndrome(out));
341 		simple_free(devx_obj);
342 		rte_errno = errno;
343 		return NULL;
344 	}
345 
346 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
347 
348 	return devx_obj;
349 }
350 
351 struct mlx5dr_devx_obj *
352 mlx5dr_cmd_stc_create(struct ibv_context *ctx,
353 		      struct mlx5dr_cmd_stc_create_attr *stc_attr)
354 {
355 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
356 	uint32_t in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
357 	struct mlx5dr_devx_obj *devx_obj;
358 	void *attr;
359 
360 	devx_obj = simple_malloc(sizeof(*devx_obj));
361 	if (!devx_obj) {
362 		DR_LOG(ERR, "Failed to allocate memory for STC object");
363 		rte_errno = ENOMEM;
364 		return NULL;
365 	}
366 
367 	attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
368 	MLX5_SET(general_obj_in_cmd_hdr,
369 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
370 	MLX5_SET(general_obj_in_cmd_hdr,
371 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STC);
372 	MLX5_SET(general_obj_in_cmd_hdr,
373 		 attr, log_obj_range, stc_attr->log_obj_range);
374 
375 	attr = MLX5_ADDR_OF(create_stc_in, in, stc);
376 	MLX5_SET(stc, attr, table_type, stc_attr->table_type);
377 
378 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
379 	if (!devx_obj->obj) {
380 		DR_LOG(ERR, "Failed to create STC (syndrome: %#x)",
381 		       mlx5dr_cmd_get_syndrome(out));
382 		simple_free(devx_obj);
383 		rte_errno = errno;
384 		return NULL;
385 	}
386 
387 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
388 
389 	return devx_obj;
390 }
391 
/* Fill the per-action-type STC parameter area (stc_param) from the
 * modify attributes. Each case writes the PRM layout that matches
 * stc_attr->action_type.
 * Returns 0 on success, or EINVAL (also stored in rte_errno) for an
 * unsupported action type.
 */
static int
mlx5dr_cmd_stc_modify_set_stc_param(struct mlx5dr_cmd_stc_modify_attr *stc_attr,
				    void *stc_parm)
{
	switch (stc_attr->action_type) {
	case MLX5_IFC_STC_ACTION_TYPE_COUNTER:
		MLX5_SET(stc_ste_param_flow_counter, stc_parm, flow_counter_id, stc_attr->id);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR:
		MLX5_SET(stc_ste_param_tir, stc_parm, tirn, stc_attr->dest_tir_num);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT:
		MLX5_SET(stc_ste_param_table, stc_parm, table_id, stc_attr->dest_table_id);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST:
		/* Pattern + argument pair identifying the modify-header list */
		MLX5_SET(stc_ste_param_header_modify_list, stc_parm,
			 header_modify_pattern_id, stc_attr->modify_header.pattern_id);
		MLX5_SET(stc_ste_param_header_modify_list, stc_parm,
			 header_modify_argument_id, stc_attr->modify_header.arg_id);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE:
		MLX5_SET(stc_ste_param_remove, stc_parm, action_type,
			 MLX5_MODIFICATION_TYPE_REMOVE);
		MLX5_SET(stc_ste_param_remove, stc_parm, decap,
			 stc_attr->remove_header.decap);
		MLX5_SET(stc_ste_param_remove, stc_parm, remove_start_anchor,
			 stc_attr->remove_header.start_anchor);
		MLX5_SET(stc_ste_param_remove, stc_parm, remove_end_anchor,
			 stc_attr->remove_header.end_anchor);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT:
		MLX5_SET(stc_ste_param_insert, stc_parm, action_type,
			 MLX5_MODIFICATION_TYPE_INSERT);
		MLX5_SET(stc_ste_param_insert, stc_parm, encap,
			 stc_attr->insert_header.encap);
		MLX5_SET(stc_ste_param_insert, stc_parm, inline_data,
			 stc_attr->insert_header.is_inline);
		MLX5_SET(stc_ste_param_insert, stc_parm, insert_anchor,
			 stc_attr->insert_header.insert_anchor);
		/* HW gets the next 2 sizes in words */
		MLX5_SET(stc_ste_param_insert, stc_parm, insert_size,
			 stc_attr->insert_header.header_size / 2);
		MLX5_SET(stc_ste_param_insert, stc_parm, insert_offset,
			 stc_attr->insert_header.insert_offset / 2);
		MLX5_SET(stc_ste_param_insert, stc_parm, insert_argument,
			 stc_attr->insert_header.arg_id);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_COPY:
	case MLX5_IFC_STC_ACTION_TYPE_SET:
	case MLX5_IFC_STC_ACTION_TYPE_ADD:
	case MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD:
		/* Single modify action: copy the pre-built 64-bit action as-is */
		*(__be64 *)stc_parm = stc_attr->modify_action.data;
		break;
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT:
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK:
		MLX5_SET(stc_ste_param_vport, stc_parm, vport_number,
			 stc_attr->vport.vport_num);
		MLX5_SET(stc_ste_param_vport, stc_parm, eswitch_owner_vhca_id,
			 stc_attr->vport.esw_owner_vhca_id);
		MLX5_SET(stc_ste_param_vport, stc_parm, eswitch_owner_vhca_id_valid, 1);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_DROP:
	case MLX5_IFC_STC_ACTION_TYPE_NOP:
	case MLX5_IFC_STC_ACTION_TYPE_TAG:
	case MLX5_IFC_STC_ACTION_TYPE_ALLOW:
		/* No parameters for these action types */
		break;
	case MLX5_IFC_STC_ACTION_TYPE_ASO:
		MLX5_SET(stc_ste_param_execute_aso, stc_parm, aso_object_id,
			 stc_attr->aso.devx_obj_id);
		MLX5_SET(stc_ste_param_execute_aso, stc_parm, return_reg_id,
			 stc_attr->aso.return_reg_id);
		MLX5_SET(stc_ste_param_execute_aso, stc_parm, aso_type,
			 stc_attr->aso.aso_type);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE:
		MLX5_SET(stc_ste_param_ste_table, stc_parm, ste_obj_id,
			 stc_attr->ste_table.ste_obj_id);
		MLX5_SET(stc_ste_param_ste_table, stc_parm, match_definer_id,
			 stc_attr->ste_table.match_definer_id);
		MLX5_SET(stc_ste_param_ste_table, stc_parm, log_hash_size,
			 stc_attr->ste_table.log_hash_size);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS:
		MLX5_SET(stc_ste_param_remove_words, stc_parm, action_type,
			 MLX5_MODIFICATION_TYPE_REMOVE_WORDS);
		MLX5_SET(stc_ste_param_remove_words, stc_parm, remove_start_anchor,
			 stc_attr->remove_words.start_anchor);
		MLX5_SET(stc_ste_param_remove_words, stc_parm,
			 remove_size, stc_attr->remove_words.num_of_words);
		break;
	default:
		DR_LOG(ERR, "Not supported type %d", stc_attr->action_type);
		rte_errno = EINVAL;
		return rte_errno;
	}
	return 0;
}
489 
490 int
491 mlx5dr_cmd_stc_modify(struct mlx5dr_devx_obj *devx_obj,
492 		      struct mlx5dr_cmd_stc_modify_attr *stc_attr)
493 {
494 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
495 	uint32_t in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
496 	void *stc_parm;
497 	void *attr;
498 	int ret;
499 
500 	attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
501 	MLX5_SET(general_obj_in_cmd_hdr,
502 		 attr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
503 	MLX5_SET(general_obj_in_cmd_hdr,
504 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STC);
505 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, devx_obj->id);
506 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_offset, stc_attr->stc_offset);
507 
508 	attr = MLX5_ADDR_OF(create_stc_in, in, stc);
509 	MLX5_SET(stc, attr, ste_action_offset, stc_attr->action_offset);
510 	MLX5_SET(stc, attr, action_type, stc_attr->action_type);
511 	MLX5_SET64(stc, attr, modify_field_select,
512 		   MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC);
513 
514 	/* Set destination TIRN, TAG, FT ID, STE ID */
515 	stc_parm = MLX5_ADDR_OF(stc, attr, stc_param);
516 	ret = mlx5dr_cmd_stc_modify_set_stc_param(stc_attr, stc_parm);
517 	if (ret)
518 		return ret;
519 
520 	ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out));
521 	if (ret) {
522 		DR_LOG(ERR, "Failed to modify STC FW action_type %d (syndrome: %#x)",
523 		       stc_attr->action_type, mlx5dr_cmd_get_syndrome(out));
524 		rte_errno = errno;
525 	}
526 
527 	return ret;
528 }
529 
530 struct mlx5dr_devx_obj *
531 mlx5dr_cmd_arg_create(struct ibv_context *ctx,
532 		      uint16_t log_obj_range,
533 		      uint32_t pd)
534 {
535 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
536 	uint32_t in[MLX5_ST_SZ_DW(create_arg_in)] = {0};
537 	struct mlx5dr_devx_obj *devx_obj;
538 	void *attr;
539 
540 	devx_obj = simple_malloc(sizeof(*devx_obj));
541 	if (!devx_obj) {
542 		DR_LOG(ERR, "Failed to allocate memory for ARG object");
543 		rte_errno = ENOMEM;
544 		return NULL;
545 	}
546 
547 	attr = MLX5_ADDR_OF(create_arg_in, in, hdr);
548 	MLX5_SET(general_obj_in_cmd_hdr,
549 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
550 	MLX5_SET(general_obj_in_cmd_hdr,
551 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_ARG);
552 	MLX5_SET(general_obj_in_cmd_hdr,
553 		 attr, log_obj_range, log_obj_range);
554 
555 	attr = MLX5_ADDR_OF(create_arg_in, in, arg);
556 	MLX5_SET(arg, attr, access_pd, pd);
557 
558 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
559 	if (!devx_obj->obj) {
560 		DR_LOG(ERR, "Failed to create ARG (syndrome: %#x)",
561 		       mlx5dr_cmd_get_syndrome(out));
562 		simple_free(devx_obj);
563 		rte_errno = errno;
564 		return NULL;
565 	}
566 
567 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
568 
569 	return devx_obj;
570 }
571 
/* Create a modify-header pattern general object from a raw actions blob.
 *
 * pattern_length is in bytes (validated against the per-object limit);
 * the FW field is expressed in ddwords, hence the / (2 * DW_SIZE).
 * The data field of each action is zeroed before handing the pattern to
 * FW, except for copy/add_field actions whose whole 8 bytes are control.
 *
 * Returns the wrapper with its id filled in, or NULL with rte_errno set.
 */
struct mlx5dr_devx_obj *
mlx5dr_cmd_header_modify_pattern_create(struct ibv_context *ctx,
					uint32_t pattern_length,
					uint8_t *actions)
{
	uint32_t in[MLX5_ST_SZ_DW(create_header_modify_pattern_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	struct mlx5dr_devx_obj *devx_obj;
	uint64_t *pattern_data;
	int num_of_actions;
	void *pattern;
	void *attr;
	int i;

	if (pattern_length > MAX_ACTIONS_DATA_IN_HEADER_MODIFY) {
		DR_LOG(ERR, "Pattern length %d exceeds limit %d",
			pattern_length, MAX_ACTIONS_DATA_IN_HEADER_MODIFY);
		rte_errno = EINVAL;
		return NULL;
	}

	devx_obj = simple_malloc(sizeof(*devx_obj));
	if (!devx_obj) {
		DR_LOG(ERR, "Failed to allocate memory for header_modify_pattern object");
		rte_errno = ENOMEM;
		return NULL;
	}
	attr = MLX5_ADDR_OF(create_header_modify_pattern_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_MODIFY_HEADER_PATTERN);

	pattern = MLX5_ADDR_OF(create_header_modify_pattern_in, in, pattern);
	/* Pattern_length is in ddwords */
	MLX5_SET(header_modify_pattern_in, pattern, pattern_length, pattern_length / (2 * DW_SIZE));

	pattern_data = (uint64_t *)MLX5_ADDR_OF(header_modify_pattern_in, pattern, pattern_data);
	memcpy(pattern_data, actions, pattern_length);

	/* A pattern carries only control bits; strip per-action data */
	num_of_actions = pattern_length / MLX5DR_MODIFY_ACTION_SIZE;
	for (i = 0; i < num_of_actions; i++) {
		int type;

		type = MLX5_GET(set_action_in, &pattern_data[i], action_type);
		if (type != MLX5_MODIFICATION_TYPE_COPY &&
		    type != MLX5_MODIFICATION_TYPE_ADD_FIELD)
			/* Copy and add_field actions use all bytes for control */
			MLX5_SET(set_action_in, &pattern_data[i], data, 0);
	}

	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
	if (!devx_obj->obj) {
		DR_LOG(ERR, "Failed to create header_modify_pattern (syndrome: %#x)",
		       mlx5dr_cmd_get_syndrome(out));
		rte_errno = errno;
		goto free_obj;
	}

	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return devx_obj;

free_obj:
	simple_free(devx_obj);
	return NULL;
}
639 
640 struct mlx5dr_devx_obj *
641 mlx5dr_cmd_ste_create(struct ibv_context *ctx,
642 		      struct mlx5dr_cmd_ste_create_attr *ste_attr)
643 {
644 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
645 	uint32_t in[MLX5_ST_SZ_DW(create_ste_in)] = {0};
646 	struct mlx5dr_devx_obj *devx_obj;
647 	void *attr;
648 
649 	devx_obj = simple_malloc(sizeof(*devx_obj));
650 	if (!devx_obj) {
651 		DR_LOG(ERR, "Failed to allocate memory for STE object");
652 		rte_errno = ENOMEM;
653 		return NULL;
654 	}
655 
656 	attr = MLX5_ADDR_OF(create_ste_in, in, hdr);
657 	MLX5_SET(general_obj_in_cmd_hdr,
658 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
659 	MLX5_SET(general_obj_in_cmd_hdr,
660 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STE);
661 	MLX5_SET(general_obj_in_cmd_hdr,
662 		 attr, log_obj_range, ste_attr->log_obj_range);
663 
664 	attr = MLX5_ADDR_OF(create_ste_in, in, ste);
665 	MLX5_SET(ste, attr, table_type, ste_attr->table_type);
666 
667 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
668 	if (!devx_obj->obj) {
669 		DR_LOG(ERR, "Failed to create STE (syndrome: %#x)",
670 		       mlx5dr_cmd_get_syndrome(out));
671 		simple_free(devx_obj);
672 		rte_errno = errno;
673 		return NULL;
674 	}
675 
676 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
677 
678 	return devx_obj;
679 }
680 
/* Create a match definer general object using the SELECT format:
 * 9 DW selectors + 8 byte selectors choose which packet fields feed the
 * match, and match_mask marks the bits that participate.
 * Returns the wrapper with its id filled in, or NULL with rte_errno set.
 */
struct mlx5dr_devx_obj *
mlx5dr_cmd_definer_create(struct ibv_context *ctx,
			  struct mlx5dr_cmd_definer_create_attr *def_attr)
{
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(create_definer_in)] = {0};
	struct mlx5dr_devx_obj *devx_obj;
	void *ptr;

	devx_obj = simple_malloc(sizeof(*devx_obj));
	if (!devx_obj) {
		DR_LOG(ERR, "Failed to allocate memory for definer object");
		rte_errno = ENOMEM;
		return NULL;
	}

	MLX5_SET(general_obj_in_cmd_hdr,
		 in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr,
		 in, obj_type, MLX5_GENERAL_OBJ_TYPE_DEFINER);

	ptr = MLX5_ADDR_OF(create_definer_in, in, definer);
	MLX5_SET(definer, ptr, format_id, MLX5_IFC_DEFINER_FORMAT_ID_SELECT);

	/* DW selectors: which packet dwords land in each match slot */
	MLX5_SET(definer, ptr, format_select_dw0, def_attr->dw_selector[0]);
	MLX5_SET(definer, ptr, format_select_dw1, def_attr->dw_selector[1]);
	MLX5_SET(definer, ptr, format_select_dw2, def_attr->dw_selector[2]);
	MLX5_SET(definer, ptr, format_select_dw3, def_attr->dw_selector[3]);
	MLX5_SET(definer, ptr, format_select_dw4, def_attr->dw_selector[4]);
	MLX5_SET(definer, ptr, format_select_dw5, def_attr->dw_selector[5]);
	MLX5_SET(definer, ptr, format_select_dw6, def_attr->dw_selector[6]);
	MLX5_SET(definer, ptr, format_select_dw7, def_attr->dw_selector[7]);
	MLX5_SET(definer, ptr, format_select_dw8, def_attr->dw_selector[8]);

	/* Byte selectors: single-byte match slots */
	MLX5_SET(definer, ptr, format_select_byte0, def_attr->byte_selector[0]);
	MLX5_SET(definer, ptr, format_select_byte1, def_attr->byte_selector[1]);
	MLX5_SET(definer, ptr, format_select_byte2, def_attr->byte_selector[2]);
	MLX5_SET(definer, ptr, format_select_byte3, def_attr->byte_selector[3]);
	MLX5_SET(definer, ptr, format_select_byte4, def_attr->byte_selector[4]);
	MLX5_SET(definer, ptr, format_select_byte5, def_attr->byte_selector[5]);
	MLX5_SET(definer, ptr, format_select_byte6, def_attr->byte_selector[6]);
	MLX5_SET(definer, ptr, format_select_byte7, def_attr->byte_selector[7]);

	ptr = MLX5_ADDR_OF(definer, ptr, match_mask);
	memcpy(ptr, def_attr->match_mask, MLX5_FLD_SZ_BYTES(definer, match_mask));

	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
	if (!devx_obj->obj) {
		DR_LOG(ERR, "Failed to create Definer (syndrome: %#x)",
		       mlx5dr_cmd_get_syndrome(out));
		simple_free(devx_obj);
		rte_errno = errno;
		return NULL;
	}

	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return devx_obj;
}
740 
741 struct mlx5dr_devx_obj *
742 mlx5dr_cmd_sq_create(struct ibv_context *ctx,
743 		     struct mlx5dr_cmd_sq_create_attr *attr)
744 {
745 	uint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
746 	uint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
747 	void *sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
748 	void *wqc = MLX5_ADDR_OF(sqc, sqc, wq);
749 	struct mlx5dr_devx_obj *devx_obj;
750 
751 	devx_obj = simple_malloc(sizeof(*devx_obj));
752 	if (!devx_obj) {
753 		DR_LOG(ERR, "Failed to create SQ");
754 		rte_errno = ENOMEM;
755 		return NULL;
756 	}
757 
758 	MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
759 	MLX5_SET(sqc, sqc, cqn, attr->cqn);
760 	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
761 	MLX5_SET(sqc, sqc, non_wire, 1);
762 	MLX5_SET(sqc, sqc, ts_format, attr->ts_format);
763 	MLX5_SET(wq, wqc, wq_type, MLX5_WQ_TYPE_CYCLIC);
764 	MLX5_SET(wq, wqc, pd, attr->pdn);
765 	MLX5_SET(wq, wqc, uar_page, attr->page_id);
766 	MLX5_SET(wq, wqc, log_wq_stride, log2above(MLX5_SEND_WQE_BB));
767 	MLX5_SET(wq, wqc, log_wq_sz, attr->log_wq_sz);
768 	MLX5_SET(wq, wqc, dbr_umem_id, attr->dbr_id);
769 	MLX5_SET(wq, wqc, wq_umem_id, attr->wq_id);
770 
771 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
772 	if (!devx_obj->obj) {
773 		simple_free(devx_obj);
774 		rte_errno = errno;
775 		return NULL;
776 	}
777 
778 	devx_obj->id = MLX5_GET(create_sq_out, out, sqn);
779 
780 	return devx_obj;
781 }
782 
783 int mlx5dr_cmd_sq_modify_rdy(struct mlx5dr_devx_obj *devx_obj)
784 {
785 	uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
786 	uint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
787 	void *sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
788 	int ret;
789 
790 	MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
791 	MLX5_SET(modify_sq_in, in, sqn, devx_obj->id);
792 	MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
793 	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
794 
795 	ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out));
796 	if (ret) {
797 		DR_LOG(ERR, "Failed to modify SQ (syndrome: %#x)",
798 		       mlx5dr_cmd_get_syndrome(out));
799 		rte_errno = errno;
800 	}
801 
802 	return ret;
803 }
804 
805 int mlx5dr_cmd_allow_other_vhca_access(struct ibv_context *ctx,
806 				       struct mlx5dr_cmd_allow_other_vhca_access_attr *attr)
807 {
808 	uint32_t out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {0};
809 	uint32_t in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {0};
810 	void *key;
811 	int ret;
812 
813 	MLX5_SET(allow_other_vhca_access_in,
814 		 in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
815 	MLX5_SET(allow_other_vhca_access_in,
816 		 in, object_type_to_be_accessed, attr->obj_type);
817 	MLX5_SET(allow_other_vhca_access_in,
818 		 in, object_id_to_be_accessed, attr->obj_id);
819 
820 	key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
821 	memcpy(key, attr->access_key, sizeof(attr->access_key));
822 
823 	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
824 	if (ret) {
825 		DR_LOG(ERR, "Failed to execute ALLOW_OTHER_VHCA_ACCESS command");
826 		rte_errno = errno;
827 		return rte_errno;
828 	}
829 
830 	return 0;
831 }
832 
833 struct mlx5dr_devx_obj *
834 mlx5dr_cmd_alias_obj_create(struct ibv_context *ctx,
835 			    struct mlx5dr_cmd_alias_obj_create_attr *alias_attr)
836 {
837 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
838 	uint32_t in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {0};
839 	struct mlx5dr_devx_obj *devx_obj;
840 	void *attr;
841 	void *key;
842 
843 	devx_obj = simple_malloc(sizeof(*devx_obj));
844 	if (!devx_obj) {
845 		DR_LOG(ERR, "Failed to allocate memory for ALIAS general object");
846 		rte_errno = ENOMEM;
847 		return NULL;
848 	}
849 
850 	attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
851 	MLX5_SET(general_obj_in_cmd_hdr,
852 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
853 	MLX5_SET(general_obj_in_cmd_hdr,
854 		 attr, obj_type, alias_attr->obj_type);
855 	MLX5_SET(general_obj_in_cmd_hdr, attr, alias_object, 1);
856 
857 	attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
858 	MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
859 	MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);
860 
861 	key = MLX5_ADDR_OF(alias_context, attr, access_key);
862 	memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));
863 
864 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
865 	if (!devx_obj->obj) {
866 		DR_LOG(ERR, "Failed to create ALIAS OBJ (syndrome: %#x)",
867 		       mlx5dr_cmd_get_syndrome(out));
868 		simple_free(devx_obj);
869 		rte_errno = errno;
870 		return NULL;
871 	}
872 
873 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
874 
875 	return devx_obj;
876 }
877 
878 int mlx5dr_cmd_generate_wqe(struct ibv_context *ctx,
879 			    struct mlx5dr_cmd_generate_wqe_attr *attr,
880 			    struct mlx5_cqe64 *ret_cqe)
881 {
882 	uint32_t out[MLX5_ST_SZ_DW(generate_wqe_out)] = {0};
883 	uint32_t in[MLX5_ST_SZ_DW(generate_wqe_in)] = {0};
884 	uint8_t status;
885 	void *ptr;
886 	int ret;
887 
888 	MLX5_SET(generate_wqe_in, in, opcode, MLX5_CMD_OP_GENERATE_WQE);
889 	MLX5_SET(generate_wqe_in, in, pdn, attr->pdn);
890 
891 	ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_ctrl);
892 	memcpy(ptr, attr->wqe_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_ctrl));
893 
894 	ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_ctrl);
895 	memcpy(ptr, attr->gta_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_ctrl));
896 
897 	ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_0);
898 	memcpy(ptr, attr->gta_data_0, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_0));
899 
900 	if (attr->gta_data_1) {
901 		ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_1);
902 		memcpy(ptr, attr->gta_data_1, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_1));
903 	}
904 
905 	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
906 	if (ret) {
907 		DR_LOG(ERR, "Failed to write GTA WQE using FW");
908 		rte_errno = errno;
909 		return rte_errno;
910 	}
911 
912 	status = MLX5_GET(generate_wqe_out, out, status);
913 	if (status) {
914 		DR_LOG(ERR, "Invalid FW CQE status %d", status);
915 		rte_errno = EINVAL;
916 		return rte_errno;
917 	}
918 
919 	ptr = MLX5_ADDR_OF(generate_wqe_out, out, cqe_data);
920 	memcpy(ret_cqe, ptr, sizeof(*ret_cqe));
921 
922 	return 0;
923 }
924 
/* Query all device capabilities needed by HWS into 'caps'.
 * Issues a series of QUERY_HCA_CAP commands (general, general-2, NIC flow
 * table, and conditionally WQE-based flow table and eswitch caps), plus an
 * ibv device query for the FW version and a wire-port regc lookup.
 * Returns 0 on success, otherwise rte_errno is set and returned.
 */
int mlx5dr_cmd_query_caps(struct ibv_context *ctx,
			  struct mlx5dr_cmd_query_caps *caps)
{
	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
	const struct flow_hw_port_info *port_info;
	struct ibv_device_attr_ex attr_ex;
	u32 res;
	int ret;

	/* First pass: general device capabilities (current values) */
	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to query device caps");
		rte_errno = errno;
		return rte_errno;
	}

	caps->wqe_based_update =
		MLX5_GET(query_hca_cap_out, out,
			 capability.cmd_hca_cap.wqe_based_flow_table_update_cap);

	caps->eswitch_manager = MLX5_GET(query_hca_cap_out, out,
					 capability.cmd_hca_cap.eswitch_manager);

	caps->flex_protocols = MLX5_GET(query_hca_cap_out, out,
					capability.cmd_hca_cap.flex_parser_protocols);

	caps->log_header_modify_argument_granularity =
		MLX5_GET(query_hca_cap_out, out,
			 capability.cmd_hca_cap.log_header_modify_argument_granularity);

	/* Effective granularity is the reported value minus the FW offset */
	caps->log_header_modify_argument_granularity -=
			MLX5_GET(query_hca_cap_out, out,
				 capability.cmd_hca_cap.
				 log_header_modify_argument_granularity_offset);

	caps->log_header_modify_argument_max_alloc =
		MLX5_GET(query_hca_cap_out, out,
			 capability.cmd_hca_cap.log_header_modify_argument_max_alloc);

	caps->definer_format_sup =
		MLX5_GET64(query_hca_cap_out, out,
			   capability.cmd_hca_cap.match_definer_format_supported);

	caps->vhca_id = MLX5_GET(query_hca_cap_out, out,
				 capability.cmd_hca_cap.vhca_id);

	caps->sq_ts_format = MLX5_GET(query_hca_cap_out, out,
				      capability.cmd_hca_cap.sq_ts_format);

	caps->ipsec_offload = MLX5_GET(query_hca_cap_out, out,
				      capability.cmd_hca_cap.ipsec_offload);

	/* Second pass: general device capabilities, part 2 (reuses 'in'/'out') */
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to query device caps");
		rte_errno = errno;
		return rte_errno;
	}

	caps->full_dw_jumbo_support = MLX5_GET(query_hca_cap_out, out,
					       capability.cmd_hca_cap_2.
					       format_select_dw_8_6_ext);

	caps->format_select_gtpu_dw_0 = MLX5_GET(query_hca_cap_out, out,
						 capability.cmd_hca_cap_2.
						 format_select_dw_gtpu_dw_0);

	caps->format_select_gtpu_dw_1 = MLX5_GET(query_hca_cap_out, out,
						 capability.cmd_hca_cap_2.
						 format_select_dw_gtpu_dw_1);

	caps->format_select_gtpu_dw_2 = MLX5_GET(query_hca_cap_out, out,
						 capability.cmd_hca_cap_2.
						 format_select_dw_gtpu_dw_2);

	caps->format_select_gtpu_ext_dw_0 = MLX5_GET(query_hca_cap_out, out,
						     capability.cmd_hca_cap_2.
						     format_select_dw_gtpu_first_ext_dw_0);

	caps->supp_type_gen_wqe = MLX5_GET(query_hca_cap_out, out,
					   capability.cmd_hca_cap_2.
					   generate_wqe_type);

	/* Check cross-VHCA support in cap2: all three object-to-object
	 * link types (STC->TIR, STC->FT, FT->RTC) must be supported.
	 */
	res =
	MLX5_GET(query_hca_cap_out, out,
		capability.cmd_hca_cap_2.cross_vhca_object_to_object_supported);

	caps->cross_vhca_resources = (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_STC_TO_TIR) &&
				     (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_STC_TO_FT) &&
				     (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_FT_TO_RTC);

	/* All three alias-able object types (TIR, FT, RTC) must be allowed too */
	res =
	MLX5_GET(query_hca_cap_out, out,
		capability.cmd_hca_cap_2.allowed_object_for_other_vhca_access);

	caps->cross_vhca_resources &= (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_TIR) &&
				      (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_FT) &&
				      (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_RTC);

	/* Third pass: NIC flow table capabilities */
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to query flow table caps");
		rte_errno = errno;
		return rte_errno;
	}

	caps->nic_ft.max_level = MLX5_GET(query_hca_cap_out, out,
					  capability.flow_table_nic_cap.
					  flow_table_properties_nic_receive.max_ft_level);

	caps->nic_ft.reparse = MLX5_GET(query_hca_cap_out, out,
					capability.flow_table_nic_cap.
					flow_table_properties_nic_receive.reparse);

	caps->nic_ft.ignore_flow_level_rtc_valid =
		MLX5_GET(query_hca_cap_out,
			 out,
			 capability.flow_table_nic_cap.
			 flow_table_properties_nic_receive.ignore_flow_level_rtc_valid);

	/* Check cross-VHCA support in flow table properties */
	res =
	MLX5_GET(query_hca_cap_out, out,
		capability.flow_table_nic_cap.flow_table_properties_nic_receive.cross_vhca_object);
	caps->cross_vhca_resources &= res;

	/* Only query WQE-based flow table caps when the device advertises
	 * WQE-based update support (checked in the first pass above).
	 */
	if (caps->wqe_based_update) {
		MLX5_SET(query_hca_cap_in, in, op_mod,
			 MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE |
			 MLX5_HCA_CAP_OPMOD_GET_CUR);

		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
		if (ret) {
			DR_LOG(ERR, "Failed to query WQE based FT caps");
			rte_errno = errno;
			return rte_errno;
		}

		caps->rtc_reparse_mode = MLX5_GET(query_hca_cap_out, out,
						  capability.wqe_based_flow_table_cap.
						  rtc_reparse_mode);

		caps->ste_format = MLX5_GET(query_hca_cap_out, out,
					    capability.wqe_based_flow_table_cap.
					    ste_format);

		caps->rtc_index_mode = MLX5_GET(query_hca_cap_out, out,
						capability.wqe_based_flow_table_cap.
						rtc_index_mode);

		caps->rtc_log_depth_max = MLX5_GET(query_hca_cap_out, out,
						   capability.wqe_based_flow_table_cap.
						   rtc_log_depth_max);

		caps->ste_alloc_log_max = MLX5_GET(query_hca_cap_out, out,
						   capability.wqe_based_flow_table_cap.
						   ste_alloc_log_max);

		caps->ste_alloc_log_gran = MLX5_GET(query_hca_cap_out, out,
						    capability.wqe_based_flow_table_cap.
						    ste_alloc_log_granularity);

		caps->trivial_match_definer = MLX5_GET(query_hca_cap_out, out,
						       capability.wqe_based_flow_table_cap.
						       trivial_match_definer);

		caps->stc_alloc_log_max = MLX5_GET(query_hca_cap_out, out,
						   capability.wqe_based_flow_table_cap.
						   stc_alloc_log_max);

		caps->stc_alloc_log_gran = MLX5_GET(query_hca_cap_out, out,
						    capability.wqe_based_flow_table_cap.
						    stc_alloc_log_granularity);

		caps->rtc_hash_split_table = MLX5_GET(query_hca_cap_out, out,
						      capability.wqe_based_flow_table_cap.
						      rtc_hash_split_table);

		caps->rtc_linear_lookup_table = MLX5_GET(query_hca_cap_out, out,
							 capability.wqe_based_flow_table_cap.
							 rtc_linear_lookup_table);

		caps->access_index_mode = MLX5_GET(query_hca_cap_out, out,
						   capability.wqe_based_flow_table_cap.
						   access_index_mode);

		caps->linear_match_definer = MLX5_GET(query_hca_cap_out, out,
						      capability.wqe_based_flow_table_cap.
						      linear_match_definer_reg_c3);

		caps->rtc_max_hash_def_gen_wqe = MLX5_GET(query_hca_cap_out, out,
							  capability.wqe_based_flow_table_cap.
							  rtc_max_num_hash_definer_gen_wqe);

		caps->supp_ste_format_gen_wqe = MLX5_GET(query_hca_cap_out, out,
							 capability.wqe_based_flow_table_cap.
							 ste_format_gen_wqe);
	}

	/* Eswitch (FDB) caps are only meaningful for the eswitch manager */
	if (caps->eswitch_manager) {
		MLX5_SET(query_hca_cap_in, in, op_mod,
			 MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE |
			 MLX5_HCA_CAP_OPMOD_GET_CUR);

		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
		if (ret) {
			DR_LOG(ERR, "Failed to query flow table esw caps");
			rte_errno = errno;
			return rte_errno;
		}

		/* NOTE(review): the esw flow table properties are read through the
		 * flow_table_nic_cap layout; presumably both cap structures share
		 * the same field offsets — confirm against the PRM.
		 */
		caps->fdb_ft.max_level = MLX5_GET(query_hca_cap_out, out,
						  capability.flow_table_nic_cap.
						  flow_table_properties_nic_receive.max_ft_level);

		caps->fdb_ft.reparse = MLX5_GET(query_hca_cap_out, out,
						capability.flow_table_nic_cap.
						flow_table_properties_nic_receive.reparse);

		MLX5_SET(query_hca_cap_in, in, op_mod,
			 MLX5_SET_HCA_CAP_OP_MOD_ESW | MLX5_HCA_CAP_OPMOD_GET_CUR);

		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
		if (ret) {
			DR_LOG(ERR, "Query eswitch capabilities failed %d", ret);
			rte_errno = errno;
			return rte_errno;
		}

		/* Only trust the manager vport number when FW marks it valid */
		if (MLX5_GET(query_hca_cap_out, out,
			     capability.esw_cap.esw_manager_vport_number_valid))
			caps->eswitch_manager_vport_number =
			MLX5_GET(query_hca_cap_out, out,
				 capability.esw_cap.esw_manager_vport_number);
	}

	ret = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
	if (ret) {
		DR_LOG(ERR, "Failed to query device attributes");
		rte_errno = ret;
		return rte_errno;
	}

	strlcpy(caps->fw_ver, attr_ex.orig_attr.fw_ver, sizeof(caps->fw_ver));

	/* Wire-port regc value/mask is best-effort: log and continue on failure */
	port_info = flow_hw_get_wire_port(ctx);
	if (port_info) {
		caps->wire_regc = port_info->regc_value;
		caps->wire_regc_mask = port_info->regc_mask;
	} else {
		DR_LOG(INFO, "Failed to query wire port regc value");
	}

	return ret;
}
1195 
1196 int mlx5dr_cmd_query_ib_port(struct ibv_context *ctx,
1197 			     struct mlx5dr_cmd_query_vport_caps *vport_caps,
1198 			     uint32_t port_num)
1199 {
1200 	struct mlx5_port_info port_info = {0};
1201 	uint32_t flags;
1202 	int ret;
1203 
1204 	flags = MLX5_PORT_QUERY_VPORT | MLX5_PORT_QUERY_ESW_OWNER_VHCA_ID;
1205 
1206 	ret = mlx5_glue->devx_port_query(ctx, port_num, &port_info);
1207 	/* Check if query succeed and vport is enabled */
1208 	if (ret || (port_info.query_flags & flags) != flags) {
1209 		rte_errno = ENOTSUP;
1210 		return rte_errno;
1211 	}
1212 
1213 	vport_caps->vport_num = port_info.vport_id;
1214 	vport_caps->esw_owner_vhca_id = port_info.esw_owner_vhca_id;
1215 
1216 	if (port_info.query_flags & MLX5_PORT_QUERY_REG_C0) {
1217 		vport_caps->metadata_c = port_info.vport_meta_tag;
1218 		vport_caps->metadata_c_mask = port_info.vport_meta_mask;
1219 	}
1220 
1221 	return 0;
1222 }
1223