xref: /dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c (revision 665b49c51639a10c553433bc2bcd85c7331c631e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2022 NVIDIA Corporation & Affiliates
3  */
4 
5 #include "mlx5dr_internal.h"
6 
7 int mlx5dr_cmd_destroy_obj(struct mlx5dr_devx_obj *devx_obj)
8 {
9 	int ret;
10 
11 	ret = mlx5_glue->devx_obj_destroy(devx_obj->obj);
12 	simple_free(devx_obj);
13 
14 	return ret;
15 }
16 
17 struct mlx5dr_devx_obj *
18 mlx5dr_cmd_flow_table_create(struct ibv_context *ctx,
19 			     struct mlx5dr_cmd_ft_create_attr *ft_attr)
20 {
21 	uint32_t out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
22 	uint32_t in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
23 	struct mlx5dr_devx_obj *devx_obj;
24 	void *ft_ctx;
25 
26 	devx_obj = simple_malloc(sizeof(*devx_obj));
27 	if (!devx_obj) {
28 		DR_LOG(ERR, "Failed to allocate memory for flow table object");
29 		rte_errno = ENOMEM;
30 		return NULL;
31 	}
32 
33 	MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
34 	MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type);
35 
36 	ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
37 	MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
38 	MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid);
39 
40 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
41 	if (!devx_obj->obj) {
42 		DR_LOG(ERR, "Failed to create FT");
43 		simple_free(devx_obj);
44 		rte_errno = errno;
45 		return NULL;
46 	}
47 
48 	devx_obj->id = MLX5_GET(create_flow_table_out, out, table_id);
49 
50 	return devx_obj;
51 }
52 
53 int
54 mlx5dr_cmd_flow_table_modify(struct mlx5dr_devx_obj *devx_obj,
55 			     struct mlx5dr_cmd_ft_modify_attr *ft_attr)
56 {
57 	uint32_t out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
58 	uint32_t in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
59 	void *ft_ctx;
60 	int ret;
61 
62 	MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE);
63 	MLX5_SET(modify_flow_table_in, in, table_type, ft_attr->type);
64 	MLX5_SET(modify_flow_table_in, in, modify_field_select, ft_attr->modify_fs);
65 	MLX5_SET(modify_flow_table_in, in, table_id, devx_obj->id);
66 
67 	ft_ctx = MLX5_ADDR_OF(modify_flow_table_in, in, flow_table_context);
68 
69 	MLX5_SET(flow_table_context, ft_ctx, table_miss_action, ft_attr->table_miss_action);
70 	MLX5_SET(flow_table_context, ft_ctx, table_miss_id, ft_attr->table_miss_id);
71 	MLX5_SET(flow_table_context, ft_ctx, rtc_id_0, ft_attr->rtc_id_0);
72 	MLX5_SET(flow_table_context, ft_ctx, rtc_id_1, ft_attr->rtc_id_1);
73 
74 	ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out));
75 	if (ret) {
76 		DR_LOG(ERR, "Failed to modify FT");
77 		rte_errno = errno;
78 	}
79 
80 	return ret;
81 }
82 
83 static struct mlx5dr_devx_obj *
84 mlx5dr_cmd_flow_group_create(struct ibv_context *ctx,
85 			     struct mlx5dr_cmd_fg_attr *fg_attr)
86 {
87 	uint32_t out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
88 	uint32_t in[MLX5_ST_SZ_DW(create_flow_group_in)] = {0};
89 	struct mlx5dr_devx_obj *devx_obj;
90 
91 	devx_obj = simple_malloc(sizeof(*devx_obj));
92 	if (!devx_obj) {
93 		DR_LOG(ERR, "Failed to allocate memory for flow group object");
94 		rte_errno = ENOMEM;
95 		return NULL;
96 	}
97 
98 	MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
99 	MLX5_SET(create_flow_group_in, in, table_type, fg_attr->table_type);
100 	MLX5_SET(create_flow_group_in, in, table_id, fg_attr->table_id);
101 
102 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
103 	if (!devx_obj->obj) {
104 		DR_LOG(ERR, "Failed to create Flow group");
105 		simple_free(devx_obj);
106 		rte_errno = errno;
107 		return NULL;
108 	}
109 
110 	devx_obj->id = MLX5_GET(create_flow_group_out, out, group_id);
111 
112 	return devx_obj;
113 }
114 
115 static struct mlx5dr_devx_obj *
116 mlx5dr_cmd_set_vport_fte(struct ibv_context *ctx,
117 			 uint32_t table_type,
118 			 uint32_t table_id,
119 			 uint32_t group_id,
120 			 uint32_t vport_id)
121 {
122 	uint32_t in[MLX5_ST_SZ_DW(set_fte_in) + MLX5_ST_SZ_DW(dest_format)] = {0};
123 	uint32_t out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
124 	struct mlx5dr_devx_obj *devx_obj;
125 	void *in_flow_context;
126 	void *in_dests;
127 
128 	devx_obj = simple_malloc(sizeof(*devx_obj));
129 	if (!devx_obj) {
130 		DR_LOG(ERR, "Failed to allocate memory for fte object");
131 		rte_errno = ENOMEM;
132 		return NULL;
133 	}
134 
135 	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
136 	MLX5_SET(set_fte_in, in, table_type, table_type);
137 	MLX5_SET(set_fte_in, in, table_id, table_id);
138 
139 	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
140 	MLX5_SET(flow_context, in_flow_context, group_id, group_id);
141 	MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
142 	MLX5_SET(flow_context, in_flow_context, action, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
143 
144 	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
145 	MLX5_SET(dest_format, in_dests, destination_type,
146 		 MLX5_FLOW_DESTINATION_TYPE_VPORT);
147 	MLX5_SET(dest_format, in_dests, destination_id, vport_id);
148 
149 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
150 	if (!devx_obj->obj) {
151 		DR_LOG(ERR, "Failed to create FTE");
152 		simple_free(devx_obj);
153 		rte_errno = errno;
154 		return NULL;
155 	}
156 
157 	return devx_obj;
158 }
159 
/* Tear down a miss (forward) table in reverse creation order:
 * FTE first, then the flow group, then the flow table itself.
 */
void mlx5dr_cmd_miss_ft_destroy(struct mlx5dr_cmd_forward_tbl *tbl)
{
	mlx5dr_cmd_destroy_obj(tbl->fte);
	mlx5dr_cmd_destroy_obj(tbl->fg);
	mlx5dr_cmd_destroy_obj(tbl->ft);
}
166 
167 struct mlx5dr_cmd_forward_tbl *
168 mlx5dr_cmd_miss_ft_create(struct ibv_context *ctx,
169 			  struct mlx5dr_cmd_ft_create_attr *ft_attr,
170 			  uint32_t vport)
171 {
172 	struct mlx5dr_cmd_fg_attr fg_attr = {0};
173 	struct mlx5dr_cmd_forward_tbl *tbl;
174 
175 	tbl = simple_calloc(1, sizeof(*tbl));
176 	if (!tbl) {
177 		DR_LOG(ERR, "Failed to allocate memory for forward default");
178 		rte_errno = ENOMEM;
179 		return NULL;
180 	}
181 
182 	tbl->ft = mlx5dr_cmd_flow_table_create(ctx, ft_attr);
183 	if (!tbl->ft) {
184 		DR_LOG(ERR, "Failed to create FT for miss-table");
185 		goto free_tbl;
186 	}
187 
188 	fg_attr.table_id = tbl->ft->id;
189 	fg_attr.table_type = ft_attr->type;
190 
191 	tbl->fg = mlx5dr_cmd_flow_group_create(ctx, &fg_attr);
192 	if (!tbl->fg) {
193 		DR_LOG(ERR, "Failed to create FG for miss-table");
194 		goto free_ft;
195 	}
196 
197 	tbl->fte = mlx5dr_cmd_set_vport_fte(ctx, ft_attr->type, tbl->ft->id, tbl->fg->id, vport);
198 	if (!tbl->fte) {
199 		DR_LOG(ERR, "Failed to create FTE for miss-table");
200 		goto free_fg;
201 	}
202 	return tbl;
203 
204 free_fg:
205 	mlx5dr_cmd_destroy_obj(tbl->fg);
206 free_ft:
207 	mlx5dr_cmd_destroy_obj(tbl->ft);
208 free_tbl:
209 	simple_free(tbl);
210 	return NULL;
211 }
212 
/* Fill a flow-table modify attribute so the table's miss path jumps to
 * the appropriate default table. Only applies to FDB tables, or to any
 * table type when a shared-GVMI context is in use; for all other cases
 * ft_attr is left untouched.
 */
void mlx5dr_cmd_set_attr_connect_miss_tbl(struct mlx5dr_context *ctx,
					  uint32_t fw_ft_type,
					  enum mlx5dr_table_type type,
					  struct mlx5dr_cmd_ft_modify_attr *ft_attr)
{
	struct mlx5dr_devx_obj *default_miss_tbl;

	/* Nothing to connect unless FDB or shared-GVMI is involved. */
	if (type != MLX5DR_TABLE_TYPE_FDB && !mlx5dr_context_shared_gvmi_used(ctx))
		return;

	ft_attr->modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
	ft_attr->type = fw_ft_type;
	ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;

	if (type == MLX5DR_TABLE_TYPE_FDB) {
		/* FDB tables miss to the context-wide default miss table. */
		default_miss_tbl = ctx->common_res[type].default_miss->ft;
		if (!default_miss_tbl) {
			/* Default miss table must exist for FDB by now. */
			assert(false);
			return;
		}
		ft_attr->table_miss_id = default_miss_tbl->id;
	} else {
		/* Shared-GVMI: miss to the aliased end flow table. */
		ft_attr->table_miss_id = ctx->gvmi_res[type].aliased_end_ft->id;
	}
}
238 
239 struct mlx5dr_devx_obj *
240 mlx5dr_cmd_rtc_create(struct ibv_context *ctx,
241 		      struct mlx5dr_cmd_rtc_create_attr *rtc_attr)
242 {
243 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
244 	uint32_t in[MLX5_ST_SZ_DW(create_rtc_in)] = {0};
245 	struct mlx5dr_devx_obj *devx_obj;
246 	void *attr;
247 
248 	devx_obj = simple_malloc(sizeof(*devx_obj));
249 	if (!devx_obj) {
250 		DR_LOG(ERR, "Failed to allocate memory for RTC object");
251 		rte_errno = ENOMEM;
252 		return NULL;
253 	}
254 
255 	attr = MLX5_ADDR_OF(create_rtc_in, in, hdr);
256 	MLX5_SET(general_obj_in_cmd_hdr,
257 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
258 	MLX5_SET(general_obj_in_cmd_hdr,
259 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_RTC);
260 
261 	attr = MLX5_ADDR_OF(create_rtc_in, in, rtc);
262 	MLX5_SET(rtc, attr, ste_format_0, rtc_attr->is_frst_jumbo ?
263 		MLX5_IFC_RTC_STE_FORMAT_11DW :
264 		MLX5_IFC_RTC_STE_FORMAT_8DW);
265 
266 	if (rtc_attr->is_scnd_range) {
267 		MLX5_SET(rtc, attr, ste_format_1, MLX5_IFC_RTC_STE_FORMAT_RANGE);
268 		MLX5_SET(rtc, attr, num_match_ste, 2);
269 	}
270 
271 	MLX5_SET(rtc, attr, pd, rtc_attr->pd);
272 	MLX5_SET(rtc, attr, update_method, rtc_attr->fw_gen_wqe);
273 	MLX5_SET(rtc, attr, update_index_mode, rtc_attr->update_index_mode);
274 	MLX5_SET(rtc, attr, access_index_mode, rtc_attr->access_index_mode);
275 	MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer);
276 	MLX5_SET(rtc, attr, log_depth, rtc_attr->log_depth);
277 	MLX5_SET(rtc, attr, log_hash_size, rtc_attr->log_size);
278 	MLX5_SET(rtc, attr, table_type, rtc_attr->table_type);
279 	MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer);
280 	MLX5_SET(rtc, attr, match_definer_0, rtc_attr->match_definer_0);
281 	MLX5_SET(rtc, attr, match_definer_1, rtc_attr->match_definer_1);
282 	MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base);
283 	MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base);
284 	MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset);
285 	MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id);
286 	MLX5_SET(rtc, attr, reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS);
287 
288 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
289 	if (!devx_obj->obj) {
290 		DR_LOG(ERR, "Failed to create RTC");
291 		simple_free(devx_obj);
292 		rte_errno = errno;
293 		return NULL;
294 	}
295 
296 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
297 
298 	return devx_obj;
299 }
300 
301 struct mlx5dr_devx_obj *
302 mlx5dr_cmd_stc_create(struct ibv_context *ctx,
303 		      struct mlx5dr_cmd_stc_create_attr *stc_attr)
304 {
305 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
306 	uint32_t in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
307 	struct mlx5dr_devx_obj *devx_obj;
308 	void *attr;
309 
310 	devx_obj = simple_malloc(sizeof(*devx_obj));
311 	if (!devx_obj) {
312 		DR_LOG(ERR, "Failed to allocate memory for STC object");
313 		rte_errno = ENOMEM;
314 		return NULL;
315 	}
316 
317 	attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
318 	MLX5_SET(general_obj_in_cmd_hdr,
319 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
320 	MLX5_SET(general_obj_in_cmd_hdr,
321 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STC);
322 	MLX5_SET(general_obj_in_cmd_hdr,
323 		 attr, log_obj_range, stc_attr->log_obj_range);
324 
325 	attr = MLX5_ADDR_OF(create_stc_in, in, stc);
326 	MLX5_SET(stc, attr, table_type, stc_attr->table_type);
327 
328 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
329 	if (!devx_obj->obj) {
330 		DR_LOG(ERR, "Failed to create STC");
331 		simple_free(devx_obj);
332 		rte_errno = errno;
333 		return NULL;
334 	}
335 
336 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
337 
338 	return devx_obj;
339 }
340 
/* Encode the per-action-type STC parameter block for an STC modify
 * command. stc_parm points into the command input buffer at the
 * stc_param field. Returns 0 on success, or EINVAL (also stored in
 * rte_errno) for an unsupported action type.
 */
static int
mlx5dr_cmd_stc_modify_set_stc_param(struct mlx5dr_cmd_stc_modify_attr *stc_attr,
				    void *stc_parm)
{
	switch (stc_attr->action_type) {
	case MLX5_IFC_STC_ACTION_TYPE_COUNTER:
		MLX5_SET(stc_ste_param_flow_counter, stc_parm, flow_counter_id, stc_attr->id);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR:
		MLX5_SET(stc_ste_param_tir, stc_parm, tirn, stc_attr->dest_tir_num);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT:
		MLX5_SET(stc_ste_param_table, stc_parm, table_id, stc_attr->dest_table_id);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST:
		/* Accelerated modify-header: pattern + argument object pair. */
		MLX5_SET(stc_ste_param_header_modify_list, stc_parm,
			 header_modify_pattern_id, stc_attr->modify_header.pattern_id);
		MLX5_SET(stc_ste_param_header_modify_list, stc_parm,
			 header_modify_argument_id, stc_attr->modify_header.arg_id);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE:
		MLX5_SET(stc_ste_param_remove, stc_parm, action_type,
			 MLX5_MODIFICATION_TYPE_REMOVE);
		MLX5_SET(stc_ste_param_remove, stc_parm, decap,
			 stc_attr->remove_header.decap);
		MLX5_SET(stc_ste_param_remove, stc_parm, remove_start_anchor,
			 stc_attr->remove_header.start_anchor);
		MLX5_SET(stc_ste_param_remove, stc_parm, remove_end_anchor,
			 stc_attr->remove_header.end_anchor);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT:
		MLX5_SET(stc_ste_param_insert, stc_parm, action_type,
			 MLX5_MODIFICATION_TYPE_INSERT);
		MLX5_SET(stc_ste_param_insert, stc_parm, encap,
			 stc_attr->insert_header.encap);
		MLX5_SET(stc_ste_param_insert, stc_parm, inline_data,
			 stc_attr->insert_header.is_inline);
		MLX5_SET(stc_ste_param_insert, stc_parm, insert_anchor,
			 stc_attr->insert_header.insert_anchor);
		/* HW gets the next 2 sizes in words */
		MLX5_SET(stc_ste_param_insert, stc_parm, insert_size,
			 stc_attr->insert_header.header_size / 2);
		MLX5_SET(stc_ste_param_insert, stc_parm, insert_offset,
			 stc_attr->insert_header.insert_offset / 2);
		MLX5_SET(stc_ste_param_insert, stc_parm, insert_argument,
			 stc_attr->insert_header.arg_id);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_COPY:
	case MLX5_IFC_STC_ACTION_TYPE_SET:
	case MLX5_IFC_STC_ACTION_TYPE_ADD:
		/* Single modify action: the raw 64-bit action data is the
		 * whole parameter, copied as-is (already big-endian).
		 */
		*(__be64 *)stc_parm = stc_attr->modify_action.data;
		break;
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT:
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK:
		MLX5_SET(stc_ste_param_vport, stc_parm, vport_number,
			 stc_attr->vport.vport_num);
		MLX5_SET(stc_ste_param_vport, stc_parm, eswitch_owner_vhca_id,
			 stc_attr->vport.esw_owner_vhca_id);
		MLX5_SET(stc_ste_param_vport, stc_parm, eswitch_owner_vhca_id_valid, 1);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_DROP:
	case MLX5_IFC_STC_ACTION_TYPE_NOP:
	case MLX5_IFC_STC_ACTION_TYPE_TAG:
	case MLX5_IFC_STC_ACTION_TYPE_ALLOW:
		/* These actions carry no parameters. */
		break;
	case MLX5_IFC_STC_ACTION_TYPE_ASO:
		MLX5_SET(stc_ste_param_execute_aso, stc_parm, aso_object_id,
			 stc_attr->aso.devx_obj_id);
		MLX5_SET(stc_ste_param_execute_aso, stc_parm, return_reg_id,
			 stc_attr->aso.return_reg_id);
		MLX5_SET(stc_ste_param_execute_aso, stc_parm, aso_type,
			 stc_attr->aso.aso_type);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE:
		MLX5_SET(stc_ste_param_ste_table, stc_parm, ste_obj_id,
			 stc_attr->ste_table.ste_obj_id);
		MLX5_SET(stc_ste_param_ste_table, stc_parm, match_definer_id,
			 stc_attr->ste_table.match_definer_id);
		MLX5_SET(stc_ste_param_ste_table, stc_parm, log_hash_size,
			 stc_attr->ste_table.log_hash_size);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS:
		MLX5_SET(stc_ste_param_remove_words, stc_parm, action_type,
			 MLX5_MODIFICATION_TYPE_REMOVE_WORDS);
		MLX5_SET(stc_ste_param_remove_words, stc_parm, remove_start_anchor,
			 stc_attr->remove_words.start_anchor);
		MLX5_SET(stc_ste_param_remove_words, stc_parm,
			 remove_size, stc_attr->remove_words.num_of_words);
		break;
	default:
		DR_LOG(ERR, "Not supported type %d", stc_attr->action_type);
		rte_errno = EINVAL;
		return rte_errno;
	}
	return 0;
}
437 
438 int
439 mlx5dr_cmd_stc_modify(struct mlx5dr_devx_obj *devx_obj,
440 		      struct mlx5dr_cmd_stc_modify_attr *stc_attr)
441 {
442 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
443 	uint32_t in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
444 	void *stc_parm;
445 	void *attr;
446 	int ret;
447 
448 	attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
449 	MLX5_SET(general_obj_in_cmd_hdr,
450 		 attr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
451 	MLX5_SET(general_obj_in_cmd_hdr,
452 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STC);
453 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, devx_obj->id);
454 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_offset, stc_attr->stc_offset);
455 
456 	attr = MLX5_ADDR_OF(create_stc_in, in, stc);
457 	MLX5_SET(stc, attr, ste_action_offset, stc_attr->action_offset);
458 	MLX5_SET(stc, attr, action_type, stc_attr->action_type);
459 	MLX5_SET64(stc, attr, modify_field_select,
460 		   MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC);
461 
462 	/* Set destination TIRN, TAG, FT ID, STE ID */
463 	stc_parm = MLX5_ADDR_OF(stc, attr, stc_param);
464 	ret = mlx5dr_cmd_stc_modify_set_stc_param(stc_attr, stc_parm);
465 	if (ret)
466 		return ret;
467 
468 	ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out));
469 	if (ret) {
470 		DR_LOG(ERR, "Failed to modify STC FW action_type %d", stc_attr->action_type);
471 		rte_errno = errno;
472 	}
473 
474 	return ret;
475 }
476 
477 struct mlx5dr_devx_obj *
478 mlx5dr_cmd_arg_create(struct ibv_context *ctx,
479 		      uint16_t log_obj_range,
480 		      uint32_t pd)
481 {
482 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
483 	uint32_t in[MLX5_ST_SZ_DW(create_arg_in)] = {0};
484 	struct mlx5dr_devx_obj *devx_obj;
485 	void *attr;
486 
487 	devx_obj = simple_malloc(sizeof(*devx_obj));
488 	if (!devx_obj) {
489 		DR_LOG(ERR, "Failed to allocate memory for ARG object");
490 		rte_errno = ENOMEM;
491 		return NULL;
492 	}
493 
494 	attr = MLX5_ADDR_OF(create_arg_in, in, hdr);
495 	MLX5_SET(general_obj_in_cmd_hdr,
496 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
497 	MLX5_SET(general_obj_in_cmd_hdr,
498 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_ARG);
499 	MLX5_SET(general_obj_in_cmd_hdr,
500 		 attr, log_obj_range, log_obj_range);
501 
502 	attr = MLX5_ADDR_OF(create_arg_in, in, arg);
503 	MLX5_SET(arg, attr, access_pd, pd);
504 
505 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
506 	if (!devx_obj->obj) {
507 		DR_LOG(ERR, "Failed to create ARG");
508 		simple_free(devx_obj);
509 		rte_errno = errno;
510 		return NULL;
511 	}
512 
513 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
514 
515 	return devx_obj;
516 }
517 
518 struct mlx5dr_devx_obj *
519 mlx5dr_cmd_header_modify_pattern_create(struct ibv_context *ctx,
520 					uint32_t pattern_length,
521 					uint8_t *actions)
522 {
523 	uint32_t in[MLX5_ST_SZ_DW(create_header_modify_pattern_in)] = {0};
524 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
525 	struct mlx5dr_devx_obj *devx_obj;
526 	void *pattern_data;
527 	void *pattern;
528 	void *attr;
529 
530 	if (pattern_length > MAX_ACTIONS_DATA_IN_HEADER_MODIFY) {
531 		DR_LOG(ERR, "Pattern length %d exceeds limit %d",
532 			pattern_length, MAX_ACTIONS_DATA_IN_HEADER_MODIFY);
533 		rte_errno = EINVAL;
534 		return NULL;
535 	}
536 
537 	devx_obj = simple_malloc(sizeof(*devx_obj));
538 	if (!devx_obj) {
539 		DR_LOG(ERR, "Failed to allocate memory for header_modify_pattern object");
540 		rte_errno = ENOMEM;
541 		return NULL;
542 	}
543 
544 	attr = MLX5_ADDR_OF(create_header_modify_pattern_in, in, hdr);
545 	MLX5_SET(general_obj_in_cmd_hdr,
546 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
547 	MLX5_SET(general_obj_in_cmd_hdr,
548 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_MODIFY_HEADER_PATTERN);
549 
550 	pattern = MLX5_ADDR_OF(create_header_modify_pattern_in, in, pattern);
551 	/* Pattern_length is in ddwords */
552 	MLX5_SET(header_modify_pattern_in, pattern, pattern_length, pattern_length / (2 * DW_SIZE));
553 
554 	pattern_data = MLX5_ADDR_OF(header_modify_pattern_in, pattern, pattern_data);
555 	memcpy(pattern_data, actions, pattern_length);
556 
557 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
558 	if (!devx_obj->obj) {
559 		DR_LOG(ERR, "Failed to create header_modify_pattern");
560 		rte_errno = errno;
561 		goto free_obj;
562 	}
563 
564 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
565 
566 	return devx_obj;
567 
568 free_obj:
569 	simple_free(devx_obj);
570 	return NULL;
571 }
572 
573 struct mlx5dr_devx_obj *
574 mlx5dr_cmd_ste_create(struct ibv_context *ctx,
575 		      struct mlx5dr_cmd_ste_create_attr *ste_attr)
576 {
577 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
578 	uint32_t in[MLX5_ST_SZ_DW(create_ste_in)] = {0};
579 	struct mlx5dr_devx_obj *devx_obj;
580 	void *attr;
581 
582 	devx_obj = simple_malloc(sizeof(*devx_obj));
583 	if (!devx_obj) {
584 		DR_LOG(ERR, "Failed to allocate memory for STE object");
585 		rte_errno = ENOMEM;
586 		return NULL;
587 	}
588 
589 	attr = MLX5_ADDR_OF(create_ste_in, in, hdr);
590 	MLX5_SET(general_obj_in_cmd_hdr,
591 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
592 	MLX5_SET(general_obj_in_cmd_hdr,
593 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STE);
594 	MLX5_SET(general_obj_in_cmd_hdr,
595 		 attr, log_obj_range, ste_attr->log_obj_range);
596 
597 	attr = MLX5_ADDR_OF(create_ste_in, in, ste);
598 	MLX5_SET(ste, attr, table_type, ste_attr->table_type);
599 
600 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
601 	if (!devx_obj->obj) {
602 		DR_LOG(ERR, "Failed to create STE");
603 		simple_free(devx_obj);
604 		rte_errno = errno;
605 		return NULL;
606 	}
607 
608 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
609 
610 	return devx_obj;
611 }
612 
/* Create a match definer general object using the SELECT format:
 * nine DW selectors and eight byte selectors pick which packet fields
 * participate in the match, and match_mask masks them. Returns NULL
 * and sets rte_errno on failure.
 */
struct mlx5dr_devx_obj *
mlx5dr_cmd_definer_create(struct ibv_context *ctx,
			  struct mlx5dr_cmd_definer_create_attr *def_attr)
{
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(create_definer_in)] = {0};
	struct mlx5dr_devx_obj *devx_obj;
	void *ptr;

	devx_obj = simple_malloc(sizeof(*devx_obj));
	if (!devx_obj) {
		DR_LOG(ERR, "Failed to allocate memory for definer object");
		rte_errno = ENOMEM;
		return NULL;
	}

	MLX5_SET(general_obj_in_cmd_hdr,
		 in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr,
		 in, obj_type, MLX5_GENERAL_OBJ_TYPE_DEFINER);

	ptr = MLX5_ADDR_OF(create_definer_in, in, definer);
	MLX5_SET(definer, ptr, format_id, MLX5_IFC_DEFINER_FORMAT_ID_SELECT);

	/* DW selectors: which dwords of the packet feed match DW 0..8. */
	MLX5_SET(definer, ptr, format_select_dw0, def_attr->dw_selector[0]);
	MLX5_SET(definer, ptr, format_select_dw1, def_attr->dw_selector[1]);
	MLX5_SET(definer, ptr, format_select_dw2, def_attr->dw_selector[2]);
	MLX5_SET(definer, ptr, format_select_dw3, def_attr->dw_selector[3]);
	MLX5_SET(definer, ptr, format_select_dw4, def_attr->dw_selector[4]);
	MLX5_SET(definer, ptr, format_select_dw5, def_attr->dw_selector[5]);
	MLX5_SET(definer, ptr, format_select_dw6, def_attr->dw_selector[6]);
	MLX5_SET(definer, ptr, format_select_dw7, def_attr->dw_selector[7]);
	MLX5_SET(definer, ptr, format_select_dw8, def_attr->dw_selector[8]);

	/* Byte selectors: which packet bytes feed match bytes 0..7. */
	MLX5_SET(definer, ptr, format_select_byte0, def_attr->byte_selector[0]);
	MLX5_SET(definer, ptr, format_select_byte1, def_attr->byte_selector[1]);
	MLX5_SET(definer, ptr, format_select_byte2, def_attr->byte_selector[2]);
	MLX5_SET(definer, ptr, format_select_byte3, def_attr->byte_selector[3]);
	MLX5_SET(definer, ptr, format_select_byte4, def_attr->byte_selector[4]);
	MLX5_SET(definer, ptr, format_select_byte5, def_attr->byte_selector[5]);
	MLX5_SET(definer, ptr, format_select_byte6, def_attr->byte_selector[6]);
	MLX5_SET(definer, ptr, format_select_byte7, def_attr->byte_selector[7]);

	ptr = MLX5_ADDR_OF(definer, ptr, match_mask);
	memcpy(ptr, def_attr->match_mask, MLX5_FLD_SZ_BYTES(definer, match_mask));

	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
	if (!devx_obj->obj) {
		DR_LOG(ERR, "Failed to create Definer");
		simple_free(devx_obj);
		rte_errno = errno;
		return NULL;
	}

	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return devx_obj;
}
671 
672 struct mlx5dr_devx_obj *
673 mlx5dr_cmd_sq_create(struct ibv_context *ctx,
674 		     struct mlx5dr_cmd_sq_create_attr *attr)
675 {
676 	uint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
677 	uint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
678 	void *sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
679 	void *wqc = MLX5_ADDR_OF(sqc, sqc, wq);
680 	struct mlx5dr_devx_obj *devx_obj;
681 
682 	devx_obj = simple_malloc(sizeof(*devx_obj));
683 	if (!devx_obj) {
684 		DR_LOG(ERR, "Failed to create SQ");
685 		rte_errno = ENOMEM;
686 		return NULL;
687 	}
688 
689 	MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
690 	MLX5_SET(sqc, sqc, cqn, attr->cqn);
691 	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
692 	MLX5_SET(sqc, sqc, non_wire, 1);
693 	MLX5_SET(sqc, sqc, ts_format, attr->ts_format);
694 	MLX5_SET(wq, wqc, wq_type, MLX5_WQ_TYPE_CYCLIC);
695 	MLX5_SET(wq, wqc, pd, attr->pdn);
696 	MLX5_SET(wq, wqc, uar_page, attr->page_id);
697 	MLX5_SET(wq, wqc, log_wq_stride, log2above(MLX5_SEND_WQE_BB));
698 	MLX5_SET(wq, wqc, log_wq_sz, attr->log_wq_sz);
699 	MLX5_SET(wq, wqc, dbr_umem_id, attr->dbr_id);
700 	MLX5_SET(wq, wqc, wq_umem_id, attr->wq_id);
701 
702 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
703 	if (!devx_obj->obj) {
704 		simple_free(devx_obj);
705 		rte_errno = errno;
706 		return NULL;
707 	}
708 
709 	devx_obj->id = MLX5_GET(create_sq_out, out, sqn);
710 
711 	return devx_obj;
712 }
713 
714 int mlx5dr_cmd_sq_modify_rdy(struct mlx5dr_devx_obj *devx_obj)
715 {
716 	uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
717 	uint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
718 	void *sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
719 	int ret;
720 
721 	MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
722 	MLX5_SET(modify_sq_in, in, sqn, devx_obj->id);
723 	MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
724 	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
725 
726 	ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out));
727 	if (ret) {
728 		DR_LOG(ERR, "Failed to modify SQ");
729 		rte_errno = errno;
730 	}
731 
732 	return ret;
733 }
734 
735 int mlx5dr_cmd_allow_other_vhca_access(struct ibv_context *ctx,
736 				       struct mlx5dr_cmd_allow_other_vhca_access_attr *attr)
737 {
738 	uint32_t out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {0};
739 	uint32_t in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {0};
740 	void *key;
741 	int ret;
742 
743 	MLX5_SET(allow_other_vhca_access_in,
744 		 in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
745 	MLX5_SET(allow_other_vhca_access_in,
746 		 in, object_type_to_be_accessed, attr->obj_type);
747 	MLX5_SET(allow_other_vhca_access_in,
748 		 in, object_id_to_be_accessed, attr->obj_id);
749 
750 	key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
751 	memcpy(key, attr->access_key, sizeof(attr->access_key));
752 
753 	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
754 	if (ret) {
755 		DR_LOG(ERR, "Failed to execute ALLOW_OTHER_VHCA_ACCESS command");
756 		rte_errno = errno;
757 		return rte_errno;
758 	}
759 
760 	return 0;
761 }
762 
763 struct mlx5dr_devx_obj *
764 mlx5dr_cmd_alias_obj_create(struct ibv_context *ctx,
765 			    struct mlx5dr_cmd_alias_obj_create_attr *alias_attr)
766 {
767 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
768 	uint32_t in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {0};
769 	struct mlx5dr_devx_obj *devx_obj;
770 	void *attr;
771 	void *key;
772 
773 	devx_obj = simple_malloc(sizeof(*devx_obj));
774 	if (!devx_obj) {
775 		DR_LOG(ERR, "Failed to allocate memory for ALIAS general object");
776 		rte_errno = ENOMEM;
777 		return NULL;
778 	}
779 
780 	attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
781 	MLX5_SET(general_obj_in_cmd_hdr,
782 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
783 	MLX5_SET(general_obj_in_cmd_hdr,
784 		 attr, obj_type, alias_attr->obj_type);
785 	MLX5_SET(general_obj_in_cmd_hdr, attr, alias_object, 1);
786 
787 	attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
788 	MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
789 	MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);
790 
791 	key = MLX5_ADDR_OF(alias_context, attr, access_key);
792 	memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));
793 
794 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
795 	if (!devx_obj->obj) {
796 		DR_LOG(ERR, "Failed to create ALIAS OBJ");
797 		simple_free(devx_obj);
798 		rte_errno = errno;
799 		return NULL;
800 	}
801 
802 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
803 
804 	return devx_obj;
805 }
806 
/* Ask firmware to execute a GTA WQE on our behalf (used when SW cannot
 * post the WQE directly). Copies the control segment, GTA control and
 * data segment(s) from attr into the command, then returns the
 * resulting CQE in *ret_cqe. Returns 0 on success, rte_errno otherwise.
 */
int mlx5dr_cmd_generate_wqe(struct ibv_context *ctx,
			    struct mlx5dr_cmd_generate_wqe_attr *attr,
			    struct mlx5_cqe64 *ret_cqe)
{
	uint32_t out[MLX5_ST_SZ_DW(generate_wqe_out)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(generate_wqe_in)] = {0};
	uint8_t status;
	void *ptr;
	int ret;

	MLX5_SET(generate_wqe_in, in, opcode, MLX5_CMD_OP_GENERATE_WQE);
	MLX5_SET(generate_wqe_in, in, pdn, attr->pdn);

	ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_ctrl);
	memcpy(ptr, attr->wqe_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_ctrl));

	ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_ctrl);
	memcpy(ptr, attr->gta_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_ctrl));

	ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_0);
	memcpy(ptr, attr->gta_data_0, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_0));

	/* Second data segment is optional. */
	if (attr->gta_data_1) {
		ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_1);
		memcpy(ptr, attr->gta_data_1, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_1));
	}

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to write GTA WQE using FW");
		rte_errno = errno;
		return rte_errno;
	}

	/* Command succeeded; also check the emulated CQE status. */
	status = MLX5_GET(generate_wqe_out, out, status);
	if (status) {
		DR_LOG(ERR, "Invalid FW CQE status %d", status);
		rte_errno = EINVAL;
		return rte_errno;
	}

	ptr = MLX5_ADDR_OF(generate_wqe_out, out, cqe_data);
	memcpy(ret_cqe, ptr, sizeof(*ret_cqe));

	return 0;
}
853 
854 int mlx5dr_cmd_query_caps(struct ibv_context *ctx,
855 			  struct mlx5dr_cmd_query_caps *caps)
856 {
857 	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
858 	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
859 	const struct flow_hw_port_info *port_info;
860 	struct ibv_device_attr_ex attr_ex;
861 	u32 res;
862 	int ret;
863 
864 	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
865 	MLX5_SET(query_hca_cap_in, in, op_mod,
866 		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
867 		 MLX5_HCA_CAP_OPMOD_GET_CUR);
868 
869 	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
870 	if (ret) {
871 		DR_LOG(ERR, "Failed to query device caps");
872 		rte_errno = errno;
873 		return rte_errno;
874 	}
875 
876 	caps->wqe_based_update =
877 		MLX5_GET(query_hca_cap_out, out,
878 			 capability.cmd_hca_cap.wqe_based_flow_table_update_cap);
879 
880 	caps->eswitch_manager = MLX5_GET(query_hca_cap_out, out,
881 					 capability.cmd_hca_cap.eswitch_manager);
882 
883 	caps->flex_protocols = MLX5_GET(query_hca_cap_out, out,
884 					capability.cmd_hca_cap.flex_parser_protocols);
885 
886 	caps->log_header_modify_argument_granularity =
887 		MLX5_GET(query_hca_cap_out, out,
888 			 capability.cmd_hca_cap.log_header_modify_argument_granularity);
889 
890 	caps->log_header_modify_argument_granularity -=
891 			MLX5_GET(query_hca_cap_out, out,
892 				 capability.cmd_hca_cap.
893 				 log_header_modify_argument_granularity_offset);
894 
895 	caps->log_header_modify_argument_max_alloc =
896 		MLX5_GET(query_hca_cap_out, out,
897 			 capability.cmd_hca_cap.log_header_modify_argument_max_alloc);
898 
899 	caps->definer_format_sup =
900 		MLX5_GET64(query_hca_cap_out, out,
901 			   capability.cmd_hca_cap.match_definer_format_supported);
902 
903 	caps->vhca_id = MLX5_GET(query_hca_cap_out, out,
904 				 capability.cmd_hca_cap.vhca_id);
905 
906 	caps->sq_ts_format = MLX5_GET(query_hca_cap_out, out,
907 				      capability.cmd_hca_cap.sq_ts_format);
908 
909 	caps->ipsec_offload = MLX5_GET(query_hca_cap_out, out,
910 				      capability.cmd_hca_cap.ipsec_offload);
911 
912 	MLX5_SET(query_hca_cap_in, in, op_mod,
913 		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 |
914 		 MLX5_HCA_CAP_OPMOD_GET_CUR);
915 
916 	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
917 	if (ret) {
918 		DR_LOG(ERR, "Failed to query device caps");
919 		rte_errno = errno;
920 		return rte_errno;
921 	}
922 
923 	caps->full_dw_jumbo_support = MLX5_GET(query_hca_cap_out, out,
924 					       capability.cmd_hca_cap_2.
925 					       format_select_dw_8_6_ext);
926 
927 	caps->format_select_gtpu_dw_0 = MLX5_GET(query_hca_cap_out, out,
928 						 capability.cmd_hca_cap_2.
929 						 format_select_dw_gtpu_dw_0);
930 
931 	caps->format_select_gtpu_dw_1 = MLX5_GET(query_hca_cap_out, out,
932 						 capability.cmd_hca_cap_2.
933 						 format_select_dw_gtpu_dw_1);
934 
935 	caps->format_select_gtpu_dw_2 = MLX5_GET(query_hca_cap_out, out,
936 						 capability.cmd_hca_cap_2.
937 						 format_select_dw_gtpu_dw_2);
938 
939 	caps->format_select_gtpu_ext_dw_0 = MLX5_GET(query_hca_cap_out, out,
940 						     capability.cmd_hca_cap_2.
941 						     format_select_dw_gtpu_first_ext_dw_0);
942 
943 	caps->supp_type_gen_wqe = MLX5_GET(query_hca_cap_out, out,
944 					   capability.cmd_hca_cap_2.
945 					   generate_wqe_type);
946 
947 	/* check cross-VHCA support in cap2 */
948 	res =
949 	MLX5_GET(query_hca_cap_out, out,
950 		capability.cmd_hca_cap_2.cross_vhca_object_to_object_supported);
951 
952 	caps->cross_vhca_resources = (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_STC_TO_TIR) &&
953 				     (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_STC_TO_FT) &&
954 				     (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_FT_TO_RTC);
955 
956 	res =
957 	MLX5_GET(query_hca_cap_out, out,
958 		capability.cmd_hca_cap_2.allowed_object_for_other_vhca_access);
959 
960 	caps->cross_vhca_resources &= (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_TIR) &&
961 				      (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_FT) &&
962 				      (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_RTC);
963 
964 	MLX5_SET(query_hca_cap_in, in, op_mod,
965 		 MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE |
966 		 MLX5_HCA_CAP_OPMOD_GET_CUR);
967 
968 	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
969 	if (ret) {
970 		DR_LOG(ERR, "Failed to query flow table caps");
971 		rte_errno = errno;
972 		return rte_errno;
973 	}
974 
975 	caps->nic_ft.max_level = MLX5_GET(query_hca_cap_out, out,
976 					  capability.flow_table_nic_cap.
977 					  flow_table_properties_nic_receive.max_ft_level);
978 
979 	caps->nic_ft.reparse = MLX5_GET(query_hca_cap_out, out,
980 					capability.flow_table_nic_cap.
981 					flow_table_properties_nic_receive.reparse);
982 
983 	/* check cross-VHCA support in flow table properties */
984 	res =
985 	MLX5_GET(query_hca_cap_out, out,
986 		capability.flow_table_nic_cap.flow_table_properties_nic_receive.cross_vhca_object);
987 	caps->cross_vhca_resources &= res;
988 
989 	if (caps->wqe_based_update) {
990 		MLX5_SET(query_hca_cap_in, in, op_mod,
991 			 MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE |
992 			 MLX5_HCA_CAP_OPMOD_GET_CUR);
993 
994 		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
995 		if (ret) {
996 			DR_LOG(ERR, "Failed to query WQE based FT caps");
997 			rte_errno = errno;
998 			return rte_errno;
999 		}
1000 
1001 		caps->rtc_reparse_mode = MLX5_GET(query_hca_cap_out, out,
1002 						  capability.wqe_based_flow_table_cap.
1003 						  rtc_reparse_mode);
1004 
1005 		caps->ste_format = MLX5_GET(query_hca_cap_out, out,
1006 					    capability.wqe_based_flow_table_cap.
1007 					    ste_format);
1008 
1009 		caps->rtc_index_mode = MLX5_GET(query_hca_cap_out, out,
1010 						capability.wqe_based_flow_table_cap.
1011 						rtc_index_mode);
1012 
1013 		caps->rtc_log_depth_max = MLX5_GET(query_hca_cap_out, out,
1014 						   capability.wqe_based_flow_table_cap.
1015 						   rtc_log_depth_max);
1016 
1017 		caps->ste_alloc_log_max = MLX5_GET(query_hca_cap_out, out,
1018 						   capability.wqe_based_flow_table_cap.
1019 						   ste_alloc_log_max);
1020 
1021 		caps->ste_alloc_log_gran = MLX5_GET(query_hca_cap_out, out,
1022 						    capability.wqe_based_flow_table_cap.
1023 						    ste_alloc_log_granularity);
1024 
1025 		caps->trivial_match_definer = MLX5_GET(query_hca_cap_out, out,
1026 						       capability.wqe_based_flow_table_cap.
1027 						       trivial_match_definer);
1028 
1029 		caps->stc_alloc_log_max = MLX5_GET(query_hca_cap_out, out,
1030 						   capability.wqe_based_flow_table_cap.
1031 						   stc_alloc_log_max);
1032 
1033 		caps->stc_alloc_log_gran = MLX5_GET(query_hca_cap_out, out,
1034 						    capability.wqe_based_flow_table_cap.
1035 						    stc_alloc_log_granularity);
1036 
1037 		caps->rtc_hash_split_table = MLX5_GET(query_hca_cap_out, out,
1038 						      capability.wqe_based_flow_table_cap.
1039 						      rtc_hash_split_table);
1040 
1041 		caps->rtc_linear_lookup_table = MLX5_GET(query_hca_cap_out, out,
1042 							 capability.wqe_based_flow_table_cap.
1043 							 rtc_linear_lookup_table);
1044 
1045 		caps->access_index_mode = MLX5_GET(query_hca_cap_out, out,
1046 						   capability.wqe_based_flow_table_cap.
1047 						   access_index_mode);
1048 
1049 		caps->linear_match_definer = MLX5_GET(query_hca_cap_out, out,
1050 						      capability.wqe_based_flow_table_cap.
1051 						      linear_match_definer_reg_c3);
1052 
1053 		caps->rtc_max_hash_def_gen_wqe = MLX5_GET(query_hca_cap_out, out,
1054 							  capability.wqe_based_flow_table_cap.
1055 							  rtc_max_num_hash_definer_gen_wqe);
1056 
1057 		caps->supp_ste_format_gen_wqe = MLX5_GET(query_hca_cap_out, out,
1058 							 capability.wqe_based_flow_table_cap.
1059 							 ste_format_gen_wqe);
1060 	}
1061 
1062 	if (caps->eswitch_manager) {
1063 		MLX5_SET(query_hca_cap_in, in, op_mod,
1064 			 MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE |
1065 			 MLX5_HCA_CAP_OPMOD_GET_CUR);
1066 
1067 		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
1068 		if (ret) {
1069 			DR_LOG(ERR, "Failed to query flow table esw caps");
1070 			rte_errno = errno;
1071 			return rte_errno;
1072 		}
1073 
1074 		caps->fdb_ft.max_level = MLX5_GET(query_hca_cap_out, out,
1075 						  capability.flow_table_nic_cap.
1076 						  flow_table_properties_nic_receive.max_ft_level);
1077 
1078 		caps->fdb_ft.reparse = MLX5_GET(query_hca_cap_out, out,
1079 						capability.flow_table_nic_cap.
1080 						flow_table_properties_nic_receive.reparse);
1081 
1082 		MLX5_SET(query_hca_cap_in, in, op_mod,
1083 			 MLX5_SET_HCA_CAP_OP_MOD_ESW | MLX5_HCA_CAP_OPMOD_GET_CUR);
1084 
1085 		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
1086 		if (ret) {
1087 			DR_LOG(ERR, "Query eswitch capabilities failed %d\n", ret);
1088 			rte_errno = errno;
1089 			return rte_errno;
1090 		}
1091 
1092 		if (MLX5_GET(query_hca_cap_out, out,
1093 			     capability.esw_cap.esw_manager_vport_number_valid))
1094 			caps->eswitch_manager_vport_number =
1095 			MLX5_GET(query_hca_cap_out, out,
1096 				 capability.esw_cap.esw_manager_vport_number);
1097 	}
1098 
1099 	ret = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
1100 	if (ret) {
1101 		DR_LOG(ERR, "Failed to query device attributes");
1102 		rte_errno = ret;
1103 		return rte_errno;
1104 	}
1105 
1106 	strlcpy(caps->fw_ver, attr_ex.orig_attr.fw_ver, sizeof(caps->fw_ver));
1107 
1108 	port_info = flow_hw_get_wire_port(ctx);
1109 	if (port_info) {
1110 		caps->wire_regc = port_info->regc_value;
1111 		caps->wire_regc_mask = port_info->regc_mask;
1112 	} else {
1113 		DR_LOG(INFO, "Failed to query wire port regc value");
1114 	}
1115 
1116 	return ret;
1117 }
1118 
1119 int mlx5dr_cmd_query_ib_port(struct ibv_context *ctx,
1120 			     struct mlx5dr_cmd_query_vport_caps *vport_caps,
1121 			     uint32_t port_num)
1122 {
1123 	struct mlx5_port_info port_info = {0};
1124 	uint32_t flags;
1125 	int ret;
1126 
1127 	flags = MLX5_PORT_QUERY_VPORT | MLX5_PORT_QUERY_ESW_OWNER_VHCA_ID;
1128 
1129 	ret = mlx5_glue->devx_port_query(ctx, port_num, &port_info);
1130 	/* Check if query succeed and vport is enabled */
1131 	if (ret || (port_info.query_flags & flags) != flags) {
1132 		rte_errno = ENOTSUP;
1133 		return rte_errno;
1134 	}
1135 
1136 	vport_caps->vport_num = port_info.vport_id;
1137 	vport_caps->esw_owner_vhca_id = port_info.esw_owner_vhca_id;
1138 
1139 	if (port_info.query_flags & MLX5_PORT_QUERY_REG_C0) {
1140 		vport_caps->metadata_c = port_info.vport_meta_tag;
1141 		vport_caps->metadata_c_mask = port_info.vport_meta_mask;
1142 	}
1143 
1144 	return 0;
1145 }
1146