xref: /dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c (revision 004edb48d601ee3c7b1b46a5dbd15fbf0ab1b87e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2022 NVIDIA Corporation & Affiliates
3  */
4 
5 #include "mlx5dr_internal.h"
6 
7 int mlx5dr_cmd_destroy_obj(struct mlx5dr_devx_obj *devx_obj)
8 {
9 	int ret;
10 
11 	ret = mlx5_glue->devx_obj_destroy(devx_obj->obj);
12 	simple_free(devx_obj);
13 
14 	return ret;
15 }
16 
17 struct mlx5dr_devx_obj *
18 mlx5dr_cmd_flow_table_create(struct ibv_context *ctx,
19 			     struct mlx5dr_cmd_ft_create_attr *ft_attr)
20 {
21 	uint32_t out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
22 	uint32_t in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
23 	struct mlx5dr_devx_obj *devx_obj;
24 	void *ft_ctx;
25 
26 	devx_obj = simple_malloc(sizeof(*devx_obj));
27 	if (!devx_obj) {
28 		DR_LOG(ERR, "Failed to allocate memory for flow table object");
29 		rte_errno = ENOMEM;
30 		return NULL;
31 	}
32 
33 	MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
34 	MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type);
35 
36 	ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
37 	MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
38 	MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid);
39 
40 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
41 	if (!devx_obj->obj) {
42 		DR_LOG(ERR, "Failed to create FT");
43 		simple_free(devx_obj);
44 		rte_errno = errno;
45 		return NULL;
46 	}
47 
48 	devx_obj->id = MLX5_GET(create_flow_table_out, out, table_id);
49 
50 	return devx_obj;
51 }
52 
53 int
54 mlx5dr_cmd_flow_table_modify(struct mlx5dr_devx_obj *devx_obj,
55 			     struct mlx5dr_cmd_ft_modify_attr *ft_attr)
56 {
57 	uint32_t out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
58 	uint32_t in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
59 	void *ft_ctx;
60 	int ret;
61 
62 	MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE);
63 	MLX5_SET(modify_flow_table_in, in, table_type, ft_attr->type);
64 	MLX5_SET(modify_flow_table_in, in, modify_field_select, ft_attr->modify_fs);
65 	MLX5_SET(modify_flow_table_in, in, table_id, devx_obj->id);
66 
67 	ft_ctx = MLX5_ADDR_OF(modify_flow_table_in, in, flow_table_context);
68 
69 	MLX5_SET(flow_table_context, ft_ctx, table_miss_action, ft_attr->table_miss_action);
70 	MLX5_SET(flow_table_context, ft_ctx, table_miss_id, ft_attr->table_miss_id);
71 	MLX5_SET(flow_table_context, ft_ctx, rtc_id_0, ft_attr->rtc_id_0);
72 	MLX5_SET(flow_table_context, ft_ctx, rtc_id_1, ft_attr->rtc_id_1);
73 
74 	ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out));
75 	if (ret) {
76 		DR_LOG(ERR, "Failed to modify FT");
77 		rte_errno = errno;
78 	}
79 
80 	return ret;
81 }
82 
83 int
84 mlx5dr_cmd_flow_table_query(struct mlx5dr_devx_obj *devx_obj,
85 			    struct mlx5dr_cmd_ft_query_attr *ft_attr,
86 			    uint64_t *icm_addr_0, uint64_t *icm_addr_1)
87 {
88 	uint32_t out[MLX5_ST_SZ_DW(query_flow_table_out)] = {0};
89 	uint32_t in[MLX5_ST_SZ_DW(query_flow_table_in)] = {0};
90 	void *ft_ctx;
91 	int ret;
92 
93 	MLX5_SET(query_flow_table_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_TABLE);
94 	MLX5_SET(query_flow_table_in, in, table_type, ft_attr->type);
95 	MLX5_SET(query_flow_table_in, in, table_id, devx_obj->id);
96 
97 	ret = mlx5_glue->devx_obj_query(devx_obj->obj, in, sizeof(in), out, sizeof(out));
98 	if (ret) {
99 		DR_LOG(ERR, "Failed to query FT");
100 		rte_errno = errno;
101 		return ret;
102 	}
103 
104 	ft_ctx = MLX5_ADDR_OF(query_flow_table_out, out, flow_table_context);
105 	*icm_addr_0 = MLX5_GET64(flow_table_context, ft_ctx, sw_owner_icm_root_0);
106 	*icm_addr_1 = MLX5_GET64(flow_table_context, ft_ctx, sw_owner_icm_root_1);
107 
108 	return ret;
109 }
110 
111 static struct mlx5dr_devx_obj *
112 mlx5dr_cmd_flow_group_create(struct ibv_context *ctx,
113 			     struct mlx5dr_cmd_fg_attr *fg_attr)
114 {
115 	uint32_t out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
116 	uint32_t in[MLX5_ST_SZ_DW(create_flow_group_in)] = {0};
117 	struct mlx5dr_devx_obj *devx_obj;
118 
119 	devx_obj = simple_malloc(sizeof(*devx_obj));
120 	if (!devx_obj) {
121 		DR_LOG(ERR, "Failed to allocate memory for flow group object");
122 		rte_errno = ENOMEM;
123 		return NULL;
124 	}
125 
126 	MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
127 	MLX5_SET(create_flow_group_in, in, table_type, fg_attr->table_type);
128 	MLX5_SET(create_flow_group_in, in, table_id, fg_attr->table_id);
129 
130 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
131 	if (!devx_obj->obj) {
132 		DR_LOG(ERR, "Failed to create Flow group");
133 		simple_free(devx_obj);
134 		rte_errno = errno;
135 		return NULL;
136 	}
137 
138 	devx_obj->id = MLX5_GET(create_flow_group_out, out, group_id);
139 
140 	return devx_obj;
141 }
142 
143 static struct mlx5dr_devx_obj *
144 mlx5dr_cmd_set_vport_fte(struct ibv_context *ctx,
145 			 uint32_t table_type,
146 			 uint32_t table_id,
147 			 uint32_t group_id,
148 			 uint32_t vport_id)
149 {
150 	uint32_t in[MLX5_ST_SZ_DW(set_fte_in) + MLX5_ST_SZ_DW(dest_format)] = {0};
151 	uint32_t out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
152 	struct mlx5dr_devx_obj *devx_obj;
153 	void *in_flow_context;
154 	void *in_dests;
155 
156 	devx_obj = simple_malloc(sizeof(*devx_obj));
157 	if (!devx_obj) {
158 		DR_LOG(ERR, "Failed to allocate memory for fte object");
159 		rte_errno = ENOMEM;
160 		return NULL;
161 	}
162 
163 	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
164 	MLX5_SET(set_fte_in, in, table_type, table_type);
165 	MLX5_SET(set_fte_in, in, table_id, table_id);
166 
167 	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
168 	MLX5_SET(flow_context, in_flow_context, group_id, group_id);
169 	MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
170 	MLX5_SET(flow_context, in_flow_context, action, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
171 
172 	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
173 	MLX5_SET(dest_format, in_dests, destination_type,
174 		 MLX5_FLOW_DESTINATION_TYPE_VPORT);
175 	MLX5_SET(dest_format, in_dests, destination_id, vport_id);
176 
177 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
178 	if (!devx_obj->obj) {
179 		DR_LOG(ERR, "Failed to create FTE");
180 		simple_free(devx_obj);
181 		rte_errno = errno;
182 		return NULL;
183 	}
184 
185 	return devx_obj;
186 }
187 
/* Tear down a forward (miss) table triple in reverse creation order:
 * entry first, then its group, then the table itself.
 */
void mlx5dr_cmd_miss_ft_destroy(struct mlx5dr_cmd_forward_tbl *tbl)
{
	mlx5dr_cmd_destroy_obj(tbl->fte);
	mlx5dr_cmd_destroy_obj(tbl->fg);
	mlx5dr_cmd_destroy_obj(tbl->ft);
}
194 
195 struct mlx5dr_cmd_forward_tbl *
196 mlx5dr_cmd_miss_ft_create(struct ibv_context *ctx,
197 			  struct mlx5dr_cmd_ft_create_attr *ft_attr,
198 			  uint32_t vport)
199 {
200 	struct mlx5dr_cmd_fg_attr fg_attr = {0};
201 	struct mlx5dr_cmd_forward_tbl *tbl;
202 
203 	tbl = simple_calloc(1, sizeof(*tbl));
204 	if (!tbl) {
205 		DR_LOG(ERR, "Failed to allocate memory for forward default");
206 		rte_errno = ENOMEM;
207 		return NULL;
208 	}
209 
210 	tbl->ft = mlx5dr_cmd_flow_table_create(ctx, ft_attr);
211 	if (!tbl->ft) {
212 		DR_LOG(ERR, "Failed to create FT for miss-table");
213 		goto free_tbl;
214 	}
215 
216 	fg_attr.table_id = tbl->ft->id;
217 	fg_attr.table_type = ft_attr->type;
218 
219 	tbl->fg = mlx5dr_cmd_flow_group_create(ctx, &fg_attr);
220 	if (!tbl->fg) {
221 		DR_LOG(ERR, "Failed to create FG for miss-table");
222 		goto free_ft;
223 	}
224 
225 	tbl->fte = mlx5dr_cmd_set_vport_fte(ctx, ft_attr->type, tbl->ft->id, tbl->fg->id, vport);
226 	if (!tbl->fte) {
227 		DR_LOG(ERR, "Failed to create FTE for miss-table");
228 		goto free_fg;
229 	}
230 	return tbl;
231 
232 free_fg:
233 	mlx5dr_cmd_destroy_obj(tbl->fg);
234 free_ft:
235 	mlx5dr_cmd_destroy_obj(tbl->ft);
236 free_tbl:
237 	simple_free(tbl);
238 	return NULL;
239 }
240 
/* Prepare a FT modify attribute so the table's miss path jumps to the
 * proper "end" table: the context's default-miss FT for FDB tables, or
 * the aliased end-FT when the context shares a GVMI.
 * Does nothing for non-FDB tables when no shared GVMI is in use.
 */
void mlx5dr_cmd_set_attr_connect_miss_tbl(struct mlx5dr_context *ctx,
					  uint32_t fw_ft_type,
					  enum mlx5dr_table_type type,
					  struct mlx5dr_cmd_ft_modify_attr *ft_attr)
{
	struct mlx5dr_devx_obj *default_miss_tbl;

	/* Only FDB tables or shared-GVMI contexts need their miss connected. */
	if (type != MLX5DR_TABLE_TYPE_FDB && !mlx5dr_context_shared_gvmi_used(ctx))
		return;

	ft_attr->modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
	ft_attr->type = fw_ft_type;
	ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;

	if (type == MLX5DR_TABLE_TYPE_FDB) {
		/* default_miss is expected to exist for FDB; assert is a
		 * debug-build guard against broken context initialization.
		 */
		default_miss_tbl = ctx->common_res[type].default_miss->ft;
		if (!default_miss_tbl) {
			assert(false);
			return;
		}
		ft_attr->table_miss_id = default_miss_tbl->id;
	} else {
		ft_attr->table_miss_id = ctx->gvmi_res[type].aliased_end_ft->id;
	}
}
266 
267 struct mlx5dr_devx_obj *
268 mlx5dr_cmd_rtc_create(struct ibv_context *ctx,
269 		      struct mlx5dr_cmd_rtc_create_attr *rtc_attr)
270 {
271 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
272 	uint32_t in[MLX5_ST_SZ_DW(create_rtc_in)] = {0};
273 	struct mlx5dr_devx_obj *devx_obj;
274 	void *attr;
275 
276 	devx_obj = simple_malloc(sizeof(*devx_obj));
277 	if (!devx_obj) {
278 		DR_LOG(ERR, "Failed to allocate memory for RTC object");
279 		rte_errno = ENOMEM;
280 		return NULL;
281 	}
282 
283 	attr = MLX5_ADDR_OF(create_rtc_in, in, hdr);
284 	MLX5_SET(general_obj_in_cmd_hdr,
285 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
286 	MLX5_SET(general_obj_in_cmd_hdr,
287 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_RTC);
288 
289 	attr = MLX5_ADDR_OF(create_rtc_in, in, rtc);
290 	MLX5_SET(rtc, attr, ste_format_0, rtc_attr->is_frst_jumbo ?
291 		MLX5_IFC_RTC_STE_FORMAT_11DW :
292 		MLX5_IFC_RTC_STE_FORMAT_8DW);
293 
294 	if (rtc_attr->is_scnd_range) {
295 		MLX5_SET(rtc, attr, ste_format_1, MLX5_IFC_RTC_STE_FORMAT_RANGE);
296 		MLX5_SET(rtc, attr, num_match_ste, 2);
297 	}
298 
299 	MLX5_SET(rtc, attr, pd, rtc_attr->pd);
300 	MLX5_SET(rtc, attr, update_method, rtc_attr->fw_gen_wqe);
301 	MLX5_SET(rtc, attr, update_index_mode, rtc_attr->update_index_mode);
302 	MLX5_SET(rtc, attr, access_index_mode, rtc_attr->access_index_mode);
303 	MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer);
304 	MLX5_SET(rtc, attr, log_depth, rtc_attr->log_depth);
305 	MLX5_SET(rtc, attr, log_hash_size, rtc_attr->log_size);
306 	MLX5_SET(rtc, attr, table_type, rtc_attr->table_type);
307 	MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer);
308 	MLX5_SET(rtc, attr, match_definer_0, rtc_attr->match_definer_0);
309 	MLX5_SET(rtc, attr, match_definer_1, rtc_attr->match_definer_1);
310 	MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base);
311 	MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base);
312 	MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset);
313 	MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id);
314 	MLX5_SET(rtc, attr, reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS);
315 
316 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
317 	if (!devx_obj->obj) {
318 		DR_LOG(ERR, "Failed to create RTC");
319 		simple_free(devx_obj);
320 		rte_errno = errno;
321 		return NULL;
322 	}
323 
324 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
325 
326 	return devx_obj;
327 }
328 
329 struct mlx5dr_devx_obj *
330 mlx5dr_cmd_stc_create(struct ibv_context *ctx,
331 		      struct mlx5dr_cmd_stc_create_attr *stc_attr)
332 {
333 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
334 	uint32_t in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
335 	struct mlx5dr_devx_obj *devx_obj;
336 	void *attr;
337 
338 	devx_obj = simple_malloc(sizeof(*devx_obj));
339 	if (!devx_obj) {
340 		DR_LOG(ERR, "Failed to allocate memory for STC object");
341 		rte_errno = ENOMEM;
342 		return NULL;
343 	}
344 
345 	attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
346 	MLX5_SET(general_obj_in_cmd_hdr,
347 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
348 	MLX5_SET(general_obj_in_cmd_hdr,
349 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STC);
350 	MLX5_SET(general_obj_in_cmd_hdr,
351 		 attr, log_obj_range, stc_attr->log_obj_range);
352 
353 	attr = MLX5_ADDR_OF(create_stc_in, in, stc);
354 	MLX5_SET(stc, attr, table_type, stc_attr->table_type);
355 
356 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
357 	if (!devx_obj->obj) {
358 		DR_LOG(ERR, "Failed to create STC");
359 		simple_free(devx_obj);
360 		rte_errno = errno;
361 		return NULL;
362 	}
363 
364 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
365 
366 	return devx_obj;
367 }
368 
/* Fill the action-type specific parameter section (stc_param) of an STC
 * modify command from @stc_attr.
 * Returns 0 on success; EINVAL (via rte_errno) for an unsupported type.
 */
static int
mlx5dr_cmd_stc_modify_set_stc_param(struct mlx5dr_cmd_stc_modify_attr *stc_attr,
				    void *stc_parm)
{
	switch (stc_attr->action_type) {
	case MLX5_IFC_STC_ACTION_TYPE_COUNTER:
		MLX5_SET(stc_ste_param_flow_counter, stc_parm, flow_counter_id, stc_attr->id);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR:
		MLX5_SET(stc_ste_param_tir, stc_parm, tirn, stc_attr->dest_tir_num);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT:
		MLX5_SET(stc_ste_param_table, stc_parm, table_id, stc_attr->dest_table_id);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST:
		/* Accelerated modify-header: pattern + argument object pair. */
		MLX5_SET(stc_ste_param_header_modify_list, stc_parm,
			 header_modify_pattern_id, stc_attr->modify_header.pattern_id);
		MLX5_SET(stc_ste_param_header_modify_list, stc_parm,
			 header_modify_argument_id, stc_attr->modify_header.arg_id);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE:
		MLX5_SET(stc_ste_param_remove, stc_parm, action_type,
			 MLX5_MODIFICATION_TYPE_REMOVE);
		MLX5_SET(stc_ste_param_remove, stc_parm, decap,
			 stc_attr->remove_header.decap);
		MLX5_SET(stc_ste_param_remove, stc_parm, remove_start_anchor,
			 stc_attr->remove_header.start_anchor);
		MLX5_SET(stc_ste_param_remove, stc_parm, remove_end_anchor,
			 stc_attr->remove_header.end_anchor);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT:
		MLX5_SET(stc_ste_param_insert, stc_parm, action_type,
			 MLX5_MODIFICATION_TYPE_INSERT);
		MLX5_SET(stc_ste_param_insert, stc_parm, encap,
			 stc_attr->insert_header.encap);
		MLX5_SET(stc_ste_param_insert, stc_parm, inline_data,
			 stc_attr->insert_header.is_inline);
		MLX5_SET(stc_ste_param_insert, stc_parm, insert_anchor,
			 stc_attr->insert_header.insert_anchor);
		/* HW gets the next 2 sizes in words */
		MLX5_SET(stc_ste_param_insert, stc_parm, insert_size,
			 stc_attr->insert_header.header_size / 2);
		MLX5_SET(stc_ste_param_insert, stc_parm, insert_offset,
			 stc_attr->insert_header.insert_offset / 2);
		MLX5_SET(stc_ste_param_insert, stc_parm, insert_argument,
			 stc_attr->insert_header.arg_id);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_COPY:
	case MLX5_IFC_STC_ACTION_TYPE_SET:
	case MLX5_IFC_STC_ACTION_TYPE_ADD:
		/* Single modify action: the raw 64-bit action data is copied
		 * as-is (already in big-endian layout).
		 */
		*(__be64 *)stc_parm = stc_attr->modify_action.data;
		break;
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT:
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK:
		MLX5_SET(stc_ste_param_vport, stc_parm, vport_number,
			 stc_attr->vport.vport_num);
		MLX5_SET(stc_ste_param_vport, stc_parm, eswitch_owner_vhca_id,
			 stc_attr->vport.esw_owner_vhca_id);
		MLX5_SET(stc_ste_param_vport, stc_parm, eswitch_owner_vhca_id_valid, 1);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_DROP:
	case MLX5_IFC_STC_ACTION_TYPE_NOP:
	case MLX5_IFC_STC_ACTION_TYPE_TAG:
	case MLX5_IFC_STC_ACTION_TYPE_ALLOW:
		/* No extra parameters for these action types. */
		break;
	case MLX5_IFC_STC_ACTION_TYPE_ASO:
		MLX5_SET(stc_ste_param_execute_aso, stc_parm, aso_object_id,
			 stc_attr->aso.devx_obj_id);
		MLX5_SET(stc_ste_param_execute_aso, stc_parm, return_reg_id,
			 stc_attr->aso.return_reg_id);
		MLX5_SET(stc_ste_param_execute_aso, stc_parm, aso_type,
			 stc_attr->aso.aso_type);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE:
		MLX5_SET(stc_ste_param_ste_table, stc_parm, ste_obj_id,
			 stc_attr->ste_table.ste_obj_id);
		MLX5_SET(stc_ste_param_ste_table, stc_parm, match_definer_id,
			 stc_attr->ste_table.match_definer_id);
		MLX5_SET(stc_ste_param_ste_table, stc_parm, log_hash_size,
			 stc_attr->ste_table.log_hash_size);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS:
		MLX5_SET(stc_ste_param_remove_words, stc_parm, action_type,
			 MLX5_MODIFICATION_TYPE_REMOVE_WORDS);
		MLX5_SET(stc_ste_param_remove_words, stc_parm, remove_start_anchor,
			 stc_attr->remove_words.start_anchor);
		MLX5_SET(stc_ste_param_remove_words, stc_parm,
			 remove_size, stc_attr->remove_words.num_of_words);
		break;
	default:
		DR_LOG(ERR, "Not supported type %d", stc_attr->action_type);
		rte_errno = EINVAL;
		return rte_errno;
	}
	return 0;
}
465 
/* Modify one STC entry (at stc_attr->stc_offset within the STC range) to
 * carry a new action: sets the action type/offset and the per-type
 * parameters via mlx5dr_cmd_stc_modify_set_stc_param().
 * Returns 0 on success, non-zero with rte_errno set on failure.
 *
 * NOTE(review): the create_stc_in layout is intentionally reused for the
 * MODIFY command; opcode/obj_type are set through 'attr' (hdr) while
 * obj_id/obj_offset are set through 'in' directly — this assumes hdr sits
 * at the start of create_stc_in. TODO confirm against the PRM layout.
 */
int
mlx5dr_cmd_stc_modify(struct mlx5dr_devx_obj *devx_obj,
		      struct mlx5dr_cmd_stc_modify_attr *stc_attr)
{
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
	void *stc_parm;
	void *attr;
	int ret;

	attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, devx_obj->id);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_offset, stc_attr->stc_offset);

	attr = MLX5_ADDR_OF(create_stc_in, in, stc);
	MLX5_SET(stc, attr, ste_action_offset, stc_attr->action_offset);
	MLX5_SET(stc, attr, action_type, stc_attr->action_type);
	/* Only the STC content is being replaced by this modify. */
	MLX5_SET64(stc, attr, modify_field_select,
		   MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC);

	/* Set destination TIRN, TAG, FT ID, STE ID */
	stc_parm = MLX5_ADDR_OF(stc, attr, stc_param);
	ret = mlx5dr_cmd_stc_modify_set_stc_param(stc_attr, stc_parm);
	if (ret)
		return ret;

	ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to modify STC FW action_type %d", stc_attr->action_type);
		rte_errno = errno;
	}

	return ret;
}
504 
505 struct mlx5dr_devx_obj *
506 mlx5dr_cmd_arg_create(struct ibv_context *ctx,
507 		      uint16_t log_obj_range,
508 		      uint32_t pd)
509 {
510 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
511 	uint32_t in[MLX5_ST_SZ_DW(create_arg_in)] = {0};
512 	struct mlx5dr_devx_obj *devx_obj;
513 	void *attr;
514 
515 	devx_obj = simple_malloc(sizeof(*devx_obj));
516 	if (!devx_obj) {
517 		DR_LOG(ERR, "Failed to allocate memory for ARG object");
518 		rte_errno = ENOMEM;
519 		return NULL;
520 	}
521 
522 	attr = MLX5_ADDR_OF(create_arg_in, in, hdr);
523 	MLX5_SET(general_obj_in_cmd_hdr,
524 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
525 	MLX5_SET(general_obj_in_cmd_hdr,
526 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_ARG);
527 	MLX5_SET(general_obj_in_cmd_hdr,
528 		 attr, log_obj_range, log_obj_range);
529 
530 	attr = MLX5_ADDR_OF(create_arg_in, in, arg);
531 	MLX5_SET(arg, attr, access_pd, pd);
532 
533 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
534 	if (!devx_obj->obj) {
535 		DR_LOG(ERR, "Failed to create ARG");
536 		simple_free(devx_obj);
537 		rte_errno = errno;
538 		return NULL;
539 	}
540 
541 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
542 
543 	return devx_obj;
544 }
545 
546 struct mlx5dr_devx_obj *
547 mlx5dr_cmd_header_modify_pattern_create(struct ibv_context *ctx,
548 					uint32_t pattern_length,
549 					uint8_t *actions)
550 {
551 	uint32_t in[MLX5_ST_SZ_DW(create_header_modify_pattern_in)] = {0};
552 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
553 	struct mlx5dr_devx_obj *devx_obj;
554 	uint64_t *pattern_data;
555 	int num_of_actions;
556 	void *pattern;
557 	void *attr;
558 	int i;
559 
560 	if (pattern_length > MAX_ACTIONS_DATA_IN_HEADER_MODIFY) {
561 		DR_LOG(ERR, "Pattern length %d exceeds limit %d",
562 			pattern_length, MAX_ACTIONS_DATA_IN_HEADER_MODIFY);
563 		rte_errno = EINVAL;
564 		return NULL;
565 	}
566 
567 	devx_obj = simple_malloc(sizeof(*devx_obj));
568 	if (!devx_obj) {
569 		DR_LOG(ERR, "Failed to allocate memory for header_modify_pattern object");
570 		rte_errno = ENOMEM;
571 		return NULL;
572 	}
573 
574 	attr = MLX5_ADDR_OF(create_header_modify_pattern_in, in, hdr);
575 	MLX5_SET(general_obj_in_cmd_hdr,
576 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
577 	MLX5_SET(general_obj_in_cmd_hdr,
578 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_MODIFY_HEADER_PATTERN);
579 
580 	pattern = MLX5_ADDR_OF(create_header_modify_pattern_in, in, pattern);
581 	/* Pattern_length is in ddwords */
582 	MLX5_SET(header_modify_pattern_in, pattern, pattern_length, pattern_length / (2 * DW_SIZE));
583 
584 	pattern_data = (uint64_t *)MLX5_ADDR_OF(header_modify_pattern_in, pattern, pattern_data);
585 	memcpy(pattern_data, actions, pattern_length);
586 
587 	num_of_actions = pattern_length / MLX5DR_MODIFY_ACTION_SIZE;
588 	for (i = 0; i < num_of_actions; i++) {
589 		int type;
590 
591 		type = MLX5_GET(set_action_in, &pattern_data[i], action_type);
592 		if (type != MLX5_MODIFICATION_TYPE_COPY)
593 			/* Action typ-copy use all bytes for control */
594 			MLX5_SET(set_action_in, &pattern_data[i], data, 0);
595 	}
596 
597 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
598 	if (!devx_obj->obj) {
599 		DR_LOG(ERR, "Failed to create header_modify_pattern");
600 		rte_errno = errno;
601 		goto free_obj;
602 	}
603 
604 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
605 
606 	return devx_obj;
607 
608 free_obj:
609 	simple_free(devx_obj);
610 	return NULL;
611 }
612 
613 struct mlx5dr_devx_obj *
614 mlx5dr_cmd_ste_create(struct ibv_context *ctx,
615 		      struct mlx5dr_cmd_ste_create_attr *ste_attr)
616 {
617 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
618 	uint32_t in[MLX5_ST_SZ_DW(create_ste_in)] = {0};
619 	struct mlx5dr_devx_obj *devx_obj;
620 	void *attr;
621 
622 	devx_obj = simple_malloc(sizeof(*devx_obj));
623 	if (!devx_obj) {
624 		DR_LOG(ERR, "Failed to allocate memory for STE object");
625 		rte_errno = ENOMEM;
626 		return NULL;
627 	}
628 
629 	attr = MLX5_ADDR_OF(create_ste_in, in, hdr);
630 	MLX5_SET(general_obj_in_cmd_hdr,
631 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
632 	MLX5_SET(general_obj_in_cmd_hdr,
633 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STE);
634 	MLX5_SET(general_obj_in_cmd_hdr,
635 		 attr, log_obj_range, ste_attr->log_obj_range);
636 
637 	attr = MLX5_ADDR_OF(create_ste_in, in, ste);
638 	MLX5_SET(ste, attr, table_type, ste_attr->table_type);
639 
640 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
641 	if (!devx_obj->obj) {
642 		DR_LOG(ERR, "Failed to create STE");
643 		simple_free(devx_obj);
644 		rte_errno = errno;
645 		return NULL;
646 	}
647 
648 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
649 
650 	return devx_obj;
651 }
652 
/* Create a match definer general object using the SELECT format:
 * 9 DW selectors + 8 byte selectors chosen by @def_attr, plus the
 * match mask bytes. Returns a wrapper holding the definer object id,
 * or NULL with rte_errno set on failure.
 */
struct mlx5dr_devx_obj *
mlx5dr_cmd_definer_create(struct ibv_context *ctx,
			  struct mlx5dr_cmd_definer_create_attr *def_attr)
{
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(create_definer_in)] = {0};
	struct mlx5dr_devx_obj *devx_obj;
	void *ptr;

	devx_obj = simple_malloc(sizeof(*devx_obj));
	if (!devx_obj) {
		DR_LOG(ERR, "Failed to allocate memory for definer object");
		rte_errno = ENOMEM;
		return NULL;
	}

	MLX5_SET(general_obj_in_cmd_hdr,
		 in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr,
		 in, obj_type, MLX5_GENERAL_OBJ_TYPE_DEFINER);

	ptr = MLX5_ADDR_OF(create_definer_in, in, definer);
	/* SELECT format: the definer layout is driven by the selectors below. */
	MLX5_SET(definer, ptr, format_id, MLX5_IFC_DEFINER_FORMAT_ID_SELECT);

	/* DW selectors: each picks one dword of packet headers to match on. */
	MLX5_SET(definer, ptr, format_select_dw0, def_attr->dw_selector[0]);
	MLX5_SET(definer, ptr, format_select_dw1, def_attr->dw_selector[1]);
	MLX5_SET(definer, ptr, format_select_dw2, def_attr->dw_selector[2]);
	MLX5_SET(definer, ptr, format_select_dw3, def_attr->dw_selector[3]);
	MLX5_SET(definer, ptr, format_select_dw4, def_attr->dw_selector[4]);
	MLX5_SET(definer, ptr, format_select_dw5, def_attr->dw_selector[5]);
	MLX5_SET(definer, ptr, format_select_dw6, def_attr->dw_selector[6]);
	MLX5_SET(definer, ptr, format_select_dw7, def_attr->dw_selector[7]);
	MLX5_SET(definer, ptr, format_select_dw8, def_attr->dw_selector[8]);

	/* Byte selectors: each picks a single byte to match on. */
	MLX5_SET(definer, ptr, format_select_byte0, def_attr->byte_selector[0]);
	MLX5_SET(definer, ptr, format_select_byte1, def_attr->byte_selector[1]);
	MLX5_SET(definer, ptr, format_select_byte2, def_attr->byte_selector[2]);
	MLX5_SET(definer, ptr, format_select_byte3, def_attr->byte_selector[3]);
	MLX5_SET(definer, ptr, format_select_byte4, def_attr->byte_selector[4]);
	MLX5_SET(definer, ptr, format_select_byte5, def_attr->byte_selector[5]);
	MLX5_SET(definer, ptr, format_select_byte6, def_attr->byte_selector[6]);
	MLX5_SET(definer, ptr, format_select_byte7, def_attr->byte_selector[7]);

	ptr = MLX5_ADDR_OF(definer, ptr, match_mask);
	memcpy(ptr, def_attr->match_mask, MLX5_FLD_SZ_BYTES(definer, match_mask));

	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
	if (!devx_obj->obj) {
		DR_LOG(ERR, "Failed to create Definer");
		simple_free(devx_obj);
		rte_errno = errno;
		return NULL;
	}

	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return devx_obj;
}
711 
712 struct mlx5dr_devx_obj *
713 mlx5dr_cmd_sq_create(struct ibv_context *ctx,
714 		     struct mlx5dr_cmd_sq_create_attr *attr)
715 {
716 	uint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
717 	uint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
718 	void *sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
719 	void *wqc = MLX5_ADDR_OF(sqc, sqc, wq);
720 	struct mlx5dr_devx_obj *devx_obj;
721 
722 	devx_obj = simple_malloc(sizeof(*devx_obj));
723 	if (!devx_obj) {
724 		DR_LOG(ERR, "Failed to create SQ");
725 		rte_errno = ENOMEM;
726 		return NULL;
727 	}
728 
729 	MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
730 	MLX5_SET(sqc, sqc, cqn, attr->cqn);
731 	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
732 	MLX5_SET(sqc, sqc, non_wire, 1);
733 	MLX5_SET(sqc, sqc, ts_format, attr->ts_format);
734 	MLX5_SET(wq, wqc, wq_type, MLX5_WQ_TYPE_CYCLIC);
735 	MLX5_SET(wq, wqc, pd, attr->pdn);
736 	MLX5_SET(wq, wqc, uar_page, attr->page_id);
737 	MLX5_SET(wq, wqc, log_wq_stride, log2above(MLX5_SEND_WQE_BB));
738 	MLX5_SET(wq, wqc, log_wq_sz, attr->log_wq_sz);
739 	MLX5_SET(wq, wqc, dbr_umem_id, attr->dbr_id);
740 	MLX5_SET(wq, wqc, wq_umem_id, attr->wq_id);
741 
742 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
743 	if (!devx_obj->obj) {
744 		simple_free(devx_obj);
745 		rte_errno = errno;
746 		return NULL;
747 	}
748 
749 	devx_obj->id = MLX5_GET(create_sq_out, out, sqn);
750 
751 	return devx_obj;
752 }
753 
/* Transition an SQ from RST to RDY state so it can process WQEs.
 * Returns 0 on success, non-zero with rte_errno set on failure.
 */
int mlx5dr_cmd_sq_modify_rdy(struct mlx5dr_devx_obj *devx_obj)
{
	uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
	void *sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	int ret;

	MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
	MLX5_SET(modify_sq_in, in, sqn, devx_obj->id);
	/* sq_state is the current state; the target state goes in the ctx. */
	MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);

	ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to modify SQ");
		rte_errno = errno;
	}

	return ret;
}
774 
775 int mlx5dr_cmd_allow_other_vhca_access(struct ibv_context *ctx,
776 				       struct mlx5dr_cmd_allow_other_vhca_access_attr *attr)
777 {
778 	uint32_t out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {0};
779 	uint32_t in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {0};
780 	void *key;
781 	int ret;
782 
783 	MLX5_SET(allow_other_vhca_access_in,
784 		 in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
785 	MLX5_SET(allow_other_vhca_access_in,
786 		 in, object_type_to_be_accessed, attr->obj_type);
787 	MLX5_SET(allow_other_vhca_access_in,
788 		 in, object_id_to_be_accessed, attr->obj_id);
789 
790 	key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
791 	memcpy(key, attr->access_key, sizeof(attr->access_key));
792 
793 	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
794 	if (ret) {
795 		DR_LOG(ERR, "Failed to execute ALLOW_OTHER_VHCA_ACCESS command");
796 		rte_errno = errno;
797 		return rte_errno;
798 	}
799 
800 	return 0;
801 }
802 
803 struct mlx5dr_devx_obj *
804 mlx5dr_cmd_alias_obj_create(struct ibv_context *ctx,
805 			    struct mlx5dr_cmd_alias_obj_create_attr *alias_attr)
806 {
807 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
808 	uint32_t in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {0};
809 	struct mlx5dr_devx_obj *devx_obj;
810 	void *attr;
811 	void *key;
812 
813 	devx_obj = simple_malloc(sizeof(*devx_obj));
814 	if (!devx_obj) {
815 		DR_LOG(ERR, "Failed to allocate memory for ALIAS general object");
816 		rte_errno = ENOMEM;
817 		return NULL;
818 	}
819 
820 	attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
821 	MLX5_SET(general_obj_in_cmd_hdr,
822 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
823 	MLX5_SET(general_obj_in_cmd_hdr,
824 		 attr, obj_type, alias_attr->obj_type);
825 	MLX5_SET(general_obj_in_cmd_hdr, attr, alias_object, 1);
826 
827 	attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
828 	MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
829 	MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);
830 
831 	key = MLX5_ADDR_OF(alias_context, attr, access_key);
832 	memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));
833 
834 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
835 	if (!devx_obj->obj) {
836 		DR_LOG(ERR, "Failed to create ALIAS OBJ");
837 		simple_free(devx_obj);
838 		rte_errno = errno;
839 		return NULL;
840 	}
841 
842 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
843 
844 	return devx_obj;
845 }
846 
/* Ask FW to execute a GTA WQE on the driver's behalf (GENERATE_WQE):
 * copies the WQE control, GTA control and data segments from @attr into
 * the command, runs it, validates the embedded CQE status and copies the
 * resulting CQE into @ret_cqe.
 * Returns 0 on success, rte_errno (errno or EINVAL on bad CQE status)
 * on failure.
 */
int mlx5dr_cmd_generate_wqe(struct ibv_context *ctx,
			    struct mlx5dr_cmd_generate_wqe_attr *attr,
			    struct mlx5_cqe64 *ret_cqe)
{
	uint32_t out[MLX5_ST_SZ_DW(generate_wqe_out)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(generate_wqe_in)] = {0};
	uint8_t status;
	void *ptr;
	int ret;

	MLX5_SET(generate_wqe_in, in, opcode, MLX5_CMD_OP_GENERATE_WQE);
	MLX5_SET(generate_wqe_in, in, pdn, attr->pdn);

	ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_ctrl);
	memcpy(ptr, attr->wqe_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_ctrl));

	ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_ctrl);
	memcpy(ptr, attr->gta_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_ctrl));

	ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_0);
	memcpy(ptr, attr->gta_data_0, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_0));

	/* Second data segment is optional. */
	if (attr->gta_data_1) {
		ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_1);
		memcpy(ptr, attr->gta_data_1, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_1));
	}

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to write GTA WQE using FW");
		rte_errno = errno;
		return rte_errno;
	}

	/* The command can succeed while the emulated CQE reports an error. */
	status = MLX5_GET(generate_wqe_out, out, status);
	if (status) {
		DR_LOG(ERR, "Invalid FW CQE status %d", status);
		rte_errno = EINVAL;
		return rte_errno;
	}

	ptr = MLX5_ADDR_OF(generate_wqe_out, out, cqe_data);
	memcpy(ret_cqe, ptr, sizeof(*ret_cqe));

	return 0;
}
893 
894 int mlx5dr_cmd_query_caps(struct ibv_context *ctx,
895 			  struct mlx5dr_cmd_query_caps *caps)
896 {
897 	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
898 	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
899 	const struct flow_hw_port_info *port_info;
900 	struct ibv_device_attr_ex attr_ex;
901 	u32 res;
902 	int ret;
903 
904 	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
905 	MLX5_SET(query_hca_cap_in, in, op_mod,
906 		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
907 		 MLX5_HCA_CAP_OPMOD_GET_CUR);
908 
909 	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
910 	if (ret) {
911 		DR_LOG(ERR, "Failed to query device caps");
912 		rte_errno = errno;
913 		return rte_errno;
914 	}
915 
916 	caps->wqe_based_update =
917 		MLX5_GET(query_hca_cap_out, out,
918 			 capability.cmd_hca_cap.wqe_based_flow_table_update_cap);
919 
920 	caps->eswitch_manager = MLX5_GET(query_hca_cap_out, out,
921 					 capability.cmd_hca_cap.eswitch_manager);
922 
923 	caps->flex_protocols = MLX5_GET(query_hca_cap_out, out,
924 					capability.cmd_hca_cap.flex_parser_protocols);
925 
926 	caps->log_header_modify_argument_granularity =
927 		MLX5_GET(query_hca_cap_out, out,
928 			 capability.cmd_hca_cap.log_header_modify_argument_granularity);
929 
930 	caps->log_header_modify_argument_granularity -=
931 			MLX5_GET(query_hca_cap_out, out,
932 				 capability.cmd_hca_cap.
933 				 log_header_modify_argument_granularity_offset);
934 
935 	caps->log_header_modify_argument_max_alloc =
936 		MLX5_GET(query_hca_cap_out, out,
937 			 capability.cmd_hca_cap.log_header_modify_argument_max_alloc);
938 
939 	caps->definer_format_sup =
940 		MLX5_GET64(query_hca_cap_out, out,
941 			   capability.cmd_hca_cap.match_definer_format_supported);
942 
943 	caps->vhca_id = MLX5_GET(query_hca_cap_out, out,
944 				 capability.cmd_hca_cap.vhca_id);
945 
946 	caps->sq_ts_format = MLX5_GET(query_hca_cap_out, out,
947 				      capability.cmd_hca_cap.sq_ts_format);
948 
949 	caps->ipsec_offload = MLX5_GET(query_hca_cap_out, out,
950 				      capability.cmd_hca_cap.ipsec_offload);
951 
952 	MLX5_SET(query_hca_cap_in, in, op_mod,
953 		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 |
954 		 MLX5_HCA_CAP_OPMOD_GET_CUR);
955 
956 	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
957 	if (ret) {
958 		DR_LOG(ERR, "Failed to query device caps");
959 		rte_errno = errno;
960 		return rte_errno;
961 	}
962 
963 	caps->full_dw_jumbo_support = MLX5_GET(query_hca_cap_out, out,
964 					       capability.cmd_hca_cap_2.
965 					       format_select_dw_8_6_ext);
966 
967 	caps->format_select_gtpu_dw_0 = MLX5_GET(query_hca_cap_out, out,
968 						 capability.cmd_hca_cap_2.
969 						 format_select_dw_gtpu_dw_0);
970 
971 	caps->format_select_gtpu_dw_1 = MLX5_GET(query_hca_cap_out, out,
972 						 capability.cmd_hca_cap_2.
973 						 format_select_dw_gtpu_dw_1);
974 
975 	caps->format_select_gtpu_dw_2 = MLX5_GET(query_hca_cap_out, out,
976 						 capability.cmd_hca_cap_2.
977 						 format_select_dw_gtpu_dw_2);
978 
979 	caps->format_select_gtpu_ext_dw_0 = MLX5_GET(query_hca_cap_out, out,
980 						     capability.cmd_hca_cap_2.
981 						     format_select_dw_gtpu_first_ext_dw_0);
982 
983 	caps->supp_type_gen_wqe = MLX5_GET(query_hca_cap_out, out,
984 					   capability.cmd_hca_cap_2.
985 					   generate_wqe_type);
986 
987 	/* check cross-VHCA support in cap2 */
988 	res =
989 	MLX5_GET(query_hca_cap_out, out,
990 		capability.cmd_hca_cap_2.cross_vhca_object_to_object_supported);
991 
992 	caps->cross_vhca_resources = (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_STC_TO_TIR) &&
993 				     (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_STC_TO_FT) &&
994 				     (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_FT_TO_RTC);
995 
996 	res =
997 	MLX5_GET(query_hca_cap_out, out,
998 		capability.cmd_hca_cap_2.allowed_object_for_other_vhca_access);
999 
1000 	caps->cross_vhca_resources &= (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_TIR) &&
1001 				      (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_FT) &&
1002 				      (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_RTC);
1003 
1004 	MLX5_SET(query_hca_cap_in, in, op_mod,
1005 		 MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE |
1006 		 MLX5_HCA_CAP_OPMOD_GET_CUR);
1007 
1008 	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
1009 	if (ret) {
1010 		DR_LOG(ERR, "Failed to query flow table caps");
1011 		rte_errno = errno;
1012 		return rte_errno;
1013 	}
1014 
1015 	caps->nic_ft.max_level = MLX5_GET(query_hca_cap_out, out,
1016 					  capability.flow_table_nic_cap.
1017 					  flow_table_properties_nic_receive.max_ft_level);
1018 
1019 	caps->nic_ft.reparse = MLX5_GET(query_hca_cap_out, out,
1020 					capability.flow_table_nic_cap.
1021 					flow_table_properties_nic_receive.reparse);
1022 
1023 	/* check cross-VHCA support in flow table properties */
1024 	res =
1025 	MLX5_GET(query_hca_cap_out, out,
1026 		capability.flow_table_nic_cap.flow_table_properties_nic_receive.cross_vhca_object);
1027 	caps->cross_vhca_resources &= res;
1028 
1029 	if (caps->wqe_based_update) {
1030 		MLX5_SET(query_hca_cap_in, in, op_mod,
1031 			 MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE |
1032 			 MLX5_HCA_CAP_OPMOD_GET_CUR);
1033 
1034 		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
1035 		if (ret) {
1036 			DR_LOG(ERR, "Failed to query WQE based FT caps");
1037 			rte_errno = errno;
1038 			return rte_errno;
1039 		}
1040 
1041 		caps->rtc_reparse_mode = MLX5_GET(query_hca_cap_out, out,
1042 						  capability.wqe_based_flow_table_cap.
1043 						  rtc_reparse_mode);
1044 
1045 		caps->ste_format = MLX5_GET(query_hca_cap_out, out,
1046 					    capability.wqe_based_flow_table_cap.
1047 					    ste_format);
1048 
1049 		caps->rtc_index_mode = MLX5_GET(query_hca_cap_out, out,
1050 						capability.wqe_based_flow_table_cap.
1051 						rtc_index_mode);
1052 
1053 		caps->rtc_log_depth_max = MLX5_GET(query_hca_cap_out, out,
1054 						   capability.wqe_based_flow_table_cap.
1055 						   rtc_log_depth_max);
1056 
1057 		caps->ste_alloc_log_max = MLX5_GET(query_hca_cap_out, out,
1058 						   capability.wqe_based_flow_table_cap.
1059 						   ste_alloc_log_max);
1060 
1061 		caps->ste_alloc_log_gran = MLX5_GET(query_hca_cap_out, out,
1062 						    capability.wqe_based_flow_table_cap.
1063 						    ste_alloc_log_granularity);
1064 
1065 		caps->trivial_match_definer = MLX5_GET(query_hca_cap_out, out,
1066 						       capability.wqe_based_flow_table_cap.
1067 						       trivial_match_definer);
1068 
1069 		caps->stc_alloc_log_max = MLX5_GET(query_hca_cap_out, out,
1070 						   capability.wqe_based_flow_table_cap.
1071 						   stc_alloc_log_max);
1072 
1073 		caps->stc_alloc_log_gran = MLX5_GET(query_hca_cap_out, out,
1074 						    capability.wqe_based_flow_table_cap.
1075 						    stc_alloc_log_granularity);
1076 
1077 		caps->rtc_hash_split_table = MLX5_GET(query_hca_cap_out, out,
1078 						      capability.wqe_based_flow_table_cap.
1079 						      rtc_hash_split_table);
1080 
1081 		caps->rtc_linear_lookup_table = MLX5_GET(query_hca_cap_out, out,
1082 							 capability.wqe_based_flow_table_cap.
1083 							 rtc_linear_lookup_table);
1084 
1085 		caps->access_index_mode = MLX5_GET(query_hca_cap_out, out,
1086 						   capability.wqe_based_flow_table_cap.
1087 						   access_index_mode);
1088 
1089 		caps->linear_match_definer = MLX5_GET(query_hca_cap_out, out,
1090 						      capability.wqe_based_flow_table_cap.
1091 						      linear_match_definer_reg_c3);
1092 
1093 		caps->rtc_max_hash_def_gen_wqe = MLX5_GET(query_hca_cap_out, out,
1094 							  capability.wqe_based_flow_table_cap.
1095 							  rtc_max_num_hash_definer_gen_wqe);
1096 
1097 		caps->supp_ste_format_gen_wqe = MLX5_GET(query_hca_cap_out, out,
1098 							 capability.wqe_based_flow_table_cap.
1099 							 ste_format_gen_wqe);
1100 	}
1101 
1102 	if (caps->eswitch_manager) {
1103 		MLX5_SET(query_hca_cap_in, in, op_mod,
1104 			 MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE |
1105 			 MLX5_HCA_CAP_OPMOD_GET_CUR);
1106 
1107 		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
1108 		if (ret) {
1109 			DR_LOG(ERR, "Failed to query flow table esw caps");
1110 			rte_errno = errno;
1111 			return rte_errno;
1112 		}
1113 
1114 		caps->fdb_ft.max_level = MLX5_GET(query_hca_cap_out, out,
1115 						  capability.flow_table_nic_cap.
1116 						  flow_table_properties_nic_receive.max_ft_level);
1117 
1118 		caps->fdb_ft.reparse = MLX5_GET(query_hca_cap_out, out,
1119 						capability.flow_table_nic_cap.
1120 						flow_table_properties_nic_receive.reparse);
1121 
1122 		MLX5_SET(query_hca_cap_in, in, op_mod,
1123 			 MLX5_SET_HCA_CAP_OP_MOD_ESW | MLX5_HCA_CAP_OPMOD_GET_CUR);
1124 
1125 		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
1126 		if (ret) {
1127 			DR_LOG(ERR, "Query eswitch capabilities failed %d\n", ret);
1128 			rte_errno = errno;
1129 			return rte_errno;
1130 		}
1131 
1132 		if (MLX5_GET(query_hca_cap_out, out,
1133 			     capability.esw_cap.esw_manager_vport_number_valid))
1134 			caps->eswitch_manager_vport_number =
1135 			MLX5_GET(query_hca_cap_out, out,
1136 				 capability.esw_cap.esw_manager_vport_number);
1137 	}
1138 
1139 	ret = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
1140 	if (ret) {
1141 		DR_LOG(ERR, "Failed to query device attributes");
1142 		rte_errno = ret;
1143 		return rte_errno;
1144 	}
1145 
1146 	strlcpy(caps->fw_ver, attr_ex.orig_attr.fw_ver, sizeof(caps->fw_ver));
1147 
1148 	port_info = flow_hw_get_wire_port(ctx);
1149 	if (port_info) {
1150 		caps->wire_regc = port_info->regc_value;
1151 		caps->wire_regc_mask = port_info->regc_mask;
1152 	} else {
1153 		DR_LOG(INFO, "Failed to query wire port regc value");
1154 	}
1155 
1156 	return ret;
1157 }
1158 
1159 int mlx5dr_cmd_query_ib_port(struct ibv_context *ctx,
1160 			     struct mlx5dr_cmd_query_vport_caps *vport_caps,
1161 			     uint32_t port_num)
1162 {
1163 	struct mlx5_port_info port_info = {0};
1164 	uint32_t flags;
1165 	int ret;
1166 
1167 	flags = MLX5_PORT_QUERY_VPORT | MLX5_PORT_QUERY_ESW_OWNER_VHCA_ID;
1168 
1169 	ret = mlx5_glue->devx_port_query(ctx, port_num, &port_info);
1170 	/* Check if query succeed and vport is enabled */
1171 	if (ret || (port_info.query_flags & flags) != flags) {
1172 		rte_errno = ENOTSUP;
1173 		return rte_errno;
1174 	}
1175 
1176 	vport_caps->vport_num = port_info.vport_id;
1177 	vport_caps->esw_owner_vhca_id = port_info.esw_owner_vhca_id;
1178 
1179 	if (port_info.query_flags & MLX5_PORT_QUERY_REG_C0) {
1180 		vport_caps->metadata_c = port_info.vport_meta_tag;
1181 		vport_caps->metadata_c_mask = port_info.vport_meta_mask;
1182 	}
1183 
1184 	return 0;
1185 }
1186