xref: /dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c (revision 53fc80ac1a70d97dd7209a1101137584580c62d2)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2022 NVIDIA Corporation & Affiliates
3  */
4 
5 #include "mlx5dr_internal.h"
6 
7 static uint32_t mlx5dr_cmd_get_syndrome(uint32_t *out)
8 {
9 	/* Assumption: syndrome is always the second u32 */
10 	return be32toh(out[1]);
11 }
12 
13 int mlx5dr_cmd_destroy_obj(struct mlx5dr_devx_obj *devx_obj)
14 {
15 	int ret;
16 
17 	ret = mlx5_glue->devx_obj_destroy(devx_obj->obj);
18 	simple_free(devx_obj);
19 
20 	return ret;
21 }
22 
23 struct mlx5dr_devx_obj *
24 mlx5dr_cmd_flow_table_create(struct ibv_context *ctx,
25 			     struct mlx5dr_cmd_ft_create_attr *ft_attr)
26 {
27 	uint32_t out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
28 	uint32_t in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
29 	struct mlx5dr_devx_obj *devx_obj;
30 	void *ft_ctx;
31 
32 	devx_obj = simple_malloc(sizeof(*devx_obj));
33 	if (!devx_obj) {
34 		DR_LOG(ERR, "Failed to allocate memory for flow table object");
35 		rte_errno = ENOMEM;
36 		return NULL;
37 	}
38 
39 	MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
40 	MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type);
41 
42 	ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
43 	MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
44 	MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid);
45 	MLX5_SET(flow_table_context, ft_ctx, reformat_en, ft_attr->reformat_en);
46 
47 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
48 	if (!devx_obj->obj) {
49 		DR_LOG(ERR, "Failed to create FT (syndrome: %#x)",
50 		       mlx5dr_cmd_get_syndrome(out));
51 		simple_free(devx_obj);
52 		rte_errno = errno;
53 		return NULL;
54 	}
55 
56 	devx_obj->id = MLX5_GET(create_flow_table_out, out, table_id);
57 
58 	return devx_obj;
59 }
60 
61 int
62 mlx5dr_cmd_flow_table_modify(struct mlx5dr_devx_obj *devx_obj,
63 			     struct mlx5dr_cmd_ft_modify_attr *ft_attr)
64 {
65 	uint32_t out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
66 	uint32_t in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
67 	void *ft_ctx;
68 	int ret;
69 
70 	MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE);
71 	MLX5_SET(modify_flow_table_in, in, table_type, ft_attr->type);
72 	MLX5_SET(modify_flow_table_in, in, modify_field_select, ft_attr->modify_fs);
73 	MLX5_SET(modify_flow_table_in, in, table_id, devx_obj->id);
74 
75 	ft_ctx = MLX5_ADDR_OF(modify_flow_table_in, in, flow_table_context);
76 
77 	MLX5_SET(flow_table_context, ft_ctx, table_miss_action, ft_attr->table_miss_action);
78 	MLX5_SET(flow_table_context, ft_ctx, table_miss_id, ft_attr->table_miss_id);
79 	MLX5_SET(flow_table_context, ft_ctx, rtc_id_0, ft_attr->rtc_id_0);
80 	MLX5_SET(flow_table_context, ft_ctx, rtc_id_1, ft_attr->rtc_id_1);
81 
82 	ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out));
83 	if (ret) {
84 		DR_LOG(ERR, "Failed to modify FT (syndrome: %#x)",
85 		       mlx5dr_cmd_get_syndrome(out));
86 		rte_errno = errno;
87 	}
88 
89 	return ret;
90 }
91 
92 int
93 mlx5dr_cmd_flow_table_query(struct mlx5dr_devx_obj *devx_obj,
94 			    struct mlx5dr_cmd_ft_query_attr *ft_attr,
95 			    uint64_t *icm_addr_0, uint64_t *icm_addr_1)
96 {
97 	uint32_t out[MLX5_ST_SZ_DW(query_flow_table_out)] = {0};
98 	uint32_t in[MLX5_ST_SZ_DW(query_flow_table_in)] = {0};
99 	void *ft_ctx;
100 	int ret;
101 
102 	MLX5_SET(query_flow_table_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_TABLE);
103 	MLX5_SET(query_flow_table_in, in, table_type, ft_attr->type);
104 	MLX5_SET(query_flow_table_in, in, table_id, devx_obj->id);
105 
106 	ret = mlx5_glue->devx_obj_query(devx_obj->obj, in, sizeof(in), out, sizeof(out));
107 	if (ret) {
108 		DR_LOG(ERR, "Failed to query FT (syndrome: %#x)",
109 		       mlx5dr_cmd_get_syndrome(out));
110 		rte_errno = errno;
111 		return ret;
112 	}
113 
114 	ft_ctx = MLX5_ADDR_OF(query_flow_table_out, out, flow_table_context);
115 	*icm_addr_0 = MLX5_GET64(flow_table_context, ft_ctx, sw_owner_icm_root_0);
116 	*icm_addr_1 = MLX5_GET64(flow_table_context, ft_ctx, sw_owner_icm_root_1);
117 
118 	return ret;
119 }
120 
121 static struct mlx5dr_devx_obj *
122 mlx5dr_cmd_flow_group_create(struct ibv_context *ctx,
123 			     struct mlx5dr_cmd_fg_attr *fg_attr)
124 {
125 	uint32_t out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
126 	uint32_t in[MLX5_ST_SZ_DW(create_flow_group_in)] = {0};
127 	struct mlx5dr_devx_obj *devx_obj;
128 
129 	devx_obj = simple_malloc(sizeof(*devx_obj));
130 	if (!devx_obj) {
131 		DR_LOG(ERR, "Failed to allocate memory for flow group object");
132 		rte_errno = ENOMEM;
133 		return NULL;
134 	}
135 
136 	MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
137 	MLX5_SET(create_flow_group_in, in, table_type, fg_attr->table_type);
138 	MLX5_SET(create_flow_group_in, in, table_id, fg_attr->table_id);
139 
140 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
141 	if (!devx_obj->obj) {
142 		DR_LOG(ERR, "Failed to create Flow group(syndrome: %#x)",
143 		       mlx5dr_cmd_get_syndrome(out));
144 		simple_free(devx_obj);
145 		rte_errno = errno;
146 		return NULL;
147 	}
148 
149 	devx_obj->id = MLX5_GET(create_flow_group_out, out, group_id);
150 
151 	return devx_obj;
152 }
153 
/* Create a Flow Table Entry (FTE) in the given table and group.
 *
 * @param ctx - ibv context to issue the DevX command on.
 * @param table_type - FW flow table type the FTE belongs to.
 * @param table_id - ID of the target flow table.
 * @param group_id - ID of the flow group within the table.
 * @param fte_attr - actions, flags and destination list for the entry.
 * @return wrapper for the created FTE, or NULL (rte_errno set).
 */
struct mlx5dr_devx_obj *
mlx5dr_cmd_set_fte(struct ibv_context *ctx,
		   uint32_t table_type,
		   uint32_t table_id,
		   uint32_t group_id,
		   struct mlx5dr_cmd_set_fte_attr *fte_attr)
{
	uint32_t out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
	struct mlx5dr_devx_obj *devx_obj;
	uint32_t dest_entry_sz;
	uint32_t total_dest_sz;
	void *in_flow_context;
	uint32_t action_flags;
	uint8_t *in_dests;
	uint32_t inlen;
	uint32_t *in;
	uint32_t i;

	/* Input buffer is variable length: base command plus one destination
	 * entry per dest. Extended-destination entries are larger than the
	 * regular dest_format.
	 */
	dest_entry_sz = fte_attr->extended_dest ?
			MLX5_ST_SZ_BYTES(extended_dest_format) :
			MLX5_ST_SZ_BYTES(dest_format);
	total_dest_sz = dest_entry_sz * fte_attr->dests_num;
	inlen = align((MLX5_ST_SZ_BYTES(set_fte_in) + total_dest_sz), DW_SIZE);
	in = simple_calloc(1, inlen);
	if (!in) {
		rte_errno = ENOMEM;
		return NULL;
	}

	devx_obj = simple_malloc(sizeof(*devx_obj));
	if (!devx_obj) {
		DR_LOG(ERR, "Failed to allocate memory for fte object");
		rte_errno = ENOMEM;
		goto free_in;
	}

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, table_type, table_type);
	MLX5_SET(set_fte_in, in, table_id, table_id);

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);
	MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source);
	MLX5_SET(flow_context, in_flow_context, extended_destination, fte_attr->extended_dest);
	MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);

	action_flags = fte_attr->action_flags;
	MLX5_SET(flow_context, in_flow_context, action, action_flags);

	if (action_flags & MLX5_FLOW_CONTEXT_ACTION_REFORMAT)
		MLX5_SET(flow_context, in_flow_context,
			 packet_reformat_id, fte_attr->packet_reformat_id);

	/* Encrypt/decrypt actions reference an additional crypto object */
	if (action_flags & (MLX5_FLOW_CONTEXT_ACTION_DECRYPT | MLX5_FLOW_CONTEXT_ACTION_ENCRYPT)) {
		MLX5_SET(flow_context, in_flow_context,
			 encrypt_decrypt_type, fte_attr->encrypt_decrypt_type);
		MLX5_SET(flow_context, in_flow_context,
			 encrypt_decrypt_obj_id, fte_attr->encrypt_decrypt_obj_id);
	}

	if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		/* Serialize the destination list right after the flow context */
		in_dests = (uint8_t *)MLX5_ADDR_OF(flow_context, in_flow_context, destination);

		for (i = 0; i < fte_attr->dests_num; i++) {
			struct mlx5dr_cmd_set_fte_dest *dest = &fte_attr->dests[i];

			switch (dest->destination_type) {
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				if (dest->ext_flags & MLX5DR_CMD_EXT_DEST_ESW_OWNER_VHCA_ID) {
					MLX5_SET(dest_format, in_dests,
						 destination_eswitch_owner_vhca_id_valid, 1);
					MLX5_SET(dest_format, in_dests,
						 destination_eswitch_owner_vhca_id,
						 dest->esw_owner_vhca_id);
				}
				/* Fall through - VPORT shares the common dest fields below */
			case MLX5_FLOW_DESTINATION_TYPE_TIR:
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				MLX5_SET(dest_format, in_dests, destination_type,
					 dest->destination_type);
				MLX5_SET(dest_format, in_dests, destination_id,
					 dest->destination_id);
				if (dest->ext_flags & MLX5DR_CMD_EXT_DEST_REFORMAT) {
					MLX5_SET(dest_format, in_dests, packet_reformat, 1);
					MLX5_SET(extended_dest_format, in_dests, packet_reformat_id,
						 dest->ext_reformat->id);
				}
				break;
			default:
				rte_errno = EOPNOTSUPP;
				goto free_devx;
			}

			in_dests = in_dests + dest_entry_sz;
		}
		MLX5_SET(flow_context, in_flow_context, destination_list_size, fte_attr->dests_num);
	}

	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out));
	if (!devx_obj->obj) {
		DR_LOG(ERR, "Failed to create FTE (syndrome: %#x)",
		       mlx5dr_cmd_get_syndrome(out));
		rte_errno = errno;
		goto free_devx;
	}

	simple_free(in);
	return devx_obj;

free_devx:
	simple_free(devx_obj);
free_in:
	simple_free(in);
	return NULL;
}
269 
270 struct mlx5dr_cmd_forward_tbl *
271 mlx5dr_cmd_forward_tbl_create(struct ibv_context *ctx,
272 			      struct mlx5dr_cmd_ft_create_attr *ft_attr,
273 			      struct mlx5dr_cmd_set_fte_attr *fte_attr)
274 {
275 	struct mlx5dr_cmd_fg_attr fg_attr = {0};
276 	struct mlx5dr_cmd_forward_tbl *tbl;
277 
278 	tbl = simple_calloc(1, sizeof(*tbl));
279 	if (!tbl) {
280 		DR_LOG(ERR, "Failed to allocate memory");
281 		rte_errno = ENOMEM;
282 		return NULL;
283 	}
284 
285 	tbl->ft = mlx5dr_cmd_flow_table_create(ctx, ft_attr);
286 	if (!tbl->ft) {
287 		DR_LOG(ERR, "Failed to create FT");
288 		goto free_tbl;
289 	}
290 
291 	fg_attr.table_id = tbl->ft->id;
292 	fg_attr.table_type = ft_attr->type;
293 
294 	tbl->fg = mlx5dr_cmd_flow_group_create(ctx, &fg_attr);
295 	if (!tbl->fg) {
296 		DR_LOG(ERR, "Failed to create FG");
297 		goto free_ft;
298 	}
299 
300 	tbl->fte = mlx5dr_cmd_set_fte(ctx, ft_attr->type, tbl->ft->id, tbl->fg->id, fte_attr);
301 	if (!tbl->fte) {
302 		DR_LOG(ERR, "Failed to create FTE");
303 		goto free_fg;
304 	}
305 	return tbl;
306 
307 free_fg:
308 	mlx5dr_cmd_destroy_obj(tbl->fg);
309 free_ft:
310 	mlx5dr_cmd_destroy_obj(tbl->ft);
311 free_tbl:
312 	simple_free(tbl);
313 	return NULL;
314 }
315 
/* Tear down a forward table bundle in reverse creation order:
 * FTE first, then the flow group, then the flow table, then the wrapper.
 */
void mlx5dr_cmd_forward_tbl_destroy(struct mlx5dr_cmd_forward_tbl *tbl)
{
	mlx5dr_cmd_destroy_obj(tbl->fte);
	mlx5dr_cmd_destroy_obj(tbl->fg);
	mlx5dr_cmd_destroy_obj(tbl->ft);
	simple_free(tbl);
}
323 
324 void mlx5dr_cmd_set_attr_connect_miss_tbl(struct mlx5dr_context *ctx,
325 					  uint32_t fw_ft_type,
326 					  enum mlx5dr_table_type type,
327 					  struct mlx5dr_cmd_ft_modify_attr *ft_attr)
328 {
329 	struct mlx5dr_devx_obj *default_miss_tbl;
330 
331 	if (type != MLX5DR_TABLE_TYPE_FDB && !mlx5dr_context_shared_gvmi_used(ctx))
332 		return;
333 
334 	ft_attr->modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
335 	ft_attr->type = fw_ft_type;
336 	ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;
337 
338 	if (type == MLX5DR_TABLE_TYPE_FDB) {
339 		default_miss_tbl = ctx->common_res[type].default_miss->ft;
340 		if (!default_miss_tbl) {
341 			assert(false);
342 			return;
343 		}
344 		ft_attr->table_miss_id = default_miss_tbl->id;
345 	} else {
346 		ft_attr->table_miss_id = ctx->gvmi_res[type].aliased_end_ft->id;
347 	}
348 }
349 
350 struct mlx5dr_devx_obj *
351 mlx5dr_cmd_rtc_create(struct ibv_context *ctx,
352 		      struct mlx5dr_cmd_rtc_create_attr *rtc_attr)
353 {
354 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
355 	uint32_t in[MLX5_ST_SZ_DW(create_rtc_in)] = {0};
356 	struct mlx5dr_devx_obj *devx_obj;
357 	void *attr;
358 
359 	devx_obj = simple_malloc(sizeof(*devx_obj));
360 	if (!devx_obj) {
361 		DR_LOG(ERR, "Failed to allocate memory for RTC object");
362 		rte_errno = ENOMEM;
363 		return NULL;
364 	}
365 
366 	attr = MLX5_ADDR_OF(create_rtc_in, in, hdr);
367 	MLX5_SET(general_obj_in_cmd_hdr,
368 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
369 	MLX5_SET(general_obj_in_cmd_hdr,
370 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_RTC);
371 
372 	attr = MLX5_ADDR_OF(create_rtc_in, in, rtc);
373 	MLX5_SET(rtc, attr, ste_format_0, rtc_attr->is_frst_jumbo ?
374 		MLX5_IFC_RTC_STE_FORMAT_11DW :
375 		MLX5_IFC_RTC_STE_FORMAT_8DW);
376 
377 	if (rtc_attr->is_scnd_range) {
378 		MLX5_SET(rtc, attr, ste_format_1, MLX5_IFC_RTC_STE_FORMAT_RANGE);
379 		MLX5_SET(rtc, attr, num_match_ste, 2);
380 	}
381 
382 	MLX5_SET(rtc, attr, pd, rtc_attr->pd);
383 	MLX5_SET(rtc, attr, update_method, rtc_attr->fw_gen_wqe);
384 	MLX5_SET(rtc, attr, update_index_mode, rtc_attr->update_index_mode);
385 	MLX5_SET(rtc, attr, access_index_mode, rtc_attr->access_index_mode);
386 	MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer);
387 	MLX5_SET(rtc, attr, log_depth, rtc_attr->log_depth);
388 	MLX5_SET(rtc, attr, log_hash_size, rtc_attr->log_size);
389 	MLX5_SET(rtc, attr, table_type, rtc_attr->table_type);
390 	MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer);
391 	MLX5_SET(rtc, attr, match_definer_0, rtc_attr->match_definer_0);
392 	MLX5_SET(rtc, attr, match_definer_1, rtc_attr->match_definer_1);
393 	MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base);
394 	MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base);
395 	MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset);
396 	MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id);
397 	MLX5_SET(rtc, attr, reparse_mode, rtc_attr->reparse_mode);
398 
399 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
400 	if (!devx_obj->obj) {
401 		DR_LOG(ERR, "Failed to create RTC (syndrome: %#x)",
402 		       mlx5dr_cmd_get_syndrome(out));
403 		simple_free(devx_obj);
404 		rte_errno = errno;
405 		return NULL;
406 	}
407 
408 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
409 
410 	return devx_obj;
411 }
412 
413 struct mlx5dr_devx_obj *
414 mlx5dr_cmd_stc_create(struct ibv_context *ctx,
415 		      struct mlx5dr_cmd_stc_create_attr *stc_attr)
416 {
417 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
418 	uint32_t in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
419 	struct mlx5dr_devx_obj *devx_obj;
420 	void *attr;
421 
422 	devx_obj = simple_malloc(sizeof(*devx_obj));
423 	if (!devx_obj) {
424 		DR_LOG(ERR, "Failed to allocate memory for STC object");
425 		rte_errno = ENOMEM;
426 		return NULL;
427 	}
428 
429 	attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
430 	MLX5_SET(general_obj_in_cmd_hdr,
431 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
432 	MLX5_SET(general_obj_in_cmd_hdr,
433 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STC);
434 	MLX5_SET(general_obj_in_cmd_hdr,
435 		 attr, log_obj_range, stc_attr->log_obj_range);
436 
437 	attr = MLX5_ADDR_OF(create_stc_in, in, stc);
438 	MLX5_SET(stc, attr, table_type, stc_attr->table_type);
439 
440 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
441 	if (!devx_obj->obj) {
442 		DR_LOG(ERR, "Failed to create STC (syndrome: %#x)",
443 		       mlx5dr_cmd_get_syndrome(out));
444 		simple_free(devx_obj);
445 		rte_errno = errno;
446 		return NULL;
447 	}
448 
449 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
450 
451 	return devx_obj;
452 }
453 
/* Encode the action-type-specific parameter area of an STC.
 *
 * @param stc_attr - the action type plus its per-type payload.
 * @param stc_param - pointer into the STC structure's parameter area.
 * @return 0 on success, EINVAL (also set in rte_errno) for an
 *         unsupported action type.
 */
static int
mlx5dr_cmd_stc_modify_set_stc_param(struct mlx5dr_cmd_stc_modify_attr *stc_attr,
				    void *stc_param)
{
	switch (stc_attr->action_type) {
	case MLX5_IFC_STC_ACTION_TYPE_COUNTER:
		MLX5_SET(stc_ste_param_flow_counter, stc_param, flow_counter_id, stc_attr->id);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR:
		MLX5_SET(stc_ste_param_tir, stc_param, tirn, stc_attr->dest_tir_num);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT:
		MLX5_SET(stc_ste_param_table, stc_param, table_id, stc_attr->dest_table_id);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST:
		/* Header-modify via pattern + argument object pair */
		MLX5_SET(stc_ste_param_header_modify_list, stc_param,
			 header_modify_pattern_id, stc_attr->modify_header.pattern_id);
		MLX5_SET(stc_ste_param_header_modify_list, stc_param,
			 header_modify_argument_id, stc_attr->modify_header.arg_id);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE:
		MLX5_SET(stc_ste_param_remove, stc_param, action_type,
			 MLX5_MODIFICATION_TYPE_REMOVE);
		MLX5_SET(stc_ste_param_remove, stc_param, decap,
			 stc_attr->remove_header.decap);
		MLX5_SET(stc_ste_param_remove, stc_param, remove_start_anchor,
			 stc_attr->remove_header.start_anchor);
		MLX5_SET(stc_ste_param_remove, stc_param, remove_end_anchor,
			 stc_attr->remove_header.end_anchor);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT:
		MLX5_SET(stc_ste_param_insert, stc_param, action_type,
			 MLX5_MODIFICATION_TYPE_INSERT);
		MLX5_SET(stc_ste_param_insert, stc_param, encap,
			 stc_attr->insert_header.encap);
		MLX5_SET(stc_ste_param_insert, stc_param, push_esp,
			 stc_attr->insert_header.push_esp);
		MLX5_SET(stc_ste_param_insert, stc_param, inline_data,
			 stc_attr->insert_header.is_inline);
		MLX5_SET(stc_ste_param_insert, stc_param, insert_anchor,
			 stc_attr->insert_header.insert_anchor);
		/* HW gets the next 2 sizes in words */
		MLX5_SET(stc_ste_param_insert, stc_param, insert_size,
			 stc_attr->insert_header.header_size / W_SIZE);
		MLX5_SET(stc_ste_param_insert, stc_param, insert_offset,
			 stc_attr->insert_header.insert_offset / W_SIZE);
		MLX5_SET(stc_ste_param_insert, stc_param, insert_argument,
			 stc_attr->insert_header.arg_id);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_COPY:
	case MLX5_IFC_STC_ACTION_TYPE_SET:
	case MLX5_IFC_STC_ACTION_TYPE_ADD:
	case MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD:
		/* modify_action.data is written raw; the __be64 cast indicates
		 * it is already in the big-endian layout the HW expects.
		 */
		*(__be64 *)stc_param = stc_attr->modify_action.data;
		break;
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT:
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK:
		MLX5_SET(stc_ste_param_vport, stc_param, vport_number,
			 stc_attr->vport.vport_num);
		MLX5_SET(stc_ste_param_vport, stc_param, eswitch_owner_vhca_id,
			 stc_attr->vport.esw_owner_vhca_id);
		MLX5_SET(stc_ste_param_vport, stc_param, eswitch_owner_vhca_id_valid, 1);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_DROP:
	case MLX5_IFC_STC_ACTION_TYPE_NOP:
	case MLX5_IFC_STC_ACTION_TYPE_TAG:
	case MLX5_IFC_STC_ACTION_TYPE_ALLOW:
		/* These action types carry no extra parameters */
		break;
	case MLX5_IFC_STC_ACTION_TYPE_ASO:
		MLX5_SET(stc_ste_param_execute_aso, stc_param, aso_object_id,
			 stc_attr->aso.devx_obj_id);
		MLX5_SET(stc_ste_param_execute_aso, stc_param, return_reg_id,
			 stc_attr->aso.return_reg_id);
		MLX5_SET(stc_ste_param_execute_aso, stc_param, aso_type,
			 stc_attr->aso.aso_type);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE:
		MLX5_SET(stc_ste_param_ste_table, stc_param, ste_obj_id,
			 stc_attr->ste_table.ste_obj_id);
		MLX5_SET(stc_ste_param_ste_table, stc_param, match_definer_id,
			 stc_attr->ste_table.match_definer_id);
		MLX5_SET(stc_ste_param_ste_table, stc_param, log_hash_size,
			 stc_attr->ste_table.log_hash_size);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS:
		MLX5_SET(stc_ste_param_remove_words, stc_param, action_type,
			 MLX5_MODIFICATION_TYPE_REMOVE_WORDS);
		MLX5_SET(stc_ste_param_remove_words, stc_param, remove_start_anchor,
			 stc_attr->remove_words.start_anchor);
		MLX5_SET(stc_ste_param_remove_words, stc_param,
			 remove_size, stc_attr->remove_words.num_of_words);
		break;
	default:
		DR_LOG(ERR, "Not supported type %d", stc_attr->action_type);
		rte_errno = EINVAL;
		return rte_errno;
	}
	return 0;
}
553 
554 int
555 mlx5dr_cmd_stc_modify(struct mlx5dr_devx_obj *devx_obj,
556 		      struct mlx5dr_cmd_stc_modify_attr *stc_attr)
557 {
558 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
559 	uint32_t in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
560 	void *stc_param;
561 	void *attr;
562 	int ret;
563 
564 	attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
565 	MLX5_SET(general_obj_in_cmd_hdr,
566 		 attr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
567 	MLX5_SET(general_obj_in_cmd_hdr,
568 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STC);
569 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, devx_obj->id);
570 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_offset, stc_attr->stc_offset);
571 
572 	attr = MLX5_ADDR_OF(create_stc_in, in, stc);
573 	MLX5_SET(stc, attr, ste_action_offset, stc_attr->action_offset);
574 	MLX5_SET(stc, attr, action_type, stc_attr->action_type);
575 	MLX5_SET(stc, attr, reparse_mode, stc_attr->reparse_mode);
576 	MLX5_SET64(stc, attr, modify_field_select,
577 		   MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC);
578 
579 	/* Set destination TIRN, TAG, FT ID, STE ID */
580 	stc_param = MLX5_ADDR_OF(stc, attr, stc_param);
581 	ret = mlx5dr_cmd_stc_modify_set_stc_param(stc_attr, stc_param);
582 	if (ret)
583 		return ret;
584 
585 	ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out));
586 	if (ret) {
587 		DR_LOG(ERR, "Failed to modify STC FW action_type %d (syndrome: %#x)",
588 		       stc_attr->action_type, mlx5dr_cmd_get_syndrome(out));
589 		rte_errno = errno;
590 	}
591 
592 	return ret;
593 }
594 
595 struct mlx5dr_devx_obj *
596 mlx5dr_cmd_arg_create(struct ibv_context *ctx,
597 		      uint16_t log_obj_range,
598 		      uint32_t pd)
599 {
600 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
601 	uint32_t in[MLX5_ST_SZ_DW(create_arg_in)] = {0};
602 	struct mlx5dr_devx_obj *devx_obj;
603 	void *attr;
604 
605 	devx_obj = simple_malloc(sizeof(*devx_obj));
606 	if (!devx_obj) {
607 		DR_LOG(ERR, "Failed to allocate memory for ARG object");
608 		rte_errno = ENOMEM;
609 		return NULL;
610 	}
611 
612 	attr = MLX5_ADDR_OF(create_arg_in, in, hdr);
613 	MLX5_SET(general_obj_in_cmd_hdr,
614 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
615 	MLX5_SET(general_obj_in_cmd_hdr,
616 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_ARG);
617 	MLX5_SET(general_obj_in_cmd_hdr,
618 		 attr, log_obj_range, log_obj_range);
619 
620 	attr = MLX5_ADDR_OF(create_arg_in, in, arg);
621 	MLX5_SET(arg, attr, access_pd, pd);
622 
623 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
624 	if (!devx_obj->obj) {
625 		DR_LOG(ERR, "Failed to create ARG (syndrome: %#x)",
626 		       mlx5dr_cmd_get_syndrome(out));
627 		simple_free(devx_obj);
628 		rte_errno = errno;
629 		return NULL;
630 	}
631 
632 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
633 
634 	return devx_obj;
635 }
636 
/* Create a modify-header pattern general object from an actions buffer.
 *
 * @param ctx - ibv context to issue the DevX command on.
 * @param pattern_length - actions buffer length in bytes; must not exceed
 *        MAX_ACTIONS_DATA_IN_HEADER_MODIFY.
 * @param actions - array of modify-header actions to use as the pattern.
 * @return wrapper for the created pattern, or NULL (rte_errno set).
 */
struct mlx5dr_devx_obj *
mlx5dr_cmd_header_modify_pattern_create(struct ibv_context *ctx,
					uint32_t pattern_length,
					uint8_t *actions)
{
	uint32_t in[MLX5_ST_SZ_DW(create_header_modify_pattern_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	struct mlx5dr_devx_obj *devx_obj;
	uint64_t *pattern_data;
	int num_of_actions;
	void *pattern;
	void *attr;
	int i;

	if (pattern_length > MAX_ACTIONS_DATA_IN_HEADER_MODIFY) {
		DR_LOG(ERR, "Pattern length %d exceeds limit %d",
			pattern_length, MAX_ACTIONS_DATA_IN_HEADER_MODIFY);
		rte_errno = EINVAL;
		return NULL;
	}

	devx_obj = simple_malloc(sizeof(*devx_obj));
	if (!devx_obj) {
		DR_LOG(ERR, "Failed to allocate memory for header_modify_pattern object");
		rte_errno = ENOMEM;
		return NULL;
	}
	attr = MLX5_ADDR_OF(create_header_modify_pattern_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_MODIFY_HEADER_PATTERN);

	pattern = MLX5_ADDR_OF(create_header_modify_pattern_in, in, pattern);
	/* Pattern_length is expressed in ddwords (each action is 8 bytes) */
	MLX5_SET(header_modify_pattern_in, pattern, pattern_length, pattern_length / (2 * DW_SIZE));

	pattern_data = (uint64_t *)MLX5_ADDR_OF(header_modify_pattern_in, pattern, pattern_data);
	memcpy(pattern_data, actions, pattern_length);

	/* Zero the data portion of each action so the pattern only carries
	 * control fields. Copy/add_field actions are exempt since all of
	 * their bytes are control.
	 */
	num_of_actions = pattern_length / MLX5DR_MODIFY_ACTION_SIZE;
	for (i = 0; i < num_of_actions; i++) {
		int type;

		type = MLX5_GET(set_action_in, &pattern_data[i], action_type);
		if (type != MLX5_MODIFICATION_TYPE_COPY &&
		    type != MLX5_MODIFICATION_TYPE_ADD_FIELD)
			/* Action type copy/add_field uses all bytes for control */
			MLX5_SET(set_action_in, &pattern_data[i], data, 0);
	}

	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
	if (!devx_obj->obj) {
		DR_LOG(ERR, "Failed to create header_modify_pattern (syndrome: %#x)",
		       mlx5dr_cmd_get_syndrome(out));
		rte_errno = errno;
		goto free_obj;
	}

	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return devx_obj;

free_obj:
	simple_free(devx_obj);
	return NULL;
}
704 
705 struct mlx5dr_devx_obj *
706 mlx5dr_cmd_ste_create(struct ibv_context *ctx,
707 		      struct mlx5dr_cmd_ste_create_attr *ste_attr)
708 {
709 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
710 	uint32_t in[MLX5_ST_SZ_DW(create_ste_in)] = {0};
711 	struct mlx5dr_devx_obj *devx_obj;
712 	void *attr;
713 
714 	devx_obj = simple_malloc(sizeof(*devx_obj));
715 	if (!devx_obj) {
716 		DR_LOG(ERR, "Failed to allocate memory for STE object");
717 		rte_errno = ENOMEM;
718 		return NULL;
719 	}
720 
721 	attr = MLX5_ADDR_OF(create_ste_in, in, hdr);
722 	MLX5_SET(general_obj_in_cmd_hdr,
723 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
724 	MLX5_SET(general_obj_in_cmd_hdr,
725 		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STE);
726 	MLX5_SET(general_obj_in_cmd_hdr,
727 		 attr, log_obj_range, ste_attr->log_obj_range);
728 
729 	attr = MLX5_ADDR_OF(create_ste_in, in, ste);
730 	MLX5_SET(ste, attr, table_type, ste_attr->table_type);
731 
732 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
733 	if (!devx_obj->obj) {
734 		DR_LOG(ERR, "Failed to create STE (syndrome: %#x)",
735 		       mlx5dr_cmd_get_syndrome(out));
736 		simple_free(devx_obj);
737 		rte_errno = errno;
738 		return NULL;
739 	}
740 
741 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
742 
743 	return devx_obj;
744 }
745 
746 struct mlx5dr_devx_obj *
747 mlx5dr_cmd_definer_create(struct ibv_context *ctx,
748 			  struct mlx5dr_cmd_definer_create_attr *def_attr)
749 {
750 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
751 	uint32_t in[MLX5_ST_SZ_DW(create_definer_in)] = {0};
752 	struct mlx5dr_devx_obj *devx_obj;
753 	void *ptr;
754 
755 	devx_obj = simple_malloc(sizeof(*devx_obj));
756 	if (!devx_obj) {
757 		DR_LOG(ERR, "Failed to allocate memory for definer object");
758 		rte_errno = ENOMEM;
759 		return NULL;
760 	}
761 
762 	MLX5_SET(general_obj_in_cmd_hdr,
763 		 in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
764 	MLX5_SET(general_obj_in_cmd_hdr,
765 		 in, obj_type, MLX5_GENERAL_OBJ_TYPE_DEFINER);
766 
767 	ptr = MLX5_ADDR_OF(create_definer_in, in, definer);
768 	MLX5_SET(definer, ptr, format_id, MLX5_IFC_DEFINER_FORMAT_ID_SELECT);
769 
770 	MLX5_SET(definer, ptr, format_select_dw0, def_attr->dw_selector[0]);
771 	MLX5_SET(definer, ptr, format_select_dw1, def_attr->dw_selector[1]);
772 	MLX5_SET(definer, ptr, format_select_dw2, def_attr->dw_selector[2]);
773 	MLX5_SET(definer, ptr, format_select_dw3, def_attr->dw_selector[3]);
774 	MLX5_SET(definer, ptr, format_select_dw4, def_attr->dw_selector[4]);
775 	MLX5_SET(definer, ptr, format_select_dw5, def_attr->dw_selector[5]);
776 	MLX5_SET(definer, ptr, format_select_dw6, def_attr->dw_selector[6]);
777 	MLX5_SET(definer, ptr, format_select_dw7, def_attr->dw_selector[7]);
778 	MLX5_SET(definer, ptr, format_select_dw8, def_attr->dw_selector[8]);
779 
780 	MLX5_SET(definer, ptr, format_select_byte0, def_attr->byte_selector[0]);
781 	MLX5_SET(definer, ptr, format_select_byte1, def_attr->byte_selector[1]);
782 	MLX5_SET(definer, ptr, format_select_byte2, def_attr->byte_selector[2]);
783 	MLX5_SET(definer, ptr, format_select_byte3, def_attr->byte_selector[3]);
784 	MLX5_SET(definer, ptr, format_select_byte4, def_attr->byte_selector[4]);
785 	MLX5_SET(definer, ptr, format_select_byte5, def_attr->byte_selector[5]);
786 	MLX5_SET(definer, ptr, format_select_byte6, def_attr->byte_selector[6]);
787 	MLX5_SET(definer, ptr, format_select_byte7, def_attr->byte_selector[7]);
788 
789 	ptr = MLX5_ADDR_OF(definer, ptr, match_mask);
790 	memcpy(ptr, def_attr->match_mask, MLX5_FLD_SZ_BYTES(definer, match_mask));
791 
792 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
793 	if (!devx_obj->obj) {
794 		DR_LOG(ERR, "Failed to create Definer (syndrome: %#x)",
795 		       mlx5dr_cmd_get_syndrome(out));
796 		simple_free(devx_obj);
797 		rte_errno = errno;
798 		return NULL;
799 	}
800 
801 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
802 
803 	return devx_obj;
804 }
805 
806 struct mlx5dr_devx_obj *
807 mlx5dr_cmd_sq_create(struct ibv_context *ctx,
808 		     struct mlx5dr_cmd_sq_create_attr *attr)
809 {
810 	uint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
811 	uint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
812 	void *sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
813 	void *wqc = MLX5_ADDR_OF(sqc, sqc, wq);
814 	struct mlx5dr_devx_obj *devx_obj;
815 
816 	devx_obj = simple_malloc(sizeof(*devx_obj));
817 	if (!devx_obj) {
818 		DR_LOG(ERR, "Failed to create SQ");
819 		rte_errno = ENOMEM;
820 		return NULL;
821 	}
822 
823 	MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
824 	MLX5_SET(sqc, sqc, cqn, attr->cqn);
825 	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
826 	MLX5_SET(sqc, sqc, non_wire, 1);
827 	MLX5_SET(sqc, sqc, ts_format, attr->ts_format);
828 	MLX5_SET(wq, wqc, wq_type, MLX5_WQ_TYPE_CYCLIC);
829 	MLX5_SET(wq, wqc, pd, attr->pdn);
830 	MLX5_SET(wq, wqc, uar_page, attr->page_id);
831 	MLX5_SET(wq, wqc, log_wq_stride, log2above(MLX5_SEND_WQE_BB));
832 	MLX5_SET(wq, wqc, log_wq_sz, attr->log_wq_sz);
833 	MLX5_SET(wq, wqc, dbr_umem_id, attr->dbr_id);
834 	MLX5_SET(wq, wqc, wq_umem_id, attr->wq_id);
835 
836 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
837 	if (!devx_obj->obj) {
838 		simple_free(devx_obj);
839 		rte_errno = errno;
840 		return NULL;
841 	}
842 
843 	devx_obj->id = MLX5_GET(create_sq_out, out, sqn);
844 
845 	return devx_obj;
846 }
847 
848 struct mlx5dr_devx_obj *
849 mlx5dr_cmd_packet_reformat_create(struct ibv_context *ctx,
850 				  struct mlx5dr_cmd_packet_reformat_create_attr *attr)
851 {
852 	uint32_t out[MLX5_ST_SZ_DW(alloc_packet_reformat_out)] = {0};
853 	size_t insz, cmd_data_sz, cmd_total_sz;
854 	struct mlx5dr_devx_obj *devx_obj;
855 	void *prctx;
856 	void *pdata;
857 	void *in;
858 
859 	cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
860 	cmd_total_sz += MLX5_ST_SZ_BYTES(packet_reformat_context_in);
861 	cmd_data_sz = MLX5_FLD_SZ_BYTES(packet_reformat_context_in, reformat_data);
862 	insz = align(cmd_total_sz + attr->data_sz - cmd_data_sz, DW_SIZE);
863 	in = simple_calloc(1, insz);
864 	if (!in) {
865 		rte_errno = ENOMEM;
866 		return NULL;
867 	}
868 
869 	MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
870 		 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
871 
872 	prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in,
873 			     packet_reformat_context);
874 	pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
875 
876 	MLX5_SET(packet_reformat_context_in, prctx, reformat_type, attr->type);
877 	MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, attr->reformat_param_0);
878 	MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, attr->data_sz);
879 	memcpy(pdata, attr->data, attr->data_sz);
880 
881 	devx_obj = simple_malloc(sizeof(*devx_obj));
882 	if (!devx_obj) {
883 		DR_LOG(ERR, "Failed to allocate memory for packet reformat object");
884 		rte_errno = ENOMEM;
885 		goto out_free_in;
886 	}
887 
888 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, insz, out, sizeof(out));
889 	if (!devx_obj->obj) {
890 		DR_LOG(ERR, "Failed to create packet reformat");
891 		rte_errno = errno;
892 		goto out_free_devx;
893 	}
894 
895 	devx_obj->id = MLX5_GET(alloc_packet_reformat_out, out, packet_reformat_id);
896 
897 	simple_free(in);
898 
899 	return devx_obj;
900 
901 out_free_devx:
902 	simple_free(devx_obj);
903 out_free_in:
904 	simple_free(in);
905 	return NULL;
906 }
907 
908 int mlx5dr_cmd_sq_modify_rdy(struct mlx5dr_devx_obj *devx_obj)
909 {
910 	uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
911 	uint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
912 	void *sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
913 	int ret;
914 
915 	MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
916 	MLX5_SET(modify_sq_in, in, sqn, devx_obj->id);
917 	MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
918 	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
919 
920 	ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out));
921 	if (ret) {
922 		DR_LOG(ERR, "Failed to modify SQ (syndrome: %#x)",
923 		       mlx5dr_cmd_get_syndrome(out));
924 		rte_errno = errno;
925 	}
926 
927 	return ret;
928 }
929 
930 int mlx5dr_cmd_allow_other_vhca_access(struct ibv_context *ctx,
931 				       struct mlx5dr_cmd_allow_other_vhca_access_attr *attr)
932 {
933 	uint32_t out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {0};
934 	uint32_t in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {0};
935 	void *key;
936 	int ret;
937 
938 	MLX5_SET(allow_other_vhca_access_in,
939 		 in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
940 	MLX5_SET(allow_other_vhca_access_in,
941 		 in, object_type_to_be_accessed, attr->obj_type);
942 	MLX5_SET(allow_other_vhca_access_in,
943 		 in, object_id_to_be_accessed, attr->obj_id);
944 
945 	key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
946 	memcpy(key, attr->access_key, sizeof(attr->access_key));
947 
948 	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
949 	if (ret) {
950 		DR_LOG(ERR, "Failed to execute ALLOW_OTHER_VHCA_ACCESS command");
951 		rte_errno = errno;
952 		return rte_errno;
953 	}
954 
955 	return 0;
956 }
957 
958 struct mlx5dr_devx_obj *
959 mlx5dr_cmd_alias_obj_create(struct ibv_context *ctx,
960 			    struct mlx5dr_cmd_alias_obj_create_attr *alias_attr)
961 {
962 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
963 	uint32_t in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {0};
964 	struct mlx5dr_devx_obj *devx_obj;
965 	void *attr;
966 	void *key;
967 
968 	devx_obj = simple_malloc(sizeof(*devx_obj));
969 	if (!devx_obj) {
970 		DR_LOG(ERR, "Failed to allocate memory for ALIAS general object");
971 		rte_errno = ENOMEM;
972 		return NULL;
973 	}
974 
975 	attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
976 	MLX5_SET(general_obj_in_cmd_hdr,
977 		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
978 	MLX5_SET(general_obj_in_cmd_hdr,
979 		 attr, obj_type, alias_attr->obj_type);
980 	MLX5_SET(general_obj_in_cmd_hdr, attr, alias_object, 1);
981 
982 	attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
983 	MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
984 	MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);
985 
986 	key = MLX5_ADDR_OF(alias_context, attr, access_key);
987 	memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));
988 
989 	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
990 	if (!devx_obj->obj) {
991 		DR_LOG(ERR, "Failed to create ALIAS OBJ (syndrome: %#x)",
992 		       mlx5dr_cmd_get_syndrome(out));
993 		simple_free(devx_obj);
994 		rte_errno = errno;
995 		return NULL;
996 	}
997 
998 	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
999 
1000 	return devx_obj;
1001 }
1002 
/* Ask FW to execute a single GTA WQE on the caller's behalf and return the
 * resulting CQE in @ret_cqe. The WQE control, GTA control and GTA data
 * segments are copied verbatim from @attr into the command layout; the
 * second data segment is optional (attr->gta_data_1 may be NULL).
 * Returns 0 on success; otherwise sets and returns rte_errno.
 */
int mlx5dr_cmd_generate_wqe(struct ibv_context *ctx,
			    struct mlx5dr_cmd_generate_wqe_attr *attr,
			    struct mlx5_cqe64 *ret_cqe)
{
	uint32_t out[MLX5_ST_SZ_DW(generate_wqe_out)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(generate_wqe_in)] = {0};
	uint8_t status;
	void *ptr;
	int ret;

	MLX5_SET(generate_wqe_in, in, opcode, MLX5_CMD_OP_GENERATE_WQE);
	MLX5_SET(generate_wqe_in, in, pdn, attr->pdn);

	/* Copy each WQE segment into its fixed slot in the command layout */
	ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_ctrl);
	memcpy(ptr, attr->wqe_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_ctrl));

	ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_ctrl);
	memcpy(ptr, attr->gta_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_ctrl));

	ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_0);
	memcpy(ptr, attr->gta_data_0, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_0));

	if (attr->gta_data_1) {
		ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_1);
		memcpy(ptr, attr->gta_data_1, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_1));
	}

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to write GTA WQE using FW");
		rte_errno = errno;
		return rte_errno;
	}

	/* Command may succeed while the emulated CQE itself reports an error */
	status = MLX5_GET(generate_wqe_out, out, status);
	if (status) {
		DR_LOG(ERR, "Invalid FW CQE status %d", status);
		rte_errno = EINVAL;
		return rte_errno;
	}

	/* Hand the FW-produced CQE back to the caller */
	ptr = MLX5_ADDR_OF(generate_wqe_out, out, cqe_data);
	memcpy(ret_cqe, ptr, sizeof(*ret_cqe));

	return 0;
}
1049 
/* Query all HCA capabilities relevant to HWS and fill @caps.
 *
 * Issues a series of QUERY_HCA_CAP commands (general device, device_2,
 * NIC flow table, and conditionally WQE-based FT / eswitch / RoCE caps),
 * then queries device attributes for the FW version and the wire port
 * for the REG_C metadata. Returns 0 on success; otherwise sets and
 * returns rte_errno.
 */
int mlx5dr_cmd_query_caps(struct ibv_context *ctx,
			  struct mlx5dr_cmd_query_caps *caps)
{
	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
	const struct flow_hw_port_info *port_info;
	struct ibv_device_attr_ex attr_ex;
	u32 res;
	int ret;

	/* First pass: general device capabilities */
	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to query device caps");
		rte_errno = errno;
		return rte_errno;
	}

	caps->wqe_based_update =
		MLX5_GET(query_hca_cap_out, out,
			 capability.cmd_hca_cap.wqe_based_flow_table_update_cap);

	caps->eswitch_manager = MLX5_GET(query_hca_cap_out, out,
					 capability.cmd_hca_cap.eswitch_manager);

	caps->flex_protocols = MLX5_GET(query_hca_cap_out, out,
					capability.cmd_hca_cap.flex_parser_protocols);

	/* Effective granularity = reported granularity minus its offset */
	caps->log_header_modify_argument_granularity =
		MLX5_GET(query_hca_cap_out, out,
			 capability.cmd_hca_cap.log_header_modify_argument_granularity);

	caps->log_header_modify_argument_granularity -=
			MLX5_GET(query_hca_cap_out, out,
				 capability.cmd_hca_cap.
				 log_header_modify_argument_granularity_offset);

	caps->log_header_modify_argument_max_alloc =
		MLX5_GET(query_hca_cap_out, out,
			 capability.cmd_hca_cap.log_header_modify_argument_max_alloc);

	/* 64-bit bitmap of supported match definer formats */
	caps->definer_format_sup =
		MLX5_GET64(query_hca_cap_out, out,
			   capability.cmd_hca_cap.match_definer_format_supported);

	caps->vhca_id = MLX5_GET(query_hca_cap_out, out,
				 capability.cmd_hca_cap.vhca_id);

	caps->sq_ts_format = MLX5_GET(query_hca_cap_out, out,
				      capability.cmd_hca_cap.sq_ts_format);

	caps->ipsec_offload = MLX5_GET(query_hca_cap_out, out,
				      capability.cmd_hca_cap.ipsec_offload);

	caps->roce = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.roce);

	/* Second pass: general device capabilities page 2 */
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to query device caps");
		rte_errno = errno;
		return rte_errno;
	}

	caps->full_dw_jumbo_support = MLX5_GET(query_hca_cap_out, out,
					       capability.cmd_hca_cap_2.
					       format_select_dw_8_6_ext);

	caps->format_select_gtpu_dw_0 = MLX5_GET(query_hca_cap_out, out,
						 capability.cmd_hca_cap_2.
						 format_select_dw_gtpu_dw_0);

	caps->format_select_gtpu_dw_1 = MLX5_GET(query_hca_cap_out, out,
						 capability.cmd_hca_cap_2.
						 format_select_dw_gtpu_dw_1);

	caps->format_select_gtpu_dw_2 = MLX5_GET(query_hca_cap_out, out,
						 capability.cmd_hca_cap_2.
						 format_select_dw_gtpu_dw_2);

	caps->format_select_gtpu_ext_dw_0 = MLX5_GET(query_hca_cap_out, out,
						     capability.cmd_hca_cap_2.
						     format_select_dw_gtpu_first_ext_dw_0);

	caps->supp_type_gen_wqe = MLX5_GET(query_hca_cap_out, out,
					   capability.cmd_hca_cap_2.
					   generate_wqe_type);

	/* check cross-VHCA support in cap2:
	 * cross_vhca_resources requires STC->TIR, STC->FT and FT->RTC
	 * object-to-object support...
	 */
	res =
	MLX5_GET(query_hca_cap_out, out,
		capability.cmd_hca_cap_2.cross_vhca_object_to_object_supported);

	caps->cross_vhca_resources = (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_STC_TO_TIR) &&
				     (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_STC_TO_FT) &&
				     (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_FT_TO_RTC);

	/* ...and TIR, FT and RTC must all be allowed for other-VHCA access */
	res =
	MLX5_GET(query_hca_cap_out, out,
		capability.cmd_hca_cap_2.allowed_object_for_other_vhca_access);

	caps->cross_vhca_resources &= (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_TIR) &&
				      (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_FT) &&
				      (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_RTC);

	caps->flow_table_hash_type = MLX5_GET(query_hca_cap_out, out,
					      capability.cmd_hca_cap_2.flow_table_hash_type);

	caps->encap_entropy_hash_type = MLX5_GET(query_hca_cap_out, out,
						 capability.cmd_hca_cap_2.encap_entropy_hash_type);

	/* Third pass: NIC flow table capabilities */
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to query flow table caps");
		rte_errno = errno;
		return rte_errno;
	}

	caps->nic_ft.max_level = MLX5_GET(query_hca_cap_out, out,
					  capability.flow_table_nic_cap.
					  flow_table_properties_nic_receive.max_ft_level);

	caps->nic_ft.reparse = MLX5_GET(query_hca_cap_out, out,
					capability.flow_table_nic_cap.
					flow_table_properties_nic_receive.reparse);

	caps->nic_ft.ignore_flow_level_rtc_valid =
		MLX5_GET(query_hca_cap_out,
			 out,
			 capability.flow_table_nic_cap.
			 flow_table_properties_nic_receive.ignore_flow_level_rtc_valid);

	/* check cross-VHCA support in flow table properties */
	res =
	MLX5_GET(query_hca_cap_out, out,
		capability.flow_table_nic_cap.flow_table_properties_nic_receive.cross_vhca_object);
	caps->cross_vhca_resources &= res;

	/* Only query WQE-based FT caps when the device advertises support */
	if (caps->wqe_based_update) {
		MLX5_SET(query_hca_cap_in, in, op_mod,
			 MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE |
			 MLX5_HCA_CAP_OPMOD_GET_CUR);

		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
		if (ret) {
			DR_LOG(ERR, "Failed to query WQE based FT caps");
			rte_errno = errno;
			return rte_errno;
		}

		caps->rtc_reparse_mode = MLX5_GET(query_hca_cap_out, out,
						  capability.wqe_based_flow_table_cap.
						  rtc_reparse_mode);

		caps->ste_format = MLX5_GET(query_hca_cap_out, out,
					    capability.wqe_based_flow_table_cap.
					    ste_format);

		caps->rtc_index_mode = MLX5_GET(query_hca_cap_out, out,
						capability.wqe_based_flow_table_cap.
						rtc_index_mode);

		caps->rtc_log_depth_max = MLX5_GET(query_hca_cap_out, out,
						   capability.wqe_based_flow_table_cap.
						   rtc_log_depth_max);

		caps->ste_alloc_log_max = MLX5_GET(query_hca_cap_out, out,
						   capability.wqe_based_flow_table_cap.
						   ste_alloc_log_max);

		caps->ste_alloc_log_gran = MLX5_GET(query_hca_cap_out, out,
						    capability.wqe_based_flow_table_cap.
						    ste_alloc_log_granularity);

		caps->trivial_match_definer = MLX5_GET(query_hca_cap_out, out,
						       capability.wqe_based_flow_table_cap.
						       trivial_match_definer);

		caps->stc_alloc_log_max = MLX5_GET(query_hca_cap_out, out,
						   capability.wqe_based_flow_table_cap.
						   stc_alloc_log_max);

		caps->stc_alloc_log_gran = MLX5_GET(query_hca_cap_out, out,
						    capability.wqe_based_flow_table_cap.
						    stc_alloc_log_granularity);

		caps->rtc_hash_split_table = MLX5_GET(query_hca_cap_out, out,
						      capability.wqe_based_flow_table_cap.
						      rtc_hash_split_table);

		caps->rtc_linear_lookup_table = MLX5_GET(query_hca_cap_out, out,
							 capability.wqe_based_flow_table_cap.
							 rtc_linear_lookup_table);

		caps->access_index_mode = MLX5_GET(query_hca_cap_out, out,
						   capability.wqe_based_flow_table_cap.
						   access_index_mode);

		caps->linear_match_definer = MLX5_GET(query_hca_cap_out, out,
						      capability.wqe_based_flow_table_cap.
						      linear_match_definer_reg_c3);

		caps->rtc_max_hash_def_gen_wqe = MLX5_GET(query_hca_cap_out, out,
							  capability.wqe_based_flow_table_cap.
							  rtc_max_num_hash_definer_gen_wqe);

		caps->supp_ste_format_gen_wqe = MLX5_GET(query_hca_cap_out, out,
							 capability.wqe_based_flow_table_cap.
							 ste_format_gen_wqe);

		caps->fdb_tir_stc = MLX5_GET(query_hca_cap_out, out,
					     capability.wqe_based_flow_table_cap.
					     fdb_jump_to_tir_stc);
	}

	/* Eswitch managers additionally query FDB and eswitch caps */
	if (caps->eswitch_manager) {
		MLX5_SET(query_hca_cap_in, in, op_mod,
			 MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE |
			 MLX5_HCA_CAP_OPMOD_GET_CUR);

		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
		if (ret) {
			DR_LOG(ERR, "Failed to query flow table esw caps");
			rte_errno = errno;
			return rte_errno;
		}

		caps->fdb_ft.max_level = MLX5_GET(query_hca_cap_out, out,
						  capability.flow_table_nic_cap.
						  flow_table_properties_nic_receive.max_ft_level);

		caps->fdb_ft.reparse = MLX5_GET(query_hca_cap_out, out,
						capability.flow_table_nic_cap.
						flow_table_properties_nic_receive.reparse);

		MLX5_SET(query_hca_cap_in, in, op_mod,
			 MLX5_SET_HCA_CAP_OP_MOD_ESW | MLX5_HCA_CAP_OPMOD_GET_CUR);

		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
		if (ret) {
			DR_LOG(ERR, "Query eswitch capabilities failed %d", ret);
			rte_errno = errno;
			return rte_errno;
		}

		/* Only trust the vport number when FW marks it as valid */
		if (MLX5_GET(query_hca_cap_out, out,
			     capability.esw_cap.esw_manager_vport_number_valid))
			caps->eswitch_manager_vport_number =
			MLX5_GET(query_hca_cap_out, out,
				 capability.esw_cap.esw_manager_vport_number);

		caps->merged_eswitch = MLX5_GET(query_hca_cap_out, out,
						capability.esw_cap.merged_eswitch);
	}

	if (caps->roce) {
		MLX5_SET(query_hca_cap_in, in, op_mod,
			 MLX5_GET_HCA_CAP_OP_MOD_ROCE |
			 MLX5_HCA_CAP_OPMOD_GET_CUR);

		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
		if (ret) {
			DR_LOG(ERR, "Failed to query roce caps");
			rte_errno = errno;
			return rte_errno;
		}

		caps->roce_max_src_udp_port = MLX5_GET(query_hca_cap_out, out,
						capability.roce_caps.r_roce_max_src_udp_port);
		caps->roce_min_src_udp_port = MLX5_GET(query_hca_cap_out, out,
						capability.roce_caps.r_roce_min_src_udp_port);
	}

	ret = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
	if (ret) {
		DR_LOG(ERR, "Failed to query device attributes");
		rte_errno = ret;
		return rte_errno;
	}

	strlcpy(caps->fw_ver, attr_ex.orig_attr.fw_ver, sizeof(caps->fw_ver));

	/* Wire port REG_C info is best-effort: failure is logged, not fatal */
	port_info = flow_hw_get_wire_port(ctx);
	if (port_info) {
		caps->wire_regc = port_info->regc_value;
		caps->wire_regc_mask = port_info->regc_mask;
	} else {
		DR_LOG(INFO, "Failed to query wire port regc value");
	}

	return ret;
}
1353 
1354 int mlx5dr_cmd_query_ib_port(struct ibv_context *ctx,
1355 			     struct mlx5dr_cmd_query_vport_caps *vport_caps,
1356 			     uint32_t port_num)
1357 {
1358 	struct mlx5_port_info port_info = {0};
1359 	uint32_t flags;
1360 	int ret;
1361 
1362 	flags = MLX5_PORT_QUERY_VPORT | MLX5_PORT_QUERY_ESW_OWNER_VHCA_ID;
1363 
1364 	ret = mlx5_glue->devx_port_query(ctx, port_num, &port_info);
1365 	/* Check if query succeed and vport is enabled */
1366 	if (ret || (port_info.query_flags & flags) != flags) {
1367 		rte_errno = ENOTSUP;
1368 		return rte_errno;
1369 	}
1370 
1371 	vport_caps->vport_num = port_info.vport_id;
1372 	vport_caps->esw_owner_vhca_id = port_info.esw_owner_vhca_id;
1373 
1374 	if (port_info.query_flags & MLX5_PORT_QUERY_REG_C0) {
1375 		vport_caps->metadata_c = port_info.vport_meta_tag;
1376 		vport_caps->metadata_c_mask = port_info.vport_meta_mask;
1377 	}
1378 
1379 	return 0;
1380 }
1381