/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include "mlx5dr_internal.h"

int mlx5dr_cmd_destroy_obj(struct mlx5dr_devx_obj *devx_obj)
{
	int ret;

	ret = mlx5_glue->devx_obj_destroy(devx_obj->obj);
	simple_free(devx_obj);

	return ret;
}

struct mlx5dr_devx_obj *
mlx5dr_cmd_flow_table_create(struct ibv_context *ctx,
			     struct mlx5dr_cmd_ft_create_attr *ft_attr)
{
	uint32_t out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
	struct mlx5dr_devx_obj *devx_obj;
	void *ft_ctx;

	devx_obj = simple_malloc(sizeof(*devx_obj));
	if (!devx_obj) {
		DR_LOG(ERR, "Failed to allocate memory for flow table object");
		rte_errno = ENOMEM;
		return NULL;
	}

	MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
	MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type);

	ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
	MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
	MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid);

	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
	if (!devx_obj->obj) {
		DR_LOG(ERR, "Failed to create FT");
		simple_free(devx_obj);
		rte_errno = errno;
		return NULL;
	}

	devx_obj->id = MLX5_GET(create_flow_table_out, out, table_id);

	return devx_obj;
}

int
mlx5dr_cmd_flow_table_modify(struct mlx5dr_devx_obj *devx_obj,
			     struct mlx5dr_cmd_ft_modify_attr *ft_attr)
{
	uint32_t out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
	void *ft_ctx;
	int ret;

	MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE);
	MLX5_SET(modify_flow_table_in, in, table_type, ft_attr->type);
	MLX5_SET(modify_flow_table_in, in, modify_field_select, ft_attr->modify_fs);
	MLX5_SET(modify_flow_table_in, in, table_id, devx_obj->id);

	ft_ctx = MLX5_ADDR_OF(modify_flow_table_in, in, flow_table_context);

	MLX5_SET(flow_table_context, ft_ctx, table_miss_action, ft_attr->table_miss_action);
	MLX5_SET(flow_table_context, ft_ctx, table_miss_id, ft_attr->table_miss_id);
	MLX5_SET(flow_table_context, ft_ctx, rtc_id_0, ft_attr->rtc_id_0);
	MLX5_SET(flow_table_context, ft_ctx, rtc_id_1, ft_attr->rtc_id_1);

	ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to modify FT");
		rte_errno = errno;
	}

	return ret;
}

static struct mlx5dr_devx_obj *
mlx5dr_cmd_flow_group_create(struct ibv_context *ctx,
			     struct mlx5dr_cmd_fg_attr *fg_attr)
{
	uint32_t out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(create_flow_group_in)] = {0};
	struct mlx5dr_devx_obj *devx_obj;

	devx_obj = simple_malloc(sizeof(*devx_obj));
	if (!devx_obj) {
		DR_LOG(ERR, "Failed to allocate memory for flow group object");
		rte_errno = ENOMEM;
		return NULL;
	}

	MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET(create_flow_group_in, in, table_type, fg_attr->table_type);
	MLX5_SET(create_flow_group_in, in, table_id, fg_attr->table_id);

	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
	if (!devx_obj->obj) {
		DR_LOG(ERR, "Failed to create Flow group");
		simple_free(devx_obj);
		rte_errno = errno;
		return NULL;
	}

	devx_obj->id = MLX5_GET(create_flow_group_out, out, group_id);

	return devx_obj;
}
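
/* Helper for mlx5dr_cmd_miss_ft_create() below: install a single FTE in the
 * given table/group that forwards traffic to the destination vport.
 */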
static struct mlx5dr_devx_obj *
mlx5dr_cmd_set_vport_fte(struct ibv_context *ctx,
			 uint32_t table_type,
			 uint32_t table_id,
			 uint32_t group_id,
			 uint32_t vport_id)
{
	uint32_t in[MLX5_ST_SZ_DW(set_fte_in) + MLX5_ST_SZ_DW(dest_format)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
	struct mlx5dr_devx_obj *devx_obj;
	void *in_flow_context;
	void *in_dests;

	devx_obj = simple_malloc(sizeof(*devx_obj));
	if (!devx_obj) {
		DR_LOG(ERR, "Failed to allocate memory for fte object");
		rte_errno = ENOMEM;
		return NULL;
	}

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, table_type, table_type);
	MLX5_SET(set_fte_in, in, table_id, table_id);

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);
	MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
	MLX5_SET(flow_context, in_flow_context, action, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	MLX5_SET(dest_format, in_dests, destination_type,
		 MLX5_FLOW_DESTINATION_TYPE_VPORT);
	MLX5_SET(dest_format, in_dests, destination_id, vport_id);

	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
	if (!devx_obj->obj) {
		DR_LOG(ERR, "Failed to create FTE");
		simple_free(devx_obj);
		rte_errno = errno;
		return NULL;
	}

	return devx_obj;
}

void mlx5dr_cmd_miss_ft_destroy(struct mlx5dr_cmd_forward_tbl *tbl)
{
	mlx5dr_cmd_destroy_obj(tbl->fte);
	mlx5dr_cmd_destroy_obj(tbl->fg);
	mlx5dr_cmd_destroy_obj(tbl->ft);
}

struct mlx5dr_cmd_forward_tbl *
mlx5dr_cmd_miss_ft_create(struct ibv_context *ctx,
			  struct mlx5dr_cmd_ft_create_attr *ft_attr,
			  uint32_t vport)
{
	struct mlx5dr_cmd_fg_attr fg_attr = {0};
	struct mlx5dr_cmd_forward_tbl *tbl;

	tbl = simple_calloc(1, sizeof(*tbl));
	if (!tbl) {
		DR_LOG(ERR, "Failed to allocate memory for forward default");
		rte_errno = ENOMEM;
		return NULL;
	}

	tbl->ft = mlx5dr_cmd_flow_table_create(ctx, ft_attr);
	if (!tbl->ft) {
		DR_LOG(ERR, "Failed to create FT for miss-table");
		goto free_tbl;
	}

	fg_attr.table_id = tbl->ft->id;
	fg_attr.table_type = ft_attr->type;

	tbl->fg = mlx5dr_cmd_flow_group_create(ctx, &fg_attr);
	if (!tbl->fg) {
		DR_LOG(ERR, "Failed to create FG for miss-table");
		goto free_ft;
	}

	tbl->fte = mlx5dr_cmd_set_vport_fte(ctx, ft_attr->type, tbl->ft->id, tbl->fg->id, vport);
	if (!tbl->fte) {
		DR_LOG(ERR, "Failed to create FTE for miss-table");
		goto free_fg;
	}
	return tbl;

free_fg:
	mlx5dr_cmd_destroy_obj(tbl->fg);
free_ft:
	mlx5dr_cmd_destroy_obj(tbl->ft);
free_tbl:
	simple_free(tbl);
	return NULL;
}

void mlx5dr_cmd_set_attr_connect_miss_tbl(struct mlx5dr_context *ctx,
					  uint32_t fw_ft_type,
					  enum mlx5dr_table_type type,
					  struct mlx5dr_cmd_ft_modify_attr *ft_attr)
{
	struct mlx5dr_devx_obj *default_miss_tbl;

	if (type != MLX5DR_TABLE_TYPE_FDB)
		return;

	default_miss_tbl = ctx->common_res[type].default_miss->ft;
	if (!default_miss_tbl) {
		assert(false);
		return;
	}
	ft_attr->modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
	ft_attr->type = fw_ft_type;
	ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;
	ft_attr->table_miss_id = default_miss_tbl->id;
}
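
/* Create an RTC general object over DevX. The fields below tie a match
 * definer, an STE range (ste_table_base_id/offset) and an STC base to a hash
 * table of 2^log_hash_size entries; lookup misses go to miss_flow_table_id.
 */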
struct mlx5dr_devx_obj *
mlx5dr_cmd_rtc_create(struct ibv_context *ctx,
		      struct mlx5dr_cmd_rtc_create_attr *rtc_attr)
{
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(create_rtc_in)] = {0};
	struct mlx5dr_devx_obj *devx_obj;
	void *attr;

	devx_obj = simple_malloc(sizeof(*devx_obj));
	if (!devx_obj) {
		DR_LOG(ERR, "Failed to allocate memory for RTC object");
		rte_errno = ENOMEM;
		return NULL;
	}

	attr = MLX5_ADDR_OF(create_rtc_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_RTC);

	attr = MLX5_ADDR_OF(create_rtc_in, in, rtc);
	MLX5_SET(rtc, attr, ste_format, rtc_attr->is_jumbo ?
		 MLX5_IFC_RTC_STE_FORMAT_11DW :
		 MLX5_IFC_RTC_STE_FORMAT_8DW);
	MLX5_SET(rtc, attr, pd, rtc_attr->pd);
	MLX5_SET(rtc, attr, update_index_mode, rtc_attr->update_index_mode);
	MLX5_SET(rtc, attr, log_depth, rtc_attr->log_depth);
	MLX5_SET(rtc, attr, log_hash_size, rtc_attr->log_size);
	MLX5_SET(rtc, attr, table_type, rtc_attr->table_type);
	MLX5_SET(rtc, attr, match_definer_id, rtc_attr->definer_id);
	MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base);
	MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base);
	MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset);
	MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id);
	MLX5_SET(rtc, attr, reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS);

	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
	if (!devx_obj->obj) {
		DR_LOG(ERR, "Failed to create RTC");
		simple_free(devx_obj);
		rte_errno = errno;
		return NULL;
	}

	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return devx_obj;
}

struct mlx5dr_devx_obj *
mlx5dr_cmd_stc_create(struct ibv_context *ctx,
		      struct mlx5dr_cmd_stc_create_attr *stc_attr)
{
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
	struct mlx5dr_devx_obj *devx_obj;
	void *attr;

	devx_obj = simple_malloc(sizeof(*devx_obj));
	if (!devx_obj) {
		DR_LOG(ERR, "Failed to allocate memory for STC object");
		rte_errno = ENOMEM;
		return NULL;
	}

	attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STC);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, log_obj_range, stc_attr->log_obj_range);

	attr = MLX5_ADDR_OF(create_stc_in, in, stc);
	MLX5_SET(stc, attr, table_type, stc_attr->table_type);

	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
	if (!devx_obj->obj) {
		DR_LOG(ERR, "Failed to create STC");
		simple_free(devx_obj);
		rte_errno = errno;
		return NULL;
	}

	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return devx_obj;
}
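
/* Fill the action-type specific part of the STC (stc_param) from stc_attr.
 * Returns 0 on success, or EINVAL via rte_errno for unsupported action types.
 */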
static int
mlx5dr_cmd_stc_modify_set_stc_param(struct mlx5dr_cmd_stc_modify_attr *stc_attr,
				    void *stc_parm)
{
	switch (stc_attr->action_type) {
	case MLX5_IFC_STC_ACTION_TYPE_COUNTER:
		MLX5_SET(stc_ste_param_flow_counter, stc_parm, flow_counter_id, stc_attr->id);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR:
		MLX5_SET(stc_ste_param_tir, stc_parm, tirn, stc_attr->dest_tir_num);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT:
		MLX5_SET(stc_ste_param_table, stc_parm, table_id, stc_attr->dest_table_id);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST:
		MLX5_SET(stc_ste_param_header_modify_list, stc_parm,
			 header_modify_pattern_id, stc_attr->modify_header.pattern_id);
		MLX5_SET(stc_ste_param_header_modify_list, stc_parm,
			 header_modify_argument_id, stc_attr->modify_header.arg_id);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE:
		MLX5_SET(stc_ste_param_remove, stc_parm, action_type,
			 MLX5_MODIFICATION_TYPE_REMOVE);
		MLX5_SET(stc_ste_param_remove, stc_parm, decap,
			 stc_attr->remove_header.decap);
		MLX5_SET(stc_ste_param_remove, stc_parm, remove_start_anchor,
			 stc_attr->remove_header.start_anchor);
		MLX5_SET(stc_ste_param_remove, stc_parm, remove_end_anchor,
			 stc_attr->remove_header.end_anchor);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT:
		MLX5_SET(stc_ste_param_insert, stc_parm, action_type,
			 MLX5_MODIFICATION_TYPE_INSERT);
		MLX5_SET(stc_ste_param_insert, stc_parm, encap,
			 stc_attr->insert_header.encap);
		MLX5_SET(stc_ste_param_insert, stc_parm, inline_data,
			 stc_attr->insert_header.is_inline);
		MLX5_SET(stc_ste_param_insert, stc_parm, insert_anchor,
			 stc_attr->insert_header.insert_anchor);
		/* HW gets the next 2 sizes in words */
		MLX5_SET(stc_ste_param_insert, stc_parm, insert_size,
			 stc_attr->insert_header.header_size / 2);
		MLX5_SET(stc_ste_param_insert, stc_parm, insert_offset,
			 stc_attr->insert_header.insert_offset / 2);
		MLX5_SET(stc_ste_param_insert, stc_parm, insert_argument,
			 stc_attr->insert_header.arg_id);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_COPY:
	case MLX5_IFC_STC_ACTION_TYPE_SET:
	case MLX5_IFC_STC_ACTION_TYPE_ADD:
		*(__be64 *)stc_parm = stc_attr->modify_action.data;
		break;
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT:
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK:
		MLX5_SET(stc_ste_param_vport, stc_parm, vport_number,
			 stc_attr->vport.vport_num);
		MLX5_SET(stc_ste_param_vport, stc_parm, eswitch_owner_vhca_id,
			 stc_attr->vport.esw_owner_vhca_id);
		MLX5_SET(stc_ste_param_vport, stc_parm, eswitch_owner_vhca_id_valid, 1);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_DROP:
	case MLX5_IFC_STC_ACTION_TYPE_NOP:
	case MLX5_IFC_STC_ACTION_TYPE_TAG:
	case MLX5_IFC_STC_ACTION_TYPE_ALLOW:
		break;
	case MLX5_IFC_STC_ACTION_TYPE_ASO:
		MLX5_SET(stc_ste_param_execute_aso, stc_parm, aso_object_id,
			 stc_attr->aso.devx_obj_id);
		MLX5_SET(stc_ste_param_execute_aso, stc_parm, return_reg_id,
			 stc_attr->aso.return_reg_id);
		MLX5_SET(stc_ste_param_execute_aso, stc_parm, aso_type,
			 stc_attr->aso.aso_type);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE:
		MLX5_SET(stc_ste_param_ste_table, stc_parm, ste_obj_id,
			 stc_attr->ste_table.ste_obj_id);
		MLX5_SET(stc_ste_param_ste_table, stc_parm, match_definer_id,
			 stc_attr->ste_table.match_definer_id);
		MLX5_SET(stc_ste_param_ste_table, stc_parm, log_hash_size,
			 stc_attr->ste_table.log_hash_size);
		break;
	case MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS:
		MLX5_SET(stc_ste_param_remove_words, stc_parm, action_type,
			 MLX5_MODIFICATION_TYPE_REMOVE_WORDS);
		MLX5_SET(stc_ste_param_remove_words, stc_parm, remove_start_anchor,
			 stc_attr->remove_words.start_anchor);
		MLX5_SET(stc_ste_param_remove_words, stc_parm,
			 remove_size, stc_attr->remove_words.num_of_words);
		break;
	default:
		DR_LOG(ERR, "Not supported type %d", stc_attr->action_type);
		rte_errno = EINVAL;
		return rte_errno;
	}
	return 0;
}
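
/* Program a single STC entry inside an allocated STC range: the command
 * header selects the STC object and offset, while the stc section carries
 * the action type and its parameters.
 */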
int
mlx5dr_cmd_stc_modify(struct mlx5dr_devx_obj *devx_obj,
		      struct mlx5dr_cmd_stc_modify_attr *stc_attr)
{
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
	void *stc_parm;
	void *attr;
	int ret;

	attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, devx_obj->id);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_offset, stc_attr->stc_offset);

	attr = MLX5_ADDR_OF(create_stc_in, in, stc);
	MLX5_SET(stc, attr, ste_action_offset, stc_attr->action_offset);
	MLX5_SET(stc, attr, action_type, stc_attr->action_type);
	MLX5_SET64(stc, attr, modify_field_select,
		   MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC);

	/* Set destination TIRN, TAG, FT ID, STE ID */
	stc_parm = MLX5_ADDR_OF(stc, attr, stc_param);
	ret = mlx5dr_cmd_stc_modify_set_stc_param(stc_attr, stc_parm);
	if (ret)
		return ret;

	ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to modify STC FW action_type %d", stc_attr->action_type);
		rte_errno = errno;
	}

	return ret;
}

struct mlx5dr_devx_obj *
mlx5dr_cmd_arg_create(struct ibv_context *ctx,
		      uint16_t log_obj_range,
		      uint32_t pd)
{
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(create_arg_in)] = {0};
	struct mlx5dr_devx_obj *devx_obj;
	void *attr;

	devx_obj = simple_malloc(sizeof(*devx_obj));
	if (!devx_obj) {
		DR_LOG(ERR, "Failed to allocate memory for ARG object");
		rte_errno = ENOMEM;
		return NULL;
	}

	attr = MLX5_ADDR_OF(create_arg_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_ARG);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, log_obj_range, log_obj_range);

	attr = MLX5_ADDR_OF(create_arg_in, in, arg);
	MLX5_SET(arg, attr, access_pd, pd);

	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
	if (!devx_obj->obj) {
		DR_LOG(ERR, "Failed to create ARG");
		simple_free(devx_obj);
		rte_errno = errno;
		return NULL;
	}

	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return devx_obj;
}
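
/* Create a modify-header pattern object. pattern_length is given in bytes
 * and must not exceed MAX_ACTIONS_DATA_IN_HEADER_MODIFY; the PRM field
 * expects the length in double-dwords, hence the division by 2 * DW_SIZE.
 */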
struct mlx5dr_devx_obj *
mlx5dr_cmd_header_modify_pattern_create(struct ibv_context *ctx,
					uint32_t pattern_length,
					uint8_t *actions)
{
	uint32_t in[MLX5_ST_SZ_DW(create_header_modify_pattern_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	struct mlx5dr_devx_obj *devx_obj;
	void *pattern_data;
	void *pattern;
	void *attr;

	if (pattern_length > MAX_ACTIONS_DATA_IN_HEADER_MODIFY) {
		DR_LOG(ERR, "Pattern length %d exceeds limit %d",
		       pattern_length, MAX_ACTIONS_DATA_IN_HEADER_MODIFY);
		rte_errno = EINVAL;
		return NULL;
	}

	devx_obj = simple_malloc(sizeof(*devx_obj));
	if (!devx_obj) {
		DR_LOG(ERR, "Failed to allocate memory for header_modify_pattern object");
		rte_errno = ENOMEM;
		return NULL;
	}

	attr = MLX5_ADDR_OF(create_header_modify_pattern_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_MODIFY_HEADER_PATTERN);

	pattern = MLX5_ADDR_OF(create_header_modify_pattern_in, in, pattern);
	/* Pattern_length is in ddwords */
	MLX5_SET(header_modify_pattern_in, pattern, pattern_length, pattern_length / (2 * DW_SIZE));

	pattern_data = MLX5_ADDR_OF(header_modify_pattern_in, pattern, pattern_data);
	memcpy(pattern_data, actions, pattern_length);

	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
	if (!devx_obj->obj) {
		DR_LOG(ERR, "Failed to create header_modify_pattern");
		rte_errno = errno;
		goto free_obj;
	}

	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return devx_obj;

free_obj:
	simple_free(devx_obj);
	return NULL;
}

struct mlx5dr_devx_obj *
mlx5dr_cmd_ste_create(struct ibv_context *ctx,
		      struct mlx5dr_cmd_ste_create_attr *ste_attr)
{
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(create_ste_in)] = {0};
	struct mlx5dr_devx_obj *devx_obj;
	void *attr;

	devx_obj = simple_malloc(sizeof(*devx_obj));
	if (!devx_obj) {
		DR_LOG(ERR, "Failed to allocate memory for STE object");
		rte_errno = ENOMEM;
		return NULL;
	}

	attr = MLX5_ADDR_OF(create_ste_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STE);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, log_obj_range, ste_attr->log_obj_range);

	attr = MLX5_ADDR_OF(create_ste_in, in, ste);
	MLX5_SET(ste, attr, table_type, ste_attr->table_type);

	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
	if (!devx_obj->obj) {
		DR_LOG(ERR, "Failed to create STE");
		simple_free(devx_obj);
		rte_errno = errno;
		return NULL;
	}

	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return devx_obj;
}
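
/* Create a match definer general object: the DW and byte selectors pick
 * which packet/metadata words the definer samples, and match_mask selects
 * the bits that participate in matching.
 */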
struct mlx5dr_devx_obj *
mlx5dr_cmd_definer_create(struct ibv_context *ctx,
			  struct mlx5dr_cmd_definer_create_attr *def_attr)
{
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(create_definer_in)] = {0};
	struct mlx5dr_devx_obj *devx_obj;
	void *ptr;

	devx_obj = simple_malloc(sizeof(*devx_obj));
	if (!devx_obj) {
		DR_LOG(ERR, "Failed to allocate memory for definer object");
		rte_errno = ENOMEM;
		return NULL;
	}

	MLX5_SET(general_obj_in_cmd_hdr,
		 in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr,
		 in, obj_type, MLX5_GENERAL_OBJ_TYPE_DEFINER);

	ptr = MLX5_ADDR_OF(create_definer_in, in, definer);
	MLX5_SET(definer, ptr, format_id, MLX5_IFC_DEFINER_FORMAT_ID_SELECT);

	MLX5_SET(definer, ptr, format_select_dw0, def_attr->dw_selector[0]);
	MLX5_SET(definer, ptr, format_select_dw1, def_attr->dw_selector[1]);
	MLX5_SET(definer, ptr, format_select_dw2, def_attr->dw_selector[2]);
	MLX5_SET(definer, ptr, format_select_dw3, def_attr->dw_selector[3]);
	MLX5_SET(definer, ptr, format_select_dw4, def_attr->dw_selector[4]);
	MLX5_SET(definer, ptr, format_select_dw5, def_attr->dw_selector[5]);
	MLX5_SET(definer, ptr, format_select_dw6, def_attr->dw_selector[6]);
	MLX5_SET(definer, ptr, format_select_dw7, def_attr->dw_selector[7]);
	MLX5_SET(definer, ptr, format_select_dw8, def_attr->dw_selector[8]);

	MLX5_SET(definer, ptr, format_select_byte0, def_attr->byte_selector[0]);
	MLX5_SET(definer, ptr, format_select_byte1, def_attr->byte_selector[1]);
	MLX5_SET(definer, ptr, format_select_byte2, def_attr->byte_selector[2]);
	MLX5_SET(definer, ptr, format_select_byte3, def_attr->byte_selector[3]);
	MLX5_SET(definer, ptr, format_select_byte4, def_attr->byte_selector[4]);
	MLX5_SET(definer, ptr, format_select_byte5, def_attr->byte_selector[5]);
	MLX5_SET(definer, ptr, format_select_byte6, def_attr->byte_selector[6]);
	MLX5_SET(definer, ptr, format_select_byte7, def_attr->byte_selector[7]);

	ptr = MLX5_ADDR_OF(definer, ptr, match_mask);
	memcpy(ptr, def_attr->match_mask, MLX5_FLD_SZ_BYTES(definer, match_mask));

	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
	if (!devx_obj->obj) {
		DR_LOG(ERR, "Failed to create Definer");
		simple_free(devx_obj);
		rte_errno = errno;
		return NULL;
	}

	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return devx_obj;
}

struct mlx5dr_devx_obj *
mlx5dr_cmd_sq_create(struct ibv_context *ctx,
		     struct mlx5dr_cmd_sq_create_attr *attr)
{
	uint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
	void *sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	void *wqc = MLX5_ADDR_OF(sqc, sqc, wq);
	struct mlx5dr_devx_obj *devx_obj;

	devx_obj = simple_malloc(sizeof(*devx_obj));
	if (!devx_obj) {
		DR_LOG(ERR, "Failed to create SQ");
		rte_errno = ENOMEM;
		return NULL;
	}

	MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
	MLX5_SET(sqc, sqc, cqn, attr->cqn);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
	MLX5_SET(sqc, sqc, non_wire, 1);
	MLX5_SET(sqc, sqc, ts_format, attr->ts_format);
	MLX5_SET(wq, wqc, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wqc, pd, attr->pdn);
	MLX5_SET(wq, wqc, uar_page, attr->page_id);
	MLX5_SET(wq, wqc, log_wq_stride, log2above(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wqc, log_wq_sz, attr->log_wq_sz);
	MLX5_SET(wq, wqc, dbr_umem_id, attr->dbr_id);
	MLX5_SET(wq, wqc, wq_umem_id, attr->wq_id);

	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
	if (!devx_obj->obj) {
		simple_free(devx_obj);
		rte_errno = errno;
		return NULL;
	}

	devx_obj->id = MLX5_GET(create_sq_out, out, sqn);

	return devx_obj;
}

int mlx5dr_cmd_sq_modify_rdy(struct mlx5dr_devx_obj *devx_obj)
{
	uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
	void *sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	int ret;

	MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
	MLX5_SET(modify_sq_in, in, sqn, devx_obj->id);
	MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);

	ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to modify SQ");
		rte_errno = errno;
	}

	return ret;
}
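
/* Cross-vHCA object sharing: mlx5dr_cmd_allow_other_vhca_access() exposes a
 * local object under an access key, and mlx5dr_cmd_alias_obj_create() creates
 * an alias referencing an object owned by another vHCA using the same key.
 */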
int mlx5dr_cmd_allow_other_vhca_access(struct ibv_context *ctx,
				       struct mlx5dr_cmd_allow_other_vhca_access_attr *attr)
{
	uint32_t out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {0};
	void *key;
	int ret;

	MLX5_SET(allow_other_vhca_access_in,
		 in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
	MLX5_SET(allow_other_vhca_access_in,
		 in, object_type_to_be_accessed, attr->obj_type);
	MLX5_SET(allow_other_vhca_access_in,
		 in, object_id_to_be_accessed, attr->obj_id);

	key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
	memcpy(key, attr->access_key, sizeof(attr->access_key));

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to execute ALLOW_OTHER_VHCA_ACCESS command");
		rte_errno = errno;
		return rte_errno;
	}

	return 0;
}

struct mlx5dr_devx_obj *
mlx5dr_cmd_alias_obj_create(struct ibv_context *ctx,
			    struct mlx5dr_cmd_alias_obj_create_attr *alias_attr)
{
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {0};
	struct mlx5dr_devx_obj *devx_obj;
	void *attr;
	void *key;

	devx_obj = simple_malloc(sizeof(*devx_obj));
	if (!devx_obj) {
		DR_LOG(ERR, "Failed to allocate memory for ALIAS general object");
		rte_errno = ENOMEM;
		return NULL;
	}

	attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, obj_type, alias_attr->obj_type);
	MLX5_SET(general_obj_in_cmd_hdr, attr, alias_object, 1);

	attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
	MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
	MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);

	key = MLX5_ADDR_OF(alias_context, attr, access_key);
	memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));

	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
	if (!devx_obj->obj) {
		DR_LOG(ERR, "Failed to create ALIAS OBJ");
		simple_free(devx_obj);
		rte_errno = errno;
		return NULL;
	}

	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return devx_obj;
}
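
/* Query the HCA capability sections used by HWS: general device caps
 * (cmd_hca_cap and cmd_hca_cap_2), NIC flow table caps and, when available,
 * WQE based flow table and E-Switch caps.
 */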
int mlx5dr_cmd_query_caps(struct ibv_context *ctx,
			  struct mlx5dr_cmd_query_caps *caps)
{
	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
	const struct flow_hw_port_info *port_info;
	struct ibv_device_attr_ex attr_ex;
	u32 res;
	int ret;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to query device caps");
		rte_errno = errno;
		return rte_errno;
	}

	caps->wqe_based_update =
		MLX5_GET(query_hca_cap_out, out,
			 capability.cmd_hca_cap.wqe_based_flow_table_update_cap);

	caps->eswitch_manager = MLX5_GET(query_hca_cap_out, out,
					 capability.cmd_hca_cap.eswitch_manager);

	caps->flex_protocols = MLX5_GET(query_hca_cap_out, out,
					capability.cmd_hca_cap.flex_parser_protocols);

	caps->log_header_modify_argument_granularity =
		MLX5_GET(query_hca_cap_out, out,
			 capability.cmd_hca_cap.log_header_modify_argument_granularity);

	caps->log_header_modify_argument_granularity -=
		MLX5_GET(query_hca_cap_out, out,
			 capability.cmd_hca_cap.
			 log_header_modify_argument_granularity_offset);

	caps->log_header_modify_argument_max_alloc =
		MLX5_GET(query_hca_cap_out, out,
			 capability.cmd_hca_cap.log_header_modify_argument_max_alloc);

	caps->definer_format_sup =
		MLX5_GET64(query_hca_cap_out, out,
			   capability.cmd_hca_cap.match_definer_format_supported);

	caps->sq_ts_format = MLX5_GET(query_hca_cap_out, out,
				      capability.cmd_hca_cap.sq_ts_format);

	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to query device caps");
		rte_errno = errno;
		return rte_errno;
	}

	caps->full_dw_jumbo_support = MLX5_GET(query_hca_cap_out, out,
					       capability.cmd_hca_cap_2.
					       format_select_dw_8_6_ext);

	caps->format_select_gtpu_dw_0 = MLX5_GET(query_hca_cap_out, out,
						 capability.cmd_hca_cap_2.
						 format_select_dw_gtpu_dw_0);

	caps->format_select_gtpu_dw_1 = MLX5_GET(query_hca_cap_out, out,
						 capability.cmd_hca_cap_2.
						 format_select_dw_gtpu_dw_1);

	caps->format_select_gtpu_dw_2 = MLX5_GET(query_hca_cap_out, out,
						 capability.cmd_hca_cap_2.
						 format_select_dw_gtpu_dw_2);

	caps->format_select_gtpu_ext_dw_0 = MLX5_GET(query_hca_cap_out, out,
						     capability.cmd_hca_cap_2.
						     format_select_dw_gtpu_first_ext_dw_0);

	/* check cross-VHCA support in cap2 */
	res = MLX5_GET(query_hca_cap_out, out,
		       capability.cmd_hca_cap_2.cross_vhca_object_to_object_supported);

	caps->cross_vhca_resources = (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_STC_TO_TIR) &&
				     (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_STC_TO_FT) &&
				     (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_FT_TO_RTC);

	res = MLX5_GET(query_hca_cap_out, out,
		       capability.cmd_hca_cap_2.allowed_object_for_other_vhca_access);

	caps->cross_vhca_resources &= (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_TIR) &&
				      (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_FT) &&
				      (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_RTC);

	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to query flow table caps");
		rte_errno = errno;
		return rte_errno;
	}

	caps->nic_ft.max_level = MLX5_GET(query_hca_cap_out, out,
					  capability.flow_table_nic_cap.
					  flow_table_properties_nic_receive.max_ft_level);

	caps->nic_ft.reparse = MLX5_GET(query_hca_cap_out, out,
					capability.flow_table_nic_cap.
					flow_table_properties_nic_receive.reparse);

	/* check cross-VHCA support in flow table properties */
	res = MLX5_GET(query_hca_cap_out, out,
		       capability.flow_table_nic_cap.flow_table_properties_nic_receive.cross_vhca_object);
	caps->cross_vhca_resources &= res;
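
	/* The WQE based flow table section is only queried when the general
	 * caps report support for WQE based flow table update.
	 */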
	if (caps->wqe_based_update) {
		MLX5_SET(query_hca_cap_in, in, op_mod,
			 MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE |
			 MLX5_HCA_CAP_OPMOD_GET_CUR);

		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
		if (ret) {
			DR_LOG(ERR, "Failed to query WQE based FT caps");
			rte_errno = errno;
			return rte_errno;
		}

		caps->rtc_reparse_mode = MLX5_GET(query_hca_cap_out, out,
						  capability.wqe_based_flow_table_cap.
						  rtc_reparse_mode);

		caps->ste_format = MLX5_GET(query_hca_cap_out, out,
					    capability.wqe_based_flow_table_cap.
					    ste_format);

		caps->rtc_index_mode = MLX5_GET(query_hca_cap_out, out,
						capability.wqe_based_flow_table_cap.
						rtc_index_mode);

		caps->rtc_log_depth_max = MLX5_GET(query_hca_cap_out, out,
						   capability.wqe_based_flow_table_cap.
						   rtc_log_depth_max);

		caps->ste_alloc_log_max = MLX5_GET(query_hca_cap_out, out,
						   capability.wqe_based_flow_table_cap.
						   ste_alloc_log_max);

		caps->ste_alloc_log_gran = MLX5_GET(query_hca_cap_out, out,
						    capability.wqe_based_flow_table_cap.
						    ste_alloc_log_granularity);

		caps->trivial_match_definer = MLX5_GET(query_hca_cap_out, out,
						       capability.wqe_based_flow_table_cap.
						       trivial_match_definer);

		caps->stc_alloc_log_max = MLX5_GET(query_hca_cap_out, out,
						   capability.wqe_based_flow_table_cap.
						   stc_alloc_log_max);

		caps->stc_alloc_log_gran = MLX5_GET(query_hca_cap_out, out,
						    capability.wqe_based_flow_table_cap.
						    stc_alloc_log_granularity);
	}

	if (caps->eswitch_manager) {
		MLX5_SET(query_hca_cap_in, in, op_mod,
			 MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE |
			 MLX5_HCA_CAP_OPMOD_GET_CUR);

		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
		if (ret) {
			DR_LOG(ERR, "Failed to query flow table esw caps");
			rte_errno = errno;
			return rte_errno;
		}

		caps->fdb_ft.max_level = MLX5_GET(query_hca_cap_out, out,
						  capability.flow_table_nic_cap.
						  flow_table_properties_nic_receive.max_ft_level);

		caps->fdb_ft.reparse = MLX5_GET(query_hca_cap_out, out,
						capability.flow_table_nic_cap.
						flow_table_properties_nic_receive.reparse);
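
		/* Query E-Switch caps to learn the E-Switch manager vport
		 * number when the firmware reports it as valid.
		 */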
		MLX5_SET(query_hca_cap_in, in, op_mod,
			 MLX5_SET_HCA_CAP_OP_MOD_ESW | MLX5_HCA_CAP_OPMOD_GET_CUR);

		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
		if (ret) {
			DR_LOG(ERR, "Query eswitch capabilities failed %d\n", ret);
			rte_errno = errno;
			return rte_errno;
		}

		if (MLX5_GET(query_hca_cap_out, out,
			     capability.esw_cap.esw_manager_vport_number_valid))
			caps->eswitch_manager_vport_number =
				MLX5_GET(query_hca_cap_out, out,
					 capability.esw_cap.esw_manager_vport_number);
	}

	ret = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
	if (ret) {
		DR_LOG(ERR, "Failed to query device attributes");
		rte_errno = ret;
		return rte_errno;
	}

	strlcpy(caps->fw_ver, attr_ex.orig_attr.fw_ver, sizeof(caps->fw_ver));

	port_info = flow_hw_get_wire_port(ctx);
	if (port_info) {
		caps->wire_regc = port_info->regc_value;
		caps->wire_regc_mask = port_info->regc_mask;
	} else {
		DR_LOG(INFO, "Failed to query wire port regc value");
	}

	return ret;
}

int mlx5dr_cmd_query_ib_port(struct ibv_context *ctx,
			     struct mlx5dr_cmd_query_vport_caps *vport_caps,
			     uint32_t port_num)
{
	struct mlx5_port_info port_info = {0};
	uint32_t flags;
	int ret;

	flags = MLX5_PORT_QUERY_VPORT | MLX5_PORT_QUERY_ESW_OWNER_VHCA_ID;

	ret = mlx5_glue->devx_port_query(ctx, port_num, &port_info);
	/* Check if the query succeeded and the vport is enabled */
	if (ret || (port_info.query_flags & flags) != flags) {
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	vport_caps->vport_num = port_info.vport_id;
	vport_caps->esw_owner_vhca_id = port_info.esw_owner_vhca_id;

	if (port_info.query_flags & MLX5_PORT_QUERY_REG_C0) {
		vport_caps->metadata_c = port_info.vport_meta_tag;
		vport_caps->metadata_c_mask = port_info.vport_meta_mask;
	}

	return 0;
}