1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2022 NVIDIA Corporation & Affiliates 3 */ 4 5 #include "mlx5dr_internal.h" 6 7 int mlx5dr_cmd_destroy_obj(struct mlx5dr_devx_obj *devx_obj) 8 { 9 int ret; 10 11 ret = mlx5_glue->devx_obj_destroy(devx_obj->obj); 12 simple_free(devx_obj); 13 14 return ret; 15 } 16 17 struct mlx5dr_devx_obj * 18 mlx5dr_cmd_flow_table_create(struct ibv_context *ctx, 19 struct mlx5dr_cmd_ft_create_attr *ft_attr) 20 { 21 uint32_t out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0}; 22 uint32_t in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0}; 23 struct mlx5dr_devx_obj *devx_obj; 24 void *ft_ctx; 25 26 devx_obj = simple_malloc(sizeof(*devx_obj)); 27 if (!devx_obj) { 28 DR_LOG(ERR, "Failed to allocate memory for flow table object"); 29 rte_errno = ENOMEM; 30 return NULL; 31 } 32 33 MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE); 34 MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type); 35 36 ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context); 37 MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level); 38 MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid); 39 40 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 41 if (!devx_obj->obj) { 42 DR_LOG(ERR, "Failed to create FT"); 43 simple_free(devx_obj); 44 rte_errno = errno; 45 return NULL; 46 } 47 48 devx_obj->id = MLX5_GET(create_flow_table_out, out, table_id); 49 50 return devx_obj; 51 } 52 53 int 54 mlx5dr_cmd_flow_table_modify(struct mlx5dr_devx_obj *devx_obj, 55 struct mlx5dr_cmd_ft_modify_attr *ft_attr) 56 { 57 uint32_t out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0}; 58 uint32_t in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0}; 59 void *ft_ctx; 60 int ret; 61 62 MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE); 63 MLX5_SET(modify_flow_table_in, in, table_type, ft_attr->type); 64 MLX5_SET(modify_flow_table_in, in, modify_field_select, ft_attr->modify_fs); 65 
MLX5_SET(modify_flow_table_in, in, table_id, devx_obj->id); 66 67 ft_ctx = MLX5_ADDR_OF(modify_flow_table_in, in, flow_table_context); 68 69 MLX5_SET(flow_table_context, ft_ctx, table_miss_action, ft_attr->table_miss_action); 70 MLX5_SET(flow_table_context, ft_ctx, table_miss_id, ft_attr->table_miss_id); 71 MLX5_SET(flow_table_context, ft_ctx, rtc_id_0, ft_attr->rtc_id_0); 72 MLX5_SET(flow_table_context, ft_ctx, rtc_id_1, ft_attr->rtc_id_1); 73 74 ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out)); 75 if (ret) { 76 DR_LOG(ERR, "Failed to modify FT"); 77 rte_errno = errno; 78 } 79 80 return ret; 81 } 82 83 static struct mlx5dr_devx_obj * 84 mlx5dr_cmd_flow_group_create(struct ibv_context *ctx, 85 struct mlx5dr_cmd_fg_attr *fg_attr) 86 { 87 uint32_t out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0}; 88 uint32_t in[MLX5_ST_SZ_DW(create_flow_group_in)] = {0}; 89 struct mlx5dr_devx_obj *devx_obj; 90 91 devx_obj = simple_malloc(sizeof(*devx_obj)); 92 if (!devx_obj) { 93 DR_LOG(ERR, "Failed to allocate memory for flow group object"); 94 rte_errno = ENOMEM; 95 return NULL; 96 } 97 98 MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP); 99 MLX5_SET(create_flow_group_in, in, table_type, fg_attr->table_type); 100 MLX5_SET(create_flow_group_in, in, table_id, fg_attr->table_id); 101 102 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 103 if (!devx_obj->obj) { 104 DR_LOG(ERR, "Failed to create Flow group"); 105 simple_free(devx_obj); 106 rte_errno = errno; 107 return NULL; 108 } 109 110 devx_obj->id = MLX5_GET(create_flow_group_out, out, group_id); 111 112 return devx_obj; 113 } 114 115 static struct mlx5dr_devx_obj * 116 mlx5dr_cmd_set_vport_fte(struct ibv_context *ctx, 117 uint32_t table_type, 118 uint32_t table_id, 119 uint32_t group_id, 120 uint32_t vport_id) 121 { 122 uint32_t in[MLX5_ST_SZ_DW(set_fte_in) + MLX5_ST_SZ_DW(dest_format)] = {0}; 123 uint32_t out[MLX5_ST_SZ_DW(set_fte_out)] = 
{0}; 124 struct mlx5dr_devx_obj *devx_obj; 125 void *in_flow_context; 126 void *in_dests; 127 128 devx_obj = simple_malloc(sizeof(*devx_obj)); 129 if (!devx_obj) { 130 DR_LOG(ERR, "Failed to allocate memory for fte object"); 131 rte_errno = ENOMEM; 132 return NULL; 133 } 134 135 MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY); 136 MLX5_SET(set_fte_in, in, table_type, table_type); 137 MLX5_SET(set_fte_in, in, table_id, table_id); 138 139 in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context); 140 MLX5_SET(flow_context, in_flow_context, group_id, group_id); 141 MLX5_SET(flow_context, in_flow_context, destination_list_size, 1); 142 MLX5_SET(flow_context, in_flow_context, action, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST); 143 144 in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination); 145 MLX5_SET(dest_format, in_dests, destination_type, 146 MLX5_FLOW_DESTINATION_TYPE_VPORT); 147 MLX5_SET(dest_format, in_dests, destination_id, vport_id); 148 149 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 150 if (!devx_obj->obj) { 151 DR_LOG(ERR, "Failed to create FTE"); 152 simple_free(devx_obj); 153 rte_errno = errno; 154 return NULL; 155 } 156 157 return devx_obj; 158 } 159 160 void mlx5dr_cmd_miss_ft_destroy(struct mlx5dr_cmd_forward_tbl *tbl) 161 { 162 mlx5dr_cmd_destroy_obj(tbl->fte); 163 mlx5dr_cmd_destroy_obj(tbl->fg); 164 mlx5dr_cmd_destroy_obj(tbl->ft); 165 } 166 167 struct mlx5dr_cmd_forward_tbl * 168 mlx5dr_cmd_miss_ft_create(struct ibv_context *ctx, 169 struct mlx5dr_cmd_ft_create_attr *ft_attr, 170 uint32_t vport) 171 { 172 struct mlx5dr_cmd_fg_attr fg_attr = {0}; 173 struct mlx5dr_cmd_forward_tbl *tbl; 174 175 tbl = simple_calloc(1, sizeof(*tbl)); 176 if (!tbl) { 177 DR_LOG(ERR, "Failed to allocate memory for forward default"); 178 rte_errno = ENOMEM; 179 return NULL; 180 } 181 182 tbl->ft = mlx5dr_cmd_flow_table_create(ctx, ft_attr); 183 if (!tbl->ft) { 184 DR_LOG(ERR, "Failed to create FT for 
miss-table"); 185 goto free_tbl; 186 } 187 188 fg_attr.table_id = tbl->ft->id; 189 fg_attr.table_type = ft_attr->type; 190 191 tbl->fg = mlx5dr_cmd_flow_group_create(ctx, &fg_attr); 192 if (!tbl->fg) { 193 DR_LOG(ERR, "Failed to create FG for miss-table"); 194 goto free_ft; 195 } 196 197 tbl->fte = mlx5dr_cmd_set_vport_fte(ctx, ft_attr->type, tbl->ft->id, tbl->fg->id, vport); 198 if (!tbl->fte) { 199 DR_LOG(ERR, "Failed to create FTE for miss-table"); 200 goto free_fg; 201 } 202 return tbl; 203 204 free_fg: 205 mlx5dr_cmd_destroy_obj(tbl->fg); 206 free_ft: 207 mlx5dr_cmd_destroy_obj(tbl->ft); 208 free_tbl: 209 simple_free(tbl); 210 return NULL; 211 } 212 213 void mlx5dr_cmd_set_attr_connect_miss_tbl(struct mlx5dr_context *ctx, 214 uint32_t fw_ft_type, 215 enum mlx5dr_table_type type, 216 struct mlx5dr_cmd_ft_modify_attr *ft_attr) 217 { 218 struct mlx5dr_devx_obj *default_miss_tbl; 219 220 if (type != MLX5DR_TABLE_TYPE_FDB && !mlx5dr_context_shared_gvmi_used(ctx)) 221 return; 222 223 ft_attr->modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION; 224 ft_attr->type = fw_ft_type; 225 ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL; 226 227 if (type == MLX5DR_TABLE_TYPE_FDB) { 228 default_miss_tbl = ctx->common_res[type].default_miss->ft; 229 if (!default_miss_tbl) { 230 assert(false); 231 return; 232 } 233 ft_attr->table_miss_id = default_miss_tbl->id; 234 } else { 235 ft_attr->table_miss_id = ctx->gvmi_res[type].aliased_end_ft->id; 236 } 237 } 238 239 struct mlx5dr_devx_obj * 240 mlx5dr_cmd_rtc_create(struct ibv_context *ctx, 241 struct mlx5dr_cmd_rtc_create_attr *rtc_attr) 242 { 243 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 244 uint32_t in[MLX5_ST_SZ_DW(create_rtc_in)] = {0}; 245 struct mlx5dr_devx_obj *devx_obj; 246 void *attr; 247 248 devx_obj = simple_malloc(sizeof(*devx_obj)); 249 if (!devx_obj) { 250 DR_LOG(ERR, "Failed to allocate memory for RTC object"); 251 rte_errno = ENOMEM; 252 return NULL; 253 } 254 255 attr = 
MLX5_ADDR_OF(create_rtc_in, in, hdr); 256 MLX5_SET(general_obj_in_cmd_hdr, 257 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 258 MLX5_SET(general_obj_in_cmd_hdr, 259 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_RTC); 260 261 attr = MLX5_ADDR_OF(create_rtc_in, in, rtc); 262 MLX5_SET(rtc, attr, ste_format, rtc_attr->is_jumbo ? 263 MLX5_IFC_RTC_STE_FORMAT_11DW : 264 MLX5_IFC_RTC_STE_FORMAT_8DW); 265 MLX5_SET(rtc, attr, pd, rtc_attr->pd); 266 MLX5_SET(rtc, attr, update_index_mode, rtc_attr->update_index_mode); 267 MLX5_SET(rtc, attr, access_index_mode, rtc_attr->access_index_mode); 268 MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer); 269 MLX5_SET(rtc, attr, log_depth, rtc_attr->log_depth); 270 MLX5_SET(rtc, attr, log_hash_size, rtc_attr->log_size); 271 MLX5_SET(rtc, attr, table_type, rtc_attr->table_type); 272 MLX5_SET(rtc, attr, match_definer_id, rtc_attr->definer_id); 273 MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base); 274 MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base); 275 MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset); 276 MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id); 277 MLX5_SET(rtc, attr, reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS); 278 279 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 280 if (!devx_obj->obj) { 281 DR_LOG(ERR, "Failed to create RTC"); 282 simple_free(devx_obj); 283 rte_errno = errno; 284 return NULL; 285 } 286 287 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 288 289 return devx_obj; 290 } 291 292 struct mlx5dr_devx_obj * 293 mlx5dr_cmd_stc_create(struct ibv_context *ctx, 294 struct mlx5dr_cmd_stc_create_attr *stc_attr) 295 { 296 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 297 uint32_t in[MLX5_ST_SZ_DW(create_stc_in)] = {0}; 298 struct mlx5dr_devx_obj *devx_obj; 299 void *attr; 300 301 devx_obj = simple_malloc(sizeof(*devx_obj)); 302 if (!devx_obj) { 303 DR_LOG(ERR, "Failed to allocate memory for STC object"); 304 
rte_errno = ENOMEM; 305 return NULL; 306 } 307 308 attr = MLX5_ADDR_OF(create_stc_in, in, hdr); 309 MLX5_SET(general_obj_in_cmd_hdr, 310 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 311 MLX5_SET(general_obj_in_cmd_hdr, 312 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STC); 313 MLX5_SET(general_obj_in_cmd_hdr, 314 attr, log_obj_range, stc_attr->log_obj_range); 315 316 attr = MLX5_ADDR_OF(create_stc_in, in, stc); 317 MLX5_SET(stc, attr, table_type, stc_attr->table_type); 318 319 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 320 if (!devx_obj->obj) { 321 DR_LOG(ERR, "Failed to create STC"); 322 simple_free(devx_obj); 323 rte_errno = errno; 324 return NULL; 325 } 326 327 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 328 329 return devx_obj; 330 } 331 332 static int 333 mlx5dr_cmd_stc_modify_set_stc_param(struct mlx5dr_cmd_stc_modify_attr *stc_attr, 334 void *stc_parm) 335 { 336 switch (stc_attr->action_type) { 337 case MLX5_IFC_STC_ACTION_TYPE_COUNTER: 338 MLX5_SET(stc_ste_param_flow_counter, stc_parm, flow_counter_id, stc_attr->id); 339 break; 340 case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR: 341 MLX5_SET(stc_ste_param_tir, stc_parm, tirn, stc_attr->dest_tir_num); 342 break; 343 case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT: 344 MLX5_SET(stc_ste_param_table, stc_parm, table_id, stc_attr->dest_table_id); 345 break; 346 case MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST: 347 MLX5_SET(stc_ste_param_header_modify_list, stc_parm, 348 header_modify_pattern_id, stc_attr->modify_header.pattern_id); 349 MLX5_SET(stc_ste_param_header_modify_list, stc_parm, 350 header_modify_argument_id, stc_attr->modify_header.arg_id); 351 break; 352 case MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE: 353 MLX5_SET(stc_ste_param_remove, stc_parm, action_type, 354 MLX5_MODIFICATION_TYPE_REMOVE); 355 MLX5_SET(stc_ste_param_remove, stc_parm, decap, 356 stc_attr->remove_header.decap); 357 MLX5_SET(stc_ste_param_remove, stc_parm, remove_start_anchor, 358 
stc_attr->remove_header.start_anchor); 359 MLX5_SET(stc_ste_param_remove, stc_parm, remove_end_anchor, 360 stc_attr->remove_header.end_anchor); 361 break; 362 case MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT: 363 MLX5_SET(stc_ste_param_insert, stc_parm, action_type, 364 MLX5_MODIFICATION_TYPE_INSERT); 365 MLX5_SET(stc_ste_param_insert, stc_parm, encap, 366 stc_attr->insert_header.encap); 367 MLX5_SET(stc_ste_param_insert, stc_parm, inline_data, 368 stc_attr->insert_header.is_inline); 369 MLX5_SET(stc_ste_param_insert, stc_parm, insert_anchor, 370 stc_attr->insert_header.insert_anchor); 371 /* HW gets the next 2 sizes in words */ 372 MLX5_SET(stc_ste_param_insert, stc_parm, insert_size, 373 stc_attr->insert_header.header_size / 2); 374 MLX5_SET(stc_ste_param_insert, stc_parm, insert_offset, 375 stc_attr->insert_header.insert_offset / 2); 376 MLX5_SET(stc_ste_param_insert, stc_parm, insert_argument, 377 stc_attr->insert_header.arg_id); 378 break; 379 case MLX5_IFC_STC_ACTION_TYPE_COPY: 380 case MLX5_IFC_STC_ACTION_TYPE_SET: 381 case MLX5_IFC_STC_ACTION_TYPE_ADD: 382 *(__be64 *)stc_parm = stc_attr->modify_action.data; 383 break; 384 case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT: 385 case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK: 386 MLX5_SET(stc_ste_param_vport, stc_parm, vport_number, 387 stc_attr->vport.vport_num); 388 MLX5_SET(stc_ste_param_vport, stc_parm, eswitch_owner_vhca_id, 389 stc_attr->vport.esw_owner_vhca_id); 390 MLX5_SET(stc_ste_param_vport, stc_parm, eswitch_owner_vhca_id_valid, 1); 391 break; 392 case MLX5_IFC_STC_ACTION_TYPE_DROP: 393 case MLX5_IFC_STC_ACTION_TYPE_NOP: 394 case MLX5_IFC_STC_ACTION_TYPE_TAG: 395 case MLX5_IFC_STC_ACTION_TYPE_ALLOW: 396 break; 397 case MLX5_IFC_STC_ACTION_TYPE_ASO: 398 MLX5_SET(stc_ste_param_execute_aso, stc_parm, aso_object_id, 399 stc_attr->aso.devx_obj_id); 400 MLX5_SET(stc_ste_param_execute_aso, stc_parm, return_reg_id, 401 stc_attr->aso.return_reg_id); 402 MLX5_SET(stc_ste_param_execute_aso, stc_parm, aso_type, 403 
stc_attr->aso.aso_type); 404 break; 405 case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE: 406 MLX5_SET(stc_ste_param_ste_table, stc_parm, ste_obj_id, 407 stc_attr->ste_table.ste_obj_id); 408 MLX5_SET(stc_ste_param_ste_table, stc_parm, match_definer_id, 409 stc_attr->ste_table.match_definer_id); 410 MLX5_SET(stc_ste_param_ste_table, stc_parm, log_hash_size, 411 stc_attr->ste_table.log_hash_size); 412 break; 413 case MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS: 414 MLX5_SET(stc_ste_param_remove_words, stc_parm, action_type, 415 MLX5_MODIFICATION_TYPE_REMOVE_WORDS); 416 MLX5_SET(stc_ste_param_remove_words, stc_parm, remove_start_anchor, 417 stc_attr->remove_words.start_anchor); 418 MLX5_SET(stc_ste_param_remove_words, stc_parm, 419 remove_size, stc_attr->remove_words.num_of_words); 420 break; 421 default: 422 DR_LOG(ERR, "Not supported type %d", stc_attr->action_type); 423 rte_errno = EINVAL; 424 return rte_errno; 425 } 426 return 0; 427 } 428 429 int 430 mlx5dr_cmd_stc_modify(struct mlx5dr_devx_obj *devx_obj, 431 struct mlx5dr_cmd_stc_modify_attr *stc_attr) 432 { 433 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 434 uint32_t in[MLX5_ST_SZ_DW(create_stc_in)] = {0}; 435 void *stc_parm; 436 void *attr; 437 int ret; 438 439 attr = MLX5_ADDR_OF(create_stc_in, in, hdr); 440 MLX5_SET(general_obj_in_cmd_hdr, 441 attr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT); 442 MLX5_SET(general_obj_in_cmd_hdr, 443 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STC); 444 MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, devx_obj->id); 445 MLX5_SET(general_obj_in_cmd_hdr, in, obj_offset, stc_attr->stc_offset); 446 447 attr = MLX5_ADDR_OF(create_stc_in, in, stc); 448 MLX5_SET(stc, attr, ste_action_offset, stc_attr->action_offset); 449 MLX5_SET(stc, attr, action_type, stc_attr->action_type); 450 MLX5_SET64(stc, attr, modify_field_select, 451 MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC); 452 453 /* Set destination TIRN, TAG, FT ID, STE ID */ 454 stc_parm = MLX5_ADDR_OF(stc, attr, stc_param); 455 ret = 
mlx5dr_cmd_stc_modify_set_stc_param(stc_attr, stc_parm); 456 if (ret) 457 return ret; 458 459 ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out)); 460 if (ret) { 461 DR_LOG(ERR, "Failed to modify STC FW action_type %d", stc_attr->action_type); 462 rte_errno = errno; 463 } 464 465 return ret; 466 } 467 468 struct mlx5dr_devx_obj * 469 mlx5dr_cmd_arg_create(struct ibv_context *ctx, 470 uint16_t log_obj_range, 471 uint32_t pd) 472 { 473 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 474 uint32_t in[MLX5_ST_SZ_DW(create_arg_in)] = {0}; 475 struct mlx5dr_devx_obj *devx_obj; 476 void *attr; 477 478 devx_obj = simple_malloc(sizeof(*devx_obj)); 479 if (!devx_obj) { 480 DR_LOG(ERR, "Failed to allocate memory for ARG object"); 481 rte_errno = ENOMEM; 482 return NULL; 483 } 484 485 attr = MLX5_ADDR_OF(create_arg_in, in, hdr); 486 MLX5_SET(general_obj_in_cmd_hdr, 487 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 488 MLX5_SET(general_obj_in_cmd_hdr, 489 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_ARG); 490 MLX5_SET(general_obj_in_cmd_hdr, 491 attr, log_obj_range, log_obj_range); 492 493 attr = MLX5_ADDR_OF(create_arg_in, in, arg); 494 MLX5_SET(arg, attr, access_pd, pd); 495 496 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 497 if (!devx_obj->obj) { 498 DR_LOG(ERR, "Failed to create ARG"); 499 simple_free(devx_obj); 500 rte_errno = errno; 501 return NULL; 502 } 503 504 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 505 506 return devx_obj; 507 } 508 509 struct mlx5dr_devx_obj * 510 mlx5dr_cmd_header_modify_pattern_create(struct ibv_context *ctx, 511 uint32_t pattern_length, 512 uint8_t *actions) 513 { 514 uint32_t in[MLX5_ST_SZ_DW(create_header_modify_pattern_in)] = {0}; 515 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 516 struct mlx5dr_devx_obj *devx_obj; 517 void *pattern_data; 518 void *pattern; 519 void *attr; 520 521 if (pattern_length > 
MAX_ACTIONS_DATA_IN_HEADER_MODIFY) { 522 DR_LOG(ERR, "Pattern length %d exceeds limit %d", 523 pattern_length, MAX_ACTIONS_DATA_IN_HEADER_MODIFY); 524 rte_errno = EINVAL; 525 return NULL; 526 } 527 528 devx_obj = simple_malloc(sizeof(*devx_obj)); 529 if (!devx_obj) { 530 DR_LOG(ERR, "Failed to allocate memory for header_modify_pattern object"); 531 rte_errno = ENOMEM; 532 return NULL; 533 } 534 535 attr = MLX5_ADDR_OF(create_header_modify_pattern_in, in, hdr); 536 MLX5_SET(general_obj_in_cmd_hdr, 537 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 538 MLX5_SET(general_obj_in_cmd_hdr, 539 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_MODIFY_HEADER_PATTERN); 540 541 pattern = MLX5_ADDR_OF(create_header_modify_pattern_in, in, pattern); 542 /* Pattern_length is in ddwords */ 543 MLX5_SET(header_modify_pattern_in, pattern, pattern_length, pattern_length / (2 * DW_SIZE)); 544 545 pattern_data = MLX5_ADDR_OF(header_modify_pattern_in, pattern, pattern_data); 546 memcpy(pattern_data, actions, pattern_length); 547 548 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 549 if (!devx_obj->obj) { 550 DR_LOG(ERR, "Failed to create header_modify_pattern"); 551 rte_errno = errno; 552 goto free_obj; 553 } 554 555 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 556 557 return devx_obj; 558 559 free_obj: 560 simple_free(devx_obj); 561 return NULL; 562 } 563 564 struct mlx5dr_devx_obj * 565 mlx5dr_cmd_ste_create(struct ibv_context *ctx, 566 struct mlx5dr_cmd_ste_create_attr *ste_attr) 567 { 568 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 569 uint32_t in[MLX5_ST_SZ_DW(create_ste_in)] = {0}; 570 struct mlx5dr_devx_obj *devx_obj; 571 void *attr; 572 573 devx_obj = simple_malloc(sizeof(*devx_obj)); 574 if (!devx_obj) { 575 DR_LOG(ERR, "Failed to allocate memory for STE object"); 576 rte_errno = ENOMEM; 577 return NULL; 578 } 579 580 attr = MLX5_ADDR_OF(create_ste_in, in, hdr); 581 MLX5_SET(general_obj_in_cmd_hdr, 582 attr, opcode, 
MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 583 MLX5_SET(general_obj_in_cmd_hdr, 584 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STE); 585 MLX5_SET(general_obj_in_cmd_hdr, 586 attr, log_obj_range, ste_attr->log_obj_range); 587 588 attr = MLX5_ADDR_OF(create_ste_in, in, ste); 589 MLX5_SET(ste, attr, table_type, ste_attr->table_type); 590 591 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 592 if (!devx_obj->obj) { 593 DR_LOG(ERR, "Failed to create STE"); 594 simple_free(devx_obj); 595 rte_errno = errno; 596 return NULL; 597 } 598 599 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 600 601 return devx_obj; 602 } 603 604 struct mlx5dr_devx_obj * 605 mlx5dr_cmd_definer_create(struct ibv_context *ctx, 606 struct mlx5dr_cmd_definer_create_attr *def_attr) 607 { 608 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 609 uint32_t in[MLX5_ST_SZ_DW(create_definer_in)] = {0}; 610 struct mlx5dr_devx_obj *devx_obj; 611 void *ptr; 612 613 devx_obj = simple_malloc(sizeof(*devx_obj)); 614 if (!devx_obj) { 615 DR_LOG(ERR, "Failed to allocate memory for definer object"); 616 rte_errno = ENOMEM; 617 return NULL; 618 } 619 620 MLX5_SET(general_obj_in_cmd_hdr, 621 in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 622 MLX5_SET(general_obj_in_cmd_hdr, 623 in, obj_type, MLX5_GENERAL_OBJ_TYPE_DEFINER); 624 625 ptr = MLX5_ADDR_OF(create_definer_in, in, definer); 626 MLX5_SET(definer, ptr, format_id, MLX5_IFC_DEFINER_FORMAT_ID_SELECT); 627 628 MLX5_SET(definer, ptr, format_select_dw0, def_attr->dw_selector[0]); 629 MLX5_SET(definer, ptr, format_select_dw1, def_attr->dw_selector[1]); 630 MLX5_SET(definer, ptr, format_select_dw2, def_attr->dw_selector[2]); 631 MLX5_SET(definer, ptr, format_select_dw3, def_attr->dw_selector[3]); 632 MLX5_SET(definer, ptr, format_select_dw4, def_attr->dw_selector[4]); 633 MLX5_SET(definer, ptr, format_select_dw5, def_attr->dw_selector[5]); 634 MLX5_SET(definer, ptr, format_select_dw6, def_attr->dw_selector[6]); 635 
MLX5_SET(definer, ptr, format_select_dw7, def_attr->dw_selector[7]); 636 MLX5_SET(definer, ptr, format_select_dw8, def_attr->dw_selector[8]); 637 638 MLX5_SET(definer, ptr, format_select_byte0, def_attr->byte_selector[0]); 639 MLX5_SET(definer, ptr, format_select_byte1, def_attr->byte_selector[1]); 640 MLX5_SET(definer, ptr, format_select_byte2, def_attr->byte_selector[2]); 641 MLX5_SET(definer, ptr, format_select_byte3, def_attr->byte_selector[3]); 642 MLX5_SET(definer, ptr, format_select_byte4, def_attr->byte_selector[4]); 643 MLX5_SET(definer, ptr, format_select_byte5, def_attr->byte_selector[5]); 644 MLX5_SET(definer, ptr, format_select_byte6, def_attr->byte_selector[6]); 645 MLX5_SET(definer, ptr, format_select_byte7, def_attr->byte_selector[7]); 646 647 ptr = MLX5_ADDR_OF(definer, ptr, match_mask); 648 memcpy(ptr, def_attr->match_mask, MLX5_FLD_SZ_BYTES(definer, match_mask)); 649 650 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 651 if (!devx_obj->obj) { 652 DR_LOG(ERR, "Failed to create Definer"); 653 simple_free(devx_obj); 654 rte_errno = errno; 655 return NULL; 656 } 657 658 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 659 660 return devx_obj; 661 } 662 663 struct mlx5dr_devx_obj * 664 mlx5dr_cmd_sq_create(struct ibv_context *ctx, 665 struct mlx5dr_cmd_sq_create_attr *attr) 666 { 667 uint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0}; 668 uint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0}; 669 void *sqc = MLX5_ADDR_OF(create_sq_in, in, ctx); 670 void *wqc = MLX5_ADDR_OF(sqc, sqc, wq); 671 struct mlx5dr_devx_obj *devx_obj; 672 673 devx_obj = simple_malloc(sizeof(*devx_obj)); 674 if (!devx_obj) { 675 DR_LOG(ERR, "Failed to create SQ"); 676 rte_errno = ENOMEM; 677 return NULL; 678 } 679 680 MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ); 681 MLX5_SET(sqc, sqc, cqn, attr->cqn); 682 MLX5_SET(sqc, sqc, flush_in_error_en, 1); 683 MLX5_SET(sqc, sqc, non_wire, 1); 684 MLX5_SET(sqc, sqc, ts_format, 
attr->ts_format); 685 MLX5_SET(wq, wqc, wq_type, MLX5_WQ_TYPE_CYCLIC); 686 MLX5_SET(wq, wqc, pd, attr->pdn); 687 MLX5_SET(wq, wqc, uar_page, attr->page_id); 688 MLX5_SET(wq, wqc, log_wq_stride, log2above(MLX5_SEND_WQE_BB)); 689 MLX5_SET(wq, wqc, log_wq_sz, attr->log_wq_sz); 690 MLX5_SET(wq, wqc, dbr_umem_id, attr->dbr_id); 691 MLX5_SET(wq, wqc, wq_umem_id, attr->wq_id); 692 693 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 694 if (!devx_obj->obj) { 695 simple_free(devx_obj); 696 rte_errno = errno; 697 return NULL; 698 } 699 700 devx_obj->id = MLX5_GET(create_sq_out, out, sqn); 701 702 return devx_obj; 703 } 704 705 int mlx5dr_cmd_sq_modify_rdy(struct mlx5dr_devx_obj *devx_obj) 706 { 707 uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0}; 708 uint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0}; 709 void *sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); 710 int ret; 711 712 MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ); 713 MLX5_SET(modify_sq_in, in, sqn, devx_obj->id); 714 MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST); 715 MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY); 716 717 ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out)); 718 if (ret) { 719 DR_LOG(ERR, "Failed to modify SQ"); 720 rte_errno = errno; 721 } 722 723 return ret; 724 } 725 726 int mlx5dr_cmd_allow_other_vhca_access(struct ibv_context *ctx, 727 struct mlx5dr_cmd_allow_other_vhca_access_attr *attr) 728 { 729 uint32_t out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {0}; 730 uint32_t in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {0}; 731 void *key; 732 int ret; 733 734 MLX5_SET(allow_other_vhca_access_in, 735 in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS); 736 MLX5_SET(allow_other_vhca_access_in, 737 in, object_type_to_be_accessed, attr->obj_type); 738 MLX5_SET(allow_other_vhca_access_in, 739 in, object_id_to_be_accessed, attr->obj_id); 740 741 key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key); 742 
memcpy(key, attr->access_key, sizeof(attr->access_key)); 743 744 ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out)); 745 if (ret) { 746 DR_LOG(ERR, "Failed to execute ALLOW_OTHER_VHCA_ACCESS command"); 747 rte_errno = errno; 748 return rte_errno; 749 } 750 751 return 0; 752 } 753 754 struct mlx5dr_devx_obj * 755 mlx5dr_cmd_alias_obj_create(struct ibv_context *ctx, 756 struct mlx5dr_cmd_alias_obj_create_attr *alias_attr) 757 { 758 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 759 uint32_t in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {0}; 760 struct mlx5dr_devx_obj *devx_obj; 761 void *attr; 762 void *key; 763 764 devx_obj = simple_malloc(sizeof(*devx_obj)); 765 if (!devx_obj) { 766 DR_LOG(ERR, "Failed to allocate memory for ALIAS general object"); 767 rte_errno = ENOMEM; 768 return NULL; 769 } 770 771 attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr); 772 MLX5_SET(general_obj_in_cmd_hdr, 773 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 774 MLX5_SET(general_obj_in_cmd_hdr, 775 attr, obj_type, alias_attr->obj_type); 776 MLX5_SET(general_obj_in_cmd_hdr, attr, alias_object, 1); 777 778 attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx); 779 MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id); 780 MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id); 781 782 key = MLX5_ADDR_OF(alias_context, attr, access_key); 783 memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key)); 784 785 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 786 if (!devx_obj->obj) { 787 DR_LOG(ERR, "Failed to create ALIAS OBJ"); 788 simple_free(devx_obj); 789 rte_errno = errno; 790 return NULL; 791 } 792 793 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 794 795 return devx_obj; 796 } 797 798 int mlx5dr_cmd_query_caps(struct ibv_context *ctx, 799 struct mlx5dr_cmd_query_caps *caps) 800 { 801 uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0}; 802 
uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0}; 803 const struct flow_hw_port_info *port_info; 804 struct ibv_device_attr_ex attr_ex; 805 u32 res; 806 int ret; 807 808 MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); 809 MLX5_SET(query_hca_cap_in, in, op_mod, 810 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE | 811 MLX5_HCA_CAP_OPMOD_GET_CUR); 812 813 ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out)); 814 if (ret) { 815 DR_LOG(ERR, "Failed to query device caps"); 816 rte_errno = errno; 817 return rte_errno; 818 } 819 820 caps->wqe_based_update = 821 MLX5_GET(query_hca_cap_out, out, 822 capability.cmd_hca_cap.wqe_based_flow_table_update_cap); 823 824 caps->eswitch_manager = MLX5_GET(query_hca_cap_out, out, 825 capability.cmd_hca_cap.eswitch_manager); 826 827 caps->flex_protocols = MLX5_GET(query_hca_cap_out, out, 828 capability.cmd_hca_cap.flex_parser_protocols); 829 830 caps->log_header_modify_argument_granularity = 831 MLX5_GET(query_hca_cap_out, out, 832 capability.cmd_hca_cap.log_header_modify_argument_granularity); 833 834 caps->log_header_modify_argument_granularity -= 835 MLX5_GET(query_hca_cap_out, out, 836 capability.cmd_hca_cap. 
837 log_header_modify_argument_granularity_offset); 838 839 caps->log_header_modify_argument_max_alloc = 840 MLX5_GET(query_hca_cap_out, out, 841 capability.cmd_hca_cap.log_header_modify_argument_max_alloc); 842 843 caps->definer_format_sup = 844 MLX5_GET64(query_hca_cap_out, out, 845 capability.cmd_hca_cap.match_definer_format_supported); 846 847 caps->vhca_id = MLX5_GET(query_hca_cap_out, out, 848 capability.cmd_hca_cap.vhca_id); 849 850 caps->sq_ts_format = MLX5_GET(query_hca_cap_out, out, 851 capability.cmd_hca_cap.sq_ts_format); 852 853 MLX5_SET(query_hca_cap_in, in, op_mod, 854 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 | 855 MLX5_HCA_CAP_OPMOD_GET_CUR); 856 857 ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out)); 858 if (ret) { 859 DR_LOG(ERR, "Failed to query device caps"); 860 rte_errno = errno; 861 return rte_errno; 862 } 863 864 caps->full_dw_jumbo_support = MLX5_GET(query_hca_cap_out, out, 865 capability.cmd_hca_cap_2. 866 format_select_dw_8_6_ext); 867 868 caps->format_select_gtpu_dw_0 = MLX5_GET(query_hca_cap_out, out, 869 capability.cmd_hca_cap_2. 870 format_select_dw_gtpu_dw_0); 871 872 caps->format_select_gtpu_dw_1 = MLX5_GET(query_hca_cap_out, out, 873 capability.cmd_hca_cap_2. 874 format_select_dw_gtpu_dw_1); 875 876 caps->format_select_gtpu_dw_2 = MLX5_GET(query_hca_cap_out, out, 877 capability.cmd_hca_cap_2. 878 format_select_dw_gtpu_dw_2); 879 880 caps->format_select_gtpu_ext_dw_0 = MLX5_GET(query_hca_cap_out, out, 881 capability.cmd_hca_cap_2. 
882 format_select_dw_gtpu_first_ext_dw_0); 883 884 /* check cross-VHCA support in cap2 */ 885 res = 886 MLX5_GET(query_hca_cap_out, out, 887 capability.cmd_hca_cap_2.cross_vhca_object_to_object_supported); 888 889 caps->cross_vhca_resources = (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_STC_TO_TIR) && 890 (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_STC_TO_FT) && 891 (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_FT_TO_RTC); 892 893 res = 894 MLX5_GET(query_hca_cap_out, out, 895 capability.cmd_hca_cap_2.allowed_object_for_other_vhca_access); 896 897 caps->cross_vhca_resources &= (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_TIR) && 898 (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_FT) && 899 (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_RTC); 900 901 MLX5_SET(query_hca_cap_in, in, op_mod, 902 MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE | 903 MLX5_HCA_CAP_OPMOD_GET_CUR); 904 905 ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out)); 906 if (ret) { 907 DR_LOG(ERR, "Failed to query flow table caps"); 908 rte_errno = errno; 909 return rte_errno; 910 } 911 912 caps->nic_ft.max_level = MLX5_GET(query_hca_cap_out, out, 913 capability.flow_table_nic_cap. 914 flow_table_properties_nic_receive.max_ft_level); 915 916 caps->nic_ft.reparse = MLX5_GET(query_hca_cap_out, out, 917 capability.flow_table_nic_cap. 
918 flow_table_properties_nic_receive.reparse); 919 920 /* check cross-VHCA support in flow table properties */ 921 res = 922 MLX5_GET(query_hca_cap_out, out, 923 capability.flow_table_nic_cap.flow_table_properties_nic_receive.cross_vhca_object); 924 caps->cross_vhca_resources &= res; 925 926 if (caps->wqe_based_update) { 927 MLX5_SET(query_hca_cap_in, in, op_mod, 928 MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE | 929 MLX5_HCA_CAP_OPMOD_GET_CUR); 930 931 ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out)); 932 if (ret) { 933 DR_LOG(ERR, "Failed to query WQE based FT caps"); 934 rte_errno = errno; 935 return rte_errno; 936 } 937 938 caps->rtc_reparse_mode = MLX5_GET(query_hca_cap_out, out, 939 capability.wqe_based_flow_table_cap. 940 rtc_reparse_mode); 941 942 caps->ste_format = MLX5_GET(query_hca_cap_out, out, 943 capability.wqe_based_flow_table_cap. 944 ste_format); 945 946 caps->rtc_index_mode = MLX5_GET(query_hca_cap_out, out, 947 capability.wqe_based_flow_table_cap. 948 rtc_index_mode); 949 950 caps->rtc_log_depth_max = MLX5_GET(query_hca_cap_out, out, 951 capability.wqe_based_flow_table_cap. 952 rtc_log_depth_max); 953 954 caps->ste_alloc_log_max = MLX5_GET(query_hca_cap_out, out, 955 capability.wqe_based_flow_table_cap. 956 ste_alloc_log_max); 957 958 caps->ste_alloc_log_gran = MLX5_GET(query_hca_cap_out, out, 959 capability.wqe_based_flow_table_cap. 960 ste_alloc_log_granularity); 961 962 caps->trivial_match_definer = MLX5_GET(query_hca_cap_out, out, 963 capability.wqe_based_flow_table_cap. 964 trivial_match_definer); 965 966 caps->stc_alloc_log_max = MLX5_GET(query_hca_cap_out, out, 967 capability.wqe_based_flow_table_cap. 968 stc_alloc_log_max); 969 970 caps->stc_alloc_log_gran = MLX5_GET(query_hca_cap_out, out, 971 capability.wqe_based_flow_table_cap. 
972 stc_alloc_log_granularity); 973 } 974 975 if (caps->eswitch_manager) { 976 MLX5_SET(query_hca_cap_in, in, op_mod, 977 MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE | 978 MLX5_HCA_CAP_OPMOD_GET_CUR); 979 980 ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out)); 981 if (ret) { 982 DR_LOG(ERR, "Failed to query flow table esw caps"); 983 rte_errno = errno; 984 return rte_errno; 985 } 986 987 caps->fdb_ft.max_level = MLX5_GET(query_hca_cap_out, out, 988 capability.flow_table_nic_cap. 989 flow_table_properties_nic_receive.max_ft_level); 990 991 caps->fdb_ft.reparse = MLX5_GET(query_hca_cap_out, out, 992 capability.flow_table_nic_cap. 993 flow_table_properties_nic_receive.reparse); 994 995 MLX5_SET(query_hca_cap_in, in, op_mod, 996 MLX5_SET_HCA_CAP_OP_MOD_ESW | MLX5_HCA_CAP_OPMOD_GET_CUR); 997 998 ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out)); 999 if (ret) { 1000 DR_LOG(ERR, "Query eswitch capabilities failed %d\n", ret); 1001 rte_errno = errno; 1002 return rte_errno; 1003 } 1004 1005 if (MLX5_GET(query_hca_cap_out, out, 1006 capability.esw_cap.esw_manager_vport_number_valid)) 1007 caps->eswitch_manager_vport_number = 1008 MLX5_GET(query_hca_cap_out, out, 1009 capability.esw_cap.esw_manager_vport_number); 1010 } 1011 1012 ret = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex); 1013 if (ret) { 1014 DR_LOG(ERR, "Failed to query device attributes"); 1015 rte_errno = ret; 1016 return rte_errno; 1017 } 1018 1019 strlcpy(caps->fw_ver, attr_ex.orig_attr.fw_ver, sizeof(caps->fw_ver)); 1020 1021 port_info = flow_hw_get_wire_port(ctx); 1022 if (port_info) { 1023 caps->wire_regc = port_info->regc_value; 1024 caps->wire_regc_mask = port_info->regc_mask; 1025 } else { 1026 DR_LOG(INFO, "Failed to query wire port regc value"); 1027 } 1028 1029 return ret; 1030 } 1031 /* Query an IB port through the DevX port-query glue call and fill @vport_caps with its vport number and E-Switch owner VHCA id; when the query also reports REG_C0 info, the metadata tag and mask are copied as well. Returns 0 on success; on query failure, or when the vport/esw-owner fields are not reported (vport not enabled), sets rte_errno = ENOTSUP and returns it. */ 1032 int mlx5dr_cmd_query_ib_port(struct ibv_context *ctx, 1033 struct mlx5dr_cmd_query_vport_caps *vport_caps, 1034 uint32_t port_num) 1035 { 1036 struct mlx5_port_info port_info
= {0}; 1037 uint32_t flags; 1038 int ret; 1039 1040 /* Both the vport id and the esw owner vhca id must be reported for the port to be usable */ flags = MLX5_PORT_QUERY_VPORT | MLX5_PORT_QUERY_ESW_OWNER_VHCA_ID; 1041 1042 ret = mlx5_glue->devx_port_query(ctx, port_num, &port_info); 1043 /* Check if the query succeeded and the vport is enabled */ 1044 if (ret || (port_info.query_flags & flags) != flags) { 1045 rte_errno = ENOTSUP; 1046 return rte_errno; 1047 } 1048 1049 vport_caps->vport_num = port_info.vport_id; 1050 vport_caps->esw_owner_vhca_id = port_info.esw_owner_vhca_id; 1051 1052 /* REG_C0 metadata tag/mask is optional - copy it only when the query reported it */ if (port_info.query_flags & MLX5_PORT_QUERY_REG_C0) { 1053 vport_caps->metadata_c = port_info.vport_meta_tag; 1054 vport_caps->metadata_c_mask = port_info.vport_meta_mask; 1055 } 1056 1057 return 0; 1058 } 1059