1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2022 NVIDIA Corporation & Affiliates 3 */ 4 5 #include "mlx5dr_internal.h" 6 7 static uint32_t mlx5dr_cmd_get_syndrome(uint32_t *out) 8 { 9 /* Assumption: syndrome is always the second u32 */ 10 return be32toh(out[1]); 11 } 12 13 int mlx5dr_cmd_destroy_obj(struct mlx5dr_devx_obj *devx_obj) 14 { 15 int ret; 16 17 ret = mlx5_glue->devx_obj_destroy(devx_obj->obj); 18 simple_free(devx_obj); 19 20 return ret; 21 } 22 23 struct mlx5dr_devx_obj * 24 mlx5dr_cmd_flow_table_create(struct ibv_context *ctx, 25 struct mlx5dr_cmd_ft_create_attr *ft_attr) 26 { 27 uint32_t out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0}; 28 uint32_t in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0}; 29 struct mlx5dr_devx_obj *devx_obj; 30 void *ft_ctx; 31 32 devx_obj = simple_malloc(sizeof(*devx_obj)); 33 if (!devx_obj) { 34 DR_LOG(ERR, "Failed to allocate memory for flow table object"); 35 rte_errno = ENOMEM; 36 return NULL; 37 } 38 39 MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE); 40 MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type); 41 42 ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context); 43 MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level); 44 MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid); 45 MLX5_SET(flow_table_context, ft_ctx, reformat_en, ft_attr->reformat_en); 46 47 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 48 if (!devx_obj->obj) { 49 DR_LOG(ERR, "Failed to create FT (syndrome: %#x)", 50 mlx5dr_cmd_get_syndrome(out)); 51 simple_free(devx_obj); 52 rte_errno = errno; 53 return NULL; 54 } 55 56 devx_obj->id = MLX5_GET(create_flow_table_out, out, table_id); 57 58 return devx_obj; 59 } 60 61 int 62 mlx5dr_cmd_flow_table_modify(struct mlx5dr_devx_obj *devx_obj, 63 struct mlx5dr_cmd_ft_modify_attr *ft_attr) 64 { 65 uint32_t out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0}; 66 uint32_t 
in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0}; 67 void *ft_ctx; 68 int ret; 69 70 MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE); 71 MLX5_SET(modify_flow_table_in, in, table_type, ft_attr->type); 72 MLX5_SET(modify_flow_table_in, in, modify_field_select, ft_attr->modify_fs); 73 MLX5_SET(modify_flow_table_in, in, table_id, devx_obj->id); 74 75 ft_ctx = MLX5_ADDR_OF(modify_flow_table_in, in, flow_table_context); 76 77 MLX5_SET(flow_table_context, ft_ctx, table_miss_action, ft_attr->table_miss_action); 78 MLX5_SET(flow_table_context, ft_ctx, table_miss_id, ft_attr->table_miss_id); 79 MLX5_SET(flow_table_context, ft_ctx, rtc_id_0, ft_attr->rtc_id_0); 80 MLX5_SET(flow_table_context, ft_ctx, rtc_id_1, ft_attr->rtc_id_1); 81 82 ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out)); 83 if (ret) { 84 DR_LOG(ERR, "Failed to modify FT (syndrome: %#x)", 85 mlx5dr_cmd_get_syndrome(out)); 86 rte_errno = errno; 87 } 88 89 return ret; 90 } 91 92 int 93 mlx5dr_cmd_flow_table_query(struct mlx5dr_devx_obj *devx_obj, 94 struct mlx5dr_cmd_ft_query_attr *ft_attr, 95 uint64_t *icm_addr_0, uint64_t *icm_addr_1) 96 { 97 uint32_t out[MLX5_ST_SZ_DW(query_flow_table_out)] = {0}; 98 uint32_t in[MLX5_ST_SZ_DW(query_flow_table_in)] = {0}; 99 void *ft_ctx; 100 int ret; 101 102 MLX5_SET(query_flow_table_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_TABLE); 103 MLX5_SET(query_flow_table_in, in, table_type, ft_attr->type); 104 MLX5_SET(query_flow_table_in, in, table_id, devx_obj->id); 105 106 ret = mlx5_glue->devx_obj_query(devx_obj->obj, in, sizeof(in), out, sizeof(out)); 107 if (ret) { 108 DR_LOG(ERR, "Failed to query FT (syndrome: %#x)", 109 mlx5dr_cmd_get_syndrome(out)); 110 rte_errno = errno; 111 return ret; 112 } 113 114 ft_ctx = MLX5_ADDR_OF(query_flow_table_out, out, flow_table_context); 115 *icm_addr_0 = MLX5_GET64(flow_table_context, ft_ctx, sw_owner_icm_root_0); 116 *icm_addr_1 = MLX5_GET64(flow_table_context, ft_ctx, sw_owner_icm_root_1); 
117 118 return ret; 119 } 120 121 static struct mlx5dr_devx_obj * 122 mlx5dr_cmd_flow_group_create(struct ibv_context *ctx, 123 struct mlx5dr_cmd_fg_attr *fg_attr) 124 { 125 uint32_t out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0}; 126 uint32_t in[MLX5_ST_SZ_DW(create_flow_group_in)] = {0}; 127 struct mlx5dr_devx_obj *devx_obj; 128 129 devx_obj = simple_malloc(sizeof(*devx_obj)); 130 if (!devx_obj) { 131 DR_LOG(ERR, "Failed to allocate memory for flow group object"); 132 rte_errno = ENOMEM; 133 return NULL; 134 } 135 136 MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP); 137 MLX5_SET(create_flow_group_in, in, table_type, fg_attr->table_type); 138 MLX5_SET(create_flow_group_in, in, table_id, fg_attr->table_id); 139 140 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 141 if (!devx_obj->obj) { 142 DR_LOG(ERR, "Failed to create Flow group(syndrome: %#x)", 143 mlx5dr_cmd_get_syndrome(out)); 144 simple_free(devx_obj); 145 rte_errno = errno; 146 return NULL; 147 } 148 149 devx_obj->id = MLX5_GET(create_flow_group_out, out, group_id); 150 151 return devx_obj; 152 } 153 154 struct mlx5dr_devx_obj * 155 mlx5dr_cmd_set_fte(struct ibv_context *ctx, 156 uint32_t table_type, 157 uint32_t table_id, 158 uint32_t group_id, 159 struct mlx5dr_cmd_set_fte_attr *fte_attr) 160 { 161 uint32_t in[MLX5_ST_SZ_DW(set_fte_in) + MLX5_ST_SZ_DW(dest_format)] = {0}; 162 uint32_t out[MLX5_ST_SZ_DW(set_fte_out)] = {0}; 163 struct mlx5dr_devx_obj *devx_obj; 164 void *in_flow_context; 165 uint32_t action_flags; 166 void *in_dests; 167 168 devx_obj = simple_malloc(sizeof(*devx_obj)); 169 if (!devx_obj) { 170 DR_LOG(ERR, "Failed to allocate memory for fte object"); 171 rte_errno = ENOMEM; 172 return NULL; 173 } 174 175 MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY); 176 MLX5_SET(set_fte_in, in, table_type, table_type); 177 MLX5_SET(set_fte_in, in, table_id, table_id); 178 179 in_flow_context = MLX5_ADDR_OF(set_fte_in, in, 
flow_context); 180 MLX5_SET(flow_context, in_flow_context, group_id, group_id); 181 MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source); 182 183 action_flags = fte_attr->action_flags; 184 MLX5_SET(flow_context, in_flow_context, action, action_flags); 185 186 if (action_flags & MLX5_FLOW_CONTEXT_ACTION_REFORMAT) 187 MLX5_SET(flow_context, in_flow_context, 188 packet_reformat_id, fte_attr->packet_reformat_id); 189 190 if (action_flags & (MLX5_FLOW_CONTEXT_ACTION_DECRYPT | MLX5_FLOW_CONTEXT_ACTION_ENCRYPT)) { 191 MLX5_SET(flow_context, in_flow_context, 192 encrypt_decrypt_type, fte_attr->encrypt_decrypt_type); 193 MLX5_SET(flow_context, in_flow_context, 194 encrypt_decrypt_obj_id, fte_attr->encrypt_decrypt_obj_id); 195 } 196 197 if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { 198 /* Only destination_list_size of size 1 is supported */ 199 MLX5_SET(flow_context, in_flow_context, destination_list_size, 1); 200 in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination); 201 MLX5_SET(dest_format, in_dests, destination_type, fte_attr->destination_type); 202 MLX5_SET(dest_format, in_dests, destination_id, fte_attr->destination_id); 203 MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level); 204 } 205 206 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 207 if (!devx_obj->obj) { 208 DR_LOG(ERR, "Failed to create FTE (syndrome: %#x)", 209 mlx5dr_cmd_get_syndrome(out)); 210 rte_errno = errno; 211 goto free_devx; 212 } 213 214 return devx_obj; 215 216 free_devx: 217 simple_free(devx_obj); 218 return NULL; 219 } 220 221 struct mlx5dr_cmd_forward_tbl * 222 mlx5dr_cmd_forward_tbl_create(struct ibv_context *ctx, 223 struct mlx5dr_cmd_ft_create_attr *ft_attr, 224 struct mlx5dr_cmd_set_fte_attr *fte_attr) 225 { 226 struct mlx5dr_cmd_fg_attr fg_attr = {0}; 227 struct mlx5dr_cmd_forward_tbl *tbl; 228 229 tbl = simple_calloc(1, sizeof(*tbl)); 230 if (!tbl) { 231 DR_LOG(ERR, "Failed to allocate 
memory"); 232 rte_errno = ENOMEM; 233 return NULL; 234 } 235 236 tbl->ft = mlx5dr_cmd_flow_table_create(ctx, ft_attr); 237 if (!tbl->ft) { 238 DR_LOG(ERR, "Failed to create FT"); 239 goto free_tbl; 240 } 241 242 fg_attr.table_id = tbl->ft->id; 243 fg_attr.table_type = ft_attr->type; 244 245 tbl->fg = mlx5dr_cmd_flow_group_create(ctx, &fg_attr); 246 if (!tbl->fg) { 247 DR_LOG(ERR, "Failed to create FG"); 248 goto free_ft; 249 } 250 251 tbl->fte = mlx5dr_cmd_set_fte(ctx, ft_attr->type, tbl->ft->id, tbl->fg->id, fte_attr); 252 if (!tbl->fte) { 253 DR_LOG(ERR, "Failed to create FTE"); 254 goto free_fg; 255 } 256 return tbl; 257 258 free_fg: 259 mlx5dr_cmd_destroy_obj(tbl->fg); 260 free_ft: 261 mlx5dr_cmd_destroy_obj(tbl->ft); 262 free_tbl: 263 simple_free(tbl); 264 return NULL; 265 } 266 267 void mlx5dr_cmd_forward_tbl_destroy(struct mlx5dr_cmd_forward_tbl *tbl) 268 { 269 mlx5dr_cmd_destroy_obj(tbl->fte); 270 mlx5dr_cmd_destroy_obj(tbl->fg); 271 mlx5dr_cmd_destroy_obj(tbl->ft); 272 simple_free(tbl); 273 } 274 275 void mlx5dr_cmd_set_attr_connect_miss_tbl(struct mlx5dr_context *ctx, 276 uint32_t fw_ft_type, 277 enum mlx5dr_table_type type, 278 struct mlx5dr_cmd_ft_modify_attr *ft_attr) 279 { 280 struct mlx5dr_devx_obj *default_miss_tbl; 281 282 if (type != MLX5DR_TABLE_TYPE_FDB && !mlx5dr_context_shared_gvmi_used(ctx)) 283 return; 284 285 ft_attr->modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION; 286 ft_attr->type = fw_ft_type; 287 ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL; 288 289 if (type == MLX5DR_TABLE_TYPE_FDB) { 290 default_miss_tbl = ctx->common_res[type].default_miss->ft; 291 if (!default_miss_tbl) { 292 assert(false); 293 return; 294 } 295 ft_attr->table_miss_id = default_miss_tbl->id; 296 } else { 297 ft_attr->table_miss_id = ctx->gvmi_res[type].aliased_end_ft->id; 298 } 299 } 300 301 struct mlx5dr_devx_obj * 302 mlx5dr_cmd_rtc_create(struct ibv_context *ctx, 303 struct mlx5dr_cmd_rtc_create_attr *rtc_attr) 304 { 305 
uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 306 uint32_t in[MLX5_ST_SZ_DW(create_rtc_in)] = {0}; 307 struct mlx5dr_devx_obj *devx_obj; 308 void *attr; 309 310 devx_obj = simple_malloc(sizeof(*devx_obj)); 311 if (!devx_obj) { 312 DR_LOG(ERR, "Failed to allocate memory for RTC object"); 313 rte_errno = ENOMEM; 314 return NULL; 315 } 316 317 attr = MLX5_ADDR_OF(create_rtc_in, in, hdr); 318 MLX5_SET(general_obj_in_cmd_hdr, 319 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 320 MLX5_SET(general_obj_in_cmd_hdr, 321 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_RTC); 322 323 attr = MLX5_ADDR_OF(create_rtc_in, in, rtc); 324 MLX5_SET(rtc, attr, ste_format_0, rtc_attr->is_frst_jumbo ? 325 MLX5_IFC_RTC_STE_FORMAT_11DW : 326 MLX5_IFC_RTC_STE_FORMAT_8DW); 327 328 if (rtc_attr->is_scnd_range) { 329 MLX5_SET(rtc, attr, ste_format_1, MLX5_IFC_RTC_STE_FORMAT_RANGE); 330 MLX5_SET(rtc, attr, num_match_ste, 2); 331 } 332 333 MLX5_SET(rtc, attr, pd, rtc_attr->pd); 334 MLX5_SET(rtc, attr, update_method, rtc_attr->fw_gen_wqe); 335 MLX5_SET(rtc, attr, update_index_mode, rtc_attr->update_index_mode); 336 MLX5_SET(rtc, attr, access_index_mode, rtc_attr->access_index_mode); 337 MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer); 338 MLX5_SET(rtc, attr, log_depth, rtc_attr->log_depth); 339 MLX5_SET(rtc, attr, log_hash_size, rtc_attr->log_size); 340 MLX5_SET(rtc, attr, table_type, rtc_attr->table_type); 341 MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer); 342 MLX5_SET(rtc, attr, match_definer_0, rtc_attr->match_definer_0); 343 MLX5_SET(rtc, attr, match_definer_1, rtc_attr->match_definer_1); 344 MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base); 345 MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base); 346 MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset); 347 MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id); 348 MLX5_SET(rtc, attr, reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS); 349 350 devx_obj->obj = 
mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 351 if (!devx_obj->obj) { 352 DR_LOG(ERR, "Failed to create RTC (syndrome: %#x)", 353 mlx5dr_cmd_get_syndrome(out)); 354 simple_free(devx_obj); 355 rte_errno = errno; 356 return NULL; 357 } 358 359 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 360 361 return devx_obj; 362 } 363 364 struct mlx5dr_devx_obj * 365 mlx5dr_cmd_stc_create(struct ibv_context *ctx, 366 struct mlx5dr_cmd_stc_create_attr *stc_attr) 367 { 368 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 369 uint32_t in[MLX5_ST_SZ_DW(create_stc_in)] = {0}; 370 struct mlx5dr_devx_obj *devx_obj; 371 void *attr; 372 373 devx_obj = simple_malloc(sizeof(*devx_obj)); 374 if (!devx_obj) { 375 DR_LOG(ERR, "Failed to allocate memory for STC object"); 376 rte_errno = ENOMEM; 377 return NULL; 378 } 379 380 attr = MLX5_ADDR_OF(create_stc_in, in, hdr); 381 MLX5_SET(general_obj_in_cmd_hdr, 382 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 383 MLX5_SET(general_obj_in_cmd_hdr, 384 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STC); 385 MLX5_SET(general_obj_in_cmd_hdr, 386 attr, log_obj_range, stc_attr->log_obj_range); 387 388 attr = MLX5_ADDR_OF(create_stc_in, in, stc); 389 MLX5_SET(stc, attr, table_type, stc_attr->table_type); 390 391 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 392 if (!devx_obj->obj) { 393 DR_LOG(ERR, "Failed to create STC (syndrome: %#x)", 394 mlx5dr_cmd_get_syndrome(out)); 395 simple_free(devx_obj); 396 rte_errno = errno; 397 return NULL; 398 } 399 400 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 401 402 return devx_obj; 403 } 404 405 static int 406 mlx5dr_cmd_stc_modify_set_stc_param(struct mlx5dr_cmd_stc_modify_attr *stc_attr, 407 void *stc_parm) 408 { 409 switch (stc_attr->action_type) { 410 case MLX5_IFC_STC_ACTION_TYPE_COUNTER: 411 MLX5_SET(stc_ste_param_flow_counter, stc_parm, flow_counter_id, stc_attr->id); 412 break; 413 case 
MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR: 414 MLX5_SET(stc_ste_param_tir, stc_parm, tirn, stc_attr->dest_tir_num); 415 break; 416 case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT: 417 MLX5_SET(stc_ste_param_table, stc_parm, table_id, stc_attr->dest_table_id); 418 break; 419 case MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST: 420 MLX5_SET(stc_ste_param_header_modify_list, stc_parm, 421 header_modify_pattern_id, stc_attr->modify_header.pattern_id); 422 MLX5_SET(stc_ste_param_header_modify_list, stc_parm, 423 header_modify_argument_id, stc_attr->modify_header.arg_id); 424 break; 425 case MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE: 426 MLX5_SET(stc_ste_param_remove, stc_parm, action_type, 427 MLX5_MODIFICATION_TYPE_REMOVE); 428 MLX5_SET(stc_ste_param_remove, stc_parm, decap, 429 stc_attr->remove_header.decap); 430 MLX5_SET(stc_ste_param_remove, stc_parm, remove_start_anchor, 431 stc_attr->remove_header.start_anchor); 432 MLX5_SET(stc_ste_param_remove, stc_parm, remove_end_anchor, 433 stc_attr->remove_header.end_anchor); 434 break; 435 case MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT: 436 MLX5_SET(stc_ste_param_insert, stc_parm, action_type, 437 MLX5_MODIFICATION_TYPE_INSERT); 438 MLX5_SET(stc_ste_param_insert, stc_parm, encap, 439 stc_attr->insert_header.encap); 440 MLX5_SET(stc_ste_param_insert, stc_parm, inline_data, 441 stc_attr->insert_header.is_inline); 442 MLX5_SET(stc_ste_param_insert, stc_parm, insert_anchor, 443 stc_attr->insert_header.insert_anchor); 444 /* HW gets the next 2 sizes in words */ 445 MLX5_SET(stc_ste_param_insert, stc_parm, insert_size, 446 stc_attr->insert_header.header_size / 2); 447 MLX5_SET(stc_ste_param_insert, stc_parm, insert_offset, 448 stc_attr->insert_header.insert_offset / 2); 449 MLX5_SET(stc_ste_param_insert, stc_parm, insert_argument, 450 stc_attr->insert_header.arg_id); 451 break; 452 case MLX5_IFC_STC_ACTION_TYPE_COPY: 453 case MLX5_IFC_STC_ACTION_TYPE_SET: 454 case MLX5_IFC_STC_ACTION_TYPE_ADD: 455 case MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD: 456 *(__be64 
*)stc_parm = stc_attr->modify_action.data; 457 break; 458 case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT: 459 case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK: 460 MLX5_SET(stc_ste_param_vport, stc_parm, vport_number, 461 stc_attr->vport.vport_num); 462 MLX5_SET(stc_ste_param_vport, stc_parm, eswitch_owner_vhca_id, 463 stc_attr->vport.esw_owner_vhca_id); 464 MLX5_SET(stc_ste_param_vport, stc_parm, eswitch_owner_vhca_id_valid, 1); 465 break; 466 case MLX5_IFC_STC_ACTION_TYPE_DROP: 467 case MLX5_IFC_STC_ACTION_TYPE_NOP: 468 case MLX5_IFC_STC_ACTION_TYPE_TAG: 469 case MLX5_IFC_STC_ACTION_TYPE_ALLOW: 470 break; 471 case MLX5_IFC_STC_ACTION_TYPE_ASO: 472 MLX5_SET(stc_ste_param_execute_aso, stc_parm, aso_object_id, 473 stc_attr->aso.devx_obj_id); 474 MLX5_SET(stc_ste_param_execute_aso, stc_parm, return_reg_id, 475 stc_attr->aso.return_reg_id); 476 MLX5_SET(stc_ste_param_execute_aso, stc_parm, aso_type, 477 stc_attr->aso.aso_type); 478 break; 479 case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE: 480 MLX5_SET(stc_ste_param_ste_table, stc_parm, ste_obj_id, 481 stc_attr->ste_table.ste_obj_id); 482 MLX5_SET(stc_ste_param_ste_table, stc_parm, match_definer_id, 483 stc_attr->ste_table.match_definer_id); 484 MLX5_SET(stc_ste_param_ste_table, stc_parm, log_hash_size, 485 stc_attr->ste_table.log_hash_size); 486 break; 487 case MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS: 488 MLX5_SET(stc_ste_param_remove_words, stc_parm, action_type, 489 MLX5_MODIFICATION_TYPE_REMOVE_WORDS); 490 MLX5_SET(stc_ste_param_remove_words, stc_parm, remove_start_anchor, 491 stc_attr->remove_words.start_anchor); 492 MLX5_SET(stc_ste_param_remove_words, stc_parm, 493 remove_size, stc_attr->remove_words.num_of_words); 494 break; 495 default: 496 DR_LOG(ERR, "Not supported type %d", stc_attr->action_type); 497 rte_errno = EINVAL; 498 return rte_errno; 499 } 500 return 0; 501 } 502 503 int 504 mlx5dr_cmd_stc_modify(struct mlx5dr_devx_obj *devx_obj, 505 struct mlx5dr_cmd_stc_modify_attr *stc_attr) 506 { 507 uint32_t 
out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 508 uint32_t in[MLX5_ST_SZ_DW(create_stc_in)] = {0}; 509 void *stc_parm; 510 void *attr; 511 int ret; 512 513 attr = MLX5_ADDR_OF(create_stc_in, in, hdr); 514 MLX5_SET(general_obj_in_cmd_hdr, 515 attr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT); 516 MLX5_SET(general_obj_in_cmd_hdr, 517 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STC); 518 MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, devx_obj->id); 519 MLX5_SET(general_obj_in_cmd_hdr, in, obj_offset, stc_attr->stc_offset); 520 521 attr = MLX5_ADDR_OF(create_stc_in, in, stc); 522 MLX5_SET(stc, attr, ste_action_offset, stc_attr->action_offset); 523 MLX5_SET(stc, attr, action_type, stc_attr->action_type); 524 MLX5_SET64(stc, attr, modify_field_select, 525 MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC); 526 527 /* Set destination TIRN, TAG, FT ID, STE ID */ 528 stc_parm = MLX5_ADDR_OF(stc, attr, stc_param); 529 ret = mlx5dr_cmd_stc_modify_set_stc_param(stc_attr, stc_parm); 530 if (ret) 531 return ret; 532 533 ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out)); 534 if (ret) { 535 DR_LOG(ERR, "Failed to modify STC FW action_type %d (syndrome: %#x)", 536 stc_attr->action_type, mlx5dr_cmd_get_syndrome(out)); 537 rte_errno = errno; 538 } 539 540 return ret; 541 } 542 543 struct mlx5dr_devx_obj * 544 mlx5dr_cmd_arg_create(struct ibv_context *ctx, 545 uint16_t log_obj_range, 546 uint32_t pd) 547 { 548 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 549 uint32_t in[MLX5_ST_SZ_DW(create_arg_in)] = {0}; 550 struct mlx5dr_devx_obj *devx_obj; 551 void *attr; 552 553 devx_obj = simple_malloc(sizeof(*devx_obj)); 554 if (!devx_obj) { 555 DR_LOG(ERR, "Failed to allocate memory for ARG object"); 556 rte_errno = ENOMEM; 557 return NULL; 558 } 559 560 attr = MLX5_ADDR_OF(create_arg_in, in, hdr); 561 MLX5_SET(general_obj_in_cmd_hdr, 562 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 563 MLX5_SET(general_obj_in_cmd_hdr, 564 attr, obj_type, 
MLX5_GENERAL_OBJ_TYPE_ARG); 565 MLX5_SET(general_obj_in_cmd_hdr, 566 attr, log_obj_range, log_obj_range); 567 568 attr = MLX5_ADDR_OF(create_arg_in, in, arg); 569 MLX5_SET(arg, attr, access_pd, pd); 570 571 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 572 if (!devx_obj->obj) { 573 DR_LOG(ERR, "Failed to create ARG (syndrome: %#x)", 574 mlx5dr_cmd_get_syndrome(out)); 575 simple_free(devx_obj); 576 rte_errno = errno; 577 return NULL; 578 } 579 580 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 581 582 return devx_obj; 583 } 584 585 struct mlx5dr_devx_obj * 586 mlx5dr_cmd_header_modify_pattern_create(struct ibv_context *ctx, 587 uint32_t pattern_length, 588 uint8_t *actions) 589 { 590 uint32_t in[MLX5_ST_SZ_DW(create_header_modify_pattern_in)] = {0}; 591 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 592 struct mlx5dr_devx_obj *devx_obj; 593 uint64_t *pattern_data; 594 int num_of_actions; 595 void *pattern; 596 void *attr; 597 int i; 598 599 if (pattern_length > MAX_ACTIONS_DATA_IN_HEADER_MODIFY) { 600 DR_LOG(ERR, "Pattern length %d exceeds limit %d", 601 pattern_length, MAX_ACTIONS_DATA_IN_HEADER_MODIFY); 602 rte_errno = EINVAL; 603 return NULL; 604 } 605 606 devx_obj = simple_malloc(sizeof(*devx_obj)); 607 if (!devx_obj) { 608 DR_LOG(ERR, "Failed to allocate memory for header_modify_pattern object"); 609 rte_errno = ENOMEM; 610 return NULL; 611 } 612 attr = MLX5_ADDR_OF(create_header_modify_pattern_in, in, hdr); 613 MLX5_SET(general_obj_in_cmd_hdr, 614 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 615 MLX5_SET(general_obj_in_cmd_hdr, 616 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_MODIFY_HEADER_PATTERN); 617 618 pattern = MLX5_ADDR_OF(create_header_modify_pattern_in, in, pattern); 619 /* Pattern_length is in ddwords */ 620 MLX5_SET(header_modify_pattern_in, pattern, pattern_length, pattern_length / (2 * DW_SIZE)); 621 622 pattern_data = (uint64_t *)MLX5_ADDR_OF(header_modify_pattern_in, pattern, 
pattern_data); 623 memcpy(pattern_data, actions, pattern_length); 624 625 num_of_actions = pattern_length / MLX5DR_MODIFY_ACTION_SIZE; 626 for (i = 0; i < num_of_actions; i++) { 627 int type; 628 629 type = MLX5_GET(set_action_in, &pattern_data[i], action_type); 630 if (type != MLX5_MODIFICATION_TYPE_COPY && 631 type != MLX5_MODIFICATION_TYPE_ADD_FIELD) 632 /* Action typ-copy use all bytes for control */ 633 MLX5_SET(set_action_in, &pattern_data[i], data, 0); 634 } 635 636 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 637 if (!devx_obj->obj) { 638 DR_LOG(ERR, "Failed to create header_modify_pattern (syndrome: %#x)", 639 mlx5dr_cmd_get_syndrome(out)); 640 rte_errno = errno; 641 goto free_obj; 642 } 643 644 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 645 646 return devx_obj; 647 648 free_obj: 649 simple_free(devx_obj); 650 return NULL; 651 } 652 653 struct mlx5dr_devx_obj * 654 mlx5dr_cmd_ste_create(struct ibv_context *ctx, 655 struct mlx5dr_cmd_ste_create_attr *ste_attr) 656 { 657 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 658 uint32_t in[MLX5_ST_SZ_DW(create_ste_in)] = {0}; 659 struct mlx5dr_devx_obj *devx_obj; 660 void *attr; 661 662 devx_obj = simple_malloc(sizeof(*devx_obj)); 663 if (!devx_obj) { 664 DR_LOG(ERR, "Failed to allocate memory for STE object"); 665 rte_errno = ENOMEM; 666 return NULL; 667 } 668 669 attr = MLX5_ADDR_OF(create_ste_in, in, hdr); 670 MLX5_SET(general_obj_in_cmd_hdr, 671 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 672 MLX5_SET(general_obj_in_cmd_hdr, 673 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STE); 674 MLX5_SET(general_obj_in_cmd_hdr, 675 attr, log_obj_range, ste_attr->log_obj_range); 676 677 attr = MLX5_ADDR_OF(create_ste_in, in, ste); 678 MLX5_SET(ste, attr, table_type, ste_attr->table_type); 679 680 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 681 if (!devx_obj->obj) { 682 DR_LOG(ERR, "Failed to create STE (syndrome: 
%#x)", 683 mlx5dr_cmd_get_syndrome(out)); 684 simple_free(devx_obj); 685 rte_errno = errno; 686 return NULL; 687 } 688 689 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 690 691 return devx_obj; 692 } 693 694 struct mlx5dr_devx_obj * 695 mlx5dr_cmd_definer_create(struct ibv_context *ctx, 696 struct mlx5dr_cmd_definer_create_attr *def_attr) 697 { 698 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 699 uint32_t in[MLX5_ST_SZ_DW(create_definer_in)] = {0}; 700 struct mlx5dr_devx_obj *devx_obj; 701 void *ptr; 702 703 devx_obj = simple_malloc(sizeof(*devx_obj)); 704 if (!devx_obj) { 705 DR_LOG(ERR, "Failed to allocate memory for definer object"); 706 rte_errno = ENOMEM; 707 return NULL; 708 } 709 710 MLX5_SET(general_obj_in_cmd_hdr, 711 in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 712 MLX5_SET(general_obj_in_cmd_hdr, 713 in, obj_type, MLX5_GENERAL_OBJ_TYPE_DEFINER); 714 715 ptr = MLX5_ADDR_OF(create_definer_in, in, definer); 716 MLX5_SET(definer, ptr, format_id, MLX5_IFC_DEFINER_FORMAT_ID_SELECT); 717 718 MLX5_SET(definer, ptr, format_select_dw0, def_attr->dw_selector[0]); 719 MLX5_SET(definer, ptr, format_select_dw1, def_attr->dw_selector[1]); 720 MLX5_SET(definer, ptr, format_select_dw2, def_attr->dw_selector[2]); 721 MLX5_SET(definer, ptr, format_select_dw3, def_attr->dw_selector[3]); 722 MLX5_SET(definer, ptr, format_select_dw4, def_attr->dw_selector[4]); 723 MLX5_SET(definer, ptr, format_select_dw5, def_attr->dw_selector[5]); 724 MLX5_SET(definer, ptr, format_select_dw6, def_attr->dw_selector[6]); 725 MLX5_SET(definer, ptr, format_select_dw7, def_attr->dw_selector[7]); 726 MLX5_SET(definer, ptr, format_select_dw8, def_attr->dw_selector[8]); 727 728 MLX5_SET(definer, ptr, format_select_byte0, def_attr->byte_selector[0]); 729 MLX5_SET(definer, ptr, format_select_byte1, def_attr->byte_selector[1]); 730 MLX5_SET(definer, ptr, format_select_byte2, def_attr->byte_selector[2]); 731 MLX5_SET(definer, ptr, format_select_byte3, 
def_attr->byte_selector[3]); 732 MLX5_SET(definer, ptr, format_select_byte4, def_attr->byte_selector[4]); 733 MLX5_SET(definer, ptr, format_select_byte5, def_attr->byte_selector[5]); 734 MLX5_SET(definer, ptr, format_select_byte6, def_attr->byte_selector[6]); 735 MLX5_SET(definer, ptr, format_select_byte7, def_attr->byte_selector[7]); 736 737 ptr = MLX5_ADDR_OF(definer, ptr, match_mask); 738 memcpy(ptr, def_attr->match_mask, MLX5_FLD_SZ_BYTES(definer, match_mask)); 739 740 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 741 if (!devx_obj->obj) { 742 DR_LOG(ERR, "Failed to create Definer (syndrome: %#x)", 743 mlx5dr_cmd_get_syndrome(out)); 744 simple_free(devx_obj); 745 rte_errno = errno; 746 return NULL; 747 } 748 749 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 750 751 return devx_obj; 752 } 753 754 struct mlx5dr_devx_obj * 755 mlx5dr_cmd_sq_create(struct ibv_context *ctx, 756 struct mlx5dr_cmd_sq_create_attr *attr) 757 { 758 uint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0}; 759 uint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0}; 760 void *sqc = MLX5_ADDR_OF(create_sq_in, in, ctx); 761 void *wqc = MLX5_ADDR_OF(sqc, sqc, wq); 762 struct mlx5dr_devx_obj *devx_obj; 763 764 devx_obj = simple_malloc(sizeof(*devx_obj)); 765 if (!devx_obj) { 766 DR_LOG(ERR, "Failed to create SQ"); 767 rte_errno = ENOMEM; 768 return NULL; 769 } 770 771 MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ); 772 MLX5_SET(sqc, sqc, cqn, attr->cqn); 773 MLX5_SET(sqc, sqc, flush_in_error_en, 1); 774 MLX5_SET(sqc, sqc, non_wire, 1); 775 MLX5_SET(sqc, sqc, ts_format, attr->ts_format); 776 MLX5_SET(wq, wqc, wq_type, MLX5_WQ_TYPE_CYCLIC); 777 MLX5_SET(wq, wqc, pd, attr->pdn); 778 MLX5_SET(wq, wqc, uar_page, attr->page_id); 779 MLX5_SET(wq, wqc, log_wq_stride, log2above(MLX5_SEND_WQE_BB)); 780 MLX5_SET(wq, wqc, log_wq_sz, attr->log_wq_sz); 781 MLX5_SET(wq, wqc, dbr_umem_id, attr->dbr_id); 782 MLX5_SET(wq, wqc, wq_umem_id, attr->wq_id); 783 784 
devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 785 if (!devx_obj->obj) { 786 simple_free(devx_obj); 787 rte_errno = errno; 788 return NULL; 789 } 790 791 devx_obj->id = MLX5_GET(create_sq_out, out, sqn); 792 793 return devx_obj; 794 } 795 796 struct mlx5dr_devx_obj * 797 mlx5dr_cmd_packet_reformat_create(struct ibv_context *ctx, 798 struct mlx5dr_cmd_packet_reformat_create_attr *attr) 799 { 800 uint32_t out[MLX5_ST_SZ_DW(alloc_packet_reformat_out)] = {0}; 801 size_t insz, cmd_data_sz, cmd_total_sz; 802 struct mlx5dr_devx_obj *devx_obj; 803 void *prctx; 804 void *pdata; 805 void *in; 806 807 cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in); 808 cmd_total_sz += MLX5_ST_SZ_BYTES(packet_reformat_context_in); 809 cmd_data_sz = MLX5_FLD_SZ_BYTES(packet_reformat_context_in, reformat_data); 810 insz = align(cmd_total_sz + attr->data_sz - cmd_data_sz, DW_SIZE); 811 in = simple_calloc(1, insz); 812 if (!in) { 813 rte_errno = ENOMEM; 814 return NULL; 815 } 816 817 MLX5_SET(alloc_packet_reformat_context_in, in, opcode, 818 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT); 819 820 prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in, 821 packet_reformat_context); 822 pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data); 823 824 MLX5_SET(packet_reformat_context_in, prctx, reformat_type, attr->type); 825 MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, attr->reformat_param_0); 826 MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, attr->data_sz); 827 memcpy(pdata, attr->data, attr->data_sz); 828 829 devx_obj = simple_malloc(sizeof(*devx_obj)); 830 if (!devx_obj) { 831 DR_LOG(ERR, "Failed to allocate memory for packet reformat object"); 832 rte_errno = ENOMEM; 833 goto out_free_in; 834 } 835 836 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, insz, out, sizeof(out)); 837 if (!devx_obj->obj) { 838 DR_LOG(ERR, "Failed to create packet reformat"); 839 rte_errno = errno; 840 goto 
out_free_devx; 841 } 842 843 devx_obj->id = MLX5_GET(alloc_packet_reformat_out, out, packet_reformat_id); 844 845 simple_free(in); 846 847 return devx_obj; 848 849 out_free_devx: 850 simple_free(devx_obj); 851 out_free_in: 852 simple_free(in); 853 return NULL; 854 } 855 856 int mlx5dr_cmd_sq_modify_rdy(struct mlx5dr_devx_obj *devx_obj) 857 { 858 uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0}; 859 uint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0}; 860 void *sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); 861 int ret; 862 863 MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ); 864 MLX5_SET(modify_sq_in, in, sqn, devx_obj->id); 865 MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST); 866 MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY); 867 868 ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out)); 869 if (ret) { 870 DR_LOG(ERR, "Failed to modify SQ (syndrome: %#x)", 871 mlx5dr_cmd_get_syndrome(out)); 872 rte_errno = errno; 873 } 874 875 return ret; 876 } 877 878 int mlx5dr_cmd_allow_other_vhca_access(struct ibv_context *ctx, 879 struct mlx5dr_cmd_allow_other_vhca_access_attr *attr) 880 { 881 uint32_t out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {0}; 882 uint32_t in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {0}; 883 void *key; 884 int ret; 885 886 MLX5_SET(allow_other_vhca_access_in, 887 in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS); 888 MLX5_SET(allow_other_vhca_access_in, 889 in, object_type_to_be_accessed, attr->obj_type); 890 MLX5_SET(allow_other_vhca_access_in, 891 in, object_id_to_be_accessed, attr->obj_id); 892 893 key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key); 894 memcpy(key, attr->access_key, sizeof(attr->access_key)); 895 896 ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out)); 897 if (ret) { 898 DR_LOG(ERR, "Failed to execute ALLOW_OTHER_VHCA_ACCESS command"); 899 rte_errno = errno; 900 return rte_errno; 901 } 902 903 return 0; 904 } 905 906 struct mlx5dr_devx_obj * 907 
mlx5dr_cmd_alias_obj_create(struct ibv_context *ctx, 908 struct mlx5dr_cmd_alias_obj_create_attr *alias_attr) 909 { 910 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 911 uint32_t in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {0}; 912 struct mlx5dr_devx_obj *devx_obj; 913 void *attr; 914 void *key; 915 916 devx_obj = simple_malloc(sizeof(*devx_obj)); 917 if (!devx_obj) { 918 DR_LOG(ERR, "Failed to allocate memory for ALIAS general object"); 919 rte_errno = ENOMEM; 920 return NULL; 921 } 922 923 attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr); 924 MLX5_SET(general_obj_in_cmd_hdr, 925 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 926 MLX5_SET(general_obj_in_cmd_hdr, 927 attr, obj_type, alias_attr->obj_type); 928 MLX5_SET(general_obj_in_cmd_hdr, attr, alias_object, 1); 929 930 attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx); 931 MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id); 932 MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id); 933 934 key = MLX5_ADDR_OF(alias_context, attr, access_key); 935 memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key)); 936 937 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 938 if (!devx_obj->obj) { 939 DR_LOG(ERR, "Failed to create ALIAS OBJ (syndrome: %#x)", 940 mlx5dr_cmd_get_syndrome(out)); 941 simple_free(devx_obj); 942 rte_errno = errno; 943 return NULL; 944 } 945 946 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 947 948 return devx_obj; 949 } 950 951 int mlx5dr_cmd_generate_wqe(struct ibv_context *ctx, 952 struct mlx5dr_cmd_generate_wqe_attr *attr, 953 struct mlx5_cqe64 *ret_cqe) 954 { 955 uint32_t out[MLX5_ST_SZ_DW(generate_wqe_out)] = {0}; 956 uint32_t in[MLX5_ST_SZ_DW(generate_wqe_in)] = {0}; 957 uint8_t status; 958 void *ptr; 959 int ret; 960 961 MLX5_SET(generate_wqe_in, in, opcode, MLX5_CMD_OP_GENERATE_WQE); 962 MLX5_SET(generate_wqe_in, in, pdn, attr->pdn); 963 964 ptr = 
MLX5_ADDR_OF(generate_wqe_in, in, wqe_ctrl); /* (fragment: RHS of an assignment started on the previous chunk line) */
	/* Copy each WQE segment into its slot in the command input mailbox. */
	memcpy(ptr, attr->wqe_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_ctrl));

	ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_ctrl);
	memcpy(ptr, attr->gta_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_ctrl));

	ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_0);
	memcpy(ptr, attr->gta_data_0, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_0));

	/* Second data segment is optional. */
	if (attr->gta_data_1) {
		ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_1);
		memcpy(ptr, attr->gta_data_1, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_1));
	}

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to write GTA WQE using FW");
		rte_errno = errno;
		return rte_errno;
	}

	/* Command succeeded but the embedded CQE status may still report an error. */
	status = MLX5_GET(generate_wqe_out, out, status);
	if (status) {
		DR_LOG(ERR, "Invalid FW CQE status %d", status);
		rte_errno = EINVAL;
		return rte_errno;
	}

	/* Hand the raw CQE data back to the caller. */
	ptr = MLX5_ADDR_OF(generate_wqe_out, out, cqe_data);
	memcpy(ret_cqe, ptr, sizeof(*ret_cqe));

	return 0;
}

/* Query device capabilities relevant to HW steering and cache them in @caps.
 *
 * Runs QUERY_HCA_CAP with several op_mods (general device, general device 2,
 * NIC flow table, and conditionally WQE-based flow table and eswitch pages),
 * plus ibv device attributes and wire-port register info.
 *
 * @ctx:  ibv device context.
 * @caps: output capability cache filled by this function.
 * Return: 0 on success, rte_errno on any query failure.
 */
int mlx5dr_cmd_query_caps(struct ibv_context *ctx,
			  struct mlx5dr_cmd_query_caps *caps)
{
	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
	const struct flow_hw_port_info *port_info;
	struct ibv_device_attr_ex attr_ex;
	u32 res;
	int ret;

	/* First query: general device capability page (current values). */
	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to query device caps");
		rte_errno = errno;
		return rte_errno;
	}

	caps->wqe_based_update =
		MLX5_GET(query_hca_cap_out, out,
			 capability.cmd_hca_cap.wqe_based_flow_table_update_cap);
	/* (fragment: continuation of mlx5dr_cmd_query_caps)
	 * Harvest fields from the general device capability page.
	 */
	caps->eswitch_manager = MLX5_GET(query_hca_cap_out, out,
					 capability.cmd_hca_cap.eswitch_manager);

	caps->flex_protocols = MLX5_GET(query_hca_cap_out, out,
					capability.cmd_hca_cap.flex_parser_protocols);

	/* Effective granularity = reported granularity minus its offset. */
	caps->log_header_modify_argument_granularity =
		MLX5_GET(query_hca_cap_out, out,
			 capability.cmd_hca_cap.log_header_modify_argument_granularity);

	caps->log_header_modify_argument_granularity -=
		MLX5_GET(query_hca_cap_out, out,
			 capability.cmd_hca_cap.
			 log_header_modify_argument_granularity_offset);

	caps->log_header_modify_argument_max_alloc =
		MLX5_GET(query_hca_cap_out, out,
			 capability.cmd_hca_cap.log_header_modify_argument_max_alloc);

	/* 64-bit bitmap of supported match definer formats. */
	caps->definer_format_sup =
		MLX5_GET64(query_hca_cap_out, out,
			   capability.cmd_hca_cap.match_definer_format_supported);

	caps->vhca_id = MLX5_GET(query_hca_cap_out, out,
				 capability.cmd_hca_cap.vhca_id);

	caps->sq_ts_format = MLX5_GET(query_hca_cap_out, out,
				      capability.cmd_hca_cap.sq_ts_format);

	caps->ipsec_offload = MLX5_GET(query_hca_cap_out, out,
				       capability.cmd_hca_cap.ipsec_offload);

	/* Second query: capability page 2 (reuses the same in/out mailboxes;
	 * only op_mod changes).
	 */
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to query device caps");
		rte_errno = errno;
		return rte_errno;
	}

	caps->full_dw_jumbo_support = MLX5_GET(query_hca_cap_out, out,
					       capability.cmd_hca_cap_2.
					       format_select_dw_8_6_ext);

	caps->format_select_gtpu_dw_0 = MLX5_GET(query_hca_cap_out, out,
						 capability.cmd_hca_cap_2.
						 format_select_dw_gtpu_dw_0);

	caps->format_select_gtpu_dw_1 = MLX5_GET(query_hca_cap_out, out,
						 capability.cmd_hca_cap_2. /* (fragment: field name continues on the next chunk line) */
					 format_select_dw_gtpu_dw_1); /* (fragment: tail of an MLX5_GET started on the previous chunk line) */

	caps->format_select_gtpu_dw_2 = MLX5_GET(query_hca_cap_out, out,
						 capability.cmd_hca_cap_2.
						 format_select_dw_gtpu_dw_2);

	caps->format_select_gtpu_ext_dw_0 = MLX5_GET(query_hca_cap_out, out,
						     capability.cmd_hca_cap_2.
						     format_select_dw_gtpu_first_ext_dw_0);

	caps->supp_type_gen_wqe = MLX5_GET(query_hca_cap_out, out,
					   capability.cmd_hca_cap_2.
					   generate_wqe_type);

	/* check cross-VHCA support in cap2: all three object-to-object
	 * links must be supported for cross-VHCA resources to be usable.
	 */
	res =
	MLX5_GET(query_hca_cap_out, out,
		 capability.cmd_hca_cap_2.cross_vhca_object_to_object_supported);

	caps->cross_vhca_resources = (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_STC_TO_TIR) &&
				     (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_STC_TO_FT) &&
				     (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_FT_TO_RTC);

	/* All three object types must also be allowed for other-VHCA access. */
	res =
	MLX5_GET(query_hca_cap_out, out,
		 capability.cmd_hca_cap_2.allowed_object_for_other_vhca_access);

	caps->cross_vhca_resources &= (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_TIR) &&
				      (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_FT) &&
				      (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_RTC);

	/* Third query: NIC flow table capability page. */
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to query flow table caps");
		rte_errno = errno;
		return rte_errno;
	}

	caps->nic_ft.max_level = MLX5_GET(query_hca_cap_out, out,
					  capability.flow_table_nic_cap.
					  flow_table_properties_nic_receive.max_ft_level);

	caps->nic_ft.reparse = MLX5_GET(query_hca_cap_out, out,
					capability.flow_table_nic_cap.
					flow_table_properties_nic_receive.reparse);

	caps->nic_ft.ignore_flow_level_rtc_valid =
		MLX5_GET(query_hca_cap_out,
			 out,
			 capability.flow_table_nic_cap. /* (fragment: field name continues on the next chunk line) */
			 flow_table_properties_nic_receive.ignore_flow_level_rtc_valid); /* (fragment: tail of an MLX5_GET started on the previous chunk line) */

	/* check cross-VHCA support in flow table properties */
	res =
	MLX5_GET(query_hca_cap_out, out,
		 capability.flow_table_nic_cap.flow_table_properties_nic_receive.cross_vhca_object);
	caps->cross_vhca_resources &= res;

	/* Fourth query, only when the device supports WQE-based flow table
	 * updates (checked on the first cap page): the WQE-based flow table
	 * capability page (RTC/STE/STC limits and modes).
	 */
	if (caps->wqe_based_update) {
		MLX5_SET(query_hca_cap_in, in, op_mod,
			 MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE |
			 MLX5_HCA_CAP_OPMOD_GET_CUR);

		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
		if (ret) {
			DR_LOG(ERR, "Failed to query WQE based FT caps");
			rte_errno = errno;
			return rte_errno;
		}

		caps->rtc_reparse_mode = MLX5_GET(query_hca_cap_out, out,
						  capability.wqe_based_flow_table_cap.
						  rtc_reparse_mode);

		caps->ste_format = MLX5_GET(query_hca_cap_out, out,
					    capability.wqe_based_flow_table_cap.
					    ste_format);

		caps->rtc_index_mode = MLX5_GET(query_hca_cap_out, out,
						capability.wqe_based_flow_table_cap.
						rtc_index_mode);

		caps->rtc_log_depth_max = MLX5_GET(query_hca_cap_out, out,
						   capability.wqe_based_flow_table_cap.
						   rtc_log_depth_max);

		caps->ste_alloc_log_max = MLX5_GET(query_hca_cap_out, out,
						   capability.wqe_based_flow_table_cap.
						   ste_alloc_log_max);

		caps->ste_alloc_log_gran = MLX5_GET(query_hca_cap_out, out,
						    capability.wqe_based_flow_table_cap.
						    ste_alloc_log_granularity);

		caps->trivial_match_definer = MLX5_GET(query_hca_cap_out, out,
						       capability.wqe_based_flow_table_cap.
						       trivial_match_definer);

		caps->stc_alloc_log_max = MLX5_GET(query_hca_cap_out, out,
						   capability.wqe_based_flow_table_cap.
						   stc_alloc_log_max);

		caps->stc_alloc_log_gran = MLX5_GET(query_hca_cap_out, out,
						    capability.wqe_based_flow_table_cap. /* (fragment: field name continues on the next chunk line) */
						    stc_alloc_log_granularity); /* (fragment: tail of an MLX5_GET started on the previous chunk line) */

		caps->rtc_hash_split_table = MLX5_GET(query_hca_cap_out, out,
						      capability.wqe_based_flow_table_cap.
						      rtc_hash_split_table);

		caps->rtc_linear_lookup_table = MLX5_GET(query_hca_cap_out, out,
							 capability.wqe_based_flow_table_cap.
							 rtc_linear_lookup_table);

		caps->access_index_mode = MLX5_GET(query_hca_cap_out, out,
						   capability.wqe_based_flow_table_cap.
						   access_index_mode);

		caps->linear_match_definer = MLX5_GET(query_hca_cap_out, out,
						      capability.wqe_based_flow_table_cap.
						      linear_match_definer_reg_c3);

		caps->rtc_max_hash_def_gen_wqe = MLX5_GET(query_hca_cap_out, out,
							  capability.wqe_based_flow_table_cap.
							  rtc_max_num_hash_definer_gen_wqe);

		caps->supp_ste_format_gen_wqe = MLX5_GET(query_hca_cap_out, out,
							 capability.wqe_based_flow_table_cap.
							 ste_format_gen_wqe);
	}

	/* Eswitch-manager only: query the eswitch (FDB) flow table page.
	 * NOTE(review): the result is read through the flow_table_nic_cap
	 * layout -- presumably the esw page shares that layout; confirm
	 * against the PRM.
	 */
	if (caps->eswitch_manager) {
		MLX5_SET(query_hca_cap_in, in, op_mod,
			 MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE |
			 MLX5_HCA_CAP_OPMOD_GET_CUR);

		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
		if (ret) {
			DR_LOG(ERR, "Failed to query flow table esw caps");
			rte_errno = errno;
			return rte_errno;
		}

		caps->fdb_ft.max_level = MLX5_GET(query_hca_cap_out, out,
						  capability.flow_table_nic_cap.
						  flow_table_properties_nic_receive.max_ft_level);

		caps->fdb_ft.reparse = MLX5_GET(query_hca_cap_out, out,
						capability.flow_table_nic_cap. /* (fragment: field name continues on the next chunk line) */
						flow_table_properties_nic_receive.reparse); /* (fragment: tail of an MLX5_GET started on the previous chunk line) */

		/* Eswitch capability page: fetch the eswitch manager vport
		 * number only when the device marks it valid.
		 */
		MLX5_SET(query_hca_cap_in, in, op_mod,
			 MLX5_SET_HCA_CAP_OP_MOD_ESW | MLX5_HCA_CAP_OPMOD_GET_CUR);

		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
		if (ret) {
			DR_LOG(ERR, "Query eswitch capabilities failed %d", ret);
			rte_errno = errno;
			return rte_errno;
		}

		if (MLX5_GET(query_hca_cap_out, out,
			     capability.esw_cap.esw_manager_vport_number_valid))
			caps->eswitch_manager_vport_number =
				MLX5_GET(query_hca_cap_out, out,
					 capability.esw_cap.esw_manager_vport_number);
	}

	/* Non-DevX queries: ibv device attributes (for the FW version string)
	 * and the wire port's REG_C metadata values.
	 */
	ret = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
	if (ret) {
		DR_LOG(ERR, "Failed to query device attributes");
		rte_errno = ret;
		return rte_errno;
	}

	strlcpy(caps->fw_ver, attr_ex.orig_attr.fw_ver, sizeof(caps->fw_ver));

	port_info = flow_hw_get_wire_port(ctx);
	if (port_info) {
		caps->wire_regc = port_info->regc_value;
		caps->wire_regc_mask = port_info->regc_mask;
	} else {
		/* Best-effort: missing wire port info is logged, not fatal. */
		DR_LOG(INFO, "Failed to query wire port regc value");
	}

	return ret;
}

/* Query vport and eswitch-owner details for an IB port.
 *
 * Fails with ENOTSUP unless the glue port query succeeds AND reports
 * both the vport and the eswitch-owner VHCA id; REG_C0 metadata is
 * copied only when the query flags say it is available.
 *
 * @ctx:        ibv device context.
 * @vport_caps: output: vport number, esw owner VHCA id, optional metadata.
 * @port_num:   IB port number to query.
 * Return: 0 on success, rte_errno (ENOTSUP) on failure.
 */
int mlx5dr_cmd_query_ib_port(struct ibv_context *ctx,
			     struct mlx5dr_cmd_query_vport_caps *vport_caps,
			     uint32_t port_num)
{
	struct mlx5_port_info port_info = {0};
	uint32_t flags;
	int ret;

	flags = MLX5_PORT_QUERY_VPORT | MLX5_PORT_QUERY_ESW_OWNER_VHCA_ID;

	ret = mlx5_glue->devx_port_query(ctx, port_num, &port_info);
	/* Check if query succeed and vport is enabled */
	if (ret || (port_info.query_flags & flags) != flags) {
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	vport_caps->vport_num = port_info.vport_id;
	vport_caps->esw_owner_vhca_id = port_info.esw_owner_vhca_id;

	/* REG_C0 metadata tag/mask are optional; copy only when reported. */
	if (port_info.query_flags & MLX5_PORT_QUERY_REG_C0) {
		vport_caps->metadata_c = port_info.vport_meta_tag;
		vport_caps->metadata_c_mask = /* (fragment: RHS continues on the next chunk line) */
port_info.vport_meta_mask; /* (fragment: tail of an assignment started on the previous chunk line) */
	}

	return 0;
}