1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2022 NVIDIA Corporation & Affiliates 3 */ 4 5 #include "mlx5dr_internal.h" 6 7 static uint32_t mlx5dr_cmd_get_syndrome(uint32_t *out) 8 { 9 /* Assumption: syndrome is always the second u32 */ 10 return be32toh(out[1]); 11 } 12 13 int mlx5dr_cmd_destroy_obj(struct mlx5dr_devx_obj *devx_obj) 14 { 15 int ret; 16 17 ret = mlx5_glue->devx_obj_destroy(devx_obj->obj); 18 simple_free(devx_obj); 19 20 return ret; 21 } 22 23 struct mlx5dr_devx_obj * 24 mlx5dr_cmd_flow_table_create(struct ibv_context *ctx, 25 struct mlx5dr_cmd_ft_create_attr *ft_attr) 26 { 27 uint32_t out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0}; 28 uint32_t in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0}; 29 struct mlx5dr_devx_obj *devx_obj; 30 void *ft_ctx; 31 32 devx_obj = simple_malloc(sizeof(*devx_obj)); 33 if (!devx_obj) { 34 DR_LOG(ERR, "Failed to allocate memory for flow table object"); 35 rte_errno = ENOMEM; 36 return NULL; 37 } 38 39 MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE); 40 MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type); 41 42 ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context); 43 MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level); 44 MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid); 45 MLX5_SET(flow_table_context, ft_ctx, reformat_en, ft_attr->reformat_en); 46 47 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 48 if (!devx_obj->obj) { 49 DR_LOG(ERR, "Failed to create FT (syndrome: %#x)", 50 mlx5dr_cmd_get_syndrome(out)); 51 simple_free(devx_obj); 52 rte_errno = errno; 53 return NULL; 54 } 55 56 devx_obj->id = MLX5_GET(create_flow_table_out, out, table_id); 57 58 return devx_obj; 59 } 60 61 int 62 mlx5dr_cmd_flow_table_modify(struct mlx5dr_devx_obj *devx_obj, 63 struct mlx5dr_cmd_ft_modify_attr *ft_attr) 64 { 65 uint32_t out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0}; 66 uint32_t 
in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0}; 67 void *ft_ctx; 68 int ret; 69 70 MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE); 71 MLX5_SET(modify_flow_table_in, in, table_type, ft_attr->type); 72 MLX5_SET(modify_flow_table_in, in, modify_field_select, ft_attr->modify_fs); 73 MLX5_SET(modify_flow_table_in, in, table_id, devx_obj->id); 74 75 ft_ctx = MLX5_ADDR_OF(modify_flow_table_in, in, flow_table_context); 76 77 MLX5_SET(flow_table_context, ft_ctx, table_miss_action, ft_attr->table_miss_action); 78 MLX5_SET(flow_table_context, ft_ctx, table_miss_id, ft_attr->table_miss_id); 79 MLX5_SET(flow_table_context, ft_ctx, rtc_id_0, ft_attr->rtc_id_0); 80 MLX5_SET(flow_table_context, ft_ctx, rtc_id_1, ft_attr->rtc_id_1); 81 82 ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out)); 83 if (ret) { 84 DR_LOG(ERR, "Failed to modify FT (syndrome: %#x)", 85 mlx5dr_cmd_get_syndrome(out)); 86 rte_errno = errno; 87 } 88 89 return ret; 90 } 91 92 int 93 mlx5dr_cmd_flow_table_query(struct mlx5dr_devx_obj *devx_obj, 94 struct mlx5dr_cmd_ft_query_attr *ft_attr, 95 uint64_t *icm_addr_0, uint64_t *icm_addr_1) 96 { 97 uint32_t out[MLX5_ST_SZ_DW(query_flow_table_out)] = {0}; 98 uint32_t in[MLX5_ST_SZ_DW(query_flow_table_in)] = {0}; 99 void *ft_ctx; 100 int ret; 101 102 MLX5_SET(query_flow_table_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_TABLE); 103 MLX5_SET(query_flow_table_in, in, table_type, ft_attr->type); 104 MLX5_SET(query_flow_table_in, in, table_id, devx_obj->id); 105 106 ret = mlx5_glue->devx_obj_query(devx_obj->obj, in, sizeof(in), out, sizeof(out)); 107 if (ret) { 108 DR_LOG(ERR, "Failed to query FT (syndrome: %#x)", 109 mlx5dr_cmd_get_syndrome(out)); 110 rte_errno = errno; 111 return ret; 112 } 113 114 ft_ctx = MLX5_ADDR_OF(query_flow_table_out, out, flow_table_context); 115 *icm_addr_0 = MLX5_GET64(flow_table_context, ft_ctx, sw_owner_icm_root_0); 116 *icm_addr_1 = MLX5_GET64(flow_table_context, ft_ctx, sw_owner_icm_root_1); 
117 118 return ret; 119 } 120 121 static struct mlx5dr_devx_obj * 122 mlx5dr_cmd_flow_group_create(struct ibv_context *ctx, 123 struct mlx5dr_cmd_fg_attr *fg_attr) 124 { 125 uint32_t out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0}; 126 uint32_t in[MLX5_ST_SZ_DW(create_flow_group_in)] = {0}; 127 struct mlx5dr_devx_obj *devx_obj; 128 129 devx_obj = simple_malloc(sizeof(*devx_obj)); 130 if (!devx_obj) { 131 DR_LOG(ERR, "Failed to allocate memory for flow group object"); 132 rte_errno = ENOMEM; 133 return NULL; 134 } 135 136 MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP); 137 MLX5_SET(create_flow_group_in, in, table_type, fg_attr->table_type); 138 MLX5_SET(create_flow_group_in, in, table_id, fg_attr->table_id); 139 140 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 141 if (!devx_obj->obj) { 142 DR_LOG(ERR, "Failed to create Flow group(syndrome: %#x)", 143 mlx5dr_cmd_get_syndrome(out)); 144 simple_free(devx_obj); 145 rte_errno = errno; 146 return NULL; 147 } 148 149 devx_obj->id = MLX5_GET(create_flow_group_out, out, group_id); 150 151 return devx_obj; 152 } 153 154 struct mlx5dr_devx_obj * 155 mlx5dr_cmd_set_fte(struct ibv_context *ctx, 156 uint32_t table_type, 157 uint32_t table_id, 158 uint32_t group_id, 159 struct mlx5dr_cmd_set_fte_attr *fte_attr) 160 { 161 uint32_t out[MLX5_ST_SZ_DW(set_fte_out)] = {0}; 162 struct mlx5dr_devx_obj *devx_obj; 163 uint32_t dest_entry_sz; 164 uint32_t total_dest_sz; 165 void *in_flow_context; 166 uint32_t action_flags; 167 uint8_t *in_dests; 168 uint32_t inlen; 169 uint32_t *in; 170 uint32_t i; 171 172 dest_entry_sz = fte_attr->extended_dest ? 
173 MLX5_ST_SZ_BYTES(extended_dest_format) : 174 MLX5_ST_SZ_BYTES(dest_format); 175 total_dest_sz = dest_entry_sz * fte_attr->dests_num; 176 inlen = align((MLX5_ST_SZ_BYTES(set_fte_in) + total_dest_sz), DW_SIZE); 177 in = simple_calloc(1, inlen); 178 if (!in) { 179 rte_errno = ENOMEM; 180 return NULL; 181 } 182 183 devx_obj = simple_malloc(sizeof(*devx_obj)); 184 if (!devx_obj) { 185 DR_LOG(ERR, "Failed to allocate memory for fte object"); 186 rte_errno = ENOMEM; 187 goto free_in; 188 } 189 190 MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY); 191 MLX5_SET(set_fte_in, in, table_type, table_type); 192 MLX5_SET(set_fte_in, in, table_id, table_id); 193 194 in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context); 195 MLX5_SET(flow_context, in_flow_context, group_id, group_id); 196 MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source); 197 MLX5_SET(flow_context, in_flow_context, extended_destination, fte_attr->extended_dest); 198 MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level); 199 200 action_flags = fte_attr->action_flags; 201 MLX5_SET(flow_context, in_flow_context, action, action_flags); 202 203 if (action_flags & MLX5_FLOW_CONTEXT_ACTION_REFORMAT) 204 MLX5_SET(flow_context, in_flow_context, 205 packet_reformat_id, fte_attr->packet_reformat_id); 206 207 if (action_flags & (MLX5_FLOW_CONTEXT_ACTION_DECRYPT | MLX5_FLOW_CONTEXT_ACTION_ENCRYPT)) { 208 MLX5_SET(flow_context, in_flow_context, 209 encrypt_decrypt_type, fte_attr->encrypt_decrypt_type); 210 MLX5_SET(flow_context, in_flow_context, 211 encrypt_decrypt_obj_id, fte_attr->encrypt_decrypt_obj_id); 212 } 213 214 if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { 215 in_dests = (uint8_t *)MLX5_ADDR_OF(flow_context, in_flow_context, destination); 216 217 for (i = 0; i < fte_attr->dests_num; i++) { 218 struct mlx5dr_cmd_set_fte_dest *dest = &fte_attr->dests[i]; 219 220 switch (dest->destination_type) { 221 case MLX5_FLOW_DESTINATION_TYPE_VPORT: 
222 if (dest->ext_flags & MLX5DR_CMD_EXT_DEST_ESW_OWNER_VHCA_ID) { 223 MLX5_SET(dest_format, in_dests, 224 destination_eswitch_owner_vhca_id_valid, 1); 225 MLX5_SET(dest_format, in_dests, 226 destination_eswitch_owner_vhca_id, 227 dest->esw_owner_vhca_id); 228 } 229 /* Fall through */ 230 case MLX5_FLOW_DESTINATION_TYPE_TIR: 231 case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: 232 MLX5_SET(dest_format, in_dests, destination_type, 233 dest->destination_type); 234 MLX5_SET(dest_format, in_dests, destination_id, 235 dest->destination_id); 236 if (dest->ext_flags & MLX5DR_CMD_EXT_DEST_REFORMAT) { 237 MLX5_SET(dest_format, in_dests, packet_reformat, 1); 238 MLX5_SET(extended_dest_format, in_dests, packet_reformat_id, 239 dest->ext_reformat->id); 240 } 241 break; 242 default: 243 rte_errno = EOPNOTSUPP; 244 goto free_devx; 245 } 246 247 in_dests = in_dests + dest_entry_sz; 248 } 249 MLX5_SET(flow_context, in_flow_context, destination_list_size, fte_attr->dests_num); 250 } 251 252 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out)); 253 if (!devx_obj->obj) { 254 DR_LOG(ERR, "Failed to create FTE (syndrome: %#x)", 255 mlx5dr_cmd_get_syndrome(out)); 256 rte_errno = errno; 257 goto free_devx; 258 } 259 260 simple_free(in); 261 return devx_obj; 262 263 free_devx: 264 simple_free(devx_obj); 265 free_in: 266 simple_free(in); 267 return NULL; 268 } 269 270 struct mlx5dr_cmd_forward_tbl * 271 mlx5dr_cmd_forward_tbl_create(struct ibv_context *ctx, 272 struct mlx5dr_cmd_ft_create_attr *ft_attr, 273 struct mlx5dr_cmd_set_fte_attr *fte_attr) 274 { 275 struct mlx5dr_cmd_fg_attr fg_attr = {0}; 276 struct mlx5dr_cmd_forward_tbl *tbl; 277 278 tbl = simple_calloc(1, sizeof(*tbl)); 279 if (!tbl) { 280 DR_LOG(ERR, "Failed to allocate memory"); 281 rte_errno = ENOMEM; 282 return NULL; 283 } 284 285 tbl->ft = mlx5dr_cmd_flow_table_create(ctx, ft_attr); 286 if (!tbl->ft) { 287 DR_LOG(ERR, "Failed to create FT"); 288 goto free_tbl; 289 } 290 291 fg_attr.table_id = 
tbl->ft->id; 292 fg_attr.table_type = ft_attr->type; 293 294 tbl->fg = mlx5dr_cmd_flow_group_create(ctx, &fg_attr); 295 if (!tbl->fg) { 296 DR_LOG(ERR, "Failed to create FG"); 297 goto free_ft; 298 } 299 300 tbl->fte = mlx5dr_cmd_set_fte(ctx, ft_attr->type, tbl->ft->id, tbl->fg->id, fte_attr); 301 if (!tbl->fte) { 302 DR_LOG(ERR, "Failed to create FTE"); 303 goto free_fg; 304 } 305 return tbl; 306 307 free_fg: 308 mlx5dr_cmd_destroy_obj(tbl->fg); 309 free_ft: 310 mlx5dr_cmd_destroy_obj(tbl->ft); 311 free_tbl: 312 simple_free(tbl); 313 return NULL; 314 } 315 316 void mlx5dr_cmd_forward_tbl_destroy(struct mlx5dr_cmd_forward_tbl *tbl) 317 { 318 mlx5dr_cmd_destroy_obj(tbl->fte); 319 mlx5dr_cmd_destroy_obj(tbl->fg); 320 mlx5dr_cmd_destroy_obj(tbl->ft); 321 simple_free(tbl); 322 } 323 324 void mlx5dr_cmd_set_attr_connect_miss_tbl(struct mlx5dr_context *ctx, 325 uint32_t fw_ft_type, 326 enum mlx5dr_table_type type, 327 struct mlx5dr_cmd_ft_modify_attr *ft_attr) 328 { 329 struct mlx5dr_devx_obj *default_miss_tbl; 330 331 if (type != MLX5DR_TABLE_TYPE_FDB && !mlx5dr_context_shared_gvmi_used(ctx)) 332 return; 333 334 ft_attr->modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION; 335 ft_attr->type = fw_ft_type; 336 ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL; 337 338 if (type == MLX5DR_TABLE_TYPE_FDB) { 339 default_miss_tbl = ctx->common_res[type].default_miss->ft; 340 if (!default_miss_tbl) { 341 assert(false); 342 return; 343 } 344 ft_attr->table_miss_id = default_miss_tbl->id; 345 } else { 346 ft_attr->table_miss_id = ctx->gvmi_res[type].aliased_end_ft->id; 347 } 348 } 349 350 struct mlx5dr_devx_obj * 351 mlx5dr_cmd_rtc_create(struct ibv_context *ctx, 352 struct mlx5dr_cmd_rtc_create_attr *rtc_attr) 353 { 354 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 355 uint32_t in[MLX5_ST_SZ_DW(create_rtc_in)] = {0}; 356 struct mlx5dr_devx_obj *devx_obj; 357 void *attr; 358 359 devx_obj = simple_malloc(sizeof(*devx_obj)); 360 if 
(!devx_obj) { 361 DR_LOG(ERR, "Failed to allocate memory for RTC object"); 362 rte_errno = ENOMEM; 363 return NULL; 364 } 365 366 attr = MLX5_ADDR_OF(create_rtc_in, in, hdr); 367 MLX5_SET(general_obj_in_cmd_hdr, 368 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 369 MLX5_SET(general_obj_in_cmd_hdr, 370 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_RTC); 371 372 attr = MLX5_ADDR_OF(create_rtc_in, in, rtc); 373 MLX5_SET(rtc, attr, ste_format_0, rtc_attr->is_frst_jumbo ? 374 MLX5_IFC_RTC_STE_FORMAT_11DW : 375 MLX5_IFC_RTC_STE_FORMAT_8DW); 376 377 if (rtc_attr->is_scnd_range) { 378 MLX5_SET(rtc, attr, ste_format_1, MLX5_IFC_RTC_STE_FORMAT_RANGE); 379 MLX5_SET(rtc, attr, num_match_ste, 2); 380 } 381 382 MLX5_SET(rtc, attr, pd, rtc_attr->pd); 383 MLX5_SET(rtc, attr, update_method, rtc_attr->fw_gen_wqe); 384 MLX5_SET(rtc, attr, update_index_mode, rtc_attr->update_index_mode); 385 MLX5_SET(rtc, attr, access_index_mode, rtc_attr->access_index_mode); 386 MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer); 387 MLX5_SET(rtc, attr, log_depth, rtc_attr->log_depth); 388 MLX5_SET(rtc, attr, log_hash_size, rtc_attr->log_size); 389 MLX5_SET(rtc, attr, table_type, rtc_attr->table_type); 390 MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer); 391 MLX5_SET(rtc, attr, match_definer_0, rtc_attr->match_definer_0); 392 MLX5_SET(rtc, attr, match_definer_1, rtc_attr->match_definer_1); 393 MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base); 394 MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base); 395 MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset); 396 MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id); 397 MLX5_SET(rtc, attr, reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS); 398 399 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 400 if (!devx_obj->obj) { 401 DR_LOG(ERR, "Failed to create RTC (syndrome: %#x)", 402 mlx5dr_cmd_get_syndrome(out)); 403 simple_free(devx_obj); 404 rte_errno = errno; 405 return NULL; 
406 } 407 408 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 409 410 return devx_obj; 411 } 412 413 struct mlx5dr_devx_obj * 414 mlx5dr_cmd_stc_create(struct ibv_context *ctx, 415 struct mlx5dr_cmd_stc_create_attr *stc_attr) 416 { 417 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 418 uint32_t in[MLX5_ST_SZ_DW(create_stc_in)] = {0}; 419 struct mlx5dr_devx_obj *devx_obj; 420 void *attr; 421 422 devx_obj = simple_malloc(sizeof(*devx_obj)); 423 if (!devx_obj) { 424 DR_LOG(ERR, "Failed to allocate memory for STC object"); 425 rte_errno = ENOMEM; 426 return NULL; 427 } 428 429 attr = MLX5_ADDR_OF(create_stc_in, in, hdr); 430 MLX5_SET(general_obj_in_cmd_hdr, 431 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 432 MLX5_SET(general_obj_in_cmd_hdr, 433 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STC); 434 MLX5_SET(general_obj_in_cmd_hdr, 435 attr, log_obj_range, stc_attr->log_obj_range); 436 437 attr = MLX5_ADDR_OF(create_stc_in, in, stc); 438 MLX5_SET(stc, attr, table_type, stc_attr->table_type); 439 440 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 441 if (!devx_obj->obj) { 442 DR_LOG(ERR, "Failed to create STC (syndrome: %#x)", 443 mlx5dr_cmd_get_syndrome(out)); 444 simple_free(devx_obj); 445 rte_errno = errno; 446 return NULL; 447 } 448 449 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 450 451 return devx_obj; 452 } 453 454 static int 455 mlx5dr_cmd_stc_modify_set_stc_param(struct mlx5dr_cmd_stc_modify_attr *stc_attr, 456 void *stc_parm) 457 { 458 switch (stc_attr->action_type) { 459 case MLX5_IFC_STC_ACTION_TYPE_COUNTER: 460 MLX5_SET(stc_ste_param_flow_counter, stc_parm, flow_counter_id, stc_attr->id); 461 break; 462 case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR: 463 MLX5_SET(stc_ste_param_tir, stc_parm, tirn, stc_attr->dest_tir_num); 464 break; 465 case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT: 466 MLX5_SET(stc_ste_param_table, stc_parm, table_id, stc_attr->dest_table_id); 467 break; 468 case 
MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST: 469 MLX5_SET(stc_ste_param_header_modify_list, stc_parm, 470 header_modify_pattern_id, stc_attr->modify_header.pattern_id); 471 MLX5_SET(stc_ste_param_header_modify_list, stc_parm, 472 header_modify_argument_id, stc_attr->modify_header.arg_id); 473 break; 474 case MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE: 475 MLX5_SET(stc_ste_param_remove, stc_parm, action_type, 476 MLX5_MODIFICATION_TYPE_REMOVE); 477 MLX5_SET(stc_ste_param_remove, stc_parm, decap, 478 stc_attr->remove_header.decap); 479 MLX5_SET(stc_ste_param_remove, stc_parm, remove_start_anchor, 480 stc_attr->remove_header.start_anchor); 481 MLX5_SET(stc_ste_param_remove, stc_parm, remove_end_anchor, 482 stc_attr->remove_header.end_anchor); 483 break; 484 case MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT: 485 MLX5_SET(stc_ste_param_insert, stc_parm, action_type, 486 MLX5_MODIFICATION_TYPE_INSERT); 487 MLX5_SET(stc_ste_param_insert, stc_parm, encap, 488 stc_attr->insert_header.encap); 489 MLX5_SET(stc_ste_param_insert, stc_parm, inline_data, 490 stc_attr->insert_header.is_inline); 491 MLX5_SET(stc_ste_param_insert, stc_parm, insert_anchor, 492 stc_attr->insert_header.insert_anchor); 493 /* HW gets the next 2 sizes in words */ 494 MLX5_SET(stc_ste_param_insert, stc_parm, insert_size, 495 stc_attr->insert_header.header_size / W_SIZE); 496 MLX5_SET(stc_ste_param_insert, stc_parm, insert_offset, 497 stc_attr->insert_header.insert_offset / W_SIZE); 498 MLX5_SET(stc_ste_param_insert, stc_parm, insert_argument, 499 stc_attr->insert_header.arg_id); 500 break; 501 case MLX5_IFC_STC_ACTION_TYPE_COPY: 502 case MLX5_IFC_STC_ACTION_TYPE_SET: 503 case MLX5_IFC_STC_ACTION_TYPE_ADD: 504 case MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD: 505 *(__be64 *)stc_parm = stc_attr->modify_action.data; 506 break; 507 case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT: 508 case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK: 509 MLX5_SET(stc_ste_param_vport, stc_parm, vport_number, 510 stc_attr->vport.vport_num); 511 
MLX5_SET(stc_ste_param_vport, stc_parm, eswitch_owner_vhca_id, 512 stc_attr->vport.esw_owner_vhca_id); 513 MLX5_SET(stc_ste_param_vport, stc_parm, eswitch_owner_vhca_id_valid, 1); 514 break; 515 case MLX5_IFC_STC_ACTION_TYPE_DROP: 516 case MLX5_IFC_STC_ACTION_TYPE_NOP: 517 case MLX5_IFC_STC_ACTION_TYPE_TAG: 518 case MLX5_IFC_STC_ACTION_TYPE_ALLOW: 519 break; 520 case MLX5_IFC_STC_ACTION_TYPE_ASO: 521 MLX5_SET(stc_ste_param_execute_aso, stc_parm, aso_object_id, 522 stc_attr->aso.devx_obj_id); 523 MLX5_SET(stc_ste_param_execute_aso, stc_parm, return_reg_id, 524 stc_attr->aso.return_reg_id); 525 MLX5_SET(stc_ste_param_execute_aso, stc_parm, aso_type, 526 stc_attr->aso.aso_type); 527 break; 528 case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE: 529 MLX5_SET(stc_ste_param_ste_table, stc_parm, ste_obj_id, 530 stc_attr->ste_table.ste_obj_id); 531 MLX5_SET(stc_ste_param_ste_table, stc_parm, match_definer_id, 532 stc_attr->ste_table.match_definer_id); 533 MLX5_SET(stc_ste_param_ste_table, stc_parm, log_hash_size, 534 stc_attr->ste_table.log_hash_size); 535 break; 536 case MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS: 537 MLX5_SET(stc_ste_param_remove_words, stc_parm, action_type, 538 MLX5_MODIFICATION_TYPE_REMOVE_WORDS); 539 MLX5_SET(stc_ste_param_remove_words, stc_parm, remove_start_anchor, 540 stc_attr->remove_words.start_anchor); 541 MLX5_SET(stc_ste_param_remove_words, stc_parm, 542 remove_size, stc_attr->remove_words.num_of_words); 543 break; 544 default: 545 DR_LOG(ERR, "Not supported type %d", stc_attr->action_type); 546 rte_errno = EINVAL; 547 return rte_errno; 548 } 549 return 0; 550 } 551 552 int 553 mlx5dr_cmd_stc_modify(struct mlx5dr_devx_obj *devx_obj, 554 struct mlx5dr_cmd_stc_modify_attr *stc_attr) 555 { 556 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 557 uint32_t in[MLX5_ST_SZ_DW(create_stc_in)] = {0}; 558 void *stc_parm; 559 void *attr; 560 int ret; 561 562 attr = MLX5_ADDR_OF(create_stc_in, in, hdr); 563 MLX5_SET(general_obj_in_cmd_hdr, 564 attr, 
opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT); 565 MLX5_SET(general_obj_in_cmd_hdr, 566 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STC); 567 MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, devx_obj->id); 568 MLX5_SET(general_obj_in_cmd_hdr, in, obj_offset, stc_attr->stc_offset); 569 570 attr = MLX5_ADDR_OF(create_stc_in, in, stc); 571 MLX5_SET(stc, attr, ste_action_offset, stc_attr->action_offset); 572 MLX5_SET(stc, attr, action_type, stc_attr->action_type); 573 MLX5_SET64(stc, attr, modify_field_select, 574 MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC); 575 576 /* Set destination TIRN, TAG, FT ID, STE ID */ 577 stc_parm = MLX5_ADDR_OF(stc, attr, stc_param); 578 ret = mlx5dr_cmd_stc_modify_set_stc_param(stc_attr, stc_parm); 579 if (ret) 580 return ret; 581 582 ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out)); 583 if (ret) { 584 DR_LOG(ERR, "Failed to modify STC FW action_type %d (syndrome: %#x)", 585 stc_attr->action_type, mlx5dr_cmd_get_syndrome(out)); 586 rte_errno = errno; 587 } 588 589 return ret; 590 } 591 592 struct mlx5dr_devx_obj * 593 mlx5dr_cmd_arg_create(struct ibv_context *ctx, 594 uint16_t log_obj_range, 595 uint32_t pd) 596 { 597 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 598 uint32_t in[MLX5_ST_SZ_DW(create_arg_in)] = {0}; 599 struct mlx5dr_devx_obj *devx_obj; 600 void *attr; 601 602 devx_obj = simple_malloc(sizeof(*devx_obj)); 603 if (!devx_obj) { 604 DR_LOG(ERR, "Failed to allocate memory for ARG object"); 605 rte_errno = ENOMEM; 606 return NULL; 607 } 608 609 attr = MLX5_ADDR_OF(create_arg_in, in, hdr); 610 MLX5_SET(general_obj_in_cmd_hdr, 611 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 612 MLX5_SET(general_obj_in_cmd_hdr, 613 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_ARG); 614 MLX5_SET(general_obj_in_cmd_hdr, 615 attr, log_obj_range, log_obj_range); 616 617 attr = MLX5_ADDR_OF(create_arg_in, in, arg); 618 MLX5_SET(arg, attr, access_pd, pd); 619 620 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, 
sizeof(in), out, sizeof(out)); 621 if (!devx_obj->obj) { 622 DR_LOG(ERR, "Failed to create ARG (syndrome: %#x)", 623 mlx5dr_cmd_get_syndrome(out)); 624 simple_free(devx_obj); 625 rte_errno = errno; 626 return NULL; 627 } 628 629 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 630 631 return devx_obj; 632 } 633 634 struct mlx5dr_devx_obj * 635 mlx5dr_cmd_header_modify_pattern_create(struct ibv_context *ctx, 636 uint32_t pattern_length, 637 uint8_t *actions) 638 { 639 uint32_t in[MLX5_ST_SZ_DW(create_header_modify_pattern_in)] = {0}; 640 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 641 struct mlx5dr_devx_obj *devx_obj; 642 uint64_t *pattern_data; 643 int num_of_actions; 644 void *pattern; 645 void *attr; 646 int i; 647 648 if (pattern_length > MAX_ACTIONS_DATA_IN_HEADER_MODIFY) { 649 DR_LOG(ERR, "Pattern length %d exceeds limit %d", 650 pattern_length, MAX_ACTIONS_DATA_IN_HEADER_MODIFY); 651 rte_errno = EINVAL; 652 return NULL; 653 } 654 655 devx_obj = simple_malloc(sizeof(*devx_obj)); 656 if (!devx_obj) { 657 DR_LOG(ERR, "Failed to allocate memory for header_modify_pattern object"); 658 rte_errno = ENOMEM; 659 return NULL; 660 } 661 attr = MLX5_ADDR_OF(create_header_modify_pattern_in, in, hdr); 662 MLX5_SET(general_obj_in_cmd_hdr, 663 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 664 MLX5_SET(general_obj_in_cmd_hdr, 665 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_MODIFY_HEADER_PATTERN); 666 667 pattern = MLX5_ADDR_OF(create_header_modify_pattern_in, in, pattern); 668 /* Pattern_length is in ddwords */ 669 MLX5_SET(header_modify_pattern_in, pattern, pattern_length, pattern_length / (2 * DW_SIZE)); 670 671 pattern_data = (uint64_t *)MLX5_ADDR_OF(header_modify_pattern_in, pattern, pattern_data); 672 memcpy(pattern_data, actions, pattern_length); 673 674 num_of_actions = pattern_length / MLX5DR_MODIFY_ACTION_SIZE; 675 for (i = 0; i < num_of_actions; i++) { 676 int type; 677 678 type = MLX5_GET(set_action_in, &pattern_data[i], action_type); 
679 if (type != MLX5_MODIFICATION_TYPE_COPY && 680 type != MLX5_MODIFICATION_TYPE_ADD_FIELD) 681 /* Action typ-copy use all bytes for control */ 682 MLX5_SET(set_action_in, &pattern_data[i], data, 0); 683 } 684 685 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 686 if (!devx_obj->obj) { 687 DR_LOG(ERR, "Failed to create header_modify_pattern (syndrome: %#x)", 688 mlx5dr_cmd_get_syndrome(out)); 689 rte_errno = errno; 690 goto free_obj; 691 } 692 693 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 694 695 return devx_obj; 696 697 free_obj: 698 simple_free(devx_obj); 699 return NULL; 700 } 701 702 struct mlx5dr_devx_obj * 703 mlx5dr_cmd_ste_create(struct ibv_context *ctx, 704 struct mlx5dr_cmd_ste_create_attr *ste_attr) 705 { 706 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 707 uint32_t in[MLX5_ST_SZ_DW(create_ste_in)] = {0}; 708 struct mlx5dr_devx_obj *devx_obj; 709 void *attr; 710 711 devx_obj = simple_malloc(sizeof(*devx_obj)); 712 if (!devx_obj) { 713 DR_LOG(ERR, "Failed to allocate memory for STE object"); 714 rte_errno = ENOMEM; 715 return NULL; 716 } 717 718 attr = MLX5_ADDR_OF(create_ste_in, in, hdr); 719 MLX5_SET(general_obj_in_cmd_hdr, 720 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 721 MLX5_SET(general_obj_in_cmd_hdr, 722 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STE); 723 MLX5_SET(general_obj_in_cmd_hdr, 724 attr, log_obj_range, ste_attr->log_obj_range); 725 726 attr = MLX5_ADDR_OF(create_ste_in, in, ste); 727 MLX5_SET(ste, attr, table_type, ste_attr->table_type); 728 729 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 730 if (!devx_obj->obj) { 731 DR_LOG(ERR, "Failed to create STE (syndrome: %#x)", 732 mlx5dr_cmd_get_syndrome(out)); 733 simple_free(devx_obj); 734 rte_errno = errno; 735 return NULL; 736 } 737 738 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 739 740 return devx_obj; 741 } 742 743 struct mlx5dr_devx_obj * 744 
mlx5dr_cmd_definer_create(struct ibv_context *ctx, 745 struct mlx5dr_cmd_definer_create_attr *def_attr) 746 { 747 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 748 uint32_t in[MLX5_ST_SZ_DW(create_definer_in)] = {0}; 749 struct mlx5dr_devx_obj *devx_obj; 750 void *ptr; 751 752 devx_obj = simple_malloc(sizeof(*devx_obj)); 753 if (!devx_obj) { 754 DR_LOG(ERR, "Failed to allocate memory for definer object"); 755 rte_errno = ENOMEM; 756 return NULL; 757 } 758 759 MLX5_SET(general_obj_in_cmd_hdr, 760 in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 761 MLX5_SET(general_obj_in_cmd_hdr, 762 in, obj_type, MLX5_GENERAL_OBJ_TYPE_DEFINER); 763 764 ptr = MLX5_ADDR_OF(create_definer_in, in, definer); 765 MLX5_SET(definer, ptr, format_id, MLX5_IFC_DEFINER_FORMAT_ID_SELECT); 766 767 MLX5_SET(definer, ptr, format_select_dw0, def_attr->dw_selector[0]); 768 MLX5_SET(definer, ptr, format_select_dw1, def_attr->dw_selector[1]); 769 MLX5_SET(definer, ptr, format_select_dw2, def_attr->dw_selector[2]); 770 MLX5_SET(definer, ptr, format_select_dw3, def_attr->dw_selector[3]); 771 MLX5_SET(definer, ptr, format_select_dw4, def_attr->dw_selector[4]); 772 MLX5_SET(definer, ptr, format_select_dw5, def_attr->dw_selector[5]); 773 MLX5_SET(definer, ptr, format_select_dw6, def_attr->dw_selector[6]); 774 MLX5_SET(definer, ptr, format_select_dw7, def_attr->dw_selector[7]); 775 MLX5_SET(definer, ptr, format_select_dw8, def_attr->dw_selector[8]); 776 777 MLX5_SET(definer, ptr, format_select_byte0, def_attr->byte_selector[0]); 778 MLX5_SET(definer, ptr, format_select_byte1, def_attr->byte_selector[1]); 779 MLX5_SET(definer, ptr, format_select_byte2, def_attr->byte_selector[2]); 780 MLX5_SET(definer, ptr, format_select_byte3, def_attr->byte_selector[3]); 781 MLX5_SET(definer, ptr, format_select_byte4, def_attr->byte_selector[4]); 782 MLX5_SET(definer, ptr, format_select_byte5, def_attr->byte_selector[5]); 783 MLX5_SET(definer, ptr, format_select_byte6, def_attr->byte_selector[6]); 784 
MLX5_SET(definer, ptr, format_select_byte7, def_attr->byte_selector[7]); 785 786 ptr = MLX5_ADDR_OF(definer, ptr, match_mask); 787 memcpy(ptr, def_attr->match_mask, MLX5_FLD_SZ_BYTES(definer, match_mask)); 788 789 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 790 if (!devx_obj->obj) { 791 DR_LOG(ERR, "Failed to create Definer (syndrome: %#x)", 792 mlx5dr_cmd_get_syndrome(out)); 793 simple_free(devx_obj); 794 rte_errno = errno; 795 return NULL; 796 } 797 798 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 799 800 return devx_obj; 801 } 802 803 struct mlx5dr_devx_obj * 804 mlx5dr_cmd_sq_create(struct ibv_context *ctx, 805 struct mlx5dr_cmd_sq_create_attr *attr) 806 { 807 uint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0}; 808 uint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0}; 809 void *sqc = MLX5_ADDR_OF(create_sq_in, in, ctx); 810 void *wqc = MLX5_ADDR_OF(sqc, sqc, wq); 811 struct mlx5dr_devx_obj *devx_obj; 812 813 devx_obj = simple_malloc(sizeof(*devx_obj)); 814 if (!devx_obj) { 815 DR_LOG(ERR, "Failed to create SQ"); 816 rte_errno = ENOMEM; 817 return NULL; 818 } 819 820 MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ); 821 MLX5_SET(sqc, sqc, cqn, attr->cqn); 822 MLX5_SET(sqc, sqc, flush_in_error_en, 1); 823 MLX5_SET(sqc, sqc, non_wire, 1); 824 MLX5_SET(sqc, sqc, ts_format, attr->ts_format); 825 MLX5_SET(wq, wqc, wq_type, MLX5_WQ_TYPE_CYCLIC); 826 MLX5_SET(wq, wqc, pd, attr->pdn); 827 MLX5_SET(wq, wqc, uar_page, attr->page_id); 828 MLX5_SET(wq, wqc, log_wq_stride, log2above(MLX5_SEND_WQE_BB)); 829 MLX5_SET(wq, wqc, log_wq_sz, attr->log_wq_sz); 830 MLX5_SET(wq, wqc, dbr_umem_id, attr->dbr_id); 831 MLX5_SET(wq, wqc, wq_umem_id, attr->wq_id); 832 833 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 834 if (!devx_obj->obj) { 835 simple_free(devx_obj); 836 rte_errno = errno; 837 return NULL; 838 } 839 840 devx_obj->id = MLX5_GET(create_sq_out, out, sqn); 841 842 return devx_obj; 
843 } 844 845 struct mlx5dr_devx_obj * 846 mlx5dr_cmd_packet_reformat_create(struct ibv_context *ctx, 847 struct mlx5dr_cmd_packet_reformat_create_attr *attr) 848 { 849 uint32_t out[MLX5_ST_SZ_DW(alloc_packet_reformat_out)] = {0}; 850 size_t insz, cmd_data_sz, cmd_total_sz; 851 struct mlx5dr_devx_obj *devx_obj; 852 void *prctx; 853 void *pdata; 854 void *in; 855 856 cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in); 857 cmd_total_sz += MLX5_ST_SZ_BYTES(packet_reformat_context_in); 858 cmd_data_sz = MLX5_FLD_SZ_BYTES(packet_reformat_context_in, reformat_data); 859 insz = align(cmd_total_sz + attr->data_sz - cmd_data_sz, DW_SIZE); 860 in = simple_calloc(1, insz); 861 if (!in) { 862 rte_errno = ENOMEM; 863 return NULL; 864 } 865 866 MLX5_SET(alloc_packet_reformat_context_in, in, opcode, 867 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT); 868 869 prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in, 870 packet_reformat_context); 871 pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data); 872 873 MLX5_SET(packet_reformat_context_in, prctx, reformat_type, attr->type); 874 MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, attr->reformat_param_0); 875 MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, attr->data_sz); 876 memcpy(pdata, attr->data, attr->data_sz); 877 878 devx_obj = simple_malloc(sizeof(*devx_obj)); 879 if (!devx_obj) { 880 DR_LOG(ERR, "Failed to allocate memory for packet reformat object"); 881 rte_errno = ENOMEM; 882 goto out_free_in; 883 } 884 885 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, insz, out, sizeof(out)); 886 if (!devx_obj->obj) { 887 DR_LOG(ERR, "Failed to create packet reformat"); 888 rte_errno = errno; 889 goto out_free_devx; 890 } 891 892 devx_obj->id = MLX5_GET(alloc_packet_reformat_out, out, packet_reformat_id); 893 894 simple_free(in); 895 896 return devx_obj; 897 898 out_free_devx: 899 simple_free(devx_obj); 900 out_free_in: 901 simple_free(in); 902 return NULL; 903 } 904 
/* Move an SQ to ready state via MODIFY_SQ.
 *
 * The command names the transition: sq_state carries the expected current
 * state (RST) and the SQ context's state field carries the target (RDY) —
 * per the MODIFY_SQ command layout.
 *
 * Returns 0 on success; non-zero with rte_errno set on failure.
 */
int mlx5dr_cmd_sq_modify_rdy(struct mlx5dr_devx_obj *devx_obj)
{
	uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
	void *sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	int ret;

	MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
	MLX5_SET(modify_sq_in, in, sqn, devx_obj->id);
	MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);

	ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to modify SQ (syndrome: %#x)",
		       mlx5dr_cmd_get_syndrome(out));
		rte_errno = errno;
	}

	return ret;
}

/* Grant another VHCA access to one of our objects
 * (ALLOW_OTHER_VHCA_ACCESS command).
 *
 * @attr: object type/id to expose and the shared access key the remote
 *        side must present when creating an alias to it.
 *
 * Returns 0 on success; rte_errno (also set) on failure.
 */
int mlx5dr_cmd_allow_other_vhca_access(struct ibv_context *ctx,
				       struct mlx5dr_cmd_allow_other_vhca_access_attr *attr)
{
	uint32_t out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {0};
	void *key;
	int ret;

	MLX5_SET(allow_other_vhca_access_in,
		 in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
	MLX5_SET(allow_other_vhca_access_in,
		 in, object_type_to_be_accessed, attr->obj_type);
	MLX5_SET(allow_other_vhca_access_in,
		 in, object_id_to_be_accessed, attr->obj_id);

	key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
	memcpy(key, attr->access_key, sizeof(attr->access_key));

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to execute ALLOW_OTHER_VHCA_ACCESS command");
		rte_errno = errno;
		return rte_errno;
	}

	return 0;
}

/* Create an ALIAS general object pointing at an object on another VHCA.
 *
 * The alias is created with CREATE_GENERAL_OBJECT (alias_object bit set);
 * the alias context names the remote vhca_id/obj_id and carries the access
 * key that the remote side registered via
 * mlx5dr_cmd_allow_other_vhca_access().
 *
 * Returns the alias devx object, or NULL with rte_errno set.
 */
struct mlx5dr_devx_obj *
mlx5dr_cmd_alias_obj_create(struct ibv_context *ctx,
			    struct mlx5dr_cmd_alias_obj_create_attr *alias_attr)
{
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {0};
	struct mlx5dr_devx_obj *devx_obj;
	void *attr;
	void *key;

	devx_obj = simple_malloc(sizeof(*devx_obj));
	if (!devx_obj) {
		DR_LOG(ERR, "Failed to allocate memory for ALIAS general object");
		rte_errno = ENOMEM;
		return NULL;
	}

	/* General object command header: create an alias of the given type */
	attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, obj_type, alias_attr->obj_type);
	MLX5_SET(general_obj_in_cmd_hdr, attr, alias_object, 1);

	/* Alias context: which remote object to bind to */
	attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
	MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
	MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);

	key = MLX5_ADDR_OF(alias_context, attr, access_key);
	memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));

	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
	if (!devx_obj->obj) {
		DR_LOG(ERR, "Failed to create ALIAS OBJ (syndrome: %#x)",
		       mlx5dr_cmd_get_syndrome(out));
		simple_free(devx_obj);
		rte_errno = errno;
		return NULL;
	}

	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return devx_obj;
}

/* Execute a GTA WQE through FW using the GENERATE_WQE command.
 *
 * Copies the caller's WQE control segments (ctrl, gta_ctrl, gta_data_0 and
 * optionally gta_data_1) into the command input, runs the command, and on
 * success copies the resulting CQE into @ret_cqe.
 *
 * Returns 0 on success; rte_errno (also set) on command failure or on a
 * non-zero FW CQE status (EINVAL).
 */
int mlx5dr_cmd_generate_wqe(struct ibv_context *ctx,
			    struct mlx5dr_cmd_generate_wqe_attr *attr,
			    struct mlx5_cqe64 *ret_cqe)
{
	uint32_t out[MLX5_ST_SZ_DW(generate_wqe_out)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(generate_wqe_in)] = {0};
	uint8_t status;
	void *ptr;
	int ret;

	MLX5_SET(generate_wqe_in, in, opcode, MLX5_CMD_OP_GENERATE_WQE);
	MLX5_SET(generate_wqe_in, in, pdn, attr->pdn);

	ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_ctrl);
	memcpy(ptr, attr->wqe_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_ctrl));

	ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_ctrl);
	memcpy(ptr, attr->gta_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_ctrl));

	ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_0);
	memcpy(ptr, attr->gta_data_0, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_0));

	/* Second data segment is optional */
	if (attr->gta_data_1) {
		ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_1);
		memcpy(ptr, attr->gta_data_1, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_1));
	}

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to write GTA WQE using FW");
		rte_errno = errno;
		return rte_errno;
	}

	/* The command itself can succeed while the emulated CQE reports
	 * an error — check the returned status explicitly.
	 */
	status = MLX5_GET(generate_wqe_out, out, status);
	if (status) {
		DR_LOG(ERR, "Invalid FW CQE status %d", status);
		rte_errno = EINVAL;
		return rte_errno;
	}

	ptr = MLX5_ADDR_OF(generate_wqe_out, out, cqe_data);
	memcpy(ret_cqe, ptr, sizeof(*ret_cqe));

	return 0;
}

/* Populate @caps with every HCA capability the HWS layer relies on.
 *
 * Issues a sequence of QUERY_HCA_CAP commands, reusing the same in/out
 * buffers with a different op_mod each time:
 *   1. GENERAL_DEVICE   - base caps (wqe_based_update, eswitch_manager,
 *                         modify-header argument sizing, definer formats,
 *                         vhca_id, SQ timestamp format, ipsec).
 *   2. GENERAL_DEVICE_2 - format-select DWs, generate-WQE support and
 *                         cross-VHCA object support.
 *   3. NIC_FLOW_TABLE   - NIC RX flow table properties.
 *   4. WQE_BASED_FLOW_TABLE (only if wqe_based_update) - RTC/STC/STE caps.
 *   5. ESW / ESW_FLOW_TABLE (only if eswitch_manager) - FDB caps and
 *                         eswitch manager vport.
 * Finally queries device attributes for the FW version string and the wire
 * port metadata (regc) values.
 *
 * Returns 0 on success; rte_errno (also set) on any query failure.
 */
int mlx5dr_cmd_query_caps(struct ibv_context *ctx,
			  struct mlx5dr_cmd_query_caps *caps)
{
	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
	const struct flow_hw_port_info *port_info;
	struct ibv_device_attr_ex attr_ex;
	u32 res;
	int ret;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to query device caps");
		rte_errno = errno;
		return rte_errno;
	}

	caps->wqe_based_update =
		MLX5_GET(query_hca_cap_out, out,
			 capability.cmd_hca_cap.wqe_based_flow_table_update_cap);

	caps->eswitch_manager = MLX5_GET(query_hca_cap_out, out,
					 capability.cmd_hca_cap.eswitch_manager);

	caps->flex_protocols = MLX5_GET(query_hca_cap_out, out,
					capability.cmd_hca_cap.flex_parser_protocols);

	/* Effective granularity = reported granularity minus its offset */
	caps->log_header_modify_argument_granularity =
		MLX5_GET(query_hca_cap_out, out,
			 capability.cmd_hca_cap.log_header_modify_argument_granularity);

	caps->log_header_modify_argument_granularity -=
		MLX5_GET(query_hca_cap_out, out,
			 capability.cmd_hca_cap.
			 log_header_modify_argument_granularity_offset);

	caps->log_header_modify_argument_max_alloc =
		MLX5_GET(query_hca_cap_out, out,
			 capability.cmd_hca_cap.log_header_modify_argument_max_alloc);

	caps->definer_format_sup =
		MLX5_GET64(query_hca_cap_out, out,
			   capability.cmd_hca_cap.match_definer_format_supported);

	caps->vhca_id = MLX5_GET(query_hca_cap_out, out,
				 capability.cmd_hca_cap.vhca_id);

	caps->sq_ts_format = MLX5_GET(query_hca_cap_out, out,
				      capability.cmd_hca_cap.sq_ts_format);

	caps->ipsec_offload = MLX5_GET(query_hca_cap_out, out,
				       capability.cmd_hca_cap.ipsec_offload);

	/* Second pass: general device caps 2 */
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to query device caps");
		rte_errno = errno;
		return rte_errno;
	}

	caps->full_dw_jumbo_support = MLX5_GET(query_hca_cap_out, out,
					       capability.cmd_hca_cap_2.
					       format_select_dw_8_6_ext);

	caps->format_select_gtpu_dw_0 = MLX5_GET(query_hca_cap_out, out,
						 capability.cmd_hca_cap_2.
						 format_select_dw_gtpu_dw_0);

	caps->format_select_gtpu_dw_1 = MLX5_GET(query_hca_cap_out, out,
						 capability.cmd_hca_cap_2.
						 format_select_dw_gtpu_dw_1);

	caps->format_select_gtpu_dw_2 = MLX5_GET(query_hca_cap_out, out,
						 capability.cmd_hca_cap_2.
						 format_select_dw_gtpu_dw_2);

	caps->format_select_gtpu_ext_dw_0 = MLX5_GET(query_hca_cap_out, out,
						     capability.cmd_hca_cap_2.
						     format_select_dw_gtpu_first_ext_dw_0);

	caps->supp_type_gen_wqe = MLX5_GET(query_hca_cap_out, out,
					   capability.cmd_hca_cap_2.
					   generate_wqe_type);

	/* check cross-VHCA support in cap2: all three object-to-object
	 * links must be supported, and all three object types must be
	 * shareable, for cross-VHCA resources to be usable.
	 */
	res =
	MLX5_GET(query_hca_cap_out, out,
		 capability.cmd_hca_cap_2.cross_vhca_object_to_object_supported);

	caps->cross_vhca_resources = (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_STC_TO_TIR) &&
				     (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_STC_TO_FT) &&
				     (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_FT_TO_RTC);

	res =
	MLX5_GET(query_hca_cap_out, out,
		 capability.cmd_hca_cap_2.allowed_object_for_other_vhca_access);

	caps->cross_vhca_resources &= (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_TIR) &&
				      (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_FT) &&
				      (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_RTC);

	caps->flow_table_hash_type = MLX5_GET(query_hca_cap_out, out,
					      capability.cmd_hca_cap_2.flow_table_hash_type);

	/* Third pass: NIC flow table caps */
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to query flow table caps");
		rte_errno = errno;
		return rte_errno;
	}

	caps->nic_ft.max_level = MLX5_GET(query_hca_cap_out, out,
					  capability.flow_table_nic_cap.
					  flow_table_properties_nic_receive.max_ft_level);

	caps->nic_ft.reparse = MLX5_GET(query_hca_cap_out, out,
					capability.flow_table_nic_cap.
					flow_table_properties_nic_receive.reparse);

	caps->nic_ft.ignore_flow_level_rtc_valid =
		MLX5_GET(query_hca_cap_out,
			 out,
			 capability.flow_table_nic_cap.
			 flow_table_properties_nic_receive.ignore_flow_level_rtc_valid);

	/* check cross-VHCA support in flow table properties */
	res =
	MLX5_GET(query_hca_cap_out, out,
		 capability.flow_table_nic_cap.flow_table_properties_nic_receive.cross_vhca_object);
	caps->cross_vhca_resources &= res;

	/* Fourth pass, only if HW supports WQE-based flow table updates */
	if (caps->wqe_based_update) {
		MLX5_SET(query_hca_cap_in, in, op_mod,
			 MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE |
			 MLX5_HCA_CAP_OPMOD_GET_CUR);

		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
		if (ret) {
			DR_LOG(ERR, "Failed to query WQE based FT caps");
			rte_errno = errno;
			return rte_errno;
		}

		caps->rtc_reparse_mode = MLX5_GET(query_hca_cap_out, out,
						  capability.wqe_based_flow_table_cap.
						  rtc_reparse_mode);

		caps->ste_format = MLX5_GET(query_hca_cap_out, out,
					    capability.wqe_based_flow_table_cap.
					    ste_format);

		caps->rtc_index_mode = MLX5_GET(query_hca_cap_out, out,
						capability.wqe_based_flow_table_cap.
						rtc_index_mode);

		caps->rtc_log_depth_max = MLX5_GET(query_hca_cap_out, out,
						   capability.wqe_based_flow_table_cap.
						   rtc_log_depth_max);

		caps->ste_alloc_log_max = MLX5_GET(query_hca_cap_out, out,
						   capability.wqe_based_flow_table_cap.
						   ste_alloc_log_max);

		caps->ste_alloc_log_gran = MLX5_GET(query_hca_cap_out, out,
						    capability.wqe_based_flow_table_cap.
						    ste_alloc_log_granularity);

		caps->trivial_match_definer = MLX5_GET(query_hca_cap_out, out,
						       capability.wqe_based_flow_table_cap.
						       trivial_match_definer);

		caps->stc_alloc_log_max = MLX5_GET(query_hca_cap_out, out,
						   capability.wqe_based_flow_table_cap.
						   stc_alloc_log_max);

		caps->stc_alloc_log_gran = MLX5_GET(query_hca_cap_out, out,
						    capability.wqe_based_flow_table_cap.
						    stc_alloc_log_granularity);

		caps->rtc_hash_split_table = MLX5_GET(query_hca_cap_out, out,
						      capability.wqe_based_flow_table_cap.
						      rtc_hash_split_table);

		caps->rtc_linear_lookup_table = MLX5_GET(query_hca_cap_out, out,
							 capability.wqe_based_flow_table_cap.
							 rtc_linear_lookup_table);

		caps->access_index_mode = MLX5_GET(query_hca_cap_out, out,
						   capability.wqe_based_flow_table_cap.
						   access_index_mode);

		caps->linear_match_definer = MLX5_GET(query_hca_cap_out, out,
						      capability.wqe_based_flow_table_cap.
						      linear_match_definer_reg_c3);

		caps->rtc_max_hash_def_gen_wqe = MLX5_GET(query_hca_cap_out, out,
							  capability.wqe_based_flow_table_cap.
							  rtc_max_num_hash_definer_gen_wqe);

		caps->supp_ste_format_gen_wqe = MLX5_GET(query_hca_cap_out, out,
							 capability.wqe_based_flow_table_cap.
							 ste_format_gen_wqe);

		caps->fdb_tir_stc = MLX5_GET(query_hca_cap_out, out,
					     capability.wqe_based_flow_table_cap.
					     fdb_jump_to_tir_stc);
	}

	/* Fifth pass, only when acting as eswitch manager: FDB table caps
	 * then eswitch caps.
	 */
	if (caps->eswitch_manager) {
		MLX5_SET(query_hca_cap_in, in, op_mod,
			 MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE |
			 MLX5_HCA_CAP_OPMOD_GET_CUR);

		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
		if (ret) {
			DR_LOG(ERR, "Failed to query flow table esw caps");
			rte_errno = errno;
			return rte_errno;
		}

		caps->fdb_ft.max_level = MLX5_GET(query_hca_cap_out, out,
						  capability.flow_table_nic_cap.
						  flow_table_properties_nic_receive.max_ft_level);

		caps->fdb_ft.reparse = MLX5_GET(query_hca_cap_out, out,
						capability.flow_table_nic_cap.
						flow_table_properties_nic_receive.reparse);

		MLX5_SET(query_hca_cap_in, in, op_mod,
			 MLX5_SET_HCA_CAP_OP_MOD_ESW | MLX5_HCA_CAP_OPMOD_GET_CUR);

		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
		if (ret) {
			DR_LOG(ERR, "Query eswitch capabilities failed %d", ret);
			rte_errno = errno;
			return rte_errno;
		}

		if (MLX5_GET(query_hca_cap_out, out,
			     capability.esw_cap.esw_manager_vport_number_valid))
			caps->eswitch_manager_vport_number =
				MLX5_GET(query_hca_cap_out, out,
					 capability.esw_cap.esw_manager_vport_number);

		caps->merged_eswitch = MLX5_GET(query_hca_cap_out, out,
						capability.esw_cap.merged_eswitch);
	}

	ret = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
	if (ret) {
		DR_LOG(ERR, "Failed to query device attributes");
		rte_errno = ret;
		return rte_errno;
	}

	strlcpy(caps->fw_ver, attr_ex.orig_attr.fw_ver, sizeof(caps->fw_ver));

	/* Wire port regc is best-effort: missing info is logged, not fatal */
	port_info = flow_hw_get_wire_port(ctx);
	if (port_info) {
		caps->wire_regc = port_info->regc_value;
		caps->wire_regc_mask = port_info->regc_mask;
	} else {
		DR_LOG(INFO, "Failed to query wire port regc value");
	}

	return ret;
}

/* Query vport capabilities of an IB port.
 *
 * Requires both the vport id and the eswitch owner VHCA id to be reported;
 * otherwise the port is treated as unsupported (ENOTSUP). The reg_c0
 * metadata tag/mask are filled in only when the query reports them.
 *
 * Returns 0 on success; rte_errno (also set) on failure.
 */
int mlx5dr_cmd_query_ib_port(struct ibv_context *ctx,
			     struct mlx5dr_cmd_query_vport_caps *vport_caps,
			     uint32_t port_num)
{
	struct mlx5_port_info port_info = {0};
	uint32_t flags;
	int ret;

	flags = MLX5_PORT_QUERY_VPORT | MLX5_PORT_QUERY_ESW_OWNER_VHCA_ID;

	ret = mlx5_glue->devx_port_query(ctx, port_num, &port_info);
	/* Check if query succeed and vport is enabled */
	if (ret || (port_info.query_flags & flags) != flags) {
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	vport_caps->vport_num = port_info.vport_id;
	vport_caps->esw_owner_vhca_id = port_info.esw_owner_vhca_id;

	if (port_info.query_flags & MLX5_PORT_QUERY_REG_C0) {
		vport_caps->metadata_c = port_info.vport_meta_tag;
		vport_caps->metadata_c_mask = port_info.vport_meta_mask;
	}

	return 0;
}