1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2022 NVIDIA Corporation & Affiliates 3 */ 4 5 #include "mlx5dr_internal.h" 6 7 static uint32_t mlx5dr_cmd_get_syndrome(uint32_t *out) 8 { 9 /* Assumption: syndrome is always the second u32 */ 10 return be32toh(out[1]); 11 } 12 13 int mlx5dr_cmd_destroy_obj(struct mlx5dr_devx_obj *devx_obj) 14 { 15 int ret; 16 17 ret = mlx5_glue->devx_obj_destroy(devx_obj->obj); 18 simple_free(devx_obj); 19 20 return ret; 21 } 22 23 struct mlx5dr_devx_obj * 24 mlx5dr_cmd_flow_table_create(struct ibv_context *ctx, 25 struct mlx5dr_cmd_ft_create_attr *ft_attr) 26 { 27 uint32_t out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0}; 28 uint32_t in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0}; 29 struct mlx5dr_devx_obj *devx_obj; 30 void *ft_ctx; 31 32 devx_obj = simple_malloc(sizeof(*devx_obj)); 33 if (!devx_obj) { 34 DR_LOG(ERR, "Failed to allocate memory for flow table object"); 35 rte_errno = ENOMEM; 36 return NULL; 37 } 38 39 MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE); 40 MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type); 41 42 ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context); 43 MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level); 44 MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid); 45 MLX5_SET(flow_table_context, ft_ctx, reformat_en, ft_attr->reformat_en); 46 47 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 48 if (!devx_obj->obj) { 49 DR_LOG(ERR, "Failed to create FT (syndrome: %#x)", 50 mlx5dr_cmd_get_syndrome(out)); 51 simple_free(devx_obj); 52 rte_errno = errno; 53 return NULL; 54 } 55 56 devx_obj->id = MLX5_GET(create_flow_table_out, out, table_id); 57 58 return devx_obj; 59 } 60 61 int 62 mlx5dr_cmd_flow_table_modify(struct mlx5dr_devx_obj *devx_obj, 63 struct mlx5dr_cmd_ft_modify_attr *ft_attr) 64 { 65 uint32_t out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0}; 66 uint32_t 
in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0}; 67 void *ft_ctx; 68 int ret; 69 70 MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE); 71 MLX5_SET(modify_flow_table_in, in, table_type, ft_attr->type); 72 MLX5_SET(modify_flow_table_in, in, modify_field_select, ft_attr->modify_fs); 73 MLX5_SET(modify_flow_table_in, in, table_id, devx_obj->id); 74 75 ft_ctx = MLX5_ADDR_OF(modify_flow_table_in, in, flow_table_context); 76 77 MLX5_SET(flow_table_context, ft_ctx, table_miss_action, ft_attr->table_miss_action); 78 MLX5_SET(flow_table_context, ft_ctx, table_miss_id, ft_attr->table_miss_id); 79 MLX5_SET(flow_table_context, ft_ctx, rtc_id_0, ft_attr->rtc_id_0); 80 MLX5_SET(flow_table_context, ft_ctx, rtc_id_1, ft_attr->rtc_id_1); 81 82 ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out)); 83 if (ret) { 84 DR_LOG(ERR, "Failed to modify FT (syndrome: %#x)", 85 mlx5dr_cmd_get_syndrome(out)); 86 rte_errno = errno; 87 } 88 89 return ret; 90 } 91 92 int 93 mlx5dr_cmd_flow_table_query(struct mlx5dr_devx_obj *devx_obj, 94 struct mlx5dr_cmd_ft_query_attr *ft_attr, 95 uint64_t *icm_addr_0, uint64_t *icm_addr_1) 96 { 97 uint32_t out[MLX5_ST_SZ_DW(query_flow_table_out)] = {0}; 98 uint32_t in[MLX5_ST_SZ_DW(query_flow_table_in)] = {0}; 99 void *ft_ctx; 100 int ret; 101 102 MLX5_SET(query_flow_table_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_TABLE); 103 MLX5_SET(query_flow_table_in, in, table_type, ft_attr->type); 104 MLX5_SET(query_flow_table_in, in, table_id, devx_obj->id); 105 106 ret = mlx5_glue->devx_obj_query(devx_obj->obj, in, sizeof(in), out, sizeof(out)); 107 if (ret) { 108 DR_LOG(ERR, "Failed to query FT (syndrome: %#x)", 109 mlx5dr_cmd_get_syndrome(out)); 110 rte_errno = errno; 111 return ret; 112 } 113 114 ft_ctx = MLX5_ADDR_OF(query_flow_table_out, out, flow_table_context); 115 *icm_addr_0 = MLX5_GET64(flow_table_context, ft_ctx, sw_owner_icm_root_0); 116 *icm_addr_1 = MLX5_GET64(flow_table_context, ft_ctx, sw_owner_icm_root_1); 
117 118 return ret; 119 } 120 121 static struct mlx5dr_devx_obj * 122 mlx5dr_cmd_flow_group_create(struct ibv_context *ctx, 123 struct mlx5dr_cmd_fg_attr *fg_attr) 124 { 125 uint32_t out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0}; 126 uint32_t in[MLX5_ST_SZ_DW(create_flow_group_in)] = {0}; 127 struct mlx5dr_devx_obj *devx_obj; 128 129 devx_obj = simple_malloc(sizeof(*devx_obj)); 130 if (!devx_obj) { 131 DR_LOG(ERR, "Failed to allocate memory for flow group object"); 132 rte_errno = ENOMEM; 133 return NULL; 134 } 135 136 MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP); 137 MLX5_SET(create_flow_group_in, in, table_type, fg_attr->table_type); 138 MLX5_SET(create_flow_group_in, in, table_id, fg_attr->table_id); 139 140 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 141 if (!devx_obj->obj) { 142 DR_LOG(ERR, "Failed to create Flow group(syndrome: %#x)", 143 mlx5dr_cmd_get_syndrome(out)); 144 simple_free(devx_obj); 145 rte_errno = errno; 146 return NULL; 147 } 148 149 devx_obj->id = MLX5_GET(create_flow_group_out, out, group_id); 150 151 return devx_obj; 152 } 153 154 struct mlx5dr_devx_obj * 155 mlx5dr_cmd_set_fte(struct ibv_context *ctx, 156 uint32_t table_type, 157 uint32_t table_id, 158 uint32_t group_id, 159 struct mlx5dr_cmd_set_fte_attr *fte_attr) 160 { 161 uint32_t out[MLX5_ST_SZ_DW(set_fte_out)] = {0}; 162 struct mlx5dr_devx_obj *devx_obj; 163 uint32_t dest_entry_sz; 164 uint32_t total_dest_sz; 165 void *in_flow_context; 166 uint32_t action_flags; 167 uint8_t *in_dests; 168 uint32_t inlen; 169 uint32_t *in; 170 uint32_t i; 171 172 dest_entry_sz = fte_attr->extended_dest ? 
173 MLX5_ST_SZ_BYTES(extended_dest_format) : 174 MLX5_ST_SZ_BYTES(dest_format); 175 total_dest_sz = dest_entry_sz * fte_attr->dests_num; 176 inlen = align((MLX5_ST_SZ_BYTES(set_fte_in) + total_dest_sz), DW_SIZE); 177 in = simple_calloc(1, inlen); 178 if (!in) { 179 rte_errno = ENOMEM; 180 return NULL; 181 } 182 183 devx_obj = simple_malloc(sizeof(*devx_obj)); 184 if (!devx_obj) { 185 DR_LOG(ERR, "Failed to allocate memory for fte object"); 186 rte_errno = ENOMEM; 187 goto free_in; 188 } 189 190 MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY); 191 MLX5_SET(set_fte_in, in, table_type, table_type); 192 MLX5_SET(set_fte_in, in, table_id, table_id); 193 194 in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context); 195 MLX5_SET(flow_context, in_flow_context, group_id, group_id); 196 MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source); 197 MLX5_SET(flow_context, in_flow_context, extended_destination, fte_attr->extended_dest); 198 MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level); 199 200 action_flags = fte_attr->action_flags; 201 MLX5_SET(flow_context, in_flow_context, action, action_flags); 202 203 if (action_flags & MLX5_FLOW_CONTEXT_ACTION_REFORMAT) 204 MLX5_SET(flow_context, in_flow_context, 205 packet_reformat_id, fte_attr->packet_reformat_id); 206 207 if (action_flags & (MLX5_FLOW_CONTEXT_ACTION_DECRYPT | MLX5_FLOW_CONTEXT_ACTION_ENCRYPT)) { 208 MLX5_SET(flow_context, in_flow_context, 209 encrypt_decrypt_type, fte_attr->encrypt_decrypt_type); 210 MLX5_SET(flow_context, in_flow_context, 211 encrypt_decrypt_obj_id, fte_attr->encrypt_decrypt_obj_id); 212 } 213 214 if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { 215 in_dests = (uint8_t *)MLX5_ADDR_OF(flow_context, in_flow_context, destination); 216 217 for (i = 0; i < fte_attr->dests_num; i++) { 218 struct mlx5dr_cmd_set_fte_dest *dest = &fte_attr->dests[i]; 219 220 switch (dest->destination_type) { 221 case MLX5_FLOW_DESTINATION_TYPE_VPORT: 
222 if (dest->ext_flags & MLX5DR_CMD_EXT_DEST_ESW_OWNER_VHCA_ID) { 223 MLX5_SET(dest_format, in_dests, 224 destination_eswitch_owner_vhca_id_valid, 1); 225 MLX5_SET(dest_format, in_dests, 226 destination_eswitch_owner_vhca_id, 227 dest->esw_owner_vhca_id); 228 } 229 /* Fall through */ 230 case MLX5_FLOW_DESTINATION_TYPE_TIR: 231 case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: 232 MLX5_SET(dest_format, in_dests, destination_type, 233 dest->destination_type); 234 MLX5_SET(dest_format, in_dests, destination_id, 235 dest->destination_id); 236 if (dest->ext_flags & MLX5DR_CMD_EXT_DEST_REFORMAT) { 237 MLX5_SET(dest_format, in_dests, packet_reformat, 1); 238 MLX5_SET(extended_dest_format, in_dests, packet_reformat_id, 239 dest->ext_reformat->id); 240 } 241 break; 242 default: 243 rte_errno = EOPNOTSUPP; 244 goto free_devx; 245 } 246 247 in_dests = in_dests + dest_entry_sz; 248 } 249 MLX5_SET(flow_context, in_flow_context, destination_list_size, fte_attr->dests_num); 250 } 251 252 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out)); 253 if (!devx_obj->obj) { 254 DR_LOG(ERR, "Failed to create FTE (syndrome: %#x)", 255 mlx5dr_cmd_get_syndrome(out)); 256 rte_errno = errno; 257 goto free_devx; 258 } 259 260 simple_free(in); 261 return devx_obj; 262 263 free_devx: 264 simple_free(devx_obj); 265 free_in: 266 simple_free(in); 267 return NULL; 268 } 269 270 struct mlx5dr_cmd_forward_tbl * 271 mlx5dr_cmd_forward_tbl_create(struct ibv_context *ctx, 272 struct mlx5dr_cmd_ft_create_attr *ft_attr, 273 struct mlx5dr_cmd_set_fte_attr *fte_attr) 274 { 275 struct mlx5dr_cmd_fg_attr fg_attr = {0}; 276 struct mlx5dr_cmd_forward_tbl *tbl; 277 278 tbl = simple_calloc(1, sizeof(*tbl)); 279 if (!tbl) { 280 DR_LOG(ERR, "Failed to allocate memory"); 281 rte_errno = ENOMEM; 282 return NULL; 283 } 284 285 tbl->ft = mlx5dr_cmd_flow_table_create(ctx, ft_attr); 286 if (!tbl->ft) { 287 DR_LOG(ERR, "Failed to create FT"); 288 goto free_tbl; 289 } 290 291 fg_attr.table_id = 
tbl->ft->id; 292 fg_attr.table_type = ft_attr->type; 293 294 tbl->fg = mlx5dr_cmd_flow_group_create(ctx, &fg_attr); 295 if (!tbl->fg) { 296 DR_LOG(ERR, "Failed to create FG"); 297 goto free_ft; 298 } 299 300 tbl->fte = mlx5dr_cmd_set_fte(ctx, ft_attr->type, tbl->ft->id, tbl->fg->id, fte_attr); 301 if (!tbl->fte) { 302 DR_LOG(ERR, "Failed to create FTE"); 303 goto free_fg; 304 } 305 return tbl; 306 307 free_fg: 308 mlx5dr_cmd_destroy_obj(tbl->fg); 309 free_ft: 310 mlx5dr_cmd_destroy_obj(tbl->ft); 311 free_tbl: 312 simple_free(tbl); 313 return NULL; 314 } 315 316 void mlx5dr_cmd_forward_tbl_destroy(struct mlx5dr_cmd_forward_tbl *tbl) 317 { 318 mlx5dr_cmd_destroy_obj(tbl->fte); 319 mlx5dr_cmd_destroy_obj(tbl->fg); 320 mlx5dr_cmd_destroy_obj(tbl->ft); 321 simple_free(tbl); 322 } 323 324 void mlx5dr_cmd_set_attr_connect_miss_tbl(struct mlx5dr_context *ctx, 325 uint32_t fw_ft_type, 326 enum mlx5dr_table_type type, 327 struct mlx5dr_cmd_ft_modify_attr *ft_attr) 328 { 329 struct mlx5dr_devx_obj *default_miss_tbl; 330 331 if (type != MLX5DR_TABLE_TYPE_FDB && !mlx5dr_context_shared_gvmi_used(ctx)) 332 return; 333 334 ft_attr->modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION; 335 ft_attr->type = fw_ft_type; 336 ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL; 337 338 if (type == MLX5DR_TABLE_TYPE_FDB) { 339 default_miss_tbl = ctx->common_res[type].default_miss->ft; 340 if (!default_miss_tbl) { 341 assert(false); 342 return; 343 } 344 ft_attr->table_miss_id = default_miss_tbl->id; 345 } else { 346 ft_attr->table_miss_id = ctx->gvmi_res[type].aliased_end_ft->id; 347 } 348 } 349 350 struct mlx5dr_devx_obj * 351 mlx5dr_cmd_rtc_create(struct ibv_context *ctx, 352 struct mlx5dr_cmd_rtc_create_attr *rtc_attr) 353 { 354 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 355 uint32_t in[MLX5_ST_SZ_DW(create_rtc_in)] = {0}; 356 struct mlx5dr_devx_obj *devx_obj; 357 void *attr; 358 359 devx_obj = simple_malloc(sizeof(*devx_obj)); 360 if 
(!devx_obj) { 361 DR_LOG(ERR, "Failed to allocate memory for RTC object"); 362 rte_errno = ENOMEM; 363 return NULL; 364 } 365 366 attr = MLX5_ADDR_OF(create_rtc_in, in, hdr); 367 MLX5_SET(general_obj_in_cmd_hdr, 368 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 369 MLX5_SET(general_obj_in_cmd_hdr, 370 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_RTC); 371 372 attr = MLX5_ADDR_OF(create_rtc_in, in, rtc); 373 if (rtc_attr->is_compare) { 374 MLX5_SET(rtc, attr, ste_format_0, MLX5_IFC_RTC_STE_FORMAT_4DW_RANGE); 375 } else { 376 MLX5_SET(rtc, attr, ste_format_0, rtc_attr->is_frst_jumbo ? 377 MLX5_IFC_RTC_STE_FORMAT_11DW : MLX5_IFC_RTC_STE_FORMAT_8DW); 378 } 379 380 if (rtc_attr->is_scnd_range) { 381 MLX5_SET(rtc, attr, ste_format_1, MLX5_IFC_RTC_STE_FORMAT_RANGE); 382 MLX5_SET(rtc, attr, num_match_ste, 2); 383 } 384 385 MLX5_SET(rtc, attr, pd, rtc_attr->pd); 386 MLX5_SET(rtc, attr, update_method, rtc_attr->fw_gen_wqe); 387 MLX5_SET(rtc, attr, update_index_mode, rtc_attr->update_index_mode); 388 MLX5_SET(rtc, attr, access_index_mode, rtc_attr->access_index_mode); 389 MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer); 390 MLX5_SET(rtc, attr, log_depth, rtc_attr->log_depth); 391 MLX5_SET(rtc, attr, log_hash_size, rtc_attr->log_size); 392 MLX5_SET(rtc, attr, table_type, rtc_attr->table_type); 393 MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer); 394 MLX5_SET(rtc, attr, match_definer_0, rtc_attr->match_definer_0); 395 MLX5_SET(rtc, attr, match_definer_1, rtc_attr->match_definer_1); 396 MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base); 397 MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base); 398 MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset); 399 MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id); 400 MLX5_SET(rtc, attr, reparse_mode, rtc_attr->reparse_mode); 401 402 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 403 if (!devx_obj->obj) { 404 DR_LOG(ERR, "Failed to create RTC 
(syndrome: %#x)", 405 mlx5dr_cmd_get_syndrome(out)); 406 simple_free(devx_obj); 407 rte_errno = errno; 408 return NULL; 409 } 410 411 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 412 413 return devx_obj; 414 } 415 416 struct mlx5dr_devx_obj * 417 mlx5dr_cmd_stc_create(struct ibv_context *ctx, 418 struct mlx5dr_cmd_stc_create_attr *stc_attr) 419 { 420 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 421 uint32_t in[MLX5_ST_SZ_DW(create_stc_in)] = {0}; 422 struct mlx5dr_devx_obj *devx_obj; 423 void *attr; 424 425 devx_obj = simple_malloc(sizeof(*devx_obj)); 426 if (!devx_obj) { 427 DR_LOG(ERR, "Failed to allocate memory for STC object"); 428 rte_errno = ENOMEM; 429 return NULL; 430 } 431 432 attr = MLX5_ADDR_OF(create_stc_in, in, hdr); 433 MLX5_SET(general_obj_in_cmd_hdr, 434 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 435 MLX5_SET(general_obj_in_cmd_hdr, 436 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STC); 437 MLX5_SET(general_obj_in_cmd_hdr, 438 attr, log_obj_range, stc_attr->log_obj_range); 439 440 attr = MLX5_ADDR_OF(create_stc_in, in, stc); 441 MLX5_SET(stc, attr, table_type, stc_attr->table_type); 442 443 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 444 if (!devx_obj->obj) { 445 DR_LOG(ERR, "Failed to create STC (syndrome: %#x)", 446 mlx5dr_cmd_get_syndrome(out)); 447 simple_free(devx_obj); 448 rte_errno = errno; 449 return NULL; 450 } 451 452 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 453 454 return devx_obj; 455 } 456 457 static int 458 mlx5dr_cmd_stc_modify_set_stc_param(struct mlx5dr_cmd_stc_modify_attr *stc_attr, 459 void *stc_param) 460 { 461 switch (stc_attr->action_type) { 462 case MLX5_IFC_STC_ACTION_TYPE_COUNTER: 463 MLX5_SET(stc_ste_param_flow_counter, stc_param, flow_counter_id, stc_attr->id); 464 break; 465 case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR: 466 MLX5_SET(stc_ste_param_tir, stc_param, tirn, stc_attr->dest_tir_num); 467 break; 468 case 
MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT: 469 MLX5_SET(stc_ste_param_table, stc_param, table_id, stc_attr->dest_table_id); 470 break; 471 case MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST: 472 MLX5_SET(stc_ste_param_header_modify_list, stc_param, 473 header_modify_pattern_id, stc_attr->modify_header.pattern_id); 474 MLX5_SET(stc_ste_param_header_modify_list, stc_param, 475 header_modify_argument_id, stc_attr->modify_header.arg_id); 476 break; 477 case MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE: 478 MLX5_SET(stc_ste_param_remove, stc_param, action_type, 479 MLX5_MODIFICATION_TYPE_REMOVE); 480 MLX5_SET(stc_ste_param_remove, stc_param, decap, 481 stc_attr->remove_header.decap); 482 MLX5_SET(stc_ste_param_remove, stc_param, remove_start_anchor, 483 stc_attr->remove_header.start_anchor); 484 MLX5_SET(stc_ste_param_remove, stc_param, remove_end_anchor, 485 stc_attr->remove_header.end_anchor); 486 break; 487 case MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT: 488 MLX5_SET(stc_ste_param_insert, stc_param, action_type, 489 MLX5_MODIFICATION_TYPE_INSERT); 490 MLX5_SET(stc_ste_param_insert, stc_param, encap, 491 stc_attr->insert_header.encap); 492 MLX5_SET(stc_ste_param_insert, stc_param, push_esp, 493 stc_attr->insert_header.push_esp); 494 MLX5_SET(stc_ste_param_insert, stc_param, inline_data, 495 stc_attr->insert_header.is_inline); 496 MLX5_SET(stc_ste_param_insert, stc_param, insert_anchor, 497 stc_attr->insert_header.insert_anchor); 498 /* HW gets the next 2 sizes in words */ 499 MLX5_SET(stc_ste_param_insert, stc_param, insert_size, 500 stc_attr->insert_header.header_size / W_SIZE); 501 MLX5_SET(stc_ste_param_insert, stc_param, insert_offset, 502 stc_attr->insert_header.insert_offset / W_SIZE); 503 MLX5_SET(stc_ste_param_insert, stc_param, insert_argument, 504 stc_attr->insert_header.arg_id); 505 break; 506 case MLX5_IFC_STC_ACTION_TYPE_COPY: 507 case MLX5_IFC_STC_ACTION_TYPE_SET: 508 case MLX5_IFC_STC_ACTION_TYPE_ADD: 509 case MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD: 510 *(__be64 *)stc_param = 
stc_attr->modify_action.data; 511 break; 512 case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT: 513 case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK: 514 MLX5_SET(stc_ste_param_vport, stc_param, vport_number, 515 stc_attr->vport.vport_num); 516 MLX5_SET(stc_ste_param_vport, stc_param, eswitch_owner_vhca_id, 517 stc_attr->vport.esw_owner_vhca_id); 518 MLX5_SET(stc_ste_param_vport, stc_param, eswitch_owner_vhca_id_valid, 1); 519 break; 520 case MLX5_IFC_STC_ACTION_TYPE_DROP: 521 case MLX5_IFC_STC_ACTION_TYPE_NOP: 522 case MLX5_IFC_STC_ACTION_TYPE_TAG: 523 case MLX5_IFC_STC_ACTION_TYPE_ALLOW: 524 break; 525 case MLX5_IFC_STC_ACTION_TYPE_ASO: 526 MLX5_SET(stc_ste_param_execute_aso, stc_param, aso_object_id, 527 stc_attr->aso.devx_obj_id); 528 MLX5_SET(stc_ste_param_execute_aso, stc_param, return_reg_id, 529 stc_attr->aso.return_reg_id); 530 MLX5_SET(stc_ste_param_execute_aso, stc_param, aso_type, 531 stc_attr->aso.aso_type); 532 break; 533 case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE: 534 MLX5_SET(stc_ste_param_ste_table, stc_param, ste_obj_id, 535 stc_attr->ste_table.ste_obj_id); 536 MLX5_SET(stc_ste_param_ste_table, stc_param, match_definer_id, 537 stc_attr->ste_table.match_definer_id); 538 MLX5_SET(stc_ste_param_ste_table, stc_param, log_hash_size, 539 stc_attr->ste_table.log_hash_size); 540 break; 541 case MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS: 542 MLX5_SET(stc_ste_param_remove_words, stc_param, action_type, 543 MLX5_MODIFICATION_TYPE_REMOVE_WORDS); 544 MLX5_SET(stc_ste_param_remove_words, stc_param, remove_start_anchor, 545 stc_attr->remove_words.start_anchor); 546 MLX5_SET(stc_ste_param_remove_words, stc_param, 547 remove_size, stc_attr->remove_words.num_of_words); 548 break; 549 default: 550 DR_LOG(ERR, "Not supported type %d", stc_attr->action_type); 551 rte_errno = EINVAL; 552 return rte_errno; 553 } 554 return 0; 555 } 556 557 int 558 mlx5dr_cmd_stc_modify(struct mlx5dr_devx_obj *devx_obj, 559 struct mlx5dr_cmd_stc_modify_attr *stc_attr) 560 { 561 uint32_t 
out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 562 uint32_t in[MLX5_ST_SZ_DW(create_stc_in)] = {0}; 563 void *stc_param; 564 void *attr; 565 int ret; 566 567 attr = MLX5_ADDR_OF(create_stc_in, in, hdr); 568 MLX5_SET(general_obj_in_cmd_hdr, 569 attr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT); 570 MLX5_SET(general_obj_in_cmd_hdr, 571 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STC); 572 MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, devx_obj->id); 573 MLX5_SET(general_obj_in_cmd_hdr, in, obj_offset, stc_attr->stc_offset); 574 575 attr = MLX5_ADDR_OF(create_stc_in, in, stc); 576 MLX5_SET(stc, attr, ste_action_offset, stc_attr->action_offset); 577 MLX5_SET(stc, attr, action_type, stc_attr->action_type); 578 MLX5_SET(stc, attr, reparse_mode, stc_attr->reparse_mode); 579 MLX5_SET64(stc, attr, modify_field_select, 580 MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC); 581 582 /* Set destination TIRN, TAG, FT ID, STE ID */ 583 stc_param = MLX5_ADDR_OF(stc, attr, stc_param); 584 ret = mlx5dr_cmd_stc_modify_set_stc_param(stc_attr, stc_param); 585 if (ret) 586 return ret; 587 588 ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out)); 589 if (ret) { 590 DR_LOG(ERR, "Failed to modify STC FW action_type %d (syndrome: %#x)", 591 stc_attr->action_type, mlx5dr_cmd_get_syndrome(out)); 592 rte_errno = errno; 593 } 594 595 return ret; 596 } 597 598 struct mlx5dr_devx_obj * 599 mlx5dr_cmd_arg_create(struct ibv_context *ctx, 600 uint16_t log_obj_range, 601 uint32_t pd) 602 { 603 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 604 uint32_t in[MLX5_ST_SZ_DW(create_arg_in)] = {0}; 605 struct mlx5dr_devx_obj *devx_obj; 606 void *attr; 607 608 devx_obj = simple_malloc(sizeof(*devx_obj)); 609 if (!devx_obj) { 610 DR_LOG(ERR, "Failed to allocate memory for ARG object"); 611 rte_errno = ENOMEM; 612 return NULL; 613 } 614 615 attr = MLX5_ADDR_OF(create_arg_in, in, hdr); 616 MLX5_SET(general_obj_in_cmd_hdr, 617 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 618 
MLX5_SET(general_obj_in_cmd_hdr, 619 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_ARG); 620 MLX5_SET(general_obj_in_cmd_hdr, 621 attr, log_obj_range, log_obj_range); 622 623 attr = MLX5_ADDR_OF(create_arg_in, in, arg); 624 MLX5_SET(arg, attr, access_pd, pd); 625 626 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 627 if (!devx_obj->obj) { 628 DR_LOG(ERR, "Failed to create ARG (syndrome: %#x)", 629 mlx5dr_cmd_get_syndrome(out)); 630 simple_free(devx_obj); 631 rte_errno = errno; 632 return NULL; 633 } 634 635 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 636 637 return devx_obj; 638 } 639 640 struct mlx5dr_devx_obj * 641 mlx5dr_cmd_header_modify_pattern_create(struct ibv_context *ctx, 642 uint32_t pattern_length, 643 uint8_t *actions) 644 { 645 uint32_t in[MLX5_ST_SZ_DW(create_header_modify_pattern_in)] = {0}; 646 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 647 struct mlx5dr_devx_obj *devx_obj; 648 uint64_t *pattern_data; 649 int num_of_actions; 650 void *pattern; 651 void *attr; 652 int i; 653 654 if (pattern_length > MAX_ACTIONS_DATA_IN_HEADER_MODIFY) { 655 DR_LOG(ERR, "Pattern length %d exceeds limit %d", 656 pattern_length, MAX_ACTIONS_DATA_IN_HEADER_MODIFY); 657 rte_errno = EINVAL; 658 return NULL; 659 } 660 661 devx_obj = simple_malloc(sizeof(*devx_obj)); 662 if (!devx_obj) { 663 DR_LOG(ERR, "Failed to allocate memory for header_modify_pattern object"); 664 rte_errno = ENOMEM; 665 return NULL; 666 } 667 attr = MLX5_ADDR_OF(create_header_modify_pattern_in, in, hdr); 668 MLX5_SET(general_obj_in_cmd_hdr, 669 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 670 MLX5_SET(general_obj_in_cmd_hdr, 671 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_MODIFY_HEADER_PATTERN); 672 673 pattern = MLX5_ADDR_OF(create_header_modify_pattern_in, in, pattern); 674 /* Pattern_length is in ddwords */ 675 MLX5_SET(header_modify_pattern_in, pattern, pattern_length, pattern_length / (2 * DW_SIZE)); 676 677 pattern_data = (uint64_t 
*)MLX5_ADDR_OF(header_modify_pattern_in, pattern, pattern_data); 678 memcpy(pattern_data, actions, pattern_length); 679 680 num_of_actions = pattern_length / MLX5DR_MODIFY_ACTION_SIZE; 681 for (i = 0; i < num_of_actions; i++) { 682 int type; 683 684 type = MLX5_GET(set_action_in, &pattern_data[i], action_type); 685 if (type != MLX5_MODIFICATION_TYPE_COPY && 686 type != MLX5_MODIFICATION_TYPE_ADD_FIELD) 687 /* Action typ-copy use all bytes for control */ 688 MLX5_SET(set_action_in, &pattern_data[i], data, 0); 689 } 690 691 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 692 if (!devx_obj->obj) { 693 DR_LOG(ERR, "Failed to create header_modify_pattern (syndrome: %#x)", 694 mlx5dr_cmd_get_syndrome(out)); 695 rte_errno = errno; 696 goto free_obj; 697 } 698 699 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 700 701 return devx_obj; 702 703 free_obj: 704 simple_free(devx_obj); 705 return NULL; 706 } 707 708 struct mlx5dr_devx_obj * 709 mlx5dr_cmd_ste_create(struct ibv_context *ctx, 710 struct mlx5dr_cmd_ste_create_attr *ste_attr) 711 { 712 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 713 uint32_t in[MLX5_ST_SZ_DW(create_ste_in)] = {0}; 714 struct mlx5dr_devx_obj *devx_obj; 715 void *attr; 716 717 devx_obj = simple_malloc(sizeof(*devx_obj)); 718 if (!devx_obj) { 719 DR_LOG(ERR, "Failed to allocate memory for STE object"); 720 rte_errno = ENOMEM; 721 return NULL; 722 } 723 724 attr = MLX5_ADDR_OF(create_ste_in, in, hdr); 725 MLX5_SET(general_obj_in_cmd_hdr, 726 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 727 MLX5_SET(general_obj_in_cmd_hdr, 728 attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STE); 729 MLX5_SET(general_obj_in_cmd_hdr, 730 attr, log_obj_range, ste_attr->log_obj_range); 731 732 attr = MLX5_ADDR_OF(create_ste_in, in, ste); 733 MLX5_SET(ste, attr, table_type, ste_attr->table_type); 734 735 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 736 if (!devx_obj->obj) { 737 
DR_LOG(ERR, "Failed to create STE (syndrome: %#x)", 738 mlx5dr_cmd_get_syndrome(out)); 739 simple_free(devx_obj); 740 rte_errno = errno; 741 return NULL; 742 } 743 744 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 745 746 return devx_obj; 747 } 748 749 struct mlx5dr_devx_obj * 750 mlx5dr_cmd_definer_create(struct ibv_context *ctx, 751 struct mlx5dr_cmd_definer_create_attr *def_attr) 752 { 753 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; 754 uint32_t in[MLX5_ST_SZ_DW(create_definer_in)] = {0}; 755 struct mlx5dr_devx_obj *devx_obj; 756 void *ptr; 757 758 devx_obj = simple_malloc(sizeof(*devx_obj)); 759 if (!devx_obj) { 760 DR_LOG(ERR, "Failed to allocate memory for definer object"); 761 rte_errno = ENOMEM; 762 return NULL; 763 } 764 765 MLX5_SET(general_obj_in_cmd_hdr, 766 in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); 767 MLX5_SET(general_obj_in_cmd_hdr, 768 in, obj_type, MLX5_GENERAL_OBJ_TYPE_DEFINER); 769 770 ptr = MLX5_ADDR_OF(create_definer_in, in, definer); 771 MLX5_SET(definer, ptr, format_id, MLX5_IFC_DEFINER_FORMAT_ID_SELECT); 772 773 MLX5_SET(definer, ptr, format_select_dw0, def_attr->dw_selector[0]); 774 MLX5_SET(definer, ptr, format_select_dw1, def_attr->dw_selector[1]); 775 MLX5_SET(definer, ptr, format_select_dw2, def_attr->dw_selector[2]); 776 MLX5_SET(definer, ptr, format_select_dw3, def_attr->dw_selector[3]); 777 MLX5_SET(definer, ptr, format_select_dw4, def_attr->dw_selector[4]); 778 MLX5_SET(definer, ptr, format_select_dw5, def_attr->dw_selector[5]); 779 MLX5_SET(definer, ptr, format_select_dw6, def_attr->dw_selector[6]); 780 MLX5_SET(definer, ptr, format_select_dw7, def_attr->dw_selector[7]); 781 MLX5_SET(definer, ptr, format_select_dw8, def_attr->dw_selector[8]); 782 783 MLX5_SET(definer, ptr, format_select_byte0, def_attr->byte_selector[0]); 784 MLX5_SET(definer, ptr, format_select_byte1, def_attr->byte_selector[1]); 785 MLX5_SET(definer, ptr, format_select_byte2, def_attr->byte_selector[2]); 786 
MLX5_SET(definer, ptr, format_select_byte3, def_attr->byte_selector[3]); 787 MLX5_SET(definer, ptr, format_select_byte4, def_attr->byte_selector[4]); 788 MLX5_SET(definer, ptr, format_select_byte5, def_attr->byte_selector[5]); 789 MLX5_SET(definer, ptr, format_select_byte6, def_attr->byte_selector[6]); 790 MLX5_SET(definer, ptr, format_select_byte7, def_attr->byte_selector[7]); 791 792 ptr = MLX5_ADDR_OF(definer, ptr, match_mask); 793 memcpy(ptr, def_attr->match_mask, MLX5_FLD_SZ_BYTES(definer, match_mask)); 794 795 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 796 if (!devx_obj->obj) { 797 DR_LOG(ERR, "Failed to create Definer (syndrome: %#x)", 798 mlx5dr_cmd_get_syndrome(out)); 799 simple_free(devx_obj); 800 rte_errno = errno; 801 return NULL; 802 } 803 804 devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); 805 806 return devx_obj; 807 } 808 809 struct mlx5dr_devx_obj * 810 mlx5dr_cmd_sq_create(struct ibv_context *ctx, 811 struct mlx5dr_cmd_sq_create_attr *attr) 812 { 813 uint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0}; 814 uint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0}; 815 void *sqc = MLX5_ADDR_OF(create_sq_in, in, ctx); 816 void *wqc = MLX5_ADDR_OF(sqc, sqc, wq); 817 struct mlx5dr_devx_obj *devx_obj; 818 819 devx_obj = simple_malloc(sizeof(*devx_obj)); 820 if (!devx_obj) { 821 DR_LOG(ERR, "Failed to create SQ"); 822 rte_errno = ENOMEM; 823 return NULL; 824 } 825 826 MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ); 827 MLX5_SET(sqc, sqc, cqn, attr->cqn); 828 MLX5_SET(sqc, sqc, flush_in_error_en, 1); 829 MLX5_SET(sqc, sqc, non_wire, 1); 830 MLX5_SET(sqc, sqc, ts_format, attr->ts_format); 831 MLX5_SET(wq, wqc, wq_type, MLX5_WQ_TYPE_CYCLIC); 832 MLX5_SET(wq, wqc, pd, attr->pdn); 833 MLX5_SET(wq, wqc, uar_page, attr->page_id); 834 MLX5_SET(wq, wqc, log_wq_stride, log2above(MLX5_SEND_WQE_BB)); 835 MLX5_SET(wq, wqc, log_wq_sz, attr->log_wq_sz); 836 MLX5_SET(wq, wqc, dbr_umem_id, attr->dbr_id); 837 
MLX5_SET(wq, wqc, wq_umem_id, attr->wq_id); 838 839 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); 840 if (!devx_obj->obj) { 841 simple_free(devx_obj); 842 rte_errno = errno; 843 return NULL; 844 } 845 846 devx_obj->id = MLX5_GET(create_sq_out, out, sqn); 847 848 return devx_obj; 849 } 850 851 struct mlx5dr_devx_obj * 852 mlx5dr_cmd_packet_reformat_create(struct ibv_context *ctx, 853 struct mlx5dr_cmd_packet_reformat_create_attr *attr) 854 { 855 uint32_t out[MLX5_ST_SZ_DW(alloc_packet_reformat_out)] = {0}; 856 size_t insz, cmd_data_sz, cmd_total_sz; 857 struct mlx5dr_devx_obj *devx_obj; 858 void *prctx; 859 void *pdata; 860 void *in; 861 862 cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in); 863 cmd_total_sz += MLX5_ST_SZ_BYTES(packet_reformat_context_in); 864 cmd_data_sz = MLX5_FLD_SZ_BYTES(packet_reformat_context_in, reformat_data); 865 insz = align(cmd_total_sz + attr->data_sz - cmd_data_sz, DW_SIZE); 866 in = simple_calloc(1, insz); 867 if (!in) { 868 rte_errno = ENOMEM; 869 return NULL; 870 } 871 872 MLX5_SET(alloc_packet_reformat_context_in, in, opcode, 873 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT); 874 875 prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in, 876 packet_reformat_context); 877 pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data); 878 879 MLX5_SET(packet_reformat_context_in, prctx, reformat_type, attr->type); 880 MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, attr->reformat_param_0); 881 MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, attr->data_sz); 882 memcpy(pdata, attr->data, attr->data_sz); 883 884 devx_obj = simple_malloc(sizeof(*devx_obj)); 885 if (!devx_obj) { 886 DR_LOG(ERR, "Failed to allocate memory for packet reformat object"); 887 rte_errno = ENOMEM; 888 goto out_free_in; 889 } 890 891 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, insz, out, sizeof(out)); 892 if (!devx_obj->obj) { 893 DR_LOG(ERR, "Failed to create 
packet reformat");
		rte_errno = errno;
		goto out_free_devx;
	}

	devx_obj->id = MLX5_GET(alloc_packet_reformat_out, out, packet_reformat_id);

	simple_free(in);

	return devx_obj;

out_free_devx:
	simple_free(devx_obj);
out_free_in:
	simple_free(in);
	return NULL;
}

/* Move an SQ from RST to RDY state so posted WQEs can be processed.
 * On failure logs the FW syndrome, sets rte_errno and returns non-zero.
 */
int mlx5dr_cmd_sq_modify_rdy(struct mlx5dr_devx_obj *devx_obj)
{
	uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
	void *sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	int ret;

	MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
	MLX5_SET(modify_sq_in, in, sqn, devx_obj->id);
	/* sq_state is the current state, the SQ context carries the target state */
	MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);

	ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to modify SQ (syndrome: %#x)",
		       mlx5dr_cmd_get_syndrome(out));
		rte_errno = errno;
	}

	return ret;
}

/* Allow another VHCA to access a local object (given by type + id) using
 * the supplied access key. Must be done before the remote side can create
 * an alias object pointing at it. Returns 0 on success, sets rte_errno
 * and returns it on failure.
 */
int mlx5dr_cmd_allow_other_vhca_access(struct ibv_context *ctx,
				       struct mlx5dr_cmd_allow_other_vhca_access_attr *attr)
{
	uint32_t out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {0};
	void *key;
	int ret;

	MLX5_SET(allow_other_vhca_access_in,
		 in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
	MLX5_SET(allow_other_vhca_access_in,
		 in, object_type_to_be_accessed, attr->obj_type);
	MLX5_SET(allow_other_vhca_access_in,
		 in, object_id_to_be_accessed, attr->obj_id);

	key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
	memcpy(key, attr->access_key, sizeof(attr->access_key));

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to execute ALLOW_OTHER_VHCA_ACCESS command");
		rte_errno = errno;
		return rte_errno;
	}

	return 0;
}

/* Create an ALIAS general object referring to an object owned by another
 * VHCA; the access key must match the one registered on the owning side
 * via mlx5dr_cmd_allow_other_vhca_access(). Returns the new devx object
 * (caller frees with mlx5dr_cmd_destroy_obj()) or NULL with rte_errno set.
 */
struct mlx5dr_devx_obj *
mlx5dr_cmd_alias_obj_create(struct ibv_context *ctx,
			    struct mlx5dr_cmd_alias_obj_create_attr *alias_attr)
{
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {0};
	struct mlx5dr_devx_obj *devx_obj;
	void *attr;
	void *key;

	devx_obj = simple_malloc(sizeof(*devx_obj));
	if (!devx_obj) {
		DR_LOG(ERR, "Failed to allocate memory for ALIAS general object");
		rte_errno = ENOMEM;
		return NULL;
	}

	attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr,
		 attr, obj_type, alias_attr->obj_type);
	MLX5_SET(general_obj_in_cmd_hdr, attr, alias_object, 1);

	attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
	MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
	MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);

	key = MLX5_ADDR_OF(alias_context, attr, access_key);
	memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));

	devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
	if (!devx_obj->obj) {
		DR_LOG(ERR, "Failed to create ALIAS OBJ (syndrome: %#x)",
		       mlx5dr_cmd_get_syndrome(out));
		simple_free(devx_obj);
		rte_errno = errno;
		return NULL;
	}

	devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return devx_obj;
}

/* Execute a GTA WQE through the GENERATE_WQE FW command and return the
 * resulting CQE to the caller. Returns 0 on success; on failure (command
 * error or bad CQE status) sets rte_errno and returns it.
 */
int mlx5dr_cmd_generate_wqe(struct ibv_context *ctx,
			    struct mlx5dr_cmd_generate_wqe_attr *attr,
			    struct mlx5_cqe64 *ret_cqe)
{
	uint32_t out[MLX5_ST_SZ_DW(generate_wqe_out)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(generate_wqe_in)] = {0};
	uint8_t status;
	void *ptr;
	int ret;

	MLX5_SET(generate_wqe_in, in, opcode, MLX5_CMD_OP_GENERATE_WQE);
MLX5_SET(generate_wqe_in, in, pdn, attr->pdn);

	/* Copy the control and GTA segments into the command input */
	ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_ctrl);
	memcpy(ptr, attr->wqe_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_ctrl));

	ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_ctrl);
	memcpy(ptr, attr->gta_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_ctrl));

	ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_0);
	memcpy(ptr, attr->gta_data_0, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_0));

	/* Second GTA data segment is optional */
	if (attr->gta_data_1) {
		ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_1);
		memcpy(ptr, attr->gta_data_1, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_1));
	}

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to write GTA WQE using FW");
		rte_errno = errno;
		return rte_errno;
	}

	status = MLX5_GET(generate_wqe_out, out, status);
	if (status) {
		DR_LOG(ERR, "Invalid FW CQE status %d", status);
		rte_errno = EINVAL;
		return rte_errno;
	}

	/* Hand the FW-generated CQE back to the caller */
	ptr = MLX5_ADDR_OF(generate_wqe_out, out, cqe_data);
	memcpy(ret_cqe, ptr, sizeof(*ret_cqe));

	return 0;
}

/* Query all HCA capabilities needed by HWS into @caps: general device
 * caps (cap and cap2), NIC flow table caps, and — when supported —
 * WQE-based flow table, eswitch and RoCE caps, plus the FW version and
 * wire port regc mask. Returns 0 on success; on failure sets rte_errno
 * and returns it.
 */
int mlx5dr_cmd_query_caps(struct ibv_context *ctx,
			  struct mlx5dr_cmd_query_caps *caps)
{
	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
	const struct flow_hw_port_info *port_info;
	struct ibv_device_attr_ex attr_ex;
	u32 res;
	int ret;

	/* General device caps (cmd_hca_cap) */
	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to query device caps");
		rte_errno = errno;
		return rte_errno;
	}

	caps->wqe_based_update =
		MLX5_GET(query_hca_cap_out, out,
			 capability.cmd_hca_cap.wqe_based_flow_table_update_cap);

	caps->eswitch_manager = MLX5_GET(query_hca_cap_out, out,
					 capability.cmd_hca_cap.eswitch_manager);

	caps->flex_protocols = MLX5_GET(query_hca_cap_out, out,
					capability.cmd_hca_cap.flex_parser_protocols);

	/* Reported granularity is adjusted by the FW-provided offset */
	caps->log_header_modify_argument_granularity =
		MLX5_GET(query_hca_cap_out, out,
			 capability.cmd_hca_cap.log_header_modify_argument_granularity);

	caps->log_header_modify_argument_granularity -=
		MLX5_GET(query_hca_cap_out, out,
			 capability.cmd_hca_cap.
			 log_header_modify_argument_granularity_offset);

	caps->log_header_modify_argument_max_alloc =
		MLX5_GET(query_hca_cap_out, out,
			 capability.cmd_hca_cap.log_header_modify_argument_max_alloc);

	caps->definer_format_sup =
		MLX5_GET64(query_hca_cap_out, out,
			   capability.cmd_hca_cap.match_definer_format_supported);

	caps->vhca_id = MLX5_GET(query_hca_cap_out, out,
				 capability.cmd_hca_cap.vhca_id);

	caps->sq_ts_format = MLX5_GET(query_hca_cap_out, out,
				      capability.cmd_hca_cap.sq_ts_format);

	caps->ipsec_offload = MLX5_GET(query_hca_cap_out, out,
				       capability.cmd_hca_cap.ipsec_offload);

	caps->roce = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.roce);

	/* General device caps, part 2 (cmd_hca_cap_2) */
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to query device caps");
		rte_errno = errno;
		return rte_errno;
	}

	caps->full_dw_jumbo_support = MLX5_GET(query_hca_cap_out, out,
					       capability.cmd_hca_cap_2.
					       format_select_dw_8_6_ext);

	caps->format_select_gtpu_dw_0 = MLX5_GET(query_hca_cap_out, out,
						 capability.cmd_hca_cap_2.
						 format_select_dw_gtpu_dw_0);

	caps->format_select_gtpu_dw_1 = MLX5_GET(query_hca_cap_out, out,
						 capability.cmd_hca_cap_2.
						 format_select_dw_gtpu_dw_1);

	caps->format_select_gtpu_dw_2 = MLX5_GET(query_hca_cap_out, out,
						 capability.cmd_hca_cap_2.
						 format_select_dw_gtpu_dw_2);

	caps->format_select_gtpu_ext_dw_0 = MLX5_GET(query_hca_cap_out, out,
						     capability.cmd_hca_cap_2.
						     format_select_dw_gtpu_first_ext_dw_0);

	caps->supp_type_gen_wqe = MLX5_GET(query_hca_cap_out, out,
					   capability.cmd_hca_cap_2.
					   generate_wqe_type);

	/* check cross-VHCA support in cap2: all STC/FT/RTC object-to-object
	 * links must be supported for cross-VHCA resources to be usable
	 */
	res =
	MLX5_GET(query_hca_cap_out, out,
		 capability.cmd_hca_cap_2.cross_vhca_object_to_object_supported);

	caps->cross_vhca_resources = (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_STC_TO_TIR) &&
				     (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_STC_TO_FT) &&
				     (res & MLX5_CROSS_VHCA_OBJ_TO_OBJ_TYPE_FT_TO_RTC);

	res =
	MLX5_GET(query_hca_cap_out, out,
		 capability.cmd_hca_cap_2.allowed_object_for_other_vhca_access);

	caps->cross_vhca_resources &= (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_TIR) &&
				      (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_FT) &&
				      (res & MLX5_CROSS_VHCA_ALLOWED_OBJS_RTC);

	caps->flow_table_hash_type = MLX5_GET(query_hca_cap_out, out,
					      capability.cmd_hca_cap_2.flow_table_hash_type);

	caps->encap_entropy_hash_type = MLX5_GET(query_hca_cap_out, out,
						 capability.cmd_hca_cap_2.encap_entropy_hash_type);

	/* NIC flow table caps */
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	if (ret) {
		DR_LOG(ERR, "Failed to query flow table caps");
		rte_errno = errno;
		return rte_errno;
	}

	caps->nic_ft.max_level = MLX5_GET(query_hca_cap_out, out,
					  capability.flow_table_nic_cap.
					  flow_table_properties_nic_receive.max_ft_level);

	caps->nic_ft.reparse = MLX5_GET(query_hca_cap_out, out,
					capability.flow_table_nic_cap.
					flow_table_properties_nic_receive.reparse);

	caps->nic_ft.ignore_flow_level_rtc_valid =
		MLX5_GET(query_hca_cap_out,
			 out,
			 capability.flow_table_nic_cap.
			 flow_table_properties_nic_receive.ignore_flow_level_rtc_valid);

	/* check cross-VHCA support in flow table properties */
	res =
	MLX5_GET(query_hca_cap_out, out,
		 capability.flow_table_nic_cap.flow_table_properties_nic_receive.cross_vhca_object);
	caps->cross_vhca_resources &= res;

	/* WQE-based flow table caps — only valid when the update cap is set */
	if (caps->wqe_based_update) {
		MLX5_SET(query_hca_cap_in, in, op_mod,
			 MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE |
			 MLX5_HCA_CAP_OPMOD_GET_CUR);

		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
		if (ret) {
			DR_LOG(ERR, "Failed to query WQE based FT caps");
			rte_errno = errno;
			return rte_errno;
		}

		caps->rtc_reparse_mode = MLX5_GET(query_hca_cap_out, out,
						  capability.wqe_based_flow_table_cap.
						  rtc_reparse_mode);

		caps->ste_format = MLX5_GET(query_hca_cap_out, out,
					    capability.wqe_based_flow_table_cap.
					    ste_format);

		caps->rtc_index_mode = MLX5_GET(query_hca_cap_out, out,
						capability.wqe_based_flow_table_cap.
						rtc_index_mode);

		caps->rtc_log_depth_max = MLX5_GET(query_hca_cap_out, out,
						   capability.wqe_based_flow_table_cap.
						   rtc_log_depth_max);

		caps->ste_alloc_log_max = MLX5_GET(query_hca_cap_out, out,
						   capability.wqe_based_flow_table_cap.
						   ste_alloc_log_max);

		caps->ste_alloc_log_gran = MLX5_GET(query_hca_cap_out, out,
						    capability.wqe_based_flow_table_cap.
						    ste_alloc_log_granularity);

		caps->trivial_match_definer = MLX5_GET(query_hca_cap_out, out,
						       capability.wqe_based_flow_table_cap.
						       trivial_match_definer);

		caps->stc_alloc_log_max = MLX5_GET(query_hca_cap_out, out,
						   capability.wqe_based_flow_table_cap.
						   stc_alloc_log_max);

		caps->stc_alloc_log_gran = MLX5_GET(query_hca_cap_out, out,
						    capability.wqe_based_flow_table_cap.
						    stc_alloc_log_granularity);

		caps->rtc_hash_split_table = MLX5_GET(query_hca_cap_out, out,
						      capability.wqe_based_flow_table_cap.
						      rtc_hash_split_table);

		caps->rtc_linear_lookup_table = MLX5_GET(query_hca_cap_out, out,
							 capability.wqe_based_flow_table_cap.
							 rtc_linear_lookup_table);

		caps->access_index_mode = MLX5_GET(query_hca_cap_out, out,
						   capability.wqe_based_flow_table_cap.
						   access_index_mode);

		caps->linear_match_definer = MLX5_GET(query_hca_cap_out, out,
						      capability.wqe_based_flow_table_cap.
						      linear_match_definer_reg_c3);

		caps->rtc_max_hash_def_gen_wqe = MLX5_GET(query_hca_cap_out, out,
							  capability.wqe_based_flow_table_cap.
							  rtc_max_num_hash_definer_gen_wqe);

		caps->supp_ste_format_gen_wqe = MLX5_GET(query_hca_cap_out, out,
							 capability.wqe_based_flow_table_cap.
							 ste_format_gen_wqe);

		caps->fdb_tir_stc = MLX5_GET(query_hca_cap_out, out,
					     capability.wqe_based_flow_table_cap.
					     fdb_jump_to_tir_stc);
	}

	/* Eswitch FDB + eswitch caps — only when this function is esw manager */
	if (caps->eswitch_manager) {
		MLX5_SET(query_hca_cap_in, in, op_mod,
			 MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE |
			 MLX5_HCA_CAP_OPMOD_GET_CUR);

		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
		if (ret) {
			DR_LOG(ERR, "Failed to query flow table esw caps");
			rte_errno = errno;
			return rte_errno;
		}

		/* The esw FT properties share the NIC FT properties layout,
		 * so the same accessor path is reused for the FDB fields.
		 */
		caps->fdb_ft.max_level = MLX5_GET(query_hca_cap_out, out,
						  capability.flow_table_nic_cap.
						  flow_table_properties_nic_receive.max_ft_level);

		caps->fdb_ft.reparse = MLX5_GET(query_hca_cap_out, out,
						capability.flow_table_nic_cap.
						flow_table_properties_nic_receive.reparse);

		MLX5_SET(query_hca_cap_in, in, op_mod,
			 MLX5_SET_HCA_CAP_OP_MOD_ESW | MLX5_HCA_CAP_OPMOD_GET_CUR);

		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
		if (ret) {
			DR_LOG(ERR, "Query eswitch capabilities failed %d", ret);
			rte_errno = errno;
			return rte_errno;
		}

		if (MLX5_GET(query_hca_cap_out, out,
			     capability.esw_cap.esw_manager_vport_number_valid))
			caps->eswitch_manager_vport_number =
				MLX5_GET(query_hca_cap_out, out,
					 capability.esw_cap.esw_manager_vport_number);

		caps->merged_eswitch = MLX5_GET(query_hca_cap_out, out,
						capability.esw_cap.merged_eswitch);
	}

	/* RoCE caps — only when RoCE is supported */
	if (caps->roce) {
		MLX5_SET(query_hca_cap_in, in, op_mod,
			 MLX5_GET_HCA_CAP_OP_MOD_ROCE |
			 MLX5_HCA_CAP_OPMOD_GET_CUR);

		ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
		if (ret) {
			DR_LOG(ERR, "Failed to query roce caps");
			rte_errno = errno;
			return rte_errno;
		}

		caps->roce_max_src_udp_port = MLX5_GET(query_hca_cap_out, out,
						       capability.roce_caps.r_roce_max_src_udp_port);
		caps->roce_min_src_udp_port = MLX5_GET(query_hca_cap_out, out,
						       capability.roce_caps.r_roce_min_src_udp_port);
	}

	ret = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
	if (ret) {
		DR_LOG(ERR, "Failed to query device attributes");
		/* query_device_ex returns an errno value, not -1 */
		rte_errno = ret;
		return rte_errno;
	}

	strlcpy(caps->fw_ver, attr_ex.orig_attr.fw_ver, sizeof(caps->fw_ver));

	/* Wire port regc mask is best-effort: missing info is not fatal */
	port_info = flow_hw_get_wire_port(ctx);
	if (port_info)
		caps->wire_regc_mask = port_info->regc_mask;
	else
		DR_LOG(INFO, "Failed to query wire port regc value");

	return ret;
}

/* Query the vport number and esw owner VHCA id of an IB port into
 * @vport_caps. Returns 0 on success; ENOTSUP (via rte_errno) when the
 * query fails or the vport information is not available.
 */
int mlx5dr_cmd_query_ib_port(struct ibv_context *ctx,
			     struct mlx5dr_cmd_query_vport_caps *vport_caps,
			     uint32_t port_num)
{
	struct mlx5_port_info port_info =
{0};
	uint32_t flags;
	int ret;

	/* Both vport id and esw owner VHCA id must be reported as valid */
	flags = MLX5_PORT_QUERY_VPORT | MLX5_PORT_QUERY_ESW_OWNER_VHCA_ID;

	ret = mlx5_glue->devx_port_query(ctx, port_num, &port_info);
	/* Check if the query succeeded and the vport is enabled */
	if (ret || (port_info.query_flags & flags) != flags) {
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	vport_caps->vport_num = port_info.vport_id;
	vport_caps->esw_owner_vhca_id = port_info.esw_owner_vhca_id;

	return 0;
}