1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2019 Mellanox Technologies, Ltd 3 */ 4 5 #ifndef RTE_PMD_MLX5_DEVX_CMDS_H_ 6 #define RTE_PMD_MLX5_DEVX_CMDS_H_ 7 8 #include <rte_compat.h> 9 #include <rte_bitops.h> 10 11 #include "mlx5_glue.h" 12 #include "mlx5_prm.h" 13 14 /* This is limitation of libibverbs: in length variable type is u16. */ 15 #define MLX5_DEVX_MAX_KLM_ENTRIES ((UINT16_MAX - \ 16 MLX5_ST_SZ_DW(create_mkey_in) * 4) / (MLX5_ST_SZ_DW(klm) * 4)) 17 18 struct mlx5_devx_counter_attr { 19 uint32_t pd_valid:1; 20 uint32_t pd:24; 21 uint32_t bulk_log_max_alloc:1; 22 union { 23 uint8_t flow_counter_bulk_log_size; 24 uint8_t bulk_n_128; 25 }; 26 }; 27 28 struct mlx5_devx_mkey_attr { 29 uint64_t addr; 30 uint64_t size; 31 uint32_t umem_id; 32 uint32_t pd; 33 uint32_t log_entity_size; 34 uint32_t pg_access:1; 35 uint32_t relaxed_ordering_write:1; 36 uint32_t relaxed_ordering_read:1; 37 uint32_t umr_en:1; 38 uint32_t crypto_en:2; 39 uint32_t set_remote_rw:1; 40 struct mlx5_klm *klm_array; 41 int klm_num; 42 }; 43 44 /* HCA qos attributes. */ 45 struct mlx5_hca_qos_attr { 46 uint32_t sup:1; /* Whether QOS is supported. */ 47 uint32_t flow_meter_old:1; /* Flow meter is supported, old version. */ 48 uint32_t packet_pacing:1; /* Packet pacing is supported. */ 49 uint32_t wqe_rate_pp:1; /* Packet pacing WQE rate mode. */ 50 uint32_t flow_meter:1; 51 /* 52 * Flow meter is supported, updated version. 53 * When flow_meter is 1, it indicates that REG_C sharing is supported. 54 * If flow_meter is 1, flow_meter_old is also 1. 55 * Using older driver versions, flow_meter_old can be 1 56 * while flow_meter is 0. 57 */ 58 uint32_t flow_meter_aso_sup:1; 59 /* Whether FLOW_METER_ASO Object is supported. */ 60 uint8_t log_max_flow_meter; 61 /* Power of the maximum supported meters. */ 62 uint8_t flow_meter_reg_c_ids; 63 /* Bitmap of the reg_Cs available for flow meter to use. */ 64 uint32_t log_meter_aso_granularity:5; 65 /* Power of the minimum allocation granularity Object. */ 66 uint32_t log_meter_aso_max_alloc:5; 67 /* Power of the maximum allocation granularity Object. */ 68 uint32_t log_max_num_meter_aso:5; 69 /* Power of the maximum number of supported objects. */ 70 71 }; 72 73 struct mlx5_hca_vdpa_attr { 74 uint8_t virtio_queue_type; 75 uint32_t valid:1; 76 uint32_t desc_tunnel_offload_type:1; 77 uint32_t eth_frame_offload_type:1; 78 uint32_t virtio_version_1_0:1; 79 uint32_t tso_ipv4:1; 80 uint32_t tso_ipv6:1; 81 uint32_t tx_csum:1; 82 uint32_t rx_csum:1; 83 uint32_t event_mode:3; 84 uint32_t log_doorbell_stride:5; 85 uint32_t log_doorbell_bar_size:5; 86 uint32_t queue_counters_valid:1; 87 uint32_t vnet_modify_ext:1; 88 uint32_t virtio_net_q_addr_modify:1; 89 uint32_t virtio_q_index_modify:1; 90 uint32_t max_num_virtio_queues; 91 struct { 92 uint32_t a; 93 uint32_t b; 94 } umems[3]; 95 uint64_t doorbell_bar_offset; 96 }; 97 98 struct mlx5_hca_flow_attr { 99 uint32_t tunnel_header_0_1; 100 uint32_t tunnel_header_2_3; 101 }; 102 103 /** 104 * Accumulate port PARSE_GRAPH_NODE capabilities from 105 * PARSE_GRAPH_NODE Capabilities and HCA Capabilities 2 tables 106 */ 107 __extension__ 108 struct mlx5_hca_flex_attr { 109 uint32_t node_in; 110 uint32_t node_out; 111 uint16_t header_length_mode; 112 uint16_t sample_offset_mode; 113 uint8_t max_num_arc_in; 114 uint8_t max_num_arc_out; 115 uint8_t max_num_sample; 116 uint8_t max_num_prog_sample:5; /* From HCA CAP 2 */ 117 uint8_t anchor_en:1; 118 uint8_t ext_sample_id:1; 119 uint8_t sample_tunnel_inner2:1; 120 uint8_t zero_size_supported:1; 121 uint8_t sample_id_in_out:1; 122 uint16_t max_base_header_length; 123 uint8_t max_sample_base_offset; 124 uint16_t max_next_header_offset; 125 uint8_t header_length_mask_width; 126 }; 127 128 /* ISO C restricts enumerator values to range of 'int' */ 129 __extension__ 130 enum { 131 PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_HEAD = RTE_BIT32(1), 132 PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_MAC = RTE_BIT32(2), 133 PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_IP = RTE_BIT32(3), 134 PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_GRE = RTE_BIT32(4), 135 PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_UDP = RTE_BIT32(5), 136 PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_MPLS = RTE_BIT32(6), 137 PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_TCP = RTE_BIT32(7), 138 PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_VXLAN_GRE = RTE_BIT32(8), 139 PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_GENEVE = RTE_BIT32(9), 140 PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_IPSEC_ESP = RTE_BIT32(10), 141 PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_IPV4 = RTE_BIT32(11), 142 PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_IPV6 = RTE_BIT32(12), 143 PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_PROGRAMMABLE = RTE_BIT32(31) 144 }; 145 146 enum { 147 PARSE_GRAPH_NODE_CAP_LENGTH_MODE_FIXED = RTE_BIT32(0), 148 PARSE_GRAPH_NODE_CAP_LENGTH_MODE_EXPLISIT_FIELD = RTE_BIT32(1), 149 PARSE_GRAPH_NODE_CAP_LENGTH_MODE_BITMASK_FIELD = RTE_BIT32(2) 150 }; 151 152 /* 153 * DWORD shift is the base for calculating header_length_field_mask 154 * value in the MLX5_GRAPH_NODE_LEN_FIELD mode. 155 */ 156 #define MLX5_PARSE_GRAPH_NODE_HDR_LEN_SHIFT_DWORD 0x02 157 158 static inline uint32_t 159 mlx5_hca_parse_graph_node_base_hdr_len_mask 160 (const struct mlx5_hca_flex_attr *attr) 161 { 162 return (1 << attr->header_length_mask_width) - 1; 163 } 164 165 /* HCA supports this number of time periods for LRO. */ 166 #define MLX5_LRO_NUM_SUPP_PERIODS 4 167 168 /* HCA attributes. */ 169 struct mlx5_hca_attr { 170 uint32_t eswitch_manager:1; 171 uint32_t flow_counters_dump:1; 172 uint32_t mem_rq_rmp:1; 173 uint32_t log_max_rmp:5; 174 uint32_t log_max_rqt_size:5; 175 uint32_t parse_graph_flex_node:1; 176 uint8_t flow_counter_bulk_alloc_bitmap; 177 uint32_t eth_net_offloads:1; 178 uint32_t eth_virt:1; 179 uint32_t wqe_vlan_insert:1; 180 uint32_t csum_cap:1; 181 uint32_t vlan_cap:1; 182 uint32_t wqe_inline_mode:2; 183 uint32_t vport_inline_mode:3; 184 uint32_t tunnel_stateless_geneve_rx:1; 185 uint32_t geneve_max_opt_len:1; /* 0x0: 14DW, 0x1: 63DW */ 186 uint32_t tunnel_stateless_gtp:1; 187 uint32_t max_lso_cap; 188 uint32_t scatter_fcs:1; 189 uint32_t lro_cap:1; 190 uint32_t tunnel_lro_gre:1; 191 uint32_t tunnel_lro_vxlan:1; 192 uint32_t tunnel_stateless_gre:1; 193 uint32_t tunnel_stateless_vxlan:1; 194 uint32_t swp:1; 195 uint32_t swp_csum:1; 196 uint32_t swp_lso:1; 197 uint32_t lro_max_msg_sz_mode:2; 198 uint32_t rq_delay_drop:1; 199 uint32_t lro_timer_supported_periods[MLX5_LRO_NUM_SUPP_PERIODS]; 200 uint16_t lro_min_mss_size; 201 uint32_t flex_parser_protocols; 202 uint32_t max_geneve_tlv_options; 203 uint32_t max_geneve_tlv_option_data_len; 204 uint32_t hairpin:1; 205 uint32_t log_max_hairpin_queues:5; 206 uint32_t log_max_hairpin_wq_data_sz:5; 207 uint32_t log_max_hairpin_num_packets:5; 208 uint32_t hairpin_sq_wqe_bb_size:4; 209 uint32_t hairpin_sq_wq_in_host_mem:1; 210 uint32_t hairpin_data_buffer_locked:1; 211 uint32_t vhca_id:16; 212 uint32_t relaxed_ordering_write:1; 213 uint32_t relaxed_ordering_read:1; 214 uint32_t access_register_user:1; 215 uint32_t wqe_index_ignore:1; 216 uint32_t cross_channel:1; 217 uint32_t non_wire_sq:1; /* SQ with non-wire ops is supported. */ 218 uint32_t log_max_static_sq_wq:5; /* Static WQE size SQ. */ 219 uint32_t num_lag_ports:4; /* Number of ports can be bonded. */ 220 uint32_t dev_freq_khz; /* Timestamp counter frequency, kHz. */ 221 uint32_t scatter_fcs_w_decap_disable:1; 222 uint32_t flow_hit_aso:1; /* General obj type FLOW_HIT_ASO supported. */ 223 uint32_t roce:1; 224 uint32_t wait_on_time:1; 225 uint32_t rq_ts_format:2; 226 uint32_t sq_ts_format:2; 227 uint32_t steering_format_version:4; 228 uint32_t qp_ts_format:2; 229 uint32_t regexp_params:1; 230 uint32_t regexp_version:3; 231 uint32_t reg_c_preserve:1; 232 uint32_t ct_offload:1; /* General obj type ASO CT offload supported. */ 233 uint32_t crypto:1; /* Crypto engine is supported. */ 234 uint32_t aes_xts:1; /* AES-XTS crypto is supported. */ 235 uint32_t dek:1; /* General obj type DEK is supported. */ 236 uint32_t import_kek:1; /* General obj type IMPORT_KEK supported. */ 237 uint32_t credential:1; /* General obj type CREDENTIAL supported. */ 238 uint32_t crypto_login:1; /* General obj type CRYPTO_LOGIN supported. */ 239 uint32_t regexp_num_of_engines; 240 uint32_t log_max_ft_sampler_num:8; 241 uint32_t inner_ipv4_ihl:1; 242 uint32_t outer_ipv4_ihl:1; 243 uint32_t geneve_tlv_opt; 244 uint32_t cqe_compression:1; 245 uint32_t mini_cqe_resp_flow_tag:1; 246 uint32_t mini_cqe_resp_l3_l4_tag:1; 247 uint32_t enhanced_cqe_compression:1; 248 uint32_t pkt_integrity_match:1; /* 1 if HW supports integrity item */ 249 struct mlx5_hca_qos_attr qos; 250 struct mlx5_hca_vdpa_attr vdpa; 251 struct mlx5_hca_flow_attr flow; 252 struct mlx5_hca_flex_attr flex; 253 int log_max_qp_sz; 254 int log_max_cq_sz; 255 int log_max_qp; 256 int log_max_cq; 257 uint32_t log_max_pd; 258 uint32_t log_max_mrw_sz; 259 uint32_t log_max_srq; 260 uint32_t log_max_srq_sz; 261 uint32_t rss_ind_tbl_cap; 262 uint32_t mmo_dma_sq_en:1; 263 uint32_t mmo_compress_sq_en:1; 264 uint32_t mmo_decompress_sq_en:1; 265 uint32_t mmo_dma_qp_en:1; 266 uint32_t mmo_compress_qp_en:1; 267 uint32_t decomp_deflate_v1_en:1; 268 uint32_t decomp_deflate_v2_en:1; 269 uint32_t mmo_regex_qp_en:1; 270 uint32_t mmo_regex_sq_en:1; 271 uint32_t compress_min_block_size:4; 272 uint32_t log_max_mmo_dma:5; 273 uint32_t log_max_mmo_compress:5; 274 uint32_t log_max_mmo_decompress:5; 275 uint32_t decomp_lz4_data_only_en:1; 276 uint32_t decomp_lz4_no_checksum_en:1; 277 uint32_t decomp_lz4_checksum_en:1; 278 uint32_t umr_modify_entity_size_disabled:1; 279 uint32_t umr_indirect_mkey_disabled:1; 280 uint32_t log_min_stride_wqe_sz:5; 281 uint32_t esw_mgr_vport_id_valid:1; /* E-Switch Mgr vport ID is valid. */ 282 uint32_t crypto_wrapped_import_method:1; 283 uint16_t esw_mgr_vport_id; /* E-Switch Mgr vport ID . */ 284 uint16_t max_wqe_sz_sq; 285 uint32_t set_reg_c:8; 286 uint32_t nic_flow_table:1; 287 uint32_t modify_outer_ip_ecn:1; 288 union { 289 uint32_t max_flow_counter; 290 struct { 291 uint16_t max_flow_counter_15_0; 292 uint16_t max_flow_counter_31_16; 293 }; 294 }; 295 uint32_t flow_counter_bulk_log_max_alloc:5; 296 uint32_t flow_counter_bulk_log_granularity:5; 297 uint32_t alloc_flow_counter_pd:1; 298 uint32_t flow_counter_access_aso:1; 299 uint32_t flow_access_aso_opc_mod:8; 300 uint32_t cross_vhca:1; 301 uint32_t lag_rx_port_affinity:1; 302 }; 303 304 /* LAG Context. */ 305 struct mlx5_devx_lag_context { 306 uint32_t fdb_selection_mode:1; 307 uint32_t port_select_mode:3; 308 uint32_t lag_state:3; 309 uint32_t tx_remap_affinity_1:4; 310 uint32_t tx_remap_affinity_2:4; 311 }; 312 313 struct mlx5_devx_wq_attr { 314 uint32_t wq_type:4; 315 uint32_t wq_signature:1; 316 uint32_t end_padding_mode:2; 317 uint32_t cd_slave:1; 318 uint32_t hds_skip_first_sge:1; 319 uint32_t log2_hds_buf_size:3; 320 uint32_t page_offset:5; 321 uint32_t lwm:16; 322 uint32_t pd:24; 323 uint32_t uar_page:24; 324 uint64_t dbr_addr; 325 uint32_t hw_counter; 326 uint32_t sw_counter; 327 uint32_t log_wq_stride:4; 328 uint32_t log_wq_pg_sz:5; 329 uint32_t log_wq_sz:5; 330 uint32_t dbr_umem_valid:1; 331 uint32_t wq_umem_valid:1; 332 uint32_t log_hairpin_num_packets:5; 333 uint32_t log_hairpin_data_sz:5; 334 uint32_t single_wqe_log_num_of_strides:4; 335 uint32_t two_byte_shift_en:1; 336 uint32_t single_stride_log_num_of_bytes:3; 337 uint32_t dbr_umem_id; 338 uint32_t wq_umem_id; 339 uint64_t wq_umem_offset; 340 }; 341 342 /* Create RQ attributes structure, used by create RQ operation. */ 343 struct mlx5_devx_create_rq_attr { 344 uint32_t rlky:1; 345 uint32_t delay_drop_en:1; 346 uint32_t scatter_fcs:1; 347 uint32_t vsd:1; 348 uint32_t mem_rq_type:4; 349 uint32_t state:4; 350 uint32_t flush_in_error_en:1; 351 uint32_t hairpin:1; 352 uint32_t hairpin_data_buffer_type:3; 353 uint32_t ts_format:2; 354 uint32_t user_index:24; 355 uint32_t cqn:24; 356 uint32_t counter_set_id:8; 357 uint32_t rmpn:24; 358 struct mlx5_devx_wq_attr wq_attr; 359 }; 360 361 /* Modify RQ attributes structure, used by modify RQ operation. */ 362 struct mlx5_devx_modify_rq_attr { 363 uint32_t rqn:24; 364 uint32_t rq_state:4; /* Current RQ state. */ 365 uint32_t state:4; /* Required RQ state. */ 366 uint32_t scatter_fcs:1; 367 uint32_t vsd:1; 368 uint32_t counter_set_id:8; 369 uint32_t hairpin_peer_sq:24; 370 uint32_t hairpin_peer_vhca:16; 371 uint64_t modify_bitmask; 372 uint32_t lwm:16; /* Contained WQ lwm. */ 373 }; 374 375 /* Create RMP attributes structure, used by create RMP operation. */ 376 struct mlx5_devx_create_rmp_attr { 377 uint32_t rsvd0:8; 378 uint32_t state:4; 379 uint32_t rsvd1:20; 380 uint32_t basic_cyclic_rcv_wqe:1; 381 uint32_t rsvd4:31; 382 uint32_t rsvd8[10]; 383 struct mlx5_devx_wq_attr wq_attr; 384 }; 385 386 struct mlx5_rx_hash_field_select { 387 uint32_t l3_prot_type:1; 388 uint32_t l4_prot_type:1; 389 uint32_t selected_fields:30; 390 }; 391 392 /* TIR attributes structure, used by TIR operations. */ 393 struct mlx5_devx_tir_attr { 394 uint32_t disp_type:4; 395 uint32_t lro_timeout_period_usecs:16; 396 uint32_t lro_enable_mask:4; 397 uint32_t lro_max_msg_sz:8; 398 uint32_t inline_rqn:24; 399 uint32_t rx_hash_symmetric:1; 400 uint32_t tunneled_offload_en:1; 401 uint32_t indirect_table:24; 402 uint32_t rx_hash_fn:4; 403 uint32_t self_lb_block:2; 404 uint32_t transport_domain:24; 405 uint8_t rx_hash_toeplitz_key[MLX5_RSS_HASH_KEY_LEN]; 406 struct mlx5_rx_hash_field_select rx_hash_field_selector_outer; 407 struct mlx5_rx_hash_field_select rx_hash_field_selector_inner; 408 }; 409 410 /* TIR attributes structure, used by TIR modify. */ 411 struct mlx5_devx_modify_tir_attr { 412 uint32_t tirn:24; 413 uint64_t modify_bitmask; 414 struct mlx5_devx_tir_attr tir; 415 }; 416 417 /* RQT attributes structure, used by RQT operations. */ 418 struct mlx5_devx_rqt_attr { 419 uint8_t rq_type; 420 uint32_t rqt_max_size:16; 421 uint32_t rqt_actual_size:16; 422 uint32_t rq_list[]; 423 }; 424 425 /* TIS attributes structure. */ 426 struct mlx5_devx_tis_attr { 427 uint32_t strict_lag_tx_port_affinity:1; 428 uint32_t tls_en:1; 429 uint32_t lag_tx_port_affinity:4; 430 uint32_t prio:4; 431 uint32_t transport_domain:24; 432 }; 433 434 /* SQ attributes structure, used by SQ create operation. */ 435 struct mlx5_devx_create_sq_attr { 436 uint32_t rlky:1; 437 uint32_t cd_master:1; 438 uint32_t fre:1; 439 uint32_t flush_in_error_en:1; 440 uint32_t allow_multi_pkt_send_wqe:1; 441 uint32_t min_wqe_inline_mode:3; 442 uint32_t state:4; 443 uint32_t reg_umr:1; 444 uint32_t allow_swp:1; 445 uint32_t hairpin:1; 446 uint32_t non_wire:1; 447 uint32_t static_sq_wq:1; 448 uint32_t ts_format:2; 449 uint32_t hairpin_wq_buffer_type:3; 450 uint32_t user_index:24; 451 uint32_t cqn:24; 452 uint32_t packet_pacing_rate_limit_index:16; 453 uint32_t tis_lst_sz:16; 454 uint32_t tis_num:24; 455 struct mlx5_devx_wq_attr wq_attr; 456 }; 457 458 /* SQ attributes structure, used by SQ modify operation. */ 459 struct mlx5_devx_modify_sq_attr { 460 uint32_t sq_state:4; 461 uint32_t state:4; 462 uint32_t hairpin_peer_rq:24; 463 uint32_t hairpin_peer_vhca:16; 464 }; 465 466 467 /* CQ attributes structure, used by CQ operations. */ 468 struct mlx5_devx_cq_attr { 469 uint32_t q_umem_valid:1; 470 uint32_t db_umem_valid:1; 471 uint32_t use_first_only:1; 472 uint32_t overrun_ignore:1; 473 uint32_t cqe_comp_en:1; 474 uint32_t mini_cqe_res_format:2; 475 uint32_t mini_cqe_res_format_ext:2; 476 uint32_t cqe_comp_layout:2; 477 uint32_t log_cq_size:5; 478 uint32_t log_page_size:5; 479 uint32_t uar_page_id; 480 uint32_t q_umem_id; 481 uint64_t q_umem_offset; 482 uint32_t db_umem_id; 483 uint64_t db_umem_offset; 484 uint32_t eqn; 485 uint64_t db_addr; 486 }; 487 488 /* Virtq attributes structure, used by VIRTQ operations. */ 489 struct mlx5_devx_virtq_attr { 490 uint16_t hw_available_index; 491 uint16_t hw_used_index; 492 uint16_t q_size; 493 uint32_t pd:24; 494 uint32_t virtio_version_1_0:1; 495 uint32_t tso_ipv4:1; 496 uint32_t tso_ipv6:1; 497 uint32_t tx_csum:1; 498 uint32_t rx_csum:1; 499 uint32_t event_mode:3; 500 uint32_t state:4; 501 uint32_t hw_latency_mode:2; 502 uint32_t hw_max_latency_us:12; 503 uint32_t hw_max_pending_comp:16; 504 uint32_t dirty_bitmap_dump_enable:1; 505 uint32_t dirty_bitmap_mkey; 506 uint32_t dirty_bitmap_size; 507 uint32_t mkey; 508 uint32_t qp_id; 509 uint32_t queue_index; 510 uint32_t tis_id; 511 uint32_t counters_obj_id; 512 uint64_t dirty_bitmap_addr; 513 uint64_t mod_fields_bitmap; 514 uint64_t desc_addr; 515 uint64_t used_addr; 516 uint64_t available_addr; 517 struct { 518 uint32_t id; 519 uint32_t size; 520 uint64_t offset; 521 } umems[3]; 522 uint8_t error_type; 523 uint8_t q_type; 524 }; 525 526 527 struct mlx5_devx_qp_attr { 528 uint32_t pd:24; 529 uint32_t uar_index:24; 530 uint32_t cqn:24; 531 uint32_t log_page_size:5; 532 uint32_t num_of_receive_wqes:17; /* Must be power of 2. */ 533 uint32_t log_rq_stride:3; 534 uint32_t num_of_send_wqbbs:17; /* Must be power of 2. */ 535 uint32_t ts_format:2; 536 uint32_t dbr_umem_valid:1; 537 uint32_t dbr_umem_id; 538 uint64_t dbr_address; 539 uint32_t wq_umem_id; 540 uint64_t wq_umem_offset; 541 uint32_t user_index:24; 542 uint32_t mmo:1; 543 }; 544 545 struct mlx5_devx_virtio_q_couners_attr { 546 uint64_t received_desc; 547 uint64_t completed_desc; 548 uint32_t error_cqes; 549 uint32_t bad_desc_errors; 550 uint32_t exceed_max_chain; 551 uint32_t invalid_buffer; 552 }; 553 554 /* 555 * graph flow match sample attributes structure, 556 * used by flex parser operations. 557 */ 558 struct mlx5_devx_match_sample_attr { 559 uint32_t flow_match_sample_en:1; 560 uint32_t flow_match_sample_field_offset:16; 561 uint32_t flow_match_sample_offset_mode:4; 562 uint32_t flow_match_sample_field_offset_mask; 563 uint32_t flow_match_sample_field_offset_shift:4; 564 uint32_t flow_match_sample_field_base_offset:8; 565 uint32_t flow_match_sample_tunnel_mode:3; 566 uint32_t flow_match_sample_field_id; 567 }; 568 569 /* graph node arc attributes structure, used by flex parser operations. */ 570 struct mlx5_devx_graph_arc_attr { 571 uint32_t compare_condition_value:16; 572 uint32_t start_inner_tunnel:1; 573 uint32_t arc_parse_graph_node:8; 574 uint32_t parse_graph_node_handle; 575 }; 576 577 /* Maximal number of samples per graph node. */ 578 #define MLX5_GRAPH_NODE_SAMPLE_NUM 8 579 580 /* Maximal number of input/output arcs per graph node. */ 581 #define MLX5_GRAPH_NODE_ARC_NUM 8 582 583 /* parse graph node attributes structure, used by flex parser operations. */ 584 struct mlx5_devx_graph_node_attr { 585 uint32_t modify_field_select; 586 uint32_t header_length_mode:4; 587 uint32_t header_length_base_value:16; 588 uint32_t header_length_field_shift:4; 589 uint32_t header_length_field_offset:16; 590 uint32_t header_length_field_mask; 591 struct mlx5_devx_match_sample_attr sample[MLX5_GRAPH_NODE_SAMPLE_NUM]; 592 uint32_t next_header_field_offset:16; 593 uint32_t next_header_field_size:5; 594 struct mlx5_devx_graph_arc_attr in[MLX5_GRAPH_NODE_ARC_NUM]; 595 struct mlx5_devx_graph_arc_attr out[MLX5_GRAPH_NODE_ARC_NUM]; 596 }; 597 598 /* Encryption key size is up to 1024 bit, 128 bytes. */ 599 #define MLX5_CRYPTO_KEY_MAX_SIZE 128 600 601 struct mlx5_devx_dek_attr { 602 uint32_t key_size:4; 603 uint32_t has_keytag:1; 604 uint32_t key_purpose:4; 605 uint32_t pd:24; 606 uint64_t opaque; 607 uint8_t key[MLX5_CRYPTO_KEY_MAX_SIZE]; 608 }; 609 610 struct mlx5_devx_import_kek_attr { 611 uint64_t modify_field_select; 612 uint32_t state:8; 613 uint32_t key_size:4; 614 uint8_t key[MLX5_CRYPTO_KEY_MAX_SIZE]; 615 }; 616 617 #define MLX5_CRYPTO_CREDENTIAL_SIZE 48 618 619 struct mlx5_devx_credential_attr { 620 uint64_t modify_field_select; 621 uint32_t state:8; 622 uint32_t credential_role:8; 623 uint8_t credential[MLX5_CRYPTO_CREDENTIAL_SIZE]; 624 }; 625 626 struct mlx5_devx_crypto_login_attr { 627 uint64_t modify_field_select; 628 uint32_t credential_pointer:24; 629 uint32_t session_import_kek_ptr:24; 630 uint8_t credential[MLX5_CRYPTO_CREDENTIAL_SIZE]; 631 }; 632 633 /* mlx5_devx_cmds.c */ 634 635 __rte_internal 636 struct mlx5_devx_obj * 637 mlx5_devx_cmd_flow_counter_alloc_general(void *ctx, 638 struct mlx5_devx_counter_attr *attr); 639 640 __rte_internal 641 struct mlx5_devx_obj *mlx5_devx_cmd_flow_counter_alloc(void *ctx, 642 uint32_t bulk_sz); 643 __rte_internal 644 int mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj); 645 __rte_internal 646 int mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs, 647 int clear, uint32_t n_counters, 648 uint64_t *pkts, uint64_t *bytes, 649 uint32_t mkey, void *addr, 650 void *cmd_comp, 651 uint64_t async_id); 652 __rte_internal 653 int mlx5_devx_cmd_query_hca_attr(void *ctx, 654 struct mlx5_hca_attr *attr); 655 __rte_internal 656 struct mlx5_devx_obj *mlx5_devx_cmd_mkey_create(void *ctx, 657 struct mlx5_devx_mkey_attr *attr); 658 __rte_internal 659 int mlx5_devx_get_out_command_status(void *out); 660 __rte_internal 661 int mlx5_devx_cmd_qp_query_tis_td(void *qp, uint32_t tis_num, 662 uint32_t *tis_td); 663 __rte_internal 664 struct mlx5_devx_obj *mlx5_devx_cmd_create_rq(void *ctx, 665 struct mlx5_devx_create_rq_attr *rq_attr, 666 int socket); 667 __rte_internal 668 int mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq, 669 struct mlx5_devx_modify_rq_attr *rq_attr); 670 __rte_internal 671 struct mlx5_devx_obj *mlx5_devx_cmd_create_rmp(void *ctx, 672 struct mlx5_devx_create_rmp_attr *rq_attr, int socket); 673 __rte_internal 674 struct mlx5_devx_obj *mlx5_devx_cmd_create_tir(void *ctx, 675 struct mlx5_devx_tir_attr *tir_attr); 676 __rte_internal 677 struct mlx5_devx_obj *mlx5_devx_cmd_create_rqt(void *ctx, 678 struct mlx5_devx_rqt_attr *rqt_attr); 679 __rte_internal 680 struct mlx5_devx_obj *mlx5_devx_cmd_create_sq(void *ctx, 681 struct mlx5_devx_create_sq_attr *sq_attr); 682 __rte_internal 683 int mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq, 684 struct mlx5_devx_modify_sq_attr *sq_attr); 685 __rte_internal 686 struct mlx5_devx_obj *mlx5_devx_cmd_create_tis(void *ctx, 687 struct mlx5_devx_tis_attr *tis_attr); 688 __rte_internal 689 struct mlx5_devx_obj *mlx5_devx_cmd_create_td(void *ctx); 690 __rte_internal 691 int mlx5_devx_cmd_flow_dump(void *fdb_domain, void *rx_domain, void *tx_domain, 692 FILE *file); 693 __rte_internal 694 int mlx5_devx_cmd_flow_single_dump(void *rule, FILE *file); 695 __rte_internal 696 struct mlx5_devx_obj *mlx5_devx_cmd_create_cq(void *ctx, 697 struct mlx5_devx_cq_attr *attr); 698 __rte_internal 699 struct mlx5_devx_obj *mlx5_devx_cmd_create_virtq(void *ctx, 700 struct mlx5_devx_virtq_attr *attr); 701 __rte_internal 702 int mlx5_devx_cmd_modify_virtq(struct mlx5_devx_obj *virtq_obj, 703 struct mlx5_devx_virtq_attr *attr); 704 __rte_internal 705 int mlx5_devx_cmd_query_virtq(struct mlx5_devx_obj *virtq_obj, 706 struct mlx5_devx_virtq_attr *attr); 707 __rte_internal 708 struct mlx5_devx_obj *mlx5_devx_cmd_create_qp(void *ctx, 709 struct mlx5_devx_qp_attr *attr); 710 __rte_internal 711 int mlx5_devx_cmd_modify_qp_state(struct mlx5_devx_obj *qp, 712 uint32_t qp_st_mod_op, uint32_t remote_qp_id); 713 __rte_internal 714 int mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt, 715 struct mlx5_devx_rqt_attr *rqt_attr); 716 __rte_internal 717 int mlx5_devx_cmd_modify_tir(struct mlx5_devx_obj *tir, 718 struct mlx5_devx_modify_tir_attr *tir_attr); 719 __rte_internal 720 int mlx5_devx_cmd_query_parse_samples(struct mlx5_devx_obj *flex_obj, 721 struct mlx5_ext_sample_id ids[], 722 uint32_t num, uint8_t *anchor); 723 724 __rte_internal 725 struct mlx5_devx_obj * 726 mlx5_devx_cmd_create_flex_parser(void *ctx, 727 struct mlx5_devx_graph_node_attr *data); 728 729 __rte_internal 730 int mlx5_devx_cmd_register_read(void *ctx, uint16_t reg_id, 731 uint32_t arg, uint32_t *data, uint32_t dw_cnt); 732 733 __rte_internal 734 int mlx5_devx_cmd_register_write(void *ctx, uint16_t reg_id, 735 uint32_t arg, uint32_t *data, uint32_t dw_cnt); 736 737 __rte_internal 738 struct mlx5_devx_obj * 739 mlx5_devx_cmd_create_geneve_tlv_option(void *ctx, 740 uint16_t class, uint8_t type, uint8_t len); 741 742 /** 743 * Create virtio queue counters object DevX API. 744 * 745 * @param[in] ctx 746 * Device context. 747 748 * @return 749 * The DevX object created, NULL otherwise and rte_errno is set. 750 */ 751 __rte_internal 752 struct mlx5_devx_obj *mlx5_devx_cmd_create_virtio_q_counters(void *ctx); 753 754 /** 755 * Query virtio queue counters object using DevX API. 756 * 757 * @param[in] couners_obj 758 * Pointer to virtq object structure. 759 * @param [in/out] attr 760 * Pointer to virtio queue counters attributes structure. 761 * 762 * @return 763 * 0 on success, a negative errno value otherwise and rte_errno is set. 764 */ 765 __rte_internal 766 int mlx5_devx_cmd_query_virtio_q_counters(struct mlx5_devx_obj *couners_obj, 767 struct mlx5_devx_virtio_q_couners_attr *attr); 768 __rte_internal 769 struct mlx5_devx_obj *mlx5_devx_cmd_create_flow_hit_aso_obj(void *ctx, 770 uint32_t pd); 771 __rte_internal 772 struct mlx5_devx_obj *mlx5_devx_cmd_alloc_pd(void *ctx); 773 774 __rte_internal 775 int mlx5_devx_cmd_wq_query(void *wq, uint32_t *counter_set_id); 776 777 __rte_internal 778 struct mlx5_devx_obj *mlx5_devx_cmd_queue_counter_alloc(void *ctx); 779 __rte_internal 780 int mlx5_devx_cmd_queue_counter_query(struct mlx5_devx_obj *dcs, int clear, 781 uint32_t *out_of_buffers); 782 __rte_internal 783 struct mlx5_devx_obj *mlx5_devx_cmd_create_conn_track_offload_obj(void *ctx, 784 uint32_t pd, uint32_t log_obj_size); 785 786 /** 787 * Create general object of type FLOW_METER_ASO using DevX API.. 788 * 789 * @param[in] ctx 790 * Device context. 791 * @param [in] pd 792 * PD value to associate the FLOW_METER_ASO object with. 793 * @param [in] log_obj_size 794 * log_obj_size define to allocate number of 2 * meters 795 * in one FLOW_METER_ASO object. 796 * 797 * @return 798 * The DevX object created, NULL otherwise and rte_errno is set. 799 */ 800 __rte_internal 801 struct mlx5_devx_obj *mlx5_devx_cmd_create_flow_meter_aso_obj(void *ctx, 802 uint32_t pd, uint32_t log_obj_size); 803 __rte_internal 804 struct mlx5_devx_obj * 805 mlx5_devx_cmd_create_dek_obj(void *ctx, struct mlx5_devx_dek_attr *attr); 806 807 __rte_internal 808 struct mlx5_devx_obj * 809 mlx5_devx_cmd_create_import_kek_obj(void *ctx, 810 struct mlx5_devx_import_kek_attr *attr); 811 812 __rte_internal 813 struct mlx5_devx_obj * 814 mlx5_devx_cmd_create_credential_obj(void *ctx, 815 struct mlx5_devx_credential_attr *attr); 816 817 __rte_internal 818 struct mlx5_devx_obj * 819 mlx5_devx_cmd_create_crypto_login_obj(void *ctx, 820 struct mlx5_devx_crypto_login_attr *attr); 821 822 __rte_internal 823 int 824 mlx5_devx_cmd_query_lag(void *ctx, 825 struct mlx5_devx_lag_context *lag_ctx); 826 #endif /* RTE_PMD_MLX5_DEVX_CMDS_H_ */ 827