/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_DEVX_CMDS_H_
#define RTE_PMD_MLX5_DEVX_CMDS_H_

#include "mlx5_glue.h"
#include "mlx5_prm.h"

/*
 * Defines the number of retries to allocate the first UAR in the page.
 * OFED 5.0.x and upstream rdma-core before v29 returned NULL as the
 * UAR base address if the UAR was not the first object in the UAR page.
 * This caused the PMD to fail, so we keep requesting another UAR until
 * one with a non-NULL base address is returned.
 */
#define MLX5_ALLOC_UAR_RETRY 32

/* This is a limitation of libibverbs: the input length variable type is uint16_t. */
#define MLX5_DEVX_MAX_KLM_ENTRIES ((UINT16_MAX - \
		MLX5_ST_SZ_DW(create_mkey_in) * 4) / (MLX5_ST_SZ_DW(klm) * 4))

struct mlx5_devx_mkey_attr {
	uint64_t addr;
	uint64_t size;
	uint32_t umem_id;
	uint32_t pd;
	uint32_t log_entity_size;
	uint32_t pg_access:1;
	uint32_t relaxed_ordering_write:1;
	uint32_t relaxed_ordering_read:1;
	struct mlx5_klm *klm_array;
	int klm_num;
};
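
/*
 * Illustrative sketch (not part of this header): how a caller might fill
 * struct mlx5_devx_mkey_attr before calling mlx5_devx_cmd_mkey_create()
 * declared below. The buffer, umem id and PD number are placeholders and
 * are assumed to come from a previously registered umem and allocated PD.
 *
 *	struct mlx5_devx_mkey_attr mkey_attr = {
 *		.addr = (uintptr_t)buf,
 *		.size = buf_size,
 *		.umem_id = umem_id,
 *		.pd = pdn,
 *		.relaxed_ordering_write = 0,
 *		.relaxed_ordering_read = 0,
 *	};
 *	struct mlx5_devx_obj *mkey = mlx5_devx_cmd_mkey_create(ctx, &mkey_attr);
 *
 *	if (mkey == NULL)
 *		return -rte_errno;
 */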

/* HCA qos attributes. */
struct mlx5_hca_qos_attr {
	uint32_t sup:1; /* Whether QOS is supported. */
	uint32_t srtcm_sup:1; /* Whether srTCM mode is supported. */
	uint32_t packet_pacing:1; /* Packet pacing is supported. */
	uint32_t wqe_rate_pp:1; /* Packet pacing WQE rate mode. */
	uint32_t flow_meter_reg_share:1;
	/* Whether reg_c sharing is supported. */
	uint8_t log_max_flow_meter;
	/* Log of the maximum number of supported meters. */
	uint8_t flow_meter_reg_c_ids;
	/* Bitmap of the reg_c registers available for flow meter use. */
};

struct mlx5_hca_vdpa_attr {
	uint8_t virtio_queue_type;
	uint32_t valid:1;
	uint32_t desc_tunnel_offload_type:1;
	uint32_t eth_frame_offload_type:1;
	uint32_t virtio_version_1_0:1;
	uint32_t tso_ipv4:1;
	uint32_t tso_ipv6:1;
	uint32_t tx_csum:1;
	uint32_t rx_csum:1;
	uint32_t event_mode:3;
	uint32_t log_doorbell_stride:5;
	uint32_t log_doorbell_bar_size:5;
	uint32_t queue_counters_valid:1;
	uint32_t max_num_virtio_queues;
	struct {
		uint32_t a;
		uint32_t b;
	} umems[3];
	uint64_t doorbell_bar_offset;
};

/* HCA supports this number of time periods for LRO. */
#define MLX5_LRO_NUM_SUPP_PERIODS 4

/* HCA attributes. */
struct mlx5_hca_attr {
	uint32_t eswitch_manager:1;
	uint32_t flow_counters_dump:1;
	uint32_t log_max_rqt_size:5;
	uint32_t parse_graph_flex_node:1;
	uint8_t flow_counter_bulk_alloc_bitmap;
	uint32_t eth_net_offloads:1;
	uint32_t eth_virt:1;
	uint32_t wqe_vlan_insert:1;
	uint32_t wqe_inline_mode:2;
	uint32_t vport_inline_mode:3;
	uint32_t tunnel_stateless_geneve_rx:1;
	uint32_t geneve_max_opt_len:1; /* 0x0: 14DW, 0x1: 63DW */
	uint32_t tunnel_stateless_gtp:1;
	uint32_t lro_cap:1;
	uint32_t tunnel_lro_gre:1;
	uint32_t tunnel_lro_vxlan:1;
	uint32_t lro_max_msg_sz_mode:2;
	uint32_t lro_timer_supported_periods[MLX5_LRO_NUM_SUPP_PERIODS];
	uint16_t lro_min_mss_size;
	uint32_t flex_parser_protocols;
	uint32_t hairpin:1;
	uint32_t log_max_hairpin_queues:5;
	uint32_t log_max_hairpin_wq_data_sz:5;
	uint32_t log_max_hairpin_num_packets:5;
	uint32_t vhca_id:16;
	uint32_t relaxed_ordering_write:1;
	uint32_t relaxed_ordering_read:1;
	uint32_t access_register_user:1;
	uint32_t wqe_index_ignore:1;
	uint32_t cross_channel:1;
	uint32_t non_wire_sq:1; /* SQ with non-wire ops is supported. */
	uint32_t log_max_static_sq_wq:5; /* Static WQE size SQ. */
	uint32_t num_lag_ports:4; /* Number of ports that can be bonded. */
	uint32_t dev_freq_khz; /* Timestamp counter frequency, kHz. */
	uint32_t scatter_fcs_w_decap_disable:1;
	uint32_t flow_hit_aso:1; /* General obj type FLOW_HIT_ASO supported. */
	uint32_t regex:1;
	uint32_t regexp_num_of_engines;
	uint32_t log_max_ft_sampler_num:8;
	struct mlx5_hca_qos_attr qos;
	struct mlx5_hca_vdpa_attr vdpa;
};

struct mlx5_devx_wq_attr {
	uint32_t wq_type:4;
	uint32_t wq_signature:1;
	uint32_t end_padding_mode:2;
	uint32_t cd_slave:1;
	uint32_t hds_skip_first_sge:1;
	uint32_t log2_hds_buf_size:3;
	uint32_t page_offset:5;
	uint32_t lwm:16;
	uint32_t pd:24;
	uint32_t uar_page:24;
	uint64_t dbr_addr;
	uint32_t hw_counter;
	uint32_t sw_counter;
	uint32_t log_wq_stride:4;
	uint32_t log_wq_pg_sz:5;
	uint32_t log_wq_sz:5;
	uint32_t dbr_umem_valid:1;
	uint32_t wq_umem_valid:1;
	uint32_t log_hairpin_num_packets:5;
	uint32_t log_hairpin_data_sz:5;
	uint32_t single_wqe_log_num_of_strides:4;
	uint32_t two_byte_shift_en:1;
	uint32_t single_stride_log_num_of_bytes:3;
	uint32_t dbr_umem_id;
	uint32_t wq_umem_id;
	uint64_t wq_umem_offset;
};

/* Create RQ attributes structure, used by create RQ operation. */
struct mlx5_devx_create_rq_attr {
	uint32_t rlky:1;
	uint32_t delay_drop_en:1;
	uint32_t scatter_fcs:1;
	uint32_t vsd:1;
	uint32_t mem_rq_type:4;
	uint32_t state:4;
	uint32_t flush_in_error_en:1;
	uint32_t hairpin:1;
	uint32_t user_index:24;
	uint32_t cqn:24;
	uint32_t counter_set_id:8;
	uint32_t rmpn:24;
	struct mlx5_devx_wq_attr wq_attr;
};

/* Modify RQ attributes structure, used by modify RQ operation. */
struct mlx5_devx_modify_rq_attr {
	uint32_t rqn:24;
	uint32_t rq_state:4; /* Current RQ state. */
	uint32_t state:4; /* Required RQ state. */
	uint32_t scatter_fcs:1;
	uint32_t vsd:1;
	uint32_t counter_set_id:8;
	uint32_t hairpin_peer_sq:24;
	uint32_t hairpin_peer_vhca:16;
	uint64_t modify_bitmask;
	uint32_t lwm:16; /* Contained WQ lwm. */
};

struct mlx5_rx_hash_field_select {
	uint32_t l3_prot_type:1;
	uint32_t l4_prot_type:1;
	uint32_t selected_fields:30;
};

/* TIR attributes structure, used by TIR operations. */
struct mlx5_devx_tir_attr {
	uint32_t disp_type:4;
	uint32_t lro_timeout_period_usecs:16;
	uint32_t lro_enable_mask:4;
	uint32_t lro_max_msg_sz:8;
	uint32_t inline_rqn:24;
	uint32_t rx_hash_symmetric:1;
	uint32_t tunneled_offload_en:1;
	uint32_t indirect_table:24;
	uint32_t rx_hash_fn:4;
	uint32_t self_lb_block:2;
	uint32_t transport_domain:24;
	uint8_t rx_hash_toeplitz_key[MLX5_RSS_HASH_KEY_LEN];
	struct mlx5_rx_hash_field_select rx_hash_field_selector_outer;
	struct mlx5_rx_hash_field_select rx_hash_field_selector_inner;
};

/* TIR attributes structure, used by TIR modify. */
struct mlx5_devx_modify_tir_attr {
	uint32_t tirn:24;
	uint64_t modify_bitmask;
	struct mlx5_devx_tir_attr tir;
};

/* RQT attributes structure, used by RQT operations. */
struct mlx5_devx_rqt_attr {
	uint8_t rq_type;
	uint32_t rqt_max_size:16;
	uint32_t rqt_actual_size:16;
	uint32_t rq_list[];
};
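
/*
 * Illustrative sketch (not part of this header): struct mlx5_devx_rqt_attr
 * ends with a flexible array member, so the caller is expected to allocate
 * it with room for the RQ list before calling mlx5_devx_cmd_create_rqt()
 * declared below. The queue count (rxqs_n) and RQ ids (rq_ids[]) are
 * placeholders; plain calloc() is used here only for illustration.
 *
 *	struct mlx5_devx_rqt_attr *rqt_attr;
 *	uint32_t i;
 *
 *	rqt_attr = calloc(1, sizeof(*rqt_attr) + rxqs_n * sizeof(uint32_t));
 *	if (rqt_attr == NULL)
 *		return -ENOMEM;
 *	rqt_attr->rqt_max_size = rxqs_n;
 *	rqt_attr->rqt_actual_size = rxqs_n;
 *	for (i = 0; i != rxqs_n; ++i)
 *		rqt_attr->rq_list[i] = rq_ids[i];
 *	rqt = mlx5_devx_cmd_create_rqt(ctx, rqt_attr);
 *	free(rqt_attr);
 */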

/* TIS attributes structure. */
struct mlx5_devx_tis_attr {
	uint32_t strict_lag_tx_port_affinity:1;
	uint32_t tls_en:1;
	uint32_t lag_tx_port_affinity:4;
	uint32_t prio:4;
	uint32_t transport_domain:24;
};

/* SQ attributes structure, used by SQ create operation. */
struct mlx5_devx_create_sq_attr {
	uint32_t rlky:1;
	uint32_t cd_master:1;
	uint32_t fre:1;
	uint32_t flush_in_error_en:1;
	uint32_t allow_multi_pkt_send_wqe:1;
	uint32_t min_wqe_inline_mode:3;
	uint32_t state:4;
	uint32_t reg_umr:1;
	uint32_t allow_swp:1;
	uint32_t hairpin:1;
	uint32_t non_wire:1;
	uint32_t static_sq_wq:1;
	uint32_t user_index:24;
	uint32_t cqn:24;
	uint32_t packet_pacing_rate_limit_index:16;
	uint32_t tis_lst_sz:16;
	uint32_t tis_num:24;
	struct mlx5_devx_wq_attr wq_attr;
};

/* SQ attributes structure, used by SQ modify operation. */
struct mlx5_devx_modify_sq_attr {
	uint32_t sq_state:4;
	uint32_t state:4;
	uint32_t hairpin_peer_rq:24;
	uint32_t hairpin_peer_vhca:16;
};

/* CQ attributes structure, used by CQ operations. */
struct mlx5_devx_cq_attr {
	uint32_t q_umem_valid:1;
	uint32_t db_umem_valid:1;
	uint32_t use_first_only:1;
	uint32_t overrun_ignore:1;
	uint32_t cqe_comp_en:1;
	uint32_t mini_cqe_res_format:2;
	uint32_t mini_cqe_res_format_ext:2;
	uint32_t cqe_size:3;
	uint32_t log_cq_size:5;
	uint32_t log_page_size:5;
	uint32_t uar_page_id;
	uint32_t q_umem_id;
	uint64_t q_umem_offset;
	uint32_t db_umem_id;
	uint64_t db_umem_offset;
	uint32_t eqn;
	uint64_t db_addr;
};

/* Virtq attributes structure, used by VIRTQ operations. */
struct mlx5_devx_virtq_attr {
	uint16_t hw_available_index;
	uint16_t hw_used_index;
	uint16_t q_size;
	uint32_t pd:24;
	uint32_t virtio_version_1_0:1;
	uint32_t tso_ipv4:1;
	uint32_t tso_ipv6:1;
	uint32_t tx_csum:1;
	uint32_t rx_csum:1;
	uint32_t event_mode:3;
	uint32_t state:4;
	uint32_t dirty_bitmap_dump_enable:1;
	uint32_t dirty_bitmap_mkey;
	uint32_t dirty_bitmap_size;
	uint32_t mkey;
	uint32_t qp_id;
	uint32_t queue_index;
	uint32_t tis_id;
	uint32_t counters_obj_id;
	uint64_t dirty_bitmap_addr;
	uint64_t type;
	uint64_t desc_addr;
	uint64_t used_addr;
	uint64_t available_addr;
	struct {
		uint32_t id;
		uint32_t size;
		uint64_t offset;
	} umems[3];
	uint8_t error_type;
};

struct mlx5_devx_qp_attr {
	uint32_t pd:24;
	uint32_t uar_index:24;
	uint32_t cqn:24;
	uint32_t log_page_size:5;
	uint32_t rq_size:17; /* Must be power of 2. */
	uint32_t log_rq_stride:3;
	uint32_t sq_size:17; /* Must be power of 2. */
	uint32_t dbr_umem_valid:1;
	uint32_t dbr_umem_id;
	uint64_t dbr_address;
	uint32_t wq_umem_id;
	uint64_t wq_umem_offset;
};

struct mlx5_devx_virtio_q_couners_attr {
	uint64_t received_desc;
	uint64_t completed_desc;
	uint32_t error_cqes;
	uint32_t bad_desc_errors;
	uint32_t exceed_max_chain;
	uint32_t invalid_buffer;
};
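
/*
 * Illustrative sketch (not part of this header): moving a DevX SQ from the
 * RST to the RDY state with mlx5_devx_cmd_modify_sq() declared below,
 * assuming the MLX5_SQC_STATE_* values defined in mlx5_prm.h.
 *
 *	struct mlx5_devx_modify_sq_attr msq_attr = {
 *		.sq_state = MLX5_SQC_STATE_RST,
 *		.state = MLX5_SQC_STATE_RDY,
 *	};
 *
 *	if (mlx5_devx_cmd_modify_sq(sq, &msq_attr))
 *		return -rte_errno;
 */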

/*
 * Graph flow match sample attributes structure,
 * used by flex parser operations.
 */
struct mlx5_devx_match_sample_attr {
	uint32_t flow_match_sample_en:1;
	uint32_t flow_match_sample_field_offset:16;
	uint32_t flow_match_sample_offset_mode:4;
	uint32_t flow_match_sample_field_offset_mask;
	uint32_t flow_match_sample_field_offset_shift:4;
	uint32_t flow_match_sample_field_base_offset:8;
	uint32_t flow_match_sample_tunnel_mode:3;
	uint32_t flow_match_sample_field_id;
};

/* Graph node arc attributes structure, used by flex parser operations. */
struct mlx5_devx_graph_arc_attr {
	uint32_t compare_condition_value:16;
	uint32_t start_inner_tunnel:1;
	uint32_t arc_parse_graph_node:8;
	uint32_t parse_graph_node_handle;
};

/* Maximal number of samples per graph node. */
#define MLX5_GRAPH_NODE_SAMPLE_NUM 8

/* Maximal number of input/output arcs per graph node. */
#define MLX5_GRAPH_NODE_ARC_NUM 8

/* Parse graph node attributes structure, used by flex parser operations. */
struct mlx5_devx_graph_node_attr {
	uint32_t modify_field_select;
	uint32_t header_length_mode:4;
	uint32_t header_length_base_value:16;
	uint32_t header_length_field_shift:4;
	uint32_t header_length_field_offset:16;
	uint32_t header_length_field_mask;
	struct mlx5_devx_match_sample_attr sample[MLX5_GRAPH_NODE_SAMPLE_NUM];
	uint32_t next_header_field_offset:16;
	uint32_t next_header_field_size:5;
	struct mlx5_devx_graph_arc_attr in[MLX5_GRAPH_NODE_ARC_NUM];
	struct mlx5_devx_graph_arc_attr out[MLX5_GRAPH_NODE_ARC_NUM];
};

/* mlx5_devx_cmds.c */

__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_flow_counter_alloc(void *ctx,
						       uint32_t bulk_sz);
__rte_internal
int mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj);
__rte_internal
int mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs,
				     int clear, uint32_t n_counters,
				     uint64_t *pkts, uint64_t *bytes,
				     uint32_t mkey, void *addr,
				     void *cmd_comp,
				     uint64_t async_id);
__rte_internal
int mlx5_devx_cmd_query_hca_attr(void *ctx,
				 struct mlx5_hca_attr *attr);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_mkey_create(void *ctx,
						struct mlx5_devx_mkey_attr *attr);
__rte_internal
int mlx5_devx_get_out_command_status(void *out);
__rte_internal
int mlx5_devx_cmd_qp_query_tis_td(void *qp, uint32_t tis_num,
				  uint32_t *tis_td);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_rq(void *ctx,
					      struct mlx5_devx_create_rq_attr *rq_attr,
					      int socket);
__rte_internal
int mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq,
			    struct mlx5_devx_modify_rq_attr *rq_attr);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_tir(void *ctx,
					       struct mlx5_devx_tir_attr *tir_attr);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_rqt(void *ctx,
					       struct mlx5_devx_rqt_attr *rqt_attr);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_sq(void *ctx,
					      struct mlx5_devx_create_sq_attr *sq_attr);
__rte_internal
int mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq,
			    struct mlx5_devx_modify_sq_attr *sq_attr);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_tis(void *ctx,
					       struct mlx5_devx_tis_attr *tis_attr);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_td(void *ctx);
__rte_internal
int mlx5_devx_cmd_flow_dump(void *fdb_domain, void *rx_domain, void *tx_domain,
			    FILE *file);
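
/*
 * Illustrative sketch (not part of this header): typical probe-time usage of
 * mlx5_devx_cmd_query_hca_attr() declared above; ctx is assumed to be a
 * DevX-capable device context and enable_lro a placeholder flag.
 *
 *	struct mlx5_hca_attr hca_attr = { 0 };
 *
 *	if (mlx5_devx_cmd_query_hca_attr(ctx, &hca_attr))
 *		return -rte_errno;
 *	if (hca_attr.lro_cap)
 *		enable_lro = 1;
 */
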
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_cq(void *ctx,
					      struct mlx5_devx_cq_attr *attr);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_virtq(void *ctx,
						 struct mlx5_devx_virtq_attr *attr);
__rte_internal
int mlx5_devx_cmd_modify_virtq(struct mlx5_devx_obj *virtq_obj,
			       struct mlx5_devx_virtq_attr *attr);
__rte_internal
int mlx5_devx_cmd_query_virtq(struct mlx5_devx_obj *virtq_obj,
			      struct mlx5_devx_virtq_attr *attr);
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_qp(void *ctx,
					      struct mlx5_devx_qp_attr *attr);
__rte_internal
int mlx5_devx_cmd_modify_qp_state(struct mlx5_devx_obj *qp,
				  uint32_t qp_st_mod_op, uint32_t remote_qp_id);
__rte_internal
int mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt,
			     struct mlx5_devx_rqt_attr *rqt_attr);
__rte_internal
int mlx5_devx_cmd_modify_tir(struct mlx5_devx_obj *tir,
			     struct mlx5_devx_modify_tir_attr *tir_attr);
__rte_internal
int mlx5_devx_cmd_query_parse_samples(struct mlx5_devx_obj *flex_obj,
				      uint32_t ids[], uint32_t num);

__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_flex_parser(void *ctx,
						       struct mlx5_devx_graph_node_attr *data);

__rte_internal
int mlx5_devx_cmd_register_read(void *ctx, uint16_t reg_id,
				uint32_t arg, uint32_t *data, uint32_t dw_cnt);

/**
 * Create virtio queue counters object DevX API.
 *
 * @param[in] ctx
 *   Device context.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_virtio_q_counters(void *ctx);

/**
 * Query virtio queue counters object using DevX API.
 *
 * @param[in] couners_obj
 *   Pointer to the virtio queue counters object.
 * @param[in/out] attr
 *   Pointer to the virtio queue counters attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
__rte_internal
int mlx5_devx_cmd_query_virtio_q_counters(struct mlx5_devx_obj *couners_obj,
					  struct mlx5_devx_virtio_q_couners_attr *attr);

__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_flow_hit_aso_obj(void *ctx,
							    uint32_t pd);

#endif /* RTE_PMD_MLX5_DEVX_CMDS_H_ */