/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_H_
#define RTE_PMD_MLX5_H_

#include <stddef.h>
#include <stdbool.h>
#include <stdint.h>
#include <limits.h>
#include <net/if.h>
#include <netinet/in.h>
#include <sys/queue.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_rwlock.h>
#include <rte_interrupts.h>
#include <rte_errno.h>
#include <rte_flow.h>

#include "mlx5_utils.h"
#include "mlx5_mr.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"

enum {
	PCI_VENDOR_ID_MELLANOX = 0x15b3,
};

enum {
	PCI_DEVICE_ID_MELLANOX_CONNECTX4 = 0x1013,
	PCI_DEVICE_ID_MELLANOX_CONNECTX4VF = 0x1014,
	PCI_DEVICE_ID_MELLANOX_CONNECTX4LX = 0x1015,
	PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF = 0x1016,
	PCI_DEVICE_ID_MELLANOX_CONNECTX5 = 0x1017,
	PCI_DEVICE_ID_MELLANOX_CONNECTX5VF = 0x1018,
	PCI_DEVICE_ID_MELLANOX_CONNECTX5EX = 0x1019,
	PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF = 0x101a,
	PCI_DEVICE_ID_MELLANOX_CONNECTX5BF = 0xa2d2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF = 0xa2d3,
	PCI_DEVICE_ID_MELLANOX_CONNECTX6 = 0x101b,
	PCI_DEVICE_ID_MELLANOX_CONNECTX6VF = 0x101c,
};

/* Request types for IPC. */
enum mlx5_mp_req_type {
	MLX5_MP_REQ_VERBS_CMD_FD = 1,
	MLX5_MP_REQ_CREATE_MR,
	MLX5_MP_REQ_START_RXTX,
	MLX5_MP_REQ_STOP_RXTX,
	MLX5_MP_REQ_QUEUE_STATE_MODIFY,
};

struct mlx5_mp_arg_queue_state_modify {
	uint8_t is_wq; /* Set if WQ. */
	uint16_t queue_id; /* DPDK queue ID. */
	enum ibv_wq_state state; /* WQ requested state. */
};

/* Parameters for IPC. */
struct mlx5_mp_param {
	enum mlx5_mp_req_type type;
	int port_id;
	int result;
	RTE_STD_C11
	union {
		uintptr_t addr; /* MLX5_MP_REQ_CREATE_MR */
		struct mlx5_mp_arg_queue_state_modify state_modify;
		/* MLX5_MP_REQ_QUEUE_STATE_MODIFY */
	} args;
};

/** Request timeout for IPC. */
#define MLX5_MP_REQ_TIMEOUT_SEC 5

/** Key string for IPC. */
#define MLX5_MP_NAME "net_mlx5_mp"
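/*
 * Usage sketch (illustrative, not part of the original header): a request
 * sent from a secondary process over the rte_mp channel registered under
 * MLX5_MP_NAME would be filled along these lines.
 *
 *	struct mlx5_mp_param param = {
 *		.type = MLX5_MP_REQ_CREATE_MR,
 *		.port_id = dev->data->port_id,
 *		.args.addr = (uintptr_t)addr,
 *	};
 */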
/* Recognized InfiniBand device physical port name types. */
enum mlx5_phys_port_name_type {
	MLX5_PHYS_PORT_NAME_TYPE_NOTSET = 0, /* Not set. */
	MLX5_PHYS_PORT_NAME_TYPE_LEGACY, /* Legacy name, kernel ver < 5.0. */
	MLX5_PHYS_PORT_NAME_TYPE_UPLINK, /* p0, kernel ver >= 5.0. */
	MLX5_PHYS_PORT_NAME_TYPE_PFVF, /* pf0vf0, kernel ver >= 5.0. */
	MLX5_PHYS_PORT_NAME_TYPE_UNKNOWN, /* Unrecognized. */
};

/** Switch information returned by mlx5_nl_switch_info(). */
struct mlx5_switch_info {
	uint32_t master:1; /**< Master device. */
	uint32_t representor:1; /**< Representor device. */
	enum mlx5_phys_port_name_type name_type; /**< Port name type. */
	int32_t pf_num; /**< PF number (valid for the pfXvfY format only). */
	int32_t port_name; /**< Representor port name. */
	uint64_t switch_id; /**< Switch identifier. */
};

LIST_HEAD(mlx5_dev_list, mlx5_ibv_shared);

/* Shared data between primary and secondary processes. */
struct mlx5_shared_data {
	rte_spinlock_t lock;
	/* Global spinlock for primary and secondary processes. */
	int init_done; /* Whether primary has done initialization. */
	unsigned int secondary_cnt; /* Number of secondary processes init'd. */
	struct mlx5_dev_list mem_event_cb_list;
	rte_rwlock_t mem_event_rwlock;
};

/* Per-process data structure, not visible to other processes. */
struct mlx5_local_data {
	int init_done; /* Whether a secondary has done initialization. */
};

extern struct mlx5_shared_data *mlx5_shared_data;

struct mlx5_counter_ctrl {
	/* Name of the counter. */
	char dpdk_name[RTE_ETH_XSTATS_NAME_SIZE];
	/* Name of the counter on the device table. */
	char ctr_name[RTE_ETH_XSTATS_NAME_SIZE];
	uint32_t ib:1; /**< Nonzero for IB counters. */
};

struct mlx5_xstats_ctrl {
	/* Number of device stats. */
	uint16_t stats_n;
	/* Number of device stats identified by PMD. */
	uint16_t mlx5_stats_n;
	/* Index in the device counters table. */
	uint16_t dev_table_idx[MLX5_MAX_XSTATS];
	uint64_t base[MLX5_MAX_XSTATS];
	struct mlx5_counter_ctrl info[MLX5_MAX_XSTATS];
};

struct mlx5_stats_ctrl {
	/* Base for imissed counter. */
	uint64_t imissed_base;
};

/* DevX creation object. */
struct mlx5_devx_obj {
	struct mlx5dv_devx_obj *obj; /* The DV object. */
	int id; /* The object ID. */
};

struct mlx5_devx_mkey_attr {
	uint64_t addr;
	uint64_t size;
	uint32_t umem_id;
	uint32_t pd;
};

/* HCA supports this number of time periods for LRO. */
#define MLX5_LRO_NUM_SUPP_PERIODS 4

/* HCA attributes. */
struct mlx5_hca_attr {
	uint32_t eswitch_manager:1;
	uint32_t flow_counters_dump:1;
	uint8_t flow_counter_bulk_alloc_bitmap;
	uint32_t eth_net_offloads:1;
	uint32_t eth_virt:1;
	uint32_t wqe_vlan_insert:1;
	uint32_t wqe_inline_mode:2;
	uint32_t vport_inline_mode:3;
	uint32_t lro_cap:1;
	uint32_t tunnel_lro_gre:1;
	uint32_t tunnel_lro_vxlan:1;
	uint32_t lro_max_msg_sz_mode:2;
	uint32_t lro_timer_supported_periods[MLX5_LRO_NUM_SUPP_PERIODS];
};

/* Flow list. */
TAILQ_HEAD(mlx5_flows, rte_flow);

/* Default PMD specific parameter value. */
#define MLX5_ARG_UNSET (-1)

#define MLX5_LRO_SUPPORTED(dev) \
	(((struct mlx5_priv *)((dev)->data->dev_private))->config.lro.supported)

/* LRO configurations structure. */
struct mlx5_lro_config {
	uint32_t supported:1; /* Whether LRO is supported. */
	uint32_t timeout; /* User configuration. */
};
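/*
 * Usage sketch (illustrative): MLX5_LRO_SUPPORTED() above takes a
 * struct rte_eth_dev pointer and reads the LRO capability from the
 * private configuration, e.g. when advertising Rx offloads:
 *
 *	if (MLX5_LRO_SUPPORTED(dev))
 *		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
 */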
/*
 * Device configuration structure.
 *
 * Merged configuration from:
 *
 * - Device capabilities,
 * - User device parameters (may disable features).
 */
struct mlx5_dev_config {
	unsigned int hw_csum:1; /* Checksum offload is supported. */
	unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */
	unsigned int hw_vlan_insert:1; /* VLAN insertion in WQE is supported. */
	unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
	unsigned int hw_padding:1; /* End alignment padding is supported. */
	unsigned int vf:1; /* This is a VF. */
	unsigned int tunnel_en:1;
	/* Whether tunnel stateless offloads are supported. */
	unsigned int mpls_en:1; /* MPLS over GRE/UDP is enabled. */
	unsigned int cqe_comp:1; /* CQE compression is enabled. */
	unsigned int cqe_pad:1; /* CQE padding is enabled. */
	unsigned int tso:1; /* Whether TSO is supported. */
	unsigned int rx_vec_en:1; /* Rx vector is enabled. */
	unsigned int mr_ext_memseg_en:1;
	/* Whether memseg should be extended for MR creation. */
	unsigned int l3_vxlan_en:1; /* Enable L3 VXLAN flow creation. */
	unsigned int vf_nl_en:1; /* Enable Netlink requests in VF mode. */
	unsigned int dv_esw_en:1; /* Enable E-Switch DV flow. */
	unsigned int dv_flow_en:1; /* Enable DV flow. */
	unsigned int swp:1; /* Tx generic tunnel checksum and TSO offload. */
	unsigned int devx:1; /* Whether devx interface is available or not. */
	unsigned int dest_tir:1; /* Whether advanced DR API is available. */
	struct {
		unsigned int enabled:1; /* Whether MPRQ is enabled. */
		unsigned int stride_num_n; /* Number of strides. */
		unsigned int min_stride_size_n; /* Min size of a stride. */
		unsigned int max_stride_size_n; /* Max size of a stride. */
		unsigned int max_memcpy_len;
		/* Maximum packet size to memcpy Rx packets. */
		unsigned int min_rxqs_num;
		/* Rx queue count threshold to enable MPRQ. */
	} mprq; /* Configurations for Multi-Packet RQ. */
	int mps; /* Multi-packet send supported mode. */
	unsigned int flow_prio; /* Number of flow priorities. */
	unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */
	unsigned int ind_table_max_size; /* Maximum indirection table size. */
	unsigned int max_dump_files_num; /* Maximum dump files per queue. */
	int txqs_inline; /* Queue number threshold for inlining. */
	int txq_inline_min; /* Minimal amount of data bytes to inline. */
	int txq_inline_max; /* Max packet size for inlining with SEND. */
	int txq_inline_mpw; /* Max packet size for inlining with eMPW. */
	struct mlx5_hca_attr hca_attr; /* HCA attributes. */
	struct mlx5_lro_config lro; /* LRO configuration. */
};

struct mlx5_devx_wq_attr {
	uint32_t wq_type:4;
	uint32_t wq_signature:1;
	uint32_t end_padding_mode:2;
	uint32_t cd_slave:1;
	uint32_t hds_skip_first_sge:1;
	uint32_t log2_hds_buf_size:3;
	uint32_t page_offset:5;
	uint32_t lwm:16;
	uint32_t pd:24;
	uint32_t uar_page:24;
	uint64_t dbr_addr;
	uint32_t hw_counter;
	uint32_t sw_counter;
	uint32_t log_wq_stride:4;
	uint32_t log_wq_pg_sz:5;
	uint32_t log_wq_sz:5;
	uint32_t dbr_umem_valid:1;
	uint32_t wq_umem_valid:1;
	uint32_t log_hairpin_num_packets:5;
	uint32_t log_hairpin_data_sz:5;
	uint32_t single_wqe_log_num_of_strides:4;
	uint32_t two_byte_shift_en:1;
	uint32_t single_stride_log_num_of_bytes:3;
	uint32_t dbr_umem_id;
	uint32_t wq_umem_id;
	uint64_t wq_umem_offset;
};

/* Create RQ attributes structure, used by create RQ operation. */
struct mlx5_devx_create_rq_attr {
	uint32_t rlky:1;
	uint32_t delay_drop_en:1;
	uint32_t scatter_fcs:1;
	uint32_t vsd:1;
	uint32_t mem_rq_type:4;
	uint32_t state:4;
	uint32_t flush_in_error_en:1;
	uint32_t hairpin:1;
	uint32_t user_index:24;
	uint32_t cqn:24;
	uint32_t counter_set_id:8;
	uint32_t rmpn:24;
	struct mlx5_devx_wq_attr wq_attr;
};

/* Modify RQ attributes structure, used by modify RQ operation. */
struct mlx5_devx_modify_rq_attr {
	uint32_t rqn:24;
	uint32_t rq_state:4; /* Current RQ state. */
	uint32_t state:4; /* Required RQ state. */
	uint32_t scatter_fcs:1;
	uint32_t vsd:1;
	uint32_t counter_set_id:8;
	uint32_t hairpin_peer_sq:24;
	uint32_t hairpin_peer_vhca:16;
	uint64_t modify_bitmask;
	uint32_t lwm:16; /* Contained WQ lwm. */
};
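/*
 * Usage sketch (illustrative; the MLX5_RQC_STATE_* names are an
 * assumption based on the mlx5 PRM definitions): moving an RQ from
 * reset to ready through mlx5_devx_cmd_modify_rq(), declared at the
 * end of this file.
 *
 *	struct mlx5_devx_modify_rq_attr rq_attr = {
 *		.rq_state = MLX5_RQC_STATE_RST,
 *		.state = MLX5_RQC_STATE_RDY,
 *	};
 *
 *	if (mlx5_devx_cmd_modify_rq(rq, &rq_attr))
 *		... handle the error ...
 */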
struct mlx5_rx_hash_field_select {
	uint32_t l3_prot_type:1;
	uint32_t l4_prot_type:1;
	uint32_t selected_fields:30;
};

/* TIR attributes structure, used by TIR operations. */
struct mlx5_devx_tir_attr {
	uint32_t disp_type:4;
	uint32_t lro_timeout_period_usecs:16;
	uint32_t lro_enable_mask:4;
	uint32_t lro_max_msg_sz:8;
	uint32_t inline_rqn:24;
	uint32_t rx_hash_symmetric:1;
	uint32_t tunneled_offload_en:1;
	uint32_t indirect_table:24;
	uint32_t rx_hash_fn:4;
	uint32_t self_lb_block:2;
	uint32_t transport_domain:24;
	uint32_t rx_hash_toeplitz_key[10];
	struct mlx5_rx_hash_field_select rx_hash_field_selector_outer;
	struct mlx5_rx_hash_field_select rx_hash_field_selector_inner;
};

/* RQT attributes structure, used by RQT operations. */
struct mlx5_devx_rqt_attr {
	uint32_t rqt_max_size:16;
	uint32_t rqt_actual_size:16;
	uint32_t rq_list[];
};
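/*
 * Allocation sketch (illustrative): rq_list[] is a flexible array
 * member, so the attribute structure is sized for the actual number
 * of queues before the RQT is created.
 *
 *	struct mlx5_devx_rqt_attr *rqt_attr =
 *		rte_calloc(__func__, 1, sizeof(*rqt_attr) +
 *			   rqs_n * sizeof(uint32_t), 0);
 *
 *	rqt_attr->rqt_actual_size = rqs_n;
 */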
/**
 * Type of object being allocated.
 */
enum mlx5_verbs_alloc_type {
	MLX5_VERBS_ALLOC_TYPE_NONE,
	MLX5_VERBS_ALLOC_TYPE_TX_QUEUE,
	MLX5_VERBS_ALLOC_TYPE_RX_QUEUE,
};

/* VLAN netdev for VLAN workaround. */
struct mlx5_vlan_dev {
	uint32_t refcnt;
	uint32_t ifindex; /**< Own interface index. */
};

/* Structure for VF VLAN workaround. */
struct mlx5_vf_vlan {
	uint32_t tag:12;
	uint32_t created:1;
};

/*
 * Array of VLAN devices created on the base of VF
 * used for workaround in virtual environments.
 */
struct mlx5_vlan_vmwa_context {
	int nl_socket;
	uint32_t nl_sn;
	uint32_t vf_ifindex;
	struct rte_eth_dev *dev;
	struct mlx5_vlan_dev vlan_dev[4096];
};

/**
 * Verbs allocator needs a context to know in the callback which kind of
 * resources it is allocating.
 */
struct mlx5_verbs_alloc_ctx {
	enum mlx5_verbs_alloc_type type; /* Kind of object being allocated. */
	const void *obj; /* Pointer to the DPDK object. */
};

LIST_HEAD(mlx5_mr_list, mlx5_mr);

/* Flow drop context necessary due to Verbs API. */
struct mlx5_drop {
	struct mlx5_hrxq *hrxq; /* Hash Rx queue. */
	struct mlx5_rxq_obj *rxq; /* Rx queue object. */
};

#define MLX5_COUNTERS_PER_POOL 512
#define MLX5_MAX_PENDING_QUERIES 4

struct mlx5_flow_counter_pool;

struct flow_counter_stats {
	uint64_t hits;
	uint64_t bytes;
};

/* Counters information. */
struct mlx5_flow_counter {
	TAILQ_ENTRY(mlx5_flow_counter) next;
	/**< Pointer to the next flow counter structure. */
	uint32_t shared:1; /**< Share counter ID with other flow rules. */
	uint32_t batch:1;
	/**< Whether the counter was allocated by batch command. */
	uint32_t ref_cnt:30; /**< Reference counter. */
	uint32_t id; /**< Counter ID. */
	union { /**< Holds the counters for the rule. */
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
		struct ibv_counter_set *cs;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
		struct ibv_counters *cs;
#endif
		struct mlx5_devx_obj *dcs; /**< Counter DevX object. */
		struct mlx5_flow_counter_pool *pool; /**< The counter pool. */
	};
	union {
		uint64_t hits; /**< Reset value of hits packets. */
		int64_t query_gen; /**< Generation of the last release. */
	};
	uint64_t bytes; /**< Reset value of bytes. */
	void *action; /**< Pointer to the DV action. */
};

TAILQ_HEAD(mlx5_counters, mlx5_flow_counter);

/* Counter pool structure - query is in pool resolution. */
struct mlx5_flow_counter_pool {
	TAILQ_ENTRY(mlx5_flow_counter_pool) next;
	struct mlx5_counters counters; /* Free counter list. */
	union {
		struct mlx5_devx_obj *min_dcs;
		rte_atomic64_t a64_dcs;
	};
	/* The devx object of the minimum counter ID. */
	rte_atomic64_t query_gen;
	uint32_t n_counters:16; /* Number of devx allocated counters. */
	rte_spinlock_t sl; /* The pool lock. */
	struct mlx5_counter_stats_raw *raw;
	struct mlx5_counter_stats_raw *raw_hw; /* The raw on HW working. */
	struct mlx5_flow_counter counters_raw[]; /* The pool counters memory. */
};

struct mlx5_counter_stats_raw;

/* Memory management structure for group of counter statistics raws. */
struct mlx5_counter_stats_mem_mng {
	LIST_ENTRY(mlx5_counter_stats_mem_mng) next;
	struct mlx5_counter_stats_raw *raws;
	struct mlx5_devx_obj *dm;
	struct mlx5dv_devx_umem *umem;
};

/* Raw memory structure for the counter statistics values of a pool. */
struct mlx5_counter_stats_raw {
	LIST_ENTRY(mlx5_counter_stats_raw) next;
	int min_dcs_id;
	struct mlx5_counter_stats_mem_mng *mem_mng;
	volatile struct flow_counter_stats *data;
};

TAILQ_HEAD(mlx5_counter_pools, mlx5_flow_counter_pool);

/* Container structure for counter pools. */
struct mlx5_pools_container {
	rte_atomic16_t n_valid; /* Number of valid pools. */
	uint16_t n; /* Number of pools. */
	struct mlx5_counter_pools pool_list; /* Counter pool list. */
	struct mlx5_flow_counter_pool **pools; /* Counter pool array. */
	struct mlx5_counter_stats_mem_mng *init_mem_mng;
	/* Memory management for the raws of subsequently allocated pools. */
};

/* Counter global management structure. */
struct mlx5_flow_counter_mng {
	uint8_t mhi[2]; /* Master/host container index. */
	struct mlx5_pools_container ccont[2 * 2];
	/* Two containers each (single and batch) for double buffering. */
	struct mlx5_counters flow_counters; /* Legacy flow counter list. */
	uint8_t pending_queries;
	uint8_t batch;
	uint16_t pool_index;
	uint8_t query_thread_on;
	LIST_HEAD(mem_mngs, mlx5_counter_stats_mem_mng) mem_mngs;
	LIST_HEAD(stat_raws, mlx5_counter_stats_raw) free_stat_raws;
};

/* Per port data of shared IB device. */
struct mlx5_ibv_shared_port {
	uint32_t ih_port_id;
	/*
	 * Interrupt handler port_id. Used by the shared interrupt
	 * handler to find the corresponding rte_eth device by
	 * IB port index. If the value is equal to or greater than
	 * RTE_MAX_ETHPORTS, no subhandler is installed for the
	 * specified IB port index.
	 */
};

/* Table structure. */
struct mlx5_flow_tbl_resource {
	void *obj; /**< Pointer to DR table object. */
	rte_atomic32_t refcnt; /**< Reference counter. */
};

#define MLX5_MAX_TABLES UINT16_MAX
#define MLX5_MAX_TABLES_FDB UINT16_MAX

#define MLX5_DBR_PAGE_SIZE 4096 /* Must be >= 512. */
#define MLX5_DBR_SIZE 8
#define MLX5_DBR_PER_PAGE (MLX5_DBR_PAGE_SIZE / MLX5_DBR_SIZE)
#define MLX5_DBR_BITMAP_SIZE (MLX5_DBR_PER_PAGE / 64)

struct mlx5_devx_dbr_page {
	/* Door-bell records, must be first member in structure. */
	uint8_t dbrs[MLX5_DBR_PAGE_SIZE];
	LIST_ENTRY(mlx5_devx_dbr_page) next; /* Pointer to the next element. */
	struct mlx5dv_devx_umem *umem;
	uint32_t dbr_count; /* Number of door-bell records in use. */
	/* 1 bit marks matching door-bell is in use. */
	uint64_t dbr_bitmap[MLX5_DBR_BITMAP_SIZE];
};
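/*
 * Usage sketch (illustrative): mlx5_get_dbr(), declared below, returns
 * the byte offset of a free door-bell record inside *dbr_page (or a
 * negative value on error); the matching bit in dbr_bitmap[] would be
 * located as follows.
 *
 *	int64_t offset = mlx5_get_dbr(dev, &dbr_page);
 *	unsigned int bit = offset / MLX5_DBR_SIZE;
 *	uint64_t mask = UINT64_C(1) << (bit % 64); // in dbr_bitmap[bit / 64]
 */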
/*
 * Shared InfiniBand device context for Master/Representors
 * which belong to the same IB device with multiple IB ports.
 */
struct mlx5_ibv_shared {
	LIST_ENTRY(mlx5_ibv_shared) next;
	uint32_t refcnt;
	uint32_t devx:1; /* Opened with DV. */
	uint32_t max_port; /* Maximal IB device port index. */
	struct ibv_context *ctx; /* Verbs/DV context. */
	struct ibv_pd *pd; /* Protection Domain. */
	uint32_t pdn; /* Protection Domain number. */
	uint32_t tdn; /* Transport Domain number. */
	char ibdev_name[IBV_SYSFS_NAME_MAX]; /* IB device name. */
	char ibdev_path[IBV_SYSFS_PATH_MAX]; /* IB device path for secondary. */
	struct ibv_device_attr_ex device_attr; /* Device properties. */
	struct rte_pci_device *pci_dev; /* Backend PCI device. */
	LIST_ENTRY(mlx5_ibv_shared) mem_event_cb;
	/**< Called by memory event callback. */
	struct {
		uint32_t dev_gen; /* Generation number to flush local caches. */
		rte_rwlock_t rwlock; /* MR Lock. */
		struct mlx5_mr_btree cache; /* Global MR cache table. */
		struct mlx5_mr_list mr_list; /* Registered MR list. */
		struct mlx5_mr_list mr_free_list; /* Freed MR list. */
	} mr;
	/* Shared DV/DR flow data section. */
	pthread_mutex_t dv_mutex; /* DV context mutex. */
	uint32_t dv_refcnt; /* DV/DR data reference counter. */
	void *fdb_domain; /* FDB Direct Rules name space handle. */
	struct mlx5_flow_tbl_resource fdb_tbl[MLX5_MAX_TABLES_FDB];
	/* FDB Direct Rules tables. */
	void *rx_domain; /* RX Direct Rules name space handle. */
	struct mlx5_flow_tbl_resource rx_tbl[MLX5_MAX_TABLES];
	/* RX Direct Rules tables. */
	void *tx_domain; /* TX Direct Rules name space handle. */
	struct mlx5_flow_tbl_resource tx_tbl[MLX5_MAX_TABLES];
	/* TX Direct Rules tables. */
	void *esw_drop_action; /* Pointer to DR E-Switch drop action. */
	void *pop_vlan_action; /* Pointer to DR pop VLAN action. */
	LIST_HEAD(matchers, mlx5_flow_dv_matcher) matchers;
	LIST_HEAD(encap_decap, mlx5_flow_dv_encap_decap_resource) encaps_decaps;
	LIST_HEAD(modify_cmd, mlx5_flow_dv_modify_hdr_resource) modify_cmds;
	LIST_HEAD(tag, mlx5_flow_dv_tag_resource) tags;
	LIST_HEAD(jump, mlx5_flow_dv_jump_tbl_resource) jump_tbl;
	LIST_HEAD(port_id_action_list, mlx5_flow_dv_port_id_action_resource)
		port_id_action_list; /* List of port ID actions. */
	LIST_HEAD(push_vlan_action_list, mlx5_flow_dv_push_vlan_action_resource)
		push_vlan_action_list; /* List of push VLAN actions. */
	struct mlx5_flow_counter_mng cmng; /* Counters management structure. */
	/* Shared interrupt handler section. */
	pthread_mutex_t intr_mutex; /* Interrupt config mutex. */
	uint32_t intr_cnt; /* Interrupt handler reference counter. */
	struct rte_intr_handle intr_handle; /* Interrupt handler for device. */
	struct rte_intr_handle intr_handle_devx; /* DEVX interrupt handler. */
	struct mlx5dv_devx_cmd_comp *devx_comp; /* DEVX async comp obj. */
	struct mlx5_ibv_shared_port port[]; /* Per-device port data array. */
};

/* Per-process private structure. */
struct mlx5_proc_priv {
	size_t uar_table_sz;
	/* Size of UAR register table. */
	void *uar_table[];
	/* Table of UAR registers for each process. */
};

#define MLX5_PROC_PRIV(port_id) \
	((struct mlx5_proc_priv *)rte_eth_devices[port_id].process_private)
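/*
 * Usage sketch (illustrative): each process indexes its own UAR table
 * through the MLX5_PROC_PRIV() macro above, e.g. for a Tx queue "idx":
 *
 *	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(port_id);
 *	void *uar = NULL;
 *
 *	if (idx < ppriv->uar_table_sz)
 *		uar = ppriv->uar_table[idx];
 */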
struct mlx5_priv {
	struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
	struct mlx5_ibv_shared *sh; /* Shared IB device context. */
	uint32_t ibv_port; /* IB device port number. */
	struct rte_ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
	BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES);
	/* Bit-field of MAC addresses owned by the PMD. */
	uint16_t vlan_filter[MLX5_MAX_VLAN_IDS]; /* VLAN filters table. */
	unsigned int vlan_filter_n; /* Number of configured VLAN filters. */
	/* Device properties. */
	uint16_t mtu; /* Configured MTU. */
	unsigned int isolated:1; /* Whether isolated mode is enabled. */
	unsigned int representor:1; /* Device is a port representor. */
	unsigned int master:1; /* Device is an E-Switch master. */
	unsigned int dr_shared:1; /* DV/DR data is shared. */
	unsigned int counter_fallback:1; /* Use counter fallback management. */
	uint16_t domain_id; /* Switch domain identifier. */
	uint16_t vport_id; /* Associated VF vport index (if any). */
	int32_t representor_id; /* Port representor identifier. */
	unsigned int if_index; /* Associated kernel network device index. */
	/* RX/TX queues. */
	unsigned int rxqs_n; /* RX queues array size. */
	unsigned int txqs_n; /* TX queues array size. */
	struct mlx5_rxq_data *(*rxqs)[]; /* RX queues. */
	struct mlx5_txq_data *(*txqs)[]; /* TX queues. */
	struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
	struct rte_eth_rss_conf rss_conf; /* RSS configuration. */
	unsigned int (*reta_idx)[]; /* RETA index table. */
	unsigned int reta_idx_n; /* RETA index size. */
	struct mlx5_drop drop_queue; /* Flow drop queues. */
	struct mlx5_flows flows; /* RTE Flow rules. */
	struct mlx5_flows ctrl_flows; /* Control flow rules. */
	LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
	LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
	LIST_HEAD(hrxq, mlx5_hrxq) hrxqs; /* Verbs Hash Rx queues. */
	LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
	LIST_HEAD(txqibv, mlx5_txq_ibv) txqsibv; /* Verbs Tx queues. */
	/* Indirection tables. */
	LIST_HEAD(ind_tables, mlx5_ind_table_obj) ind_tbls;
	/* Pointer to next element. */
	rte_atomic32_t refcnt; /**< Reference counter. */
	struct ibv_flow_action *verbs_action;
	/**< Verbs modify header action object. */
	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
	uint8_t max_lro_msg_size;
	/* Tags resources cache. */
	uint32_t link_speed_capa; /* Link speed capabilities. */
	struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
	struct mlx5_stats_ctrl stats_ctrl; /* Stats control. */
	struct mlx5_dev_config config; /* Device configuration. */
	struct mlx5_verbs_alloc_ctx verbs_alloc_ctx;
	/* Context for Verbs allocator. */
	int nl_socket_rdma; /* Netlink socket (NETLINK_RDMA). */
	int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */
	uint32_t nl_sn; /* Netlink message sequence number. */
	LIST_HEAD(dbrpage, mlx5_devx_dbr_page) dbrpgs; /* Door-bell pages. */
	struct mlx5_vlan_vmwa_context *vmwa_context; /* VLAN WA context. */
#ifndef RTE_ARCH_64
	rte_spinlock_t uar_lock_cq; /* CQs share a common distinct UAR. */
	rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX];
	/* UAR same-page access control required in 32-bit implementations. */
#endif
};

#define PORT_ID(priv) ((priv)->dev_data->port_id)
#define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])
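/*
 * Relationship sketch (illustrative): PORT_ID() and ETH_DEV() above walk
 * back from the private data to the ethdev that owns it.
 *
 *	struct mlx5_priv *priv = dev->data->dev_private;
 *
 *	// ETH_DEV(priv) == dev, PORT_ID(priv) == dev->data->port_id
 */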
/* mlx5.c */

int mlx5_getenv_int(const char *);
int mlx5_proc_priv_init(struct rte_eth_dev *dev);
int64_t mlx5_get_dbr(struct rte_eth_dev *dev,
		     struct mlx5_devx_dbr_page **dbr_page);
int32_t mlx5_release_dbr(struct rte_eth_dev *dev, uint32_t umem_id,
			 uint64_t offset);
int mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev,
			     struct rte_eth_udp_tunnel *udp_tunnel);

/* mlx5_ethdev.c */

int mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]);
int mlx5_get_master_ifname(const char *ibdev_path, char (*ifname)[IF_NAMESIZE]);
unsigned int mlx5_ifindex(const struct rte_eth_dev *dev);
int mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr);
int mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu);
int mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep,
		   unsigned int flags);
int mlx5_dev_configure(struct rte_eth_dev *dev);
int mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info);
int mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock);
int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size);
const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev);
int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete);
int mlx5_force_link_status_change(struct rte_eth_dev *dev, int status);
int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev,
			   struct rte_eth_fc_conf *fc_conf);
int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev,
			   struct rte_eth_fc_conf *fc_conf);
int mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
				struct rte_pci_addr *pci_addr);
void mlx5_dev_link_status_handler(void *arg);
void mlx5_dev_interrupt_handler(void *arg);
void mlx5_dev_interrupt_handler_devx(void *arg);
void mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev);
void mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev);
int mlx5_set_link_down(struct rte_eth_dev *dev);
int mlx5_set_link_up(struct rte_eth_dev *dev);
int mlx5_is_removed(struct rte_eth_dev *dev);
eth_tx_burst_t mlx5_select_tx_function(struct rte_eth_dev *dev);
eth_rx_burst_t mlx5_select_rx_function(struct rte_eth_dev *dev);
unsigned int mlx5_dev_to_port_id(const struct rte_device *dev,
				 uint16_t *port_list,
				 unsigned int port_list_n);
int mlx5_port_to_eswitch_info(uint16_t port, uint16_t *es_domain_id,
			      uint16_t *es_port_id);
int mlx5_sysfs_switch_info(unsigned int ifindex,
			   struct mlx5_switch_info *info);
void mlx5_sysfs_check_switch_info(bool device_dir,
				  struct mlx5_switch_info *switch_info);
void mlx5_nl_check_switch_info(bool num_vf_set,
			       struct mlx5_switch_info *switch_info);
void mlx5_translate_port_name(const char *port_name_in,
			      struct mlx5_switch_info *port_info_out);
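/*
 * Usage sketch (illustrative; the parsed field values are an assumption
 * based on the pf0vf0 naming scheme documented above): translating a
 * kernel "phys_port_name" string with mlx5_translate_port_name().
 *
 *	struct mlx5_switch_info info;
 *
 *	mlx5_translate_port_name("pf0vf2", &info);
 *	// expected: info.name_type == MLX5_PHYS_PORT_NAME_TYPE_PFVF,
 *	//           info.pf_num == 0, info.port_name == 2
 */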
void mlx5_intr_callback_unregister(const struct rte_intr_handle *handle,
				   rte_intr_callback_fn cb_fn, void *cb_arg);
int mlx5_get_module_info(struct rte_eth_dev *dev,
			 struct rte_eth_dev_module_info *modinfo);
int mlx5_get_module_eeprom(struct rte_eth_dev *dev,
			   struct rte_dev_eeprom_info *info);

/* mlx5_mac.c */

int mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN]);
void mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
		      uint32_t index, uint32_t vmdq);
int mlx5_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr);
int mlx5_set_mc_addr_list(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr);

/* mlx5_rss.c */

int mlx5_rss_hash_update(struct rte_eth_dev *dev,
			 struct rte_eth_rss_conf *rss_conf);
int mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
			   struct rte_eth_rss_conf *rss_conf);
int mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size);
int mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size);
int mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
			     struct rte_eth_rss_reta_entry64 *reta_conf,
			     uint16_t reta_size);

/* mlx5_rxmode.c */

int mlx5_promiscuous_enable(struct rte_eth_dev *dev);
int mlx5_promiscuous_disable(struct rte_eth_dev *dev);
int mlx5_allmulticast_enable(struct rte_eth_dev *dev);
int mlx5_allmulticast_disable(struct rte_eth_dev *dev);

/* mlx5_stats.c */

void mlx5_stats_init(struct rte_eth_dev *dev);
int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
int mlx5_stats_reset(struct rte_eth_dev *dev);
int mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		    unsigned int n);
int mlx5_xstats_reset(struct rte_eth_dev *dev);
int mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			  struct rte_eth_xstat_name *xstats_names,
			  unsigned int n);

/* mlx5_vlan.c */

int mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
void mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on);
int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask);

/* mlx5_trigger.c */

int mlx5_dev_start(struct rte_eth_dev *dev);
void mlx5_dev_stop(struct rte_eth_dev *dev);
int mlx5_traffic_enable(struct rte_eth_dev *dev);
void mlx5_traffic_disable(struct rte_eth_dev *dev);
int mlx5_traffic_restart(struct rte_eth_dev *dev);

/* mlx5_flow.c */

int mlx5_flow_discover_priorities(struct rte_eth_dev *dev);
void mlx5_flow_print(struct rte_flow *flow);
int mlx5_flow_validate(struct rte_eth_dev *dev,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item items[],
		       const struct rte_flow_action actions[],
		       struct rte_flow_error *error);
struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev,
				  const struct rte_flow_attr *attr,
				  const struct rte_flow_item items[],
				  const struct rte_flow_action actions[],
				  struct rte_flow_error *error);
int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		      struct rte_flow_error *error);
void mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list);
int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
		    const struct rte_flow_action *action, void *data,
		    struct rte_flow_error *error);
int mlx5_flow_isolate(struct rte_eth_dev *dev, int enable,
		      struct rte_flow_error *error);
int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
			 enum rte_filter_type filter_type,
			 enum rte_filter_op filter_op,
			 void *arg);
int mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list);
void mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list);
int mlx5_flow_verify(struct rte_eth_dev *dev);
int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
			struct rte_flow_item_eth *eth_spec,
			struct rte_flow_item_eth *eth_mask,
			struct rte_flow_item_vlan *vlan_spec,
			struct rte_flow_item_vlan *vlan_mask);
int mlx5_ctrl_flow(struct rte_eth_dev *dev,
		   struct rte_flow_item_eth *eth_spec,
		   struct rte_flow_item_eth *eth_mask);
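/*
 * Usage sketch (illustrative): installing a control flow matching all
 * broadcast traffic with mlx5_ctrl_flow() above.
 *
 *	struct rte_flow_item_eth bcast = {
 *		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *	};
 *
 *	if (mlx5_ctrl_flow(dev, &bcast, &bcast))
 *		... handle the error ...
 */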
struct rte_flow *mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev);
int mlx5_flow_create_drop_queue(struct rte_eth_dev *dev);
void mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev);
void mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh,
				       uint64_t async_id, int status);
void mlx5_set_query_alarm(struct mlx5_ibv_shared *sh);
void mlx5_flow_query_alarm(void *arg);

/* mlx5_mp.c */

void mlx5_mp_req_start_rxtx(struct rte_eth_dev *dev);
void mlx5_mp_req_stop_rxtx(struct rte_eth_dev *dev);
int mlx5_mp_req_mr_create(struct rte_eth_dev *dev, uintptr_t addr);
int mlx5_mp_req_verbs_cmd_fd(struct rte_eth_dev *dev);
int mlx5_mp_req_queue_state_modify(struct rte_eth_dev *dev,
				   struct mlx5_mp_arg_queue_state_modify *sm);
int mlx5_mp_init_primary(void);
void mlx5_mp_uninit_primary(void);
int mlx5_mp_init_secondary(void);
void mlx5_mp_uninit_secondary(void);

/* mlx5_nl.c */

int mlx5_nl_init(int protocol);
int mlx5_nl_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
			 uint32_t index);
int mlx5_nl_mac_addr_remove(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
			    uint32_t index);
void mlx5_nl_mac_addr_sync(struct rte_eth_dev *dev);
void mlx5_nl_mac_addr_flush(struct rte_eth_dev *dev);
int mlx5_nl_promisc(struct rte_eth_dev *dev, int enable);
int mlx5_nl_allmulti(struct rte_eth_dev *dev, int enable);
unsigned int mlx5_nl_portnum(int nl, const char *name);
unsigned int mlx5_nl_ifindex(int nl, const char *name, uint32_t pindex);
int mlx5_nl_switch_info(int nl, unsigned int ifindex,
			struct mlx5_switch_info *info);

struct mlx5_vlan_vmwa_context *mlx5_vlan_vmwa_init(struct rte_eth_dev *dev,
						   uint32_t ifindex);
void mlx5_vlan_vmwa_exit(struct mlx5_vlan_vmwa_context *ctx);
void mlx5_vlan_vmwa_release(struct rte_eth_dev *dev,
			    struct mlx5_vf_vlan *vf_vlan);
void mlx5_vlan_vmwa_acquire(struct rte_eth_dev *dev,
			    struct mlx5_vf_vlan *vf_vlan);

/* mlx5_devx_cmds.c */

struct mlx5_devx_obj *mlx5_devx_cmd_flow_counter_alloc(struct ibv_context *ctx,
						       uint32_t bulk_sz);
int mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj);
int mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs,
				     int clear, uint32_t n_counters,
				     uint64_t *pkts, uint64_t *bytes,
				     uint32_t mkey, void *addr,
				     struct mlx5dv_devx_cmd_comp *cmd_comp,
				     uint64_t async_id);
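/*
 * Usage sketch (illustrative; the argument conventions are an
 * assumption): a synchronous single-counter read with the two commands
 * above, where mkey/addr/cmd_comp would only be needed for asynchronous
 * batch queries.
 *
 *	struct mlx5_devx_obj *dcs =
 *		mlx5_devx_cmd_flow_counter_alloc(ctx, 0);
 *	uint64_t pkts, bytes;
 *
 *	mlx5_devx_cmd_flow_counter_query(dcs, 0, 0, &pkts, &bytes,
 *					 0, NULL, NULL, 0);
 */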
int mlx5_devx_cmd_query_hca_attr(struct ibv_context *ctx,
				 struct mlx5_hca_attr *attr);
struct mlx5_devx_obj *mlx5_devx_cmd_mkey_create(struct ibv_context *ctx,
						struct mlx5_devx_mkey_attr *attr);
int mlx5_devx_get_out_command_status(void *out);
int mlx5_devx_cmd_qp_query_tis_td(struct ibv_qp *qp, uint32_t tis_num,
				  uint32_t *tis_td);
struct mlx5_devx_obj *mlx5_devx_cmd_create_rq(struct ibv_context *ctx,
					      struct mlx5_devx_create_rq_attr *rq_attr,
					      int socket);
int mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq,
			    struct mlx5_devx_modify_rq_attr *rq_attr);
struct mlx5_devx_obj *mlx5_devx_cmd_create_tir(struct ibv_context *ctx,
					       struct mlx5_devx_tir_attr *tir_attr);
struct mlx5_devx_obj *mlx5_devx_cmd_create_rqt(struct ibv_context *ctx,
					       struct mlx5_devx_rqt_attr *rqt_attr);

#endif /* RTE_PMD_MLX5_H_ */