/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_H_
#define RTE_PMD_MLX5_H_

#include <stddef.h>
#include <stdint.h>
#include <limits.h>
#include <net/if.h>
#include <netinet/in.h>
#include <sys/queue.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_rwlock.h>
#include <rte_interrupts.h>
#include <rte_errno.h>
#include <rte_flow.h>

#include "mlx5_utils.h"
#include "mlx5_mr.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"

/* Mellanox PCI vendor identifier. */
enum {
	PCI_VENDOR_ID_MELLANOX = 0x15b3,
};

/* PCI device identifiers supported by this PMD. */
enum {
	PCI_DEVICE_ID_MELLANOX_CONNECTX4 = 0x1013,
	PCI_DEVICE_ID_MELLANOX_CONNECTX4VF = 0x1014,
	PCI_DEVICE_ID_MELLANOX_CONNECTX4LX = 0x1015,
	PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF = 0x1016,
	PCI_DEVICE_ID_MELLANOX_CONNECTX5 = 0x1017,
	PCI_DEVICE_ID_MELLANOX_CONNECTX5VF = 0x1018,
	PCI_DEVICE_ID_MELLANOX_CONNECTX5EX = 0x1019,
	PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF = 0x101a,
	PCI_DEVICE_ID_MELLANOX_CONNECTX5BF = 0xa2d2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF = 0xa2d3,
	PCI_DEVICE_ID_MELLANOX_CONNECTX6 = 0x101b,
	PCI_DEVICE_ID_MELLANOX_CONNECTX6VF = 0x101c,
};

/* Request types for IPC. */
enum mlx5_mp_req_type {
	MLX5_MP_REQ_VERBS_CMD_FD = 1,
	MLX5_MP_REQ_CREATE_MR,
	MLX5_MP_REQ_START_RXTX,
	MLX5_MP_REQ_STOP_RXTX,
};

/* Parameters for IPC. */
struct mlx5_mp_param {
	enum mlx5_mp_req_type type;
	int port_id;
	int result;
	RTE_STD_C11
	union {
		uintptr_t addr; /* MLX5_MP_REQ_CREATE_MR */
	} args;
};

/** Request timeout for IPC. */
#define MLX5_MP_REQ_TIMEOUT_SEC 5

/** Key string for IPC. */
#define MLX5_MP_NAME "net_mlx5_mp"

/* Recognized Infiniband device physical port name types. */
enum mlx5_phys_port_name_type {
	MLX5_PHYS_PORT_NAME_TYPE_NOTSET = 0, /* Not set. */
	MLX5_PHYS_PORT_NAME_TYPE_LEGACY, /* before kernel ver < 5.0 */
	MLX5_PHYS_PORT_NAME_TYPE_UPLINK, /* p0, kernel ver >= 5.0 */
	MLX5_PHYS_PORT_NAME_TYPE_PFVF, /* pf0vf0, kernel ver >= 5.0 */
	MLX5_PHYS_PORT_NAME_TYPE_UNKNOWN, /* Unrecognized. */
};

/** Switch information returned by mlx5_nl_switch_info(). */
struct mlx5_switch_info {
	uint32_t master:1; /**< Master device. */
	uint32_t representor:1; /**< Representor device. */
	enum mlx5_phys_port_name_type name_type; /**< Port name type. */
	int32_t pf_num; /**< PF number (valid for pfxvfx format only). */
	int32_t port_name; /**< Representor port name. */
	uint64_t switch_id; /**< Switch identifier. */
};

LIST_HEAD(mlx5_dev_list, mlx5_ibv_shared);

/* Shared data between primary and secondary processes. */
struct mlx5_shared_data {
	rte_spinlock_t lock;
	/* Global spinlock for primary and secondary processes. */
	int init_done; /* Whether primary has done initialization. */
	unsigned int secondary_cnt; /* Number of secondary processes init'd. */
	struct mlx5_dev_list mem_event_cb_list;
	rte_rwlock_t mem_event_rwlock;
};

/* Per-process data structure, not visible to other processes. */
struct mlx5_local_data {
	int init_done; /* Whether a secondary has done initialization. */
};

extern struct mlx5_shared_data *mlx5_shared_data;

/* Mapping between a DPDK xstat name and a device counter name. */
struct mlx5_counter_ctrl {
	/* Name of the counter. */
	char dpdk_name[RTE_ETH_XSTATS_NAME_SIZE];
	/* Name of the counter on the device table. */
	char ctr_name[RTE_ETH_XSTATS_NAME_SIZE];
	uint32_t ib:1; /**< Nonzero for IB counters. */
};

/* Extended statistics control state. */
struct mlx5_xstats_ctrl {
	/* Number of device stats. */
	uint16_t stats_n;
	/* Number of device stats identified by PMD. */
	uint16_t mlx5_stats_n;
	/* Index in the device counters table. */
	uint16_t dev_table_idx[MLX5_MAX_XSTATS];
	uint64_t base[MLX5_MAX_XSTATS];
	struct mlx5_counter_ctrl info[MLX5_MAX_XSTATS];
};

/* Basic statistics control state. */
struct mlx5_stats_ctrl {
	/* Base for imissed counter. */
	uint64_t imissed_base;
};

/* devx counter object */
struct mlx5_devx_counter_set {
	struct mlx5dv_devx_obj *obj;
	int id; /* Flow counter ID */
};

/* HCA attributes. */
struct mlx5_hca_attr {
	uint32_t eswitch_manager:1;
};

/* Flow list. */
TAILQ_HEAD(mlx5_flows, rte_flow);

/* Default PMD specific parameter value. */
#define MLX5_ARG_UNSET (-1)

/*
 * Device configuration structure.
 *
 * Merged configuration from:
 *
 *  - Device capabilities,
 *  - User device parameters disabled features.
 */
struct mlx5_dev_config {
	unsigned int hw_csum:1; /* Checksum offload is supported. */
	unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */
	unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
	unsigned int hw_padding:1; /* End alignment padding is supported. */
	unsigned int vf:1; /* This is a VF. */
	unsigned int tunnel_en:1;
	/* Whether tunnel stateless offloads are supported. */
	unsigned int mpls_en:1; /* MPLS over GRE/UDP is enabled. */
	unsigned int cqe_comp:1; /* CQE compression is enabled. */
	unsigned int cqe_pad:1; /* CQE padding is enabled. */
	unsigned int tso:1; /* Whether TSO is supported. */
	unsigned int tx_vec_en:1; /* Tx vector is enabled. */
	unsigned int rx_vec_en:1; /* Rx vector is enabled. */
	unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
	unsigned int mr_ext_memseg_en:1;
	/* Whether memseg should be extended for MR creation. */
	unsigned int l3_vxlan_en:1; /* Enable L3 VXLAN flow creation. */
	unsigned int vf_nl_en:1; /* Enable Netlink requests in VF mode. */
	unsigned int dv_esw_en:1; /* Enable E-Switch DV flow. */
	unsigned int dv_flow_en:1; /* Enable DV flow. */
	unsigned int swp:1; /* Tx generic tunnel checksum and TSO offload. */
	unsigned int devx:1; /* Whether devx interface is available or not. */
	struct {
		unsigned int enabled:1; /* Whether MPRQ is enabled. */
		unsigned int stride_num_n; /* Number of strides. */
		unsigned int min_stride_size_n; /* Min size of a stride. */
		unsigned int max_stride_size_n; /* Max size of a stride. */
		unsigned int max_memcpy_len;
		/* Maximum packet size to memcpy Rx packets. */
		unsigned int min_rxqs_num;
		/* Rx queue count threshold to enable MPRQ. */
	} mprq; /* Configurations for Multi-Packet RQ. */
	int mps; /* Multi-packet send supported mode. */
	unsigned int flow_prio; /* Number of flow priorities. */
	unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */
	unsigned int ind_table_max_size; /* Maximum indirection table size. */
	int txq_inline; /* Maximum packet size for inlining. */
	int txqs_inline; /* Queue number threshold for inlining. */
	int txqs_vec; /* Queue number threshold for vectorized Tx. */
	int inline_max_packet_sz; /* Max packet size for inlining. */
	struct mlx5_hca_attr hca_attr; /* HCA attributes. */
};

/**
 * Type of object being allocated.
 */
enum mlx5_verbs_alloc_type {
	MLX5_VERBS_ALLOC_TYPE_NONE,
	MLX5_VERBS_ALLOC_TYPE_TX_QUEUE,
	MLX5_VERBS_ALLOC_TYPE_RX_QUEUE,
};

/**
 * Verbs allocator needs a context to know in the callback which kind of
 * resources it is allocating.
 */
struct mlx5_verbs_alloc_ctx {
	enum mlx5_verbs_alloc_type type; /* Kind of object being allocated. */
	const void *obj; /* Pointer to the DPDK object. */
};

LIST_HEAD(mlx5_mr_list, mlx5_mr);

/* Flow drop context necessary due to Verbs API. */
struct mlx5_drop {
	struct mlx5_hrxq *hrxq; /* Hash Rx queue. */
	struct mlx5_rxq_ibv *rxq; /* Verbs Rx queue. */
};

struct mlx5_flow_tcf_context;

/* Per port data of shared IB device. */
struct mlx5_ibv_shared_port {
	uint32_t ih_port_id;
	/*
	 * Interrupt handler port_id. Used by shared interrupt
	 * handler to find the corresponding rte_eth device
	 * by IB port index. If value is equal or greater
	 * RTE_MAX_ETHPORTS it means there is no subhandler
	 * installed for specified IB port index.
	 */
};

/* Table structure. */
struct mlx5_flow_tbl_resource {
	void *obj; /**< Pointer to DR table object. */
	rte_atomic32_t refcnt; /**< Reference counter. */
};

#define MLX5_MAX_TABLES 1024
#define MLX5_MAX_TABLES_FDB 32
#define MLX5_GROUP_FACTOR 1

/*
 * Shared Infiniband device context for Master/Representors
 * which belong to same IB device with multiple IB ports.
 */
struct mlx5_ibv_shared {
	LIST_ENTRY(mlx5_ibv_shared) next;
	uint32_t refcnt;
	uint32_t devx:1; /* Opened with DV. */
	uint32_t max_port; /* Maximal IB device port index. */
	struct ibv_context *ctx; /* Verbs/DV context. */
	struct ibv_pd *pd; /* Protection Domain. */
	char ibdev_name[IBV_SYSFS_NAME_MAX]; /* IB device name. */
	char ibdev_path[IBV_SYSFS_PATH_MAX]; /* IB device path for secondary */
	struct ibv_device_attr_ex device_attr; /* Device properties. */
	struct rte_pci_device *pci_dev; /* Backend PCI device. */
	LIST_ENTRY(mlx5_ibv_shared) mem_event_cb;
	/**< Called by memory event callback. */
	struct {
		uint32_t dev_gen; /* Generation number to flush local caches. */
		rte_rwlock_t rwlock; /* MR Lock. */
		struct mlx5_mr_btree cache; /* Global MR cache table. */
		struct mlx5_mr_list mr_list; /* Registered MR list. */
		struct mlx5_mr_list mr_free_list; /* Freed MR list. */
	} mr;
	/* Shared DV/DR flow data section. */
	pthread_mutex_t dv_mutex; /* DV context mutex. */
	uint32_t dv_refcnt; /* DV/DR data reference counter. */
	void *fdb_domain; /* FDB Direct Rules name space handle. */
	struct mlx5_flow_tbl_resource fdb_tbl[MLX5_MAX_TABLES_FDB];
	/* FDB Direct Rules tables. */
	void *rx_domain; /* RX Direct Rules name space handle. */
	struct mlx5_flow_tbl_resource rx_tbl[MLX5_MAX_TABLES];
	/* RX Direct Rules tables. */
	void *tx_domain; /* TX Direct Rules name space handle. */
	struct mlx5_flow_tbl_resource tx_tbl[MLX5_MAX_TABLES];
	void *esw_drop_action; /* Pointer to DR E-Switch drop action. */
	/* TX Direct Rules tables. */
	LIST_HEAD(matchers, mlx5_flow_dv_matcher) matchers;
	LIST_HEAD(encap_decap, mlx5_flow_dv_encap_decap_resource) encaps_decaps;
	LIST_HEAD(modify_cmd, mlx5_flow_dv_modify_hdr_resource) modify_cmds;
	LIST_HEAD(tag, mlx5_flow_dv_tag_resource) tags;
	LIST_HEAD(jump, mlx5_flow_dv_jump_tbl_resource) jump_tbl;
	LIST_HEAD(port_id_action_list, mlx5_flow_dv_port_id_action_resource)
		port_id_action_list; /* List of port ID actions. */
	/* Shared interrupt handler section. */
	pthread_mutex_t intr_mutex; /* Interrupt config mutex. */
	uint32_t intr_cnt; /* Interrupt handler reference counter. */
	struct rte_intr_handle intr_handle; /* Interrupt handler for device. */
	struct mlx5_ibv_shared_port port[]; /* per device port data array. */
};

/* Per-process private structure. */
struct mlx5_proc_priv {
	size_t uar_table_sz;
	/* Size of UAR register table. */
	void *uar_table[];
	/* Table of UAR registers for each process. */
};

#define MLX5_PROC_PRIV(port_id) \
	((struct mlx5_proc_priv *)rte_eth_devices[port_id].process_private)

/* Per-port private data. */
struct mlx5_priv {
	struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
	struct mlx5_ibv_shared *sh; /* Shared IB device context. */
	uint32_t ibv_port; /* IB device port number. */
	struct rte_ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
	BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES);
	/* Bit-field of MAC addresses owned by the PMD. */
	uint16_t vlan_filter[MLX5_MAX_VLAN_IDS]; /* VLAN filters table. */
	unsigned int vlan_filter_n; /* Number of configured VLAN filters. */
	/* Device properties. */
	uint16_t mtu; /* Configured MTU. */
	unsigned int isolated:1; /* Whether isolated mode is enabled. */
	unsigned int representor:1; /* Device is a port representor. */
	unsigned int master:1; /* Device is a E-Switch master. */
	unsigned int dr_shared:1; /* DV/DR data is shared. */
	uint16_t domain_id; /* Switch domain identifier. */
	uint16_t vport_id; /* Associated VF vport index (if any). */
	int32_t representor_id; /* Port representor identifier. */
	/* RX/TX queues. */
	unsigned int rxqs_n; /* RX queues array size. */
	unsigned int txqs_n; /* TX queues array size. */
	struct mlx5_rxq_data *(*rxqs)[]; /* RX queues. */
	struct mlx5_txq_data *(*txqs)[]; /* TX queues. */
	struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
	struct rte_eth_rss_conf rss_conf; /* RSS configuration. */
	unsigned int (*reta_idx)[]; /* RETA index table. */
	unsigned int reta_idx_n; /* RETA index size. */
	struct mlx5_drop drop_queue; /* Flow drop queues. */
	struct mlx5_flows flows; /* RTE Flow rules. */
	struct mlx5_flows ctrl_flows; /* Control flow rules. */
	LIST_HEAD(counters, mlx5_flow_counter) flow_counters;
	/* Flow counters. */
	LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
	LIST_HEAD(rxqibv, mlx5_rxq_ibv) rxqsibv; /* Verbs Rx queues. */
	LIST_HEAD(hrxq, mlx5_hrxq) hrxqs; /* Verbs Hash Rx queues. */
	LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
	LIST_HEAD(txqibv, mlx5_txq_ibv) txqsibv; /* Verbs Tx queues. */
	/* Verbs Indirection tables. */
	LIST_HEAD(ind_tables, mlx5_ind_table_ibv) ind_tbls;
	/* Pointer to next element. */
	rte_atomic32_t refcnt; /**< Reference counter. */
	struct ibv_flow_action *verbs_action;
	/**< Verbs modify header action object. */
	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
	/* Tags resources cache. */
	uint32_t link_speed_capa; /* Link speed capabilities. */
	struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
	struct mlx5_stats_ctrl stats_ctrl; /* Stats control. */
	struct mlx5_dev_config config; /* Device configuration. */
	struct mlx5_verbs_alloc_ctx verbs_alloc_ctx;
	/* Context for Verbs allocator. */
	int nl_socket_rdma; /* Netlink socket (NETLINK_RDMA). */
	int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */
	uint32_t nl_sn; /* Netlink message sequence number. */
#ifndef RTE_ARCH_64
	rte_spinlock_t uar_lock_cq; /* CQs share a common distinct UAR */
	rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX];
	/* UAR same-page access control required in 32bit implementations. */
#endif
	struct mlx5_flow_tcf_context *tcf_context; /* TC flower context. */
};

#define PORT_ID(priv) ((priv)->dev_data->port_id)
#define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])

/* mlx5.c */

int mlx5_getenv_int(const char *);
int mlx5_proc_priv_init(struct rte_eth_dev *dev);

/* mlx5_ethdev.c */

int mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]);
int mlx5_get_master_ifname(const char *ibdev_path, char (*ifname)[IF_NAMESIZE]);
unsigned int mlx5_ifindex(const struct rte_eth_dev *dev);
int mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr);
int mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu);
int mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep,
		   unsigned int flags);
int mlx5_dev_configure(struct rte_eth_dev *dev);
void mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info);
int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size);
const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev);
int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete);
int mlx5_force_link_status_change(struct rte_eth_dev *dev, int status);
int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev,
			   struct rte_eth_fc_conf *fc_conf);
int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev,
			   struct rte_eth_fc_conf *fc_conf);
int mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
				struct rte_pci_addr *pci_addr);
void mlx5_dev_link_status_handler(void *arg);
void mlx5_dev_interrupt_handler(void *arg);
void mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev);
void mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev);
int mlx5_set_link_down(struct rte_eth_dev *dev);
int mlx5_set_link_up(struct rte_eth_dev *dev);
int mlx5_is_removed(struct rte_eth_dev *dev);
eth_tx_burst_t mlx5_select_tx_function(struct rte_eth_dev *dev);
eth_rx_burst_t mlx5_select_rx_function(struct rte_eth_dev *dev);
unsigned int mlx5_dev_to_port_id(const struct rte_device *dev,
				 uint16_t *port_list,
				 unsigned int port_list_n);
int mlx5_port_to_eswitch_info(uint16_t port, uint16_t *es_domain_id,
			      uint16_t *es_port_id);
int mlx5_sysfs_switch_info(unsigned int ifindex,
			   struct mlx5_switch_info *info);
void mlx5_sysfs_check_switch_info(bool device_dir,
				  struct mlx5_switch_info *switch_info);
/* NOTE(review): "nun_vf_set" is likely a typo for "num_vf_set" — confirm
 * against the definition in mlx5_nl.c before renaming.
 */
void mlx5_nl_check_switch_info(bool nun_vf_set,
			       struct mlx5_switch_info *switch_info);
void mlx5_translate_port_name(const char *port_name_in,
			      struct mlx5_switch_info *port_info_out);

/* mlx5_mac.c */

int mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN]);
void mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
		      uint32_t index, uint32_t vmdq);
int mlx5_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr);
int mlx5_set_mc_addr_list(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr);

/* mlx5_rss.c */

int mlx5_rss_hash_update(struct rte_eth_dev *dev,
			 struct rte_eth_rss_conf *rss_conf);
int mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
			   struct rte_eth_rss_conf *rss_conf);
int mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size);
int mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size);
int mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
			     struct rte_eth_rss_reta_entry64 *reta_conf,
			     uint16_t reta_size);

/* mlx5_rxmode.c */

void mlx5_promiscuous_enable(struct rte_eth_dev *dev);
void mlx5_promiscuous_disable(struct rte_eth_dev *dev);
void mlx5_allmulticast_enable(struct rte_eth_dev *dev);
void mlx5_allmulticast_disable(struct rte_eth_dev *dev);

/* mlx5_stats.c */

void mlx5_stats_init(struct rte_eth_dev *dev);
int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
void mlx5_stats_reset(struct rte_eth_dev *dev);
int mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		    unsigned int n);
void mlx5_xstats_reset(struct rte_eth_dev *dev);
int mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			  struct rte_eth_xstat_name *xstats_names,
			  unsigned int n);

/* mlx5_vlan.c */

int mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
void mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on);
int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask);

/* mlx5_trigger.c */

int mlx5_dev_start(struct rte_eth_dev *dev);
void mlx5_dev_stop(struct rte_eth_dev *dev);
int mlx5_traffic_enable(struct rte_eth_dev *dev);
void mlx5_traffic_disable(struct rte_eth_dev *dev);
int mlx5_traffic_restart(struct rte_eth_dev *dev);

/* mlx5_flow.c */

int mlx5_flow_discover_priorities(struct rte_eth_dev *dev);
void mlx5_flow_print(struct rte_flow *flow);
int mlx5_flow_validate(struct rte_eth_dev *dev,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item items[],
		       const struct rte_flow_action actions[],
		       struct rte_flow_error *error);
struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev,
				  const struct rte_flow_attr *attr,
				  const struct rte_flow_item items[],
				  const struct rte_flow_action actions[],
				  struct rte_flow_error *error);
int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		      struct rte_flow_error *error);
void mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list);
int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
		    const struct rte_flow_action *action, void *data,
		    struct rte_flow_error *error);
int mlx5_flow_isolate(struct rte_eth_dev *dev, int enable,
		      struct rte_flow_error *error);
int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
			 enum rte_filter_type filter_type,
			 enum rte_filter_op filter_op,
			 void *arg);
int mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list);
void mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list);
int mlx5_flow_verify(struct rte_eth_dev *dev);
int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
			struct rte_flow_item_eth *eth_spec,
			struct rte_flow_item_eth *eth_mask,
			struct rte_flow_item_vlan *vlan_spec,
			struct rte_flow_item_vlan *vlan_mask);
int mlx5_ctrl_flow(struct rte_eth_dev *dev,
		   struct rte_flow_item_eth *eth_spec,
		   struct rte_flow_item_eth *eth_mask);
int mlx5_flow_create_drop_queue(struct rte_eth_dev *dev);
void mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev);

/* mlx5_mp.c */

void mlx5_mp_req_start_rxtx(struct rte_eth_dev *dev);
void mlx5_mp_req_stop_rxtx(struct rte_eth_dev *dev);
int mlx5_mp_req_mr_create(struct rte_eth_dev *dev, uintptr_t addr);
int mlx5_mp_req_verbs_cmd_fd(struct rte_eth_dev *dev);
void mlx5_mp_init_primary(void);
void mlx5_mp_uninit_primary(void);
void mlx5_mp_init_secondary(void);
void mlx5_mp_uninit_secondary(void);

/* mlx5_nl.c */

int mlx5_nl_init(int protocol);
int mlx5_nl_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
			 uint32_t index);
int mlx5_nl_mac_addr_remove(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
			    uint32_t index);
void mlx5_nl_mac_addr_sync(struct rte_eth_dev *dev);
void mlx5_nl_mac_addr_flush(struct rte_eth_dev *dev);
int mlx5_nl_promisc(struct rte_eth_dev *dev, int enable);
int mlx5_nl_allmulti(struct rte_eth_dev *dev, int enable);
unsigned int mlx5_nl_portnum(int nl, const char *name);
unsigned int mlx5_nl_ifindex(int nl, const char *name, uint32_t pindex);
int mlx5_nl_switch_info(int nl, unsigned int ifindex,
			struct mlx5_switch_info *info);

/* mlx5_devx_cmds.c */

int mlx5_devx_cmd_flow_counter_alloc(struct ibv_context *ctx,
				     struct mlx5_devx_counter_set *dcx);
int mlx5_devx_cmd_flow_counter_free(struct mlx5dv_devx_obj *obj);
int mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_counter_set *dcx,
				     int clear,
				     uint64_t *pkts, uint64_t *bytes);
int mlx5_devx_cmd_query_hca_attr(struct ibv_context *ctx,
				 struct mlx5_hca_attr *attr);
#endif /* RTE_PMD_MLX5_H_ */