1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2017 Intel Corporation 3 */ 4 5 #include <sys/queue.h> 6 #include <stdio.h> 7 #include <errno.h> 8 #include <stdint.h> 9 #include <string.h> 10 #include <unistd.h> 11 #include <stdarg.h> 12 #include <inttypes.h> 13 #include <rte_string_fns.h> 14 #include <rte_byteorder.h> 15 #include <rte_common.h> 16 #include <rte_cycles.h> 17 18 #include <rte_interrupts.h> 19 #include <rte_log.h> 20 #include <rte_debug.h> 21 #include <rte_pci.h> 22 #include <bus_pci_driver.h> 23 #include <rte_branch_prediction.h> 24 #include <rte_memory.h> 25 #include <rte_kvargs.h> 26 #include <rte_eal.h> 27 #include <rte_alarm.h> 28 #include <rte_ether.h> 29 #include <ethdev_driver.h> 30 #include <ethdev_pci.h> 31 #include <rte_malloc.h> 32 #include <rte_random.h> 33 #include <dev_driver.h> 34 #include <rte_hash_crc.h> 35 #ifdef RTE_LIB_SECURITY 36 #include <rte_security_driver.h> 37 #endif 38 #include <rte_os_shim.h> 39 40 #include "ixgbe_logs.h" 41 #include "base/ixgbe_api.h" 42 #include "base/ixgbe_vf.h" 43 #include "base/ixgbe_common.h" 44 #include "ixgbe_ethdev.h" 45 #include "ixgbe_bypass.h" 46 #include "ixgbe_rxtx.h" 47 #include "base/ixgbe_type.h" 48 #include "base/ixgbe_phy.h" 49 #include "base/ixgbe_osdep.h" 50 #include "ixgbe_regs.h" 51 52 /* 53 * High threshold controlling when to start sending XOFF frames. Must be at 54 * least 8 bytes less than receive packet buffer size. This value is in units 55 * of 1024 bytes. 56 */ 57 #define IXGBE_FC_HI 0x80 58 59 /* 60 * Low threshold controlling when to start sending XON frames. This value is 61 * in units of 1024 bytes. 62 */ 63 #define IXGBE_FC_LO 0x40 64 65 /* Timer value included in XOFF frames. */ 66 #define IXGBE_FC_PAUSE 0x680 67 68 /*Default value of Max Rx Queue*/ 69 #define IXGBE_MAX_RX_QUEUE_NUM 128 70 71 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */ 72 #define IXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */ 73 #define IXGBE_VMDQ_NUM_UC_MAC 4096 /* Maximum nb. of UC MAC addr. */ 74 75 #define IXGBE_MMW_SIZE_DEFAULT 0x4 76 #define IXGBE_MMW_SIZE_JUMBO_FRAME 0x14 77 #define IXGBE_MAX_RING_DESC 8192 /* replicate define from rxtx */ 78 79 /* 80 * Default values for RX/TX configuration 81 */ 82 #define IXGBE_DEFAULT_RX_FREE_THRESH 32 83 #define IXGBE_DEFAULT_RX_PTHRESH 8 84 #define IXGBE_DEFAULT_RX_HTHRESH 8 85 #define IXGBE_DEFAULT_RX_WTHRESH 0 86 87 #define IXGBE_DEFAULT_TX_FREE_THRESH 32 88 #define IXGBE_DEFAULT_TX_PTHRESH 32 89 #define IXGBE_DEFAULT_TX_HTHRESH 0 90 #define IXGBE_DEFAULT_TX_WTHRESH 0 91 #define IXGBE_DEFAULT_TX_RSBIT_THRESH 32 92 93 /* Bit shift and mask */ 94 #define IXGBE_4_BIT_WIDTH (CHAR_BIT / 2) 95 #define IXGBE_4_BIT_MASK RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t) 96 #define IXGBE_8_BIT_WIDTH CHAR_BIT 97 #define IXGBE_8_BIT_MASK UINT8_MAX 98 99 #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */ 100 101 #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0])) 102 103 /* Additional timesync values. 
*/ 104 #define NSEC_PER_SEC 1000000000L 105 #define IXGBE_INCVAL_10GB 0x66666666 106 #define IXGBE_INCVAL_1GB 0x40000000 107 #define IXGBE_INCVAL_100 0x50000000 108 #define IXGBE_INCVAL_SHIFT_10GB 28 109 #define IXGBE_INCVAL_SHIFT_1GB 24 110 #define IXGBE_INCVAL_SHIFT_100 21 111 #define IXGBE_INCVAL_SHIFT_82599 7 112 #define IXGBE_INCPER_SHIFT_82599 24 113 114 #define IXGBE_CYCLECOUNTER_MASK 0xffffffffffffffffULL 115 116 #define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000 117 #define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000 118 #define IXGBE_ETAG_ETYPE 0x00005084 119 #define IXGBE_ETAG_ETYPE_MASK 0x0000ffff 120 #define IXGBE_ETAG_ETYPE_VALID 0x80000000 121 #define IXGBE_RAH_ADTYPE 0x40000000 122 #define IXGBE_RAL_ETAG_FILTER_MASK 0x00003fff 123 #define IXGBE_VMVIR_TAGA_MASK 0x18000000 124 #define IXGBE_VMVIR_TAGA_ETAG_INSERT 0x08000000 125 #define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */ 126 #define IXGBE_QDE_STRIP_TAG 0x00000004 127 #define IXGBE_VTEICR_MASK 0x07 128 129 #define IXGBE_EXVET_VET_EXT_SHIFT 16 130 #define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000 131 132 #define IXGBE_DEVARG_FIBER_SDP3_NOT_TX_DISABLE "fiber_sdp3_no_tx_disable" 133 134 static const char * const ixgbe_valid_arguments[] = { 135 IXGBE_DEVARG_FIBER_SDP3_NOT_TX_DISABLE, 136 NULL 137 }; 138 139 #define IXGBEVF_DEVARG_PFLINK_FULLCHK "pflink_fullchk" 140 141 static const char * const ixgbevf_valid_arguments[] = { 142 IXGBEVF_DEVARG_PFLINK_FULLCHK, 143 NULL 144 }; 145 146 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params); 147 static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev); 148 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev); 149 static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev); 150 static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev); 151 static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev); 152 static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev); 153 static int ixgbe_dev_configure(struct rte_eth_dev *dev); 154 static int ixgbe_dev_start(struct rte_eth_dev *dev); 155 static int ixgbe_dev_stop(struct rte_eth_dev *dev); 156 static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev); 157 static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev); 158 static int ixgbe_dev_close(struct rte_eth_dev *dev); 159 static int ixgbe_dev_reset(struct rte_eth_dev *dev); 160 static int ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev); 161 static int ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev); 162 static int ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev); 163 static int ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev); 164 static int ixgbe_dev_link_update(struct rte_eth_dev *dev, 165 int wait_to_complete); 166 static int ixgbe_dev_stats_get(struct rte_eth_dev *dev, 167 struct rte_eth_stats *stats); 168 static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev, 169 struct rte_eth_xstat *xstats, unsigned n); 170 static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, 171 struct rte_eth_xstat *xstats, unsigned n); 172 static int 173 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 174 uint64_t *values, unsigned int n); 175 static int ixgbe_dev_stats_reset(struct rte_eth_dev *dev); 176 static int ixgbe_dev_xstats_reset(struct rte_eth_dev *dev); 177 static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev, 178 struct rte_eth_xstat_name *xstats_names, 179 unsigned int size); 180 static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev, 181 struct 
rte_eth_xstat_name *xstats_names, unsigned limit); 182 static int ixgbe_dev_xstats_get_names_by_id( 183 struct rte_eth_dev *dev, 184 const uint64_t *ids, 185 struct rte_eth_xstat_name *xstats_names, 186 unsigned int limit); 187 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, 188 uint16_t queue_id, 189 uint8_t stat_idx, 190 uint8_t is_rx); 191 static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, 192 size_t fw_size); 193 static int ixgbe_dev_info_get(struct rte_eth_dev *dev, 194 struct rte_eth_dev_info *dev_info); 195 static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev, 196 size_t *no_of_elements); 197 static int ixgbevf_dev_info_get(struct rte_eth_dev *dev, 198 struct rte_eth_dev_info *dev_info); 199 static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 200 201 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev, 202 uint16_t vlan_id, int on); 203 static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, 204 enum rte_vlan_type vlan_type, 205 uint16_t tpid_id); 206 static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, 207 uint16_t queue, bool on); 208 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, 209 int on); 210 static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, 211 int mask); 212 static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask); 213 static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask); 214 static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue); 215 static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue); 216 static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev); 217 static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev); 218 219 static int ixgbe_dev_led_on(struct rte_eth_dev *dev); 220 static int ixgbe_dev_led_off(struct rte_eth_dev *dev); 221 static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, 222 struct rte_eth_fc_conf *fc_conf); 223 static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, 224 struct rte_eth_fc_conf *fc_conf); 225 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, 226 struct rte_eth_pfc_conf *pfc_conf); 227 static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, 228 struct rte_eth_rss_reta_entry64 *reta_conf, 229 uint16_t reta_size); 230 static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, 231 struct rte_eth_rss_reta_entry64 *reta_conf, 232 uint16_t reta_size); 233 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev); 234 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on); 235 static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev); 236 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev); 237 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev); 238 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev); 239 static void ixgbe_dev_interrupt_handler(void *param); 240 static void ixgbe_dev_interrupt_delayed_handler(void *param); 241 static uint32_t ixgbe_dev_setup_link_thread_handler(void *param); 242 static int ixgbe_dev_wait_setup_link_complete(struct rte_eth_dev *dev, 243 uint32_t timeout_ms); 244 245 static int ixgbe_add_rar(struct rte_eth_dev *dev, 246 struct rte_ether_addr *mac_addr, 247 uint32_t index, uint32_t pool); 248 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index); 249 static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, 250 struct 
rte_ether_addr *mac_addr); 251 static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config); 252 static bool is_device_supported(struct rte_eth_dev *dev, 253 struct rte_pci_driver *drv); 254 255 /* For Virtual Function support */ 256 static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev); 257 static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev); 258 static int ixgbevf_dev_configure(struct rte_eth_dev *dev); 259 static int ixgbevf_dev_start(struct rte_eth_dev *dev); 260 static int ixgbevf_dev_link_update(struct rte_eth_dev *dev, 261 int wait_to_complete); 262 static int ixgbevf_dev_stop(struct rte_eth_dev *dev); 263 static int ixgbevf_dev_close(struct rte_eth_dev *dev); 264 static int ixgbevf_dev_reset(struct rte_eth_dev *dev); 265 static void ixgbevf_intr_disable(struct rte_eth_dev *dev); 266 static void ixgbevf_intr_enable(struct rte_eth_dev *dev); 267 static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev, 268 struct rte_eth_stats *stats); 269 static int ixgbevf_dev_stats_reset(struct rte_eth_dev *dev); 270 static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, 271 uint16_t vlan_id, int on); 272 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, 273 uint16_t queue, int on); 274 static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask); 275 static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask); 276 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on); 277 static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, 278 uint16_t queue_id); 279 static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, 280 uint16_t queue_id); 281 static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 282 uint8_t queue, uint8_t msix_vector); 283 static void ixgbevf_configure_msix(struct rte_eth_dev *dev); 284 static int ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev); 285 static int ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev); 286 static int ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev); 287 static int ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev); 288 289 /* For Eth VMDQ APIs support */ 290 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct 291 rte_ether_addr * mac_addr, uint8_t on); 292 static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on); 293 static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, 294 uint16_t queue_id); 295 static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, 296 uint16_t queue_id); 297 static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 298 uint8_t queue, uint8_t msix_vector); 299 static void ixgbe_configure_msix(struct rte_eth_dev *dev); 300 301 static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev, 302 struct rte_ether_addr *mac_addr, 303 uint32_t index, uint32_t pool); 304 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index); 305 static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, 306 struct rte_ether_addr *mac_addr); 307 static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, 308 struct ixgbe_5tuple_filter *filter); 309 static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, 310 struct ixgbe_5tuple_filter *filter); 311 static int ixgbe_dev_flow_ops_get(struct rte_eth_dev *dev, 312 const struct rte_flow_ops **ops); 313 static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu); 314 315 static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 316 
struct rte_ether_addr *mc_addr_set, 317 uint32_t nb_mc_addr); 318 static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, 319 struct rte_eth_dcb_info *dcb_info); 320 321 static int ixgbe_get_reg_length(struct rte_eth_dev *dev); 322 static int ixgbe_get_regs(struct rte_eth_dev *dev, 323 struct rte_dev_reg_info *regs); 324 static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev); 325 static int ixgbe_get_eeprom(struct rte_eth_dev *dev, 326 struct rte_dev_eeprom_info *eeprom); 327 static int ixgbe_set_eeprom(struct rte_eth_dev *dev, 328 struct rte_dev_eeprom_info *eeprom); 329 330 static int ixgbe_get_module_info(struct rte_eth_dev *dev, 331 struct rte_eth_dev_module_info *modinfo); 332 static int ixgbe_get_module_eeprom(struct rte_eth_dev *dev, 333 struct rte_dev_eeprom_info *info); 334 335 static int ixgbevf_get_reg_length(struct rte_eth_dev *dev); 336 static int ixgbevf_get_regs(struct rte_eth_dev *dev, 337 struct rte_dev_reg_info *regs); 338 339 static int ixgbe_timesync_enable(struct rte_eth_dev *dev); 340 static int ixgbe_timesync_disable(struct rte_eth_dev *dev); 341 static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 342 struct timespec *timestamp, 343 uint32_t flags); 344 static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 345 struct timespec *timestamp); 346 static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta); 347 static int ixgbe_timesync_read_time(struct rte_eth_dev *dev, 348 struct timespec *timestamp); 349 static int ixgbe_timesync_write_time(struct rte_eth_dev *dev, 350 const struct timespec *timestamp); 351 static void ixgbevf_dev_interrupt_handler(void *param); 352 353 static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 354 struct rte_eth_udp_tunnel *udp_tunnel); 355 static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 356 struct rte_eth_udp_tunnel *udp_tunnel); 357 static int ixgbe_filter_restore(struct rte_eth_dev *dev); 358 static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev); 359 static int ixgbe_wait_for_link_up(struct ixgbe_hw *hw); 360 static int devarg_handle_int(__rte_unused const char *key, const char *value, 361 void *extra_args); 362 363 /* 364 * Define VF Stats MACRO for Non "cleared on read" register 365 */ 366 #define UPDATE_VF_STAT(reg, last, cur) \ 367 { \ 368 uint32_t latest = IXGBE_READ_REG(hw, reg); \ 369 cur += (latest - last) & UINT_MAX; \ 370 last = latest; \ 371 } 372 373 #define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur) \ 374 { \ 375 u64 new_lsb = IXGBE_READ_REG(hw, lsb); \ 376 u64 new_msb = IXGBE_READ_REG(hw, msb); \ 377 u64 latest = ((new_msb << 32) | new_lsb); \ 378 cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \ 379 last = latest; \ 380 } 381 382 #define IXGBE_SET_HWSTRIP(h, q) do {\ 383 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ 384 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ 385 (h)->bitmap[idx] |= 1 << bit;\ 386 } while (0) 387 388 #define IXGBE_CLEAR_HWSTRIP(h, q) do {\ 389 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ 390 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ 391 (h)->bitmap[idx] &= ~(1 << bit);\ 392 } while (0) 393 394 #define IXGBE_GET_HWSTRIP(h, q, r) do {\ 395 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ 396 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ 397 (r) = (h)->bitmap[idx] >> bit & 1;\ 398 } while (0) 399 400 /* 401 * The set of PCI devices this driver supports 402 */ 403 static const struct rte_pci_id pci_id_ixgbe_map[] = { 404 { 
RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) }, 405 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) }, 406 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) }, 407 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) }, 408 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) }, 409 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) }, 410 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) }, 411 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) }, 412 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) }, 413 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) }, 414 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) }, 415 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) }, 416 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) }, 417 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) }, 418 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) }, 419 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) }, 420 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) }, 421 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) }, 422 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) }, 423 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) }, 424 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) }, 425 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) }, 426 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) }, 427 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) }, 428 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) }, 429 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) }, 430 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) }, 431 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) }, 432 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) }, 433 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) }, 434 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) }, 435 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) }, 436 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) }, 437 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) }, 438 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) }, 439 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) }, 440 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) }, 441 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) }, 442 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) }, 443 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) }, 444 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) }, 445 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) }, 446 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) }, 447 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) }, 448 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) }, 449 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) }, 450 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) }, 451 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, 
IXGBE_DEV_ID_X550EM_X_KR) }, 452 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI) }, 453 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_10G_T) }, 454 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_2_5G_T) }, 455 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_BACKPLANE) }, 456 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_SFP) }, 457 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_SGMII) }, 458 #ifdef RTE_LIBRTE_IXGBE_BYPASS 459 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) }, 460 #endif 461 { .vendor_id = 0, /* sentinel */ }, 462 }; 463 464 /* 465 * The set of PCI devices this driver supports (for 82599 VF) 466 */ 467 static const struct rte_pci_id pci_id_ixgbevf_map[] = { 468 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) }, 469 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) }, 470 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) }, 471 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) }, 472 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) }, 473 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) }, 474 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) }, 475 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) }, 476 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) }, 477 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) }, 478 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_VF) }, 479 { .vendor_id = 0, /* sentinel */ }, 480 }; 481 482 static const struct rte_eth_desc_lim rx_desc_lim = { 483 .nb_max = IXGBE_MAX_RING_DESC, 484 .nb_min = IXGBE_MIN_RING_DESC, 485 .nb_align = IXGBE_RXD_ALIGN, 486 }; 487 488 static const struct rte_eth_desc_lim tx_desc_lim = { 489 .nb_max = IXGBE_MAX_RING_DESC, 490 .nb_min = IXGBE_MIN_RING_DESC, 491 .nb_align = IXGBE_TXD_ALIGN, 492 .nb_seg_max = IXGBE_TX_MAX_SEG, 493 .nb_mtu_seg_max = IXGBE_TX_MAX_SEG, 494 }; 495 496 static const struct eth_dev_ops ixgbe_eth_dev_ops = { 497 .dev_configure = ixgbe_dev_configure, 498 .dev_start = ixgbe_dev_start, 499 .dev_stop = ixgbe_dev_stop, 500 .dev_set_link_up = ixgbe_dev_set_link_up, 501 .dev_set_link_down = ixgbe_dev_set_link_down, 502 .dev_close = ixgbe_dev_close, 503 .dev_reset = ixgbe_dev_reset, 504 .promiscuous_enable = ixgbe_dev_promiscuous_enable, 505 .promiscuous_disable = ixgbe_dev_promiscuous_disable, 506 .allmulticast_enable = ixgbe_dev_allmulticast_enable, 507 .allmulticast_disable = ixgbe_dev_allmulticast_disable, 508 .link_update = ixgbe_dev_link_update, 509 .stats_get = ixgbe_dev_stats_get, 510 .xstats_get = ixgbe_dev_xstats_get, 511 .xstats_get_by_id = ixgbe_dev_xstats_get_by_id, 512 .stats_reset = ixgbe_dev_stats_reset, 513 .xstats_reset = ixgbe_dev_xstats_reset, 514 .xstats_get_names = ixgbe_dev_xstats_get_names, 515 .xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id, 516 .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set, 517 .fw_version_get = ixgbe_fw_version_get, 518 .dev_infos_get = ixgbe_dev_info_get, 519 .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get, 520 .mtu_set = ixgbe_dev_mtu_set, 521 .vlan_filter_set = ixgbe_vlan_filter_set, 522 .vlan_tpid_set = ixgbe_vlan_tpid_set, 523 .vlan_offload_set = ixgbe_vlan_offload_set, 524 .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set, 525 .rx_queue_start = ixgbe_dev_rx_queue_start, 526 .rx_queue_stop = ixgbe_dev_rx_queue_stop, 527 .tx_queue_start = 
ixgbe_dev_tx_queue_start, 528 .tx_queue_stop = ixgbe_dev_tx_queue_stop, 529 .rx_queue_setup = ixgbe_dev_rx_queue_setup, 530 .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable, 531 .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable, 532 .rx_queue_release = ixgbe_dev_rx_queue_release, 533 .tx_queue_setup = ixgbe_dev_tx_queue_setup, 534 .tx_queue_release = ixgbe_dev_tx_queue_release, 535 .dev_led_on = ixgbe_dev_led_on, 536 .dev_led_off = ixgbe_dev_led_off, 537 .flow_ctrl_get = ixgbe_flow_ctrl_get, 538 .flow_ctrl_set = ixgbe_flow_ctrl_set, 539 .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set, 540 .mac_addr_add = ixgbe_add_rar, 541 .mac_addr_remove = ixgbe_remove_rar, 542 .mac_addr_set = ixgbe_set_default_mac_addr, 543 .uc_hash_table_set = ixgbe_uc_hash_table_set, 544 .uc_all_hash_table_set = ixgbe_uc_all_hash_table_set, 545 .set_queue_rate_limit = ixgbe_set_queue_rate_limit, 546 .reta_update = ixgbe_dev_rss_reta_update, 547 .reta_query = ixgbe_dev_rss_reta_query, 548 .rss_hash_update = ixgbe_dev_rss_hash_update, 549 .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get, 550 .flow_ops_get = ixgbe_dev_flow_ops_get, 551 .set_mc_addr_list = ixgbe_dev_set_mc_addr_list, 552 .rxq_info_get = ixgbe_rxq_info_get, 553 .txq_info_get = ixgbe_txq_info_get, 554 .recycle_rxq_info_get = ixgbe_recycle_rxq_info_get, 555 .timesync_enable = ixgbe_timesync_enable, 556 .timesync_disable = ixgbe_timesync_disable, 557 .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp, 558 .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp, 559 .get_reg = ixgbe_get_regs, 560 .get_eeprom_length = ixgbe_get_eeprom_length, 561 .get_eeprom = ixgbe_get_eeprom, 562 .set_eeprom = ixgbe_set_eeprom, 563 .get_module_info = ixgbe_get_module_info, 564 .get_module_eeprom = ixgbe_get_module_eeprom, 565 .get_dcb_info = ixgbe_dev_get_dcb_info, 566 .timesync_adjust_time = ixgbe_timesync_adjust_time, 567 .timesync_read_time = ixgbe_timesync_read_time, 568 .timesync_write_time = ixgbe_timesync_write_time, 569 .udp_tunnel_port_add = ixgbe_dev_udp_tunnel_port_add, 570 .udp_tunnel_port_del = ixgbe_dev_udp_tunnel_port_del, 571 .tm_ops_get = ixgbe_tm_ops_get, 572 .tx_done_cleanup = ixgbe_dev_tx_done_cleanup, 573 .get_monitor_addr = ixgbe_get_monitor_addr, 574 }; 575 576 /* 577 * dev_ops for virtual function, bare necessities for basic vf 578 * operation have been implemented 579 */ 580 static const struct eth_dev_ops ixgbevf_eth_dev_ops = { 581 .dev_configure = ixgbevf_dev_configure, 582 .dev_start = ixgbevf_dev_start, 583 .dev_stop = ixgbevf_dev_stop, 584 .link_update = ixgbevf_dev_link_update, 585 .stats_get = ixgbevf_dev_stats_get, 586 .xstats_get = ixgbevf_dev_xstats_get, 587 .stats_reset = ixgbevf_dev_stats_reset, 588 .xstats_reset = ixgbevf_dev_stats_reset, 589 .xstats_get_names = ixgbevf_dev_xstats_get_names, 590 .dev_close = ixgbevf_dev_close, 591 .dev_reset = ixgbevf_dev_reset, 592 .promiscuous_enable = ixgbevf_dev_promiscuous_enable, 593 .promiscuous_disable = ixgbevf_dev_promiscuous_disable, 594 .allmulticast_enable = ixgbevf_dev_allmulticast_enable, 595 .allmulticast_disable = ixgbevf_dev_allmulticast_disable, 596 .dev_infos_get = ixgbevf_dev_info_get, 597 .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get, 598 .mtu_set = ixgbevf_dev_set_mtu, 599 .vlan_filter_set = ixgbevf_vlan_filter_set, 600 .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set, 601 .vlan_offload_set = ixgbevf_vlan_offload_set, 602 .rx_queue_setup = ixgbe_dev_rx_queue_setup, 603 .rx_queue_release = ixgbe_dev_rx_queue_release, 604 
.tx_queue_setup = ixgbe_dev_tx_queue_setup, 605 .tx_queue_release = ixgbe_dev_tx_queue_release, 606 .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable, 607 .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable, 608 .mac_addr_add = ixgbevf_add_mac_addr, 609 .mac_addr_remove = ixgbevf_remove_mac_addr, 610 .set_mc_addr_list = ixgbe_dev_set_mc_addr_list, 611 .rxq_info_get = ixgbe_rxq_info_get, 612 .txq_info_get = ixgbe_txq_info_get, 613 .mac_addr_set = ixgbevf_set_default_mac_addr, 614 .get_reg = ixgbevf_get_regs, 615 .reta_update = ixgbe_dev_rss_reta_update, 616 .reta_query = ixgbe_dev_rss_reta_query, 617 .rss_hash_update = ixgbe_dev_rss_hash_update, 618 .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get, 619 .tx_done_cleanup = ixgbe_dev_tx_done_cleanup, 620 .get_monitor_addr = ixgbe_get_monitor_addr, 621 }; 622 623 /* store statistics names and its offset in stats structure */ 624 struct rte_ixgbe_xstats_name_off { 625 char name[RTE_ETH_XSTATS_NAME_SIZE]; 626 unsigned offset; 627 }; 628 629 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = { 630 {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)}, 631 {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)}, 632 {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)}, 633 {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)}, 634 {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)}, 635 {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)}, 636 {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)}, 637 {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)}, 638 {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)}, 639 {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)}, 640 {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)}, 641 {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)}, 642 {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)}, 643 {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)}, 644 {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats, 645 prc1023)}, 646 {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats, 647 prc1522)}, 648 {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)}, 649 {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)}, 650 {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)}, 651 {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)}, 652 {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)}, 653 {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)}, 654 {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)}, 655 {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)}, 656 {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)}, 657 {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)}, 658 {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)}, 659 {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)}, 660 {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)}, 661 {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)}, 662 {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)}, 663 {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)}, 664 {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats, 665 ptc1023)}, 666 {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats, 667 ptc1522)}, 668 {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, 
mptc)}, 669 {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)}, 670 {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)}, 671 {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)}, 672 673 {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats, 674 fdirustat_add)}, 675 {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats, 676 fdirustat_remove)}, 677 {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats, 678 fdirfstat_fadd)}, 679 {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats, 680 fdirfstat_fremove)}, 681 {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats, 682 fdirmatch)}, 683 {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats, 684 fdirmiss)}, 685 686 {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)}, 687 {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)}, 688 {"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, 689 fclast)}, 690 {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)}, 691 {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)}, 692 {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)}, 693 {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)}, 694 {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats, 695 fcoe_noddp)}, 696 {"rx_fcoe_no_direct_data_placement_ext_buff", 697 offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)}, 698 699 {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats, 700 lxontxc)}, 701 {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats, 702 lxonrxc)}, 703 {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats, 704 lxofftxc)}, 705 {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats, 706 lxoffrxc)}, 707 {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)}, 708 }; 709 710 #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \ 711 sizeof(rte_ixgbe_stats_strings[0])) 712 713 /* MACsec statistics */ 714 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = { 715 {"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats, 716 out_pkts_untagged)}, 717 {"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats, 718 out_pkts_encrypted)}, 719 {"out_pkts_protected", offsetof(struct ixgbe_macsec_stats, 720 out_pkts_protected)}, 721 {"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats, 722 out_octets_encrypted)}, 723 {"out_octets_protected", offsetof(struct ixgbe_macsec_stats, 724 out_octets_protected)}, 725 {"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats, 726 in_pkts_untagged)}, 727 {"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats, 728 in_pkts_badtag)}, 729 {"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats, 730 in_pkts_nosci)}, 731 {"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats, 732 in_pkts_unknownsci)}, 733 {"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats, 734 in_octets_decrypted)}, 735 {"in_octets_validated", offsetof(struct ixgbe_macsec_stats, 736 in_octets_validated)}, 737 {"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats, 738 in_pkts_unchecked)}, 739 {"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats, 740 in_pkts_delayed)}, 741 {"in_pkts_late", offsetof(struct ixgbe_macsec_stats, 742 in_pkts_late)}, 743 {"in_pkts_ok", offsetof(struct ixgbe_macsec_stats, 744 in_pkts_ok)}, 745 {"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats, 746 in_pkts_invalid)}, 747 {"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats, 748 
in_pkts_notvalid)}, 749 {"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats, 750 in_pkts_unusedsa)}, 751 {"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats, 752 in_pkts_notusingsa)}, 753 }; 754 755 #define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \ 756 sizeof(rte_ixgbe_macsec_strings[0])) 757 758 /* Per-queue statistics */ 759 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = { 760 {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)}, 761 {"dropped", offsetof(struct ixgbe_hw_stats, mpc)}, 762 {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)}, 763 {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)}, 764 }; 765 766 #define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \ 767 sizeof(rte_ixgbe_rxq_strings[0])) 768 #define IXGBE_NB_RXQ_PRIO_VALUES 8 769 770 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = { 771 {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)}, 772 {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)}, 773 {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats, 774 pxon2offc)}, 775 }; 776 777 #define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \ 778 sizeof(rte_ixgbe_txq_strings[0])) 779 #define IXGBE_NB_TXQ_PRIO_VALUES 8 780 781 static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = { 782 {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)}, 783 }; 784 785 #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \ 786 sizeof(rte_ixgbevf_stats_strings[0])) 787 788 /* 789 * This function is the same as ixgbe_is_sfp() in base/ixgbe.h. 790 */ 791 static inline int 792 ixgbe_is_sfp(struct ixgbe_hw *hw) 793 { 794 switch (hw->phy.type) { 795 case ixgbe_phy_sfp_avago: 796 case ixgbe_phy_sfp_ftl: 797 case ixgbe_phy_sfp_intel: 798 case ixgbe_phy_sfp_unknown: 799 case ixgbe_phy_sfp_passive_tyco: 800 case ixgbe_phy_sfp_passive_unknown: 801 return 1; 802 default: 803 /* x550em devices may be SFP, check media type */ 804 switch (hw->mac.type) { 805 case ixgbe_mac_X550EM_x: 806 case ixgbe_mac_X550EM_a: 807 switch (ixgbe_get_media_type(hw)) { 808 case ixgbe_media_type_fiber: 809 case ixgbe_media_type_fiber_qsfp: 810 return 1; 811 default: 812 break; 813 } 814 default: 815 break; 816 } 817 return 0; 818 } 819 } 820 821 static inline int32_t 822 ixgbe_pf_reset_hw(struct ixgbe_hw *hw) 823 { 824 uint32_t ctrl_ext; 825 int32_t status; 826 827 status = ixgbe_reset_hw(hw); 828 829 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 830 /* Set PF Reset Done bit so PF/VF Mail Ops can work */ 831 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 832 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 833 IXGBE_WRITE_FLUSH(hw); 834 835 if (status == IXGBE_ERR_SFP_NOT_PRESENT) 836 status = IXGBE_SUCCESS; 837 return status; 838 } 839 840 static inline void 841 ixgbe_enable_intr(struct rte_eth_dev *dev) 842 { 843 struct ixgbe_interrupt *intr = 844 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 845 struct ixgbe_hw *hw = 846 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 847 848 IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask); 849 IXGBE_WRITE_FLUSH(hw); 850 } 851 852 /* 853 * This function is based on ixgbe_disable_intr() in base/ixgbe.h. 
854 */ 855 static void 856 ixgbe_disable_intr(struct ixgbe_hw *hw) 857 { 858 PMD_INIT_FUNC_TRACE(); 859 860 if (hw->mac.type == ixgbe_mac_82598EB) { 861 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0); 862 } else { 863 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000); 864 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0); 865 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0); 866 } 867 IXGBE_WRITE_FLUSH(hw); 868 } 869 870 /* 871 * This function resets queue statistics mapping registers. 872 * From Niantic datasheet, Initialization of Statistics section: 873 * "...if software requires the queue counters, the RQSMR and TQSM registers 874 * must be re-programmed following a device reset. 875 */ 876 static void 877 ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw) 878 { 879 uint32_t i; 880 881 for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) { 882 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0); 883 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0); 884 } 885 } 886 887 888 static int 889 ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, 890 uint16_t queue_id, 891 uint8_t stat_idx, 892 uint8_t is_rx) 893 { 894 #define QSM_REG_NB_BITS_PER_QMAP_FIELD 8 895 #define NB_QMAP_FIELDS_PER_QSM_REG 4 896 #define QMAP_FIELD_RESERVED_BITS_MASK 0x0f 897 898 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 899 struct ixgbe_stat_mapping_registers *stat_mappings = 900 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private); 901 uint32_t qsmr_mask = 0; 902 uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK; 903 uint32_t q_map; 904 uint8_t n, offset; 905 906 if ((hw->mac.type != ixgbe_mac_82599EB) && 907 (hw->mac.type != ixgbe_mac_X540) && 908 (hw->mac.type != ixgbe_mac_X550) && 909 (hw->mac.type != ixgbe_mac_X550EM_x) && 910 (hw->mac.type != ixgbe_mac_X550EM_a)) 911 return -ENOSYS; 912 913 PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d", 914 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", 915 queue_id, stat_idx); 916 917 n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG); 918 if (n >= IXGBE_NB_STAT_MAPPING_REGS) { 919 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded"); 920 return -EIO; 921 } 922 offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG); 923 924 /* Now clear any previous stat_idx set */ 925 clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset); 926 if (!is_rx) 927 stat_mappings->tqsm[n] &= ~clearing_mask; 928 else 929 stat_mappings->rqsmr[n] &= ~clearing_mask; 930 931 q_map = (uint32_t)stat_idx; 932 q_map &= QMAP_FIELD_RESERVED_BITS_MASK; 933 qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset); 934 if (!is_rx) 935 stat_mappings->tqsm[n] |= qsmr_mask; 936 else 937 stat_mappings->rqsmr[n] |= qsmr_mask; 938 939 PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d", 940 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", 941 queue_id, stat_idx); 942 PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n, 943 is_rx ? 
stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]); 944 945 /* Now write the mapping in the appropriate register */ 946 if (is_rx) { 947 PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d", 948 stat_mappings->rqsmr[n], n); 949 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]); 950 } else { 951 PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d", 952 stat_mappings->tqsm[n], n); 953 IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]); 954 } 955 return 0; 956 } 957 958 static void 959 ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev) 960 { 961 struct ixgbe_stat_mapping_registers *stat_mappings = 962 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private); 963 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 964 int i; 965 966 /* write whatever was in stat mapping table to the NIC */ 967 for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) { 968 /* rx */ 969 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]); 970 971 /* tx */ 972 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]); 973 } 974 } 975 976 static void 977 ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) 978 { 979 uint8_t i; 980 struct ixgbe_dcb_tc_config *tc; 981 uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS; 982 983 dcb_config->num_tcs.pg_tcs = dcb_max_tc; 984 dcb_config->num_tcs.pfc_tcs = dcb_max_tc; 985 for (i = 0; i < dcb_max_tc; i++) { 986 tc = &dcb_config->tc_config[i]; 987 tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i; 988 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 989 (uint8_t)(100/dcb_max_tc + (i & 1)); 990 tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i; 991 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 992 (uint8_t)(100/dcb_max_tc + (i & 1)); 993 tc->pfc = ixgbe_dcb_pfc_disabled; 994 } 995 996 /* Initialize default user to priority mapping, UPx->TC0 */ 997 tc = &dcb_config->tc_config[0]; 998 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; 999 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; 1000 for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) { 1001 dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100; 1002 dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100; 1003 } 1004 dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal; 1005 dcb_config->pfc_mode_enable = false; 1006 dcb_config->vt_mode = true; 1007 dcb_config->round_robin_enable = false; 1008 /* support all DCB capabilities in 82599 */ 1009 dcb_config->support.capabilities = 0xFF; 1010 1011 /*we only support 4 Tcs for X540, X550 */ 1012 if (hw->mac.type == ixgbe_mac_X540 || 1013 hw->mac.type == ixgbe_mac_X550 || 1014 hw->mac.type == ixgbe_mac_X550EM_x || 1015 hw->mac.type == ixgbe_mac_X550EM_a) { 1016 dcb_config->num_tcs.pg_tcs = 4; 1017 dcb_config->num_tcs.pfc_tcs = 4; 1018 } 1019 } 1020 1021 /* 1022 * Ensure that all locks are released before first NVM or PHY access 1023 */ 1024 static void 1025 ixgbe_swfw_lock_reset(struct ixgbe_hw *hw) 1026 { 1027 uint16_t mask; 1028 1029 /* 1030 * Phy lock should not fail in this early stage. If this is the case, 1031 * it is due to an improper exit of the application. 1032 * So force the release of the faulty lock. Release of common lock 1033 * is done automatically by swfw_sync function. 
1034 */ 1035 mask = IXGBE_GSSR_PHY0_SM << hw->bus.func; 1036 if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) { 1037 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func); 1038 } 1039 ixgbe_release_swfw_semaphore(hw, mask); 1040 1041 /* 1042 * These ones are more tricky since they are common to all ports; but 1043 * swfw_sync retries last long enough (1s) to be almost sure that if 1044 * lock can not be taken it is due to an improper lock of the 1045 * semaphore. 1046 */ 1047 mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM; 1048 if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) { 1049 PMD_DRV_LOG(DEBUG, "SWFW common locks released"); 1050 } 1051 ixgbe_release_swfw_semaphore(hw, mask); 1052 } 1053 1054 static void 1055 ixgbe_parse_devargs(struct ixgbe_adapter *adapter, 1056 struct rte_devargs *devargs) 1057 { 1058 struct rte_kvargs *kvlist; 1059 uint16_t sdp3_no_tx_disable; 1060 1061 if (devargs == NULL) 1062 return; 1063 1064 kvlist = rte_kvargs_parse(devargs->args, ixgbe_valid_arguments); 1065 if (kvlist == NULL) 1066 return; 1067 1068 if (rte_kvargs_count(kvlist, IXGBE_DEVARG_FIBER_SDP3_NOT_TX_DISABLE) == 1 && 1069 rte_kvargs_process(kvlist, IXGBE_DEVARG_FIBER_SDP3_NOT_TX_DISABLE, 1070 devarg_handle_int, &sdp3_no_tx_disable) == 0 && 1071 sdp3_no_tx_disable == 1) 1072 adapter->sdp3_no_tx_disable = 1; 1073 1074 rte_kvargs_free(kvlist); 1075 } 1076 1077 /* 1078 * This function is based on code in ixgbe_attach() in base/ixgbe.c. 1079 * It returns 0 on success. 1080 */ 1081 static int 1082 eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) 1083 { 1084 struct ixgbe_adapter *ad = eth_dev->data->dev_private; 1085 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1086 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 1087 struct ixgbe_hw *hw = 1088 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1089 struct ixgbe_vfta *shadow_vfta = 1090 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); 1091 struct ixgbe_hwstrip *hwstrip = 1092 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); 1093 struct ixgbe_dcb_config *dcb_config = 1094 IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private); 1095 struct ixgbe_filter_info *filter_info = 1096 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); 1097 struct ixgbe_bw_conf *bw_conf = 1098 IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private); 1099 uint32_t ctrl_ext; 1100 uint16_t csum; 1101 int diag, i, ret; 1102 1103 PMD_INIT_FUNC_TRACE(); 1104 1105 ixgbe_dev_macsec_setting_reset(eth_dev); 1106 1107 eth_dev->dev_ops = &ixgbe_eth_dev_ops; 1108 eth_dev->rx_queue_count = ixgbe_dev_rx_queue_count; 1109 eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status; 1110 eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status; 1111 eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; 1112 eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; 1113 eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts; 1114 1115 /* 1116 * For secondary processes, we don't initialise any further as primary 1117 * has already done this work. Only check we don't need a different 1118 * RX and TX function. 
1119 */ 1120 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1121 struct ci_tx_queue *txq; 1122 /* TX queue function in primary, set by last queue initialized 1123 * Tx queue may not initialized by primary process 1124 */ 1125 if (eth_dev->data->tx_queues) { 1126 txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1]; 1127 ixgbe_set_tx_function(eth_dev, txq); 1128 } else { 1129 /* Use default TX function if we get here */ 1130 PMD_INIT_LOG(NOTICE, "No TX queues configured yet. " 1131 "Using default TX function."); 1132 } 1133 1134 ixgbe_set_rx_function(eth_dev); 1135 1136 return 0; 1137 } 1138 1139 /* NOTE: review for potential ordering optimization */ 1140 rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst); 1141 ixgbe_parse_devargs(eth_dev->data->dev_private, 1142 pci_dev->device.devargs); 1143 rte_eth_copy_pci_info(eth_dev, pci_dev); 1144 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 1145 1146 /* Vendor and Device ID need to be set before init of shared code */ 1147 hw->device_id = pci_dev->id.device_id; 1148 hw->vendor_id = pci_dev->id.vendor_id; 1149 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; 1150 hw->allow_unsupported_sfp = 1; 1151 1152 /* Initialize the shared code (base driver) */ 1153 #ifdef RTE_LIBRTE_IXGBE_BYPASS 1154 diag = ixgbe_bypass_init_shared_code(hw); 1155 #else 1156 diag = ixgbe_init_shared_code(hw); 1157 #endif /* RTE_LIBRTE_IXGBE_BYPASS */ 1158 1159 if (diag != IXGBE_SUCCESS) { 1160 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag); 1161 return -EIO; 1162 } 1163 1164 if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) { 1165 PMD_INIT_LOG(ERR, "ERROR: Firmware recovery mode detected. Limiting functionality."); 1166 return -EIO; 1167 } 1168 1169 /* pick up the PCI bus settings for reporting later */ 1170 ixgbe_get_bus_info(hw); 1171 1172 /* Unlock any pending hardware semaphore */ 1173 ixgbe_swfw_lock_reset(hw); 1174 1175 #ifdef RTE_LIB_SECURITY 1176 /* Initialize security_ctx only for primary process*/ 1177 if (ixgbe_ipsec_ctx_create(eth_dev)) 1178 return -ENOMEM; 1179 #endif 1180 1181 /* Initialize DCB configuration*/ 1182 memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config)); 1183 ixgbe_dcb_init(hw, dcb_config); 1184 /* Get Hardware Flow Control setting */ 1185 hw->fc.requested_mode = ixgbe_fc_none; 1186 hw->fc.current_mode = ixgbe_fc_none; 1187 hw->fc.pause_time = IXGBE_FC_PAUSE; 1188 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 1189 hw->fc.low_water[i] = IXGBE_FC_LO; 1190 hw->fc.high_water[i] = IXGBE_FC_HI; 1191 } 1192 hw->fc.send_xon = 1; 1193 1194 /* Make sure we have a good EEPROM before we read from it */ 1195 diag = ixgbe_validate_eeprom_checksum(hw, &csum); 1196 if (diag != IXGBE_SUCCESS) { 1197 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag); 1198 ret = -EIO; 1199 goto err_exit; 1200 } 1201 1202 #ifdef RTE_LIBRTE_IXGBE_BYPASS 1203 diag = ixgbe_bypass_init_hw(hw); 1204 #else 1205 diag = ixgbe_init_hw(hw); 1206 #endif /* RTE_LIBRTE_IXGBE_BYPASS */ 1207 1208 /* 1209 * Devices with copper phys will fail to initialise if ixgbe_init_hw() 1210 * is called too soon after the kernel driver unbinding/binding occurs. 1211 * The failure occurs in ixgbe_identify_phy_generic() for all devices, 1212 * but for non-copper devies, ixgbe_identify_sfp_module_generic() is 1213 * also called. See ixgbe_identify_phy_82599(). The reason for the 1214 * failure is not known, and only occuts when virtualisation features 1215 * are disabled in the bios. 
A delay of 100ms was found to be enough by 1216 * trial-and-error, and is doubled to be safe. 1217 */ 1218 if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) { 1219 rte_delay_ms(200); 1220 diag = ixgbe_init_hw(hw); 1221 } 1222 1223 if (diag == IXGBE_ERR_SFP_NOT_PRESENT) 1224 diag = IXGBE_SUCCESS; 1225 1226 if (diag == IXGBE_ERR_EEPROM_VERSION) { 1227 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/" 1228 "LOM. Please be aware there may be issues associated " 1229 "with your hardware."); 1230 PMD_INIT_LOG(ERR, "If you are experiencing problems " 1231 "please contact your Intel or hardware representative " 1232 "who provided you with this hardware."); 1233 } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED) 1234 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module"); 1235 if (diag) { 1236 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag); 1237 ret = -EIO; 1238 goto err_exit; 1239 } 1240 1241 /* Reset the hw statistics */ 1242 ixgbe_dev_stats_reset(eth_dev); 1243 1244 /* disable interrupt */ 1245 ixgbe_disable_intr(hw); 1246 1247 /* reset mappings for queue statistics hw counters*/ 1248 ixgbe_reset_qstat_mappings(hw); 1249 1250 /* Allocate memory for storing MAC addresses */ 1251 eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", RTE_ETHER_ADDR_LEN * 1252 hw->mac.num_rar_entries, 0); 1253 if (eth_dev->data->mac_addrs == NULL) { 1254 PMD_INIT_LOG(ERR, 1255 "Failed to allocate %u bytes needed to store " 1256 "MAC addresses", 1257 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries); 1258 ret = -ENOMEM; 1259 goto err_exit; 1260 } 1261 /* Copy the permanent MAC address */ 1262 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr, 1263 ð_dev->data->mac_addrs[0]); 1264 1265 /* Allocate memory for storing hash filter MAC addresses */ 1266 eth_dev->data->hash_mac_addrs = rte_zmalloc( 1267 "ixgbe", RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC, 0); 1268 if (eth_dev->data->hash_mac_addrs == NULL) { 1269 PMD_INIT_LOG(ERR, 1270 "Failed to allocate %d bytes needed to store MAC addresses", 1271 RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC); 1272 rte_free(eth_dev->data->mac_addrs); 1273 eth_dev->data->mac_addrs = NULL; 1274 ret = -ENOMEM; 1275 goto err_exit; 1276 } 1277 1278 /* initialize the vfta */ 1279 memset(shadow_vfta, 0, sizeof(*shadow_vfta)); 1280 1281 /* initialize the hw strip bitmap*/ 1282 memset(hwstrip, 0, sizeof(*hwstrip)); 1283 1284 /* initialize PF if max_vfs not zero */ 1285 ret = ixgbe_pf_host_init(eth_dev); 1286 if (ret) 1287 goto err_pf_host_init; 1288 1289 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 1290 /* let hardware know driver is loaded */ 1291 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 1292 /* Set PF Reset Done bit so PF/VF Mail Ops can work */ 1293 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 1294 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 1295 IXGBE_WRITE_FLUSH(hw); 1296 1297 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) 1298 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d", 1299 (int) hw->mac.type, (int) hw->phy.type, 1300 (int) hw->phy.sfp_type); 1301 else 1302 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d", 1303 (int) hw->mac.type, (int) hw->phy.type); 1304 1305 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x", 1306 eth_dev->data->port_id, pci_dev->id.vendor_id, 1307 pci_dev->id.device_id); 1308 1309 rte_intr_callback_register(intr_handle, 1310 ixgbe_dev_interrupt_handler, eth_dev); 1311 1312 /* enable uio/vfio intr/eventfd mapping */ 1313 rte_intr_enable(intr_handle); 1314 1315 /* enable support intr */ 1316 
ixgbe_enable_intr(eth_dev); 1317 1318 /* initialize filter info */ 1319 memset(filter_info, 0, 1320 sizeof(struct ixgbe_filter_info)); 1321 1322 /* initialize 5tuple filter list */ 1323 TAILQ_INIT(&filter_info->fivetuple_list); 1324 1325 /* initialize flow director filter list & hash */ 1326 ret = ixgbe_fdir_filter_init(eth_dev); 1327 if (ret) 1328 goto err_fdir_filter_init; 1329 1330 /* initialize l2 tunnel filter list & hash */ 1331 ret = ixgbe_l2_tn_filter_init(eth_dev); 1332 if (ret) 1333 goto err_l2_tn_filter_init; 1334 1335 /* initialize flow filter lists */ 1336 ixgbe_filterlist_init(); 1337 1338 /* initialize bandwidth configuration info */ 1339 memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf)); 1340 1341 /* initialize Traffic Manager configuration */ 1342 ixgbe_tm_conf_init(eth_dev); 1343 1344 return 0; 1345 1346 err_l2_tn_filter_init: 1347 ixgbe_fdir_filter_uninit(eth_dev); 1348 err_fdir_filter_init: 1349 ixgbe_disable_intr(hw); 1350 rte_intr_disable(intr_handle); 1351 rte_intr_callback_unregister(intr_handle, 1352 ixgbe_dev_interrupt_handler, eth_dev); 1353 ixgbe_pf_host_uninit(eth_dev); 1354 err_pf_host_init: 1355 rte_free(eth_dev->data->mac_addrs); 1356 eth_dev->data->mac_addrs = NULL; 1357 rte_free(eth_dev->data->hash_mac_addrs); 1358 eth_dev->data->hash_mac_addrs = NULL; 1359 err_exit: 1360 #ifdef RTE_LIB_SECURITY 1361 rte_free(eth_dev->security_ctx); 1362 eth_dev->security_ctx = NULL; 1363 #endif 1364 return ret; 1365 } 1366 1367 static int 1368 eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev) 1369 { 1370 PMD_INIT_FUNC_TRACE(); 1371 1372 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1373 return 0; 1374 1375 ixgbe_dev_close(eth_dev); 1376 1377 return 0; 1378 } 1379 1380 static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev) 1381 { 1382 struct ixgbe_filter_info *filter_info = 1383 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); 1384 struct ixgbe_5tuple_filter *p_5tuple; 1385 1386 while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) { 1387 TAILQ_REMOVE(&filter_info->fivetuple_list, 1388 p_5tuple, 1389 entries); 1390 rte_free(p_5tuple); 1391 } 1392 memset(filter_info->fivetuple_mask, 0, 1393 sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE); 1394 1395 return 0; 1396 } 1397 1398 static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev) 1399 { 1400 struct ixgbe_hw_fdir_info *fdir_info = 1401 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private); 1402 struct ixgbe_fdir_filter *fdir_filter; 1403 1404 rte_free(fdir_info->hash_map); 1405 rte_hash_free(fdir_info->hash_handle); 1406 1407 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) { 1408 TAILQ_REMOVE(&fdir_info->fdir_list, 1409 fdir_filter, 1410 entries); 1411 rte_free(fdir_filter); 1412 } 1413 1414 return 0; 1415 } 1416 1417 static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev) 1418 { 1419 struct ixgbe_l2_tn_info *l2_tn_info = 1420 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private); 1421 struct ixgbe_l2_tn_filter *l2_tn_filter; 1422 1423 rte_free(l2_tn_info->hash_map); 1424 rte_hash_free(l2_tn_info->hash_handle); 1425 1426 while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) { 1427 TAILQ_REMOVE(&l2_tn_info->l2_tn_list, 1428 l2_tn_filter, 1429 entries); 1430 rte_free(l2_tn_filter); 1431 } 1432 1433 return 0; 1434 } 1435 1436 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev) 1437 { 1438 struct ixgbe_hw_fdir_info *fdir_info = 1439 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private); 1440 char fdir_hash_name[RTE_HASH_NAMESIZE]; 
1441 struct rte_hash_parameters fdir_hash_params = { 1442 .name = fdir_hash_name, 1443 .entries = IXGBE_MAX_FDIR_FILTER_NUM, 1444 .key_len = sizeof(union ixgbe_atr_input), 1445 .hash_func = rte_hash_crc, 1446 .hash_func_init_val = 0, 1447 .socket_id = rte_socket_id(), 1448 }; 1449 1450 TAILQ_INIT(&fdir_info->fdir_list); 1451 snprintf(fdir_hash_name, RTE_HASH_NAMESIZE, 1452 "fdir_%s", eth_dev->device->name); 1453 fdir_info->hash_handle = rte_hash_create(&fdir_hash_params); 1454 if (!fdir_info->hash_handle) { 1455 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!"); 1456 return -EINVAL; 1457 } 1458 fdir_info->hash_map = rte_zmalloc("ixgbe", 1459 sizeof(struct ixgbe_fdir_filter *) * 1460 IXGBE_MAX_FDIR_FILTER_NUM, 1461 0); 1462 if (!fdir_info->hash_map) { 1463 PMD_INIT_LOG(ERR, 1464 "Failed to allocate memory for fdir hash map!"); 1465 rte_hash_free(fdir_info->hash_handle); 1466 return -ENOMEM; 1467 } 1468 fdir_info->mask_added = FALSE; 1469 1470 return 0; 1471 } 1472 1473 static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev) 1474 { 1475 struct ixgbe_l2_tn_info *l2_tn_info = 1476 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private); 1477 char l2_tn_hash_name[RTE_HASH_NAMESIZE]; 1478 struct rte_hash_parameters l2_tn_hash_params = { 1479 .name = l2_tn_hash_name, 1480 .entries = IXGBE_MAX_L2_TN_FILTER_NUM, 1481 .key_len = sizeof(struct ixgbe_l2_tn_key), 1482 .hash_func = rte_hash_crc, 1483 .hash_func_init_val = 0, 1484 .socket_id = rte_socket_id(), 1485 }; 1486 1487 TAILQ_INIT(&l2_tn_info->l2_tn_list); 1488 snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE, 1489 "l2_tn_%s", eth_dev->device->name); 1490 l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params); 1491 if (!l2_tn_info->hash_handle) { 1492 PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!"); 1493 return -EINVAL; 1494 } 1495 l2_tn_info->hash_map = rte_zmalloc("ixgbe", 1496 sizeof(struct ixgbe_l2_tn_filter *) * 1497 IXGBE_MAX_L2_TN_FILTER_NUM, 1498 0); 1499 if (!l2_tn_info->hash_map) { 1500 PMD_INIT_LOG(ERR, 1501 "Failed to allocate memory for L2 TN hash map!"); 1502 rte_hash_free(l2_tn_info->hash_handle); 1503 return -ENOMEM; 1504 } 1505 l2_tn_info->e_tag_en = FALSE; 1506 l2_tn_info->e_tag_fwd_en = FALSE; 1507 l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG; 1508 1509 return 0; 1510 } 1511 /* 1512 * Negotiate mailbox API version with the PF. 1513 * After reset API version is always set to the basic one (ixgbe_mbox_api_10). 1514 * Then we try to negotiate starting with the most recent one. 1515 * If all negotiation attempts fail, then we will proceed with 1516 * the default one (ixgbe_mbox_api_10). 1517 */ 1518 static void 1519 ixgbevf_negotiate_api(struct ixgbe_hw *hw) 1520 { 1521 int32_t i; 1522 1523 /* start with highest supported, proceed down */ 1524 static const enum ixgbe_pfvf_api_rev sup_ver[] = { 1525 ixgbe_mbox_api_13, 1526 ixgbe_mbox_api_12, 1527 ixgbe_mbox_api_11, 1528 ixgbe_mbox_api_10, 1529 }; 1530 1531 for (i = 0; 1532 i != RTE_DIM(sup_ver) && 1533 ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0; 1534 i++) 1535 ; 1536 } 1537 1538 static void 1539 generate_random_mac_addr(struct rte_ether_addr *mac_addr) 1540 { 1541 uint64_t random; 1542 1543 /* Set Organizationally Unique Identifier (OUI) prefix. */ 1544 mac_addr->addr_bytes[0] = 0x00; 1545 mac_addr->addr_bytes[1] = 0x09; 1546 mac_addr->addr_bytes[2] = 0xC0; 1547 /* Force indication of locally assigned MAC address. 
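     * (RTE_ETHER_LOCAL_ADMIN_ADDR is the locally-administered bit, 0x02, of
     * the first address octet; OR-ing it in below marks the generated address
     * as locally assigned rather than as belonging to the OUI owner.)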
*/ 1548 mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR; 1549 /* Generate the last 3 bytes of the MAC address with a random number. */ 1550 random = rte_rand(); 1551 memcpy(&mac_addr->addr_bytes[3], &random, 3); 1552 } 1553 1554 static int 1555 devarg_handle_int(__rte_unused const char *key, const char *value, 1556 void *extra_args) 1557 { 1558 uint16_t *n = extra_args; 1559 1560 if (value == NULL || extra_args == NULL) 1561 return -EINVAL; 1562 1563 *n = (uint16_t)strtoul(value, NULL, 0); 1564 if (*n == USHRT_MAX && errno == ERANGE) 1565 return -1; 1566 1567 return 0; 1568 } 1569 1570 static void 1571 ixgbevf_parse_devargs(struct ixgbe_adapter *adapter, 1572 struct rte_devargs *devargs) 1573 { 1574 struct rte_kvargs *kvlist; 1575 uint16_t pflink_fullchk; 1576 1577 if (devargs == NULL) 1578 return; 1579 1580 kvlist = rte_kvargs_parse(devargs->args, ixgbevf_valid_arguments); 1581 if (kvlist == NULL) 1582 return; 1583 1584 if (rte_kvargs_count(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK) == 1 && 1585 rte_kvargs_process(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK, 1586 devarg_handle_int, &pflink_fullchk) == 0 && 1587 pflink_fullchk == 1) 1588 adapter->pflink_fullchk = 1; 1589 1590 rte_kvargs_free(kvlist); 1591 } 1592 1593 /* 1594 * Virtual Function device init 1595 */ 1596 static int 1597 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) 1598 { 1599 int diag; 1600 uint32_t tc, tcs; 1601 struct ixgbe_adapter *ad = eth_dev->data->dev_private; 1602 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1603 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 1604 struct ixgbe_hw *hw = 1605 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1606 struct ixgbe_vfta *shadow_vfta = 1607 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); 1608 struct ixgbe_hwstrip *hwstrip = 1609 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); 1610 struct rte_ether_addr *perm_addr = 1611 (struct rte_ether_addr *)hw->mac.perm_addr; 1612 1613 PMD_INIT_FUNC_TRACE(); 1614 1615 eth_dev->dev_ops = &ixgbevf_eth_dev_ops; 1616 eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status; 1617 eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status; 1618 eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; 1619 eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; 1620 1621 /* for secondary processes, we don't initialise any further as primary 1622 * has already done this work. Only check we don't need a different 1623 * RX function 1624 */ 1625 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1626 struct ci_tx_queue *txq; 1627 /* TX queue function in primary, set by last queue initialized 1628 * Tx queue may not initialized by primary process 1629 */ 1630 if (eth_dev->data->tx_queues) { 1631 txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1]; 1632 ixgbe_set_tx_function(eth_dev, txq); 1633 } else { 1634 /* Use default TX function if we get here */ 1635 PMD_INIT_LOG(NOTICE, 1636 "No TX queues configured yet. 
Using default TX function."); 1637 } 1638 1639 ixgbe_set_rx_function(eth_dev); 1640 1641 return 0; 1642 } 1643 1644 /* NOTE: review for potential ordering optimization */ 1645 rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst); 1646 ixgbevf_parse_devargs(eth_dev->data->dev_private, 1647 pci_dev->device.devargs); 1648 1649 rte_eth_copy_pci_info(eth_dev, pci_dev); 1650 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 1651 1652 hw->device_id = pci_dev->id.device_id; 1653 hw->vendor_id = pci_dev->id.vendor_id; 1654 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; 1655 1656 /* initialize the vfta */ 1657 memset(shadow_vfta, 0, sizeof(*shadow_vfta)); 1658 1659 /* initialize the hw strip bitmap*/ 1660 memset(hwstrip, 0, sizeof(*hwstrip)); 1661 1662 /* Initialize the shared code (base driver) */ 1663 diag = ixgbe_init_shared_code(hw); 1664 if (diag != IXGBE_SUCCESS) { 1665 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag); 1666 return -EIO; 1667 } 1668 1669 /* init_mailbox_params */ 1670 hw->mbx.ops[0].init_params(hw); 1671 1672 /* Reset the hw statistics */ 1673 ixgbevf_dev_stats_reset(eth_dev); 1674 1675 /* Disable the interrupts for VF */ 1676 ixgbevf_intr_disable(eth_dev); 1677 1678 hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */ 1679 diag = hw->mac.ops.reset_hw(hw); 1680 1681 /* 1682 * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when 1683 * the underlying PF driver has not assigned a MAC address to the VF. 1684 * In this case, assign a random MAC address. 1685 */ 1686 if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) { 1687 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); 1688 /* 1689 * This error code will be propagated to the app by 1690 * rte_eth_dev_reset, so use a public error code rather than 1691 * the internal-only IXGBE_ERR_RESET_FAILED 1692 */ 1693 return -EAGAIN; 1694 } 1695 1696 /* negotiate mailbox API version to use with the PF. */ 1697 ixgbevf_negotiate_api(hw); 1698 1699 /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */ 1700 ixgbevf_get_queues(hw, &tcs, &tc); 1701 1702 /* Allocate memory for storing MAC addresses */ 1703 eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", RTE_ETHER_ADDR_LEN * 1704 hw->mac.num_rar_entries, 0); 1705 if (eth_dev->data->mac_addrs == NULL) { 1706 PMD_INIT_LOG(ERR, 1707 "Failed to allocate %u bytes needed to store " 1708 "MAC addresses", 1709 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries); 1710 return -ENOMEM; 1711 } 1712 1713 /* Generate a random MAC address, if none was assigned by PF. 
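     * (hw->mac.perm_addr is left all-zero when the host PF has not
     * provisioned a MAC for this VF, the same situation the reset handling
     * above tolerates as IXGBE_ERR_INVALID_MAC_ADDR.)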
 */
    if (rte_is_zero_ether_addr(perm_addr)) {
        generate_random_mac_addr(perm_addr);
        diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
        if (diag) {
            rte_free(eth_dev->data->mac_addrs);
            eth_dev->data->mac_addrs = NULL;
            return diag;
        }
        PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
        PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
                RTE_ETHER_ADDR_PRT_FMT,
                RTE_ETHER_ADDR_BYTES(perm_addr));
    }

    /* Copy the permanent MAC address */
    rte_ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);

    /* reset the hardware with the new settings */
    diag = hw->mac.ops.start_hw(hw);
    switch (diag) {
    case 0:
        break;

    default:
        PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
        rte_free(eth_dev->data->mac_addrs);
        eth_dev->data->mac_addrs = NULL;
        return -EIO;
    }

    rte_intr_callback_register(intr_handle,
            ixgbevf_dev_interrupt_handler, eth_dev);
    rte_intr_enable(intr_handle);
    ixgbevf_intr_enable(eth_dev);

    PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
            eth_dev->data->port_id, pci_dev->id.vendor_id,
            pci_dev->id.device_id, "ixgbe_mac_82599_vf");

    return 0;
}

/* Virtual Function device uninit */

static int
eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
{
    PMD_INIT_FUNC_TRACE();

    if (rte_eal_process_type() != RTE_PROC_PRIMARY)
        return 0;

    ixgbevf_dev_close(eth_dev);

    return 0;
}

static int
eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct rte_pci_device *pci_dev)
{
    char name[RTE_ETH_NAME_MAX_LEN];
    struct rte_eth_dev *pf_ethdev;
    struct rte_eth_devargs eth_da;
    int i, retval;

    if (pci_dev->device.devargs) {
        retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
                &eth_da, 1);
        if (retval < 0)
            return retval;
    } else
        memset(&eth_da, 0, sizeof(eth_da));

    if (eth_da.nb_representor_ports > 0 &&
            eth_da.type != RTE_ETH_REPRESENTOR_VF) {
        PMD_DRV_LOG(ERR, "unsupported representor type: %s",
                pci_dev->device.devargs->args);
        return -ENOTSUP;
    }

    retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
            sizeof(struct ixgbe_adapter),
            eth_dev_pci_specific_init, pci_dev,
            eth_ixgbe_dev_init, NULL);

    if (retval || eth_da.nb_representor_ports < 1)
        return retval;

    pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
    if (pf_ethdev == NULL)
        return -ENODEV;

    /* probe VF representor ports */
    for (i = 0; i < eth_da.nb_representor_ports; i++) {
        struct ixgbe_vf_info *vfinfo;
        struct ixgbe_vf_representor representor;

        vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(
                pf_ethdev->data->dev_private);
        if (vfinfo == NULL) {
            PMD_DRV_LOG(ERR,
                    "no virtual functions supported by PF");
            break;
        }

        representor.vf_id = eth_da.representor_ports[i];
        representor.switch_domain_id = vfinfo->switch_domain_id;
        representor.pf_ethdev = pf_ethdev;

        /* representor port name: net_<PF device name>_representor_<vf id> */
        snprintf(name, sizeof(name), "net_%s_representor_%d",
                pci_dev->device.name,
                eth_da.representor_ports[i]);

        retval = rte_eth_dev_create(&pci_dev->device, name,
                sizeof(struct ixgbe_vf_representor), NULL, NULL,
                ixgbe_vf_representor_init, &representor);
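        /*
         * For reference (illustrative only): representor ports are requested
         * through the PF's devargs, e.g. an allow-list entry such as
         * "0000:01:00.0,representor=[0-3]", which rte_eth_devargs_parse()
         * above turns into the eth_da.representor_ports[] list iterated here.
         */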
1832 1833 if (retval) 1834 PMD_DRV_LOG(ERR, "failed to create ixgbe vf " 1835 "representor %s.", name); 1836 } 1837 1838 return 0; 1839 } 1840 1841 static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev) 1842 { 1843 struct rte_eth_dev *ethdev; 1844 1845 ethdev = rte_eth_dev_allocated(pci_dev->device.name); 1846 if (!ethdev) 1847 return 0; 1848 1849 if (rte_eth_dev_is_repr(ethdev)) 1850 return rte_eth_dev_pci_generic_remove(pci_dev, 1851 ixgbe_vf_representor_uninit); 1852 else 1853 return rte_eth_dev_pci_generic_remove(pci_dev, 1854 eth_ixgbe_dev_uninit); 1855 } 1856 1857 static struct rte_pci_driver rte_ixgbe_pmd = { 1858 .id_table = pci_id_ixgbe_map, 1859 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 1860 .probe = eth_ixgbe_pci_probe, 1861 .remove = eth_ixgbe_pci_remove, 1862 }; 1863 1864 static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1865 struct rte_pci_device *pci_dev) 1866 { 1867 return rte_eth_dev_pci_generic_probe(pci_dev, 1868 sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init); 1869 } 1870 1871 static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev) 1872 { 1873 return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit); 1874 } 1875 1876 /* 1877 * virtual function driver struct 1878 */ 1879 static struct rte_pci_driver rte_ixgbevf_pmd = { 1880 .id_table = pci_id_ixgbevf_map, 1881 .drv_flags = RTE_PCI_DRV_NEED_MAPPING, 1882 .probe = eth_ixgbevf_pci_probe, 1883 .remove = eth_ixgbevf_pci_remove, 1884 }; 1885 1886 static int 1887 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 1888 { 1889 struct ixgbe_hw *hw = 1890 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1891 struct ixgbe_vfta *shadow_vfta = 1892 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 1893 uint32_t vfta; 1894 uint32_t vid_idx; 1895 uint32_t vid_bit; 1896 1897 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 1898 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 1899 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx)); 1900 if (on) 1901 vfta |= vid_bit; 1902 else 1903 vfta &= ~vid_bit; 1904 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta); 1905 1906 /* update local VFTA copy */ 1907 shadow_vfta->vfta[vid_idx] = vfta; 1908 1909 return 0; 1910 } 1911 1912 static void 1913 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) 1914 { 1915 if (on) 1916 ixgbe_vlan_hw_strip_enable(dev, queue); 1917 else 1918 ixgbe_vlan_hw_strip_disable(dev, queue); 1919 } 1920 1921 static int 1922 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, 1923 enum rte_vlan_type vlan_type, 1924 uint16_t tpid) 1925 { 1926 struct ixgbe_hw *hw = 1927 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1928 int ret = 0; 1929 uint32_t reg; 1930 uint32_t qinq; 1931 1932 qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1933 qinq &= IXGBE_DMATXCTL_GDV; 1934 1935 switch (vlan_type) { 1936 case RTE_ETH_VLAN_TYPE_INNER: 1937 if (qinq) { 1938 reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1939 reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; 1940 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); 1941 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1942 reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) 1943 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); 1944 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); 1945 } else { 1946 ret = -ENOTSUP; 1947 PMD_DRV_LOG(ERR, "Inner type is not supported" 1948 " by single VLAN"); 1949 } 1950 break; 1951 case RTE_ETH_VLAN_TYPE_OUTER: 1952 if (qinq) { 1953 /* Only the high 16-bits is valid */ 1954 IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid << 1955 
IXGBE_EXVET_VET_EXT_SHIFT); 1956 } else { 1957 reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1958 reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; 1959 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); 1960 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1961 reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) 1962 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); 1963 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); 1964 } 1965 1966 break; 1967 default: 1968 ret = -EINVAL; 1969 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type); 1970 break; 1971 } 1972 1973 return ret; 1974 } 1975 1976 void 1977 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev) 1978 { 1979 struct ixgbe_hw *hw = 1980 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1981 uint32_t vlnctrl; 1982 1983 PMD_INIT_FUNC_TRACE(); 1984 1985 /* Filter Table Disable */ 1986 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1987 vlnctrl &= ~IXGBE_VLNCTRL_VFE; 1988 1989 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1990 } 1991 1992 void 1993 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev) 1994 { 1995 struct ixgbe_hw *hw = 1996 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1997 struct ixgbe_vfta *shadow_vfta = 1998 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 1999 uint32_t vlnctrl; 2000 uint16_t i; 2001 2002 PMD_INIT_FUNC_TRACE(); 2003 2004 /* Filter Table Enable */ 2005 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2006 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; 2007 vlnctrl |= IXGBE_VLNCTRL_VFE; 2008 2009 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 2010 2011 /* write whatever is in local vfta copy */ 2012 for (i = 0; i < IXGBE_VFTA_SIZE; i++) 2013 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]); 2014 } 2015 2016 static void 2017 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) 2018 { 2019 struct ixgbe_hwstrip *hwstrip = 2020 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private); 2021 struct ixgbe_rx_queue *rxq; 2022 2023 if (queue >= IXGBE_MAX_RX_QUEUE_NUM) 2024 return; 2025 2026 if (on) 2027 IXGBE_SET_HWSTRIP(hwstrip, queue); 2028 else 2029 IXGBE_CLEAR_HWSTRIP(hwstrip, queue); 2030 2031 if (queue >= dev->data->nb_rx_queues) 2032 return; 2033 2034 rxq = dev->data->rx_queues[queue]; 2035 2036 if (on) { 2037 rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED; 2038 rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 2039 } else { 2040 rxq->vlan_flags = RTE_MBUF_F_RX_VLAN; 2041 rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 2042 } 2043 } 2044 2045 static void 2046 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue) 2047 { 2048 struct ixgbe_hw *hw = 2049 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2050 uint32_t ctrl; 2051 2052 PMD_INIT_FUNC_TRACE(); 2053 2054 if (hw->mac.type == ixgbe_mac_82598EB) { 2055 /* No queue level support */ 2056 PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); 2057 return; 2058 } 2059 2060 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ 2061 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 2062 ctrl &= ~IXGBE_RXDCTL_VME; 2063 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 2064 2065 /* record those setting for HW strip per queue */ 2066 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0); 2067 } 2068 2069 static void 2070 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue) 2071 { 2072 struct ixgbe_hw *hw = 2073 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2074 uint32_t ctrl; 2075 2076 PMD_INIT_FUNC_TRACE(); 2077 2078 if (hw->mac.type == ixgbe_mac_82598EB) { 2079 /* No queue level supported */ 2080 
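        /*
         * 82598 only offers the device-global VLNCTRL.VME strip bit (handled
         * in ixgbe_vlan_hw_strip_config() below); per-queue RXDCTL.VME
         * control exists only on later MACs.
         */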
PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
        return;
    }

    /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
    ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
    ctrl |= IXGBE_RXDCTL_VME;
    IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);

    /* record those setting for HW strip per queue */
    ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
    struct ixgbe_hw *hw =
        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    uint32_t ctrl;

    PMD_INIT_FUNC_TRACE();

    /* DMATXCTRL: Generic Double VLAN Disable */
    ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
    ctrl &= ~IXGBE_DMATXCTL_GDV;
    IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);

    /* CTRL_EXT: Global Double VLAN Disable */
    ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    ctrl &= ~IXGBE_EXTENDED_VLAN;
    IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
}

static void
ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
    struct ixgbe_hw *hw =
        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    uint32_t ctrl;

    PMD_INIT_FUNC_TRACE();

    /* DMATXCTRL: Generic Double VLAN Enable */
    ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
    ctrl |= IXGBE_DMATXCTL_GDV;
    IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);

    /* CTRL_EXT: Global Double VLAN Enable */
    ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    ctrl |= IXGBE_EXTENDED_VLAN;
    IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);

    /* Clear pooling mode of PFVTCTL. It's required by X550. */
    if (hw->mac.type == ixgbe_mac_X550 ||
            hw->mac.type == ixgbe_mac_X550EM_x ||
            hw->mac.type == ixgbe_mac_X550EM_a) {
        ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
        ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
        IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
    }

    /*
     * VET EXT field in the EXVET register = 0x8100 by default
     * So no need to change.
Same to VT field of DMATXCTL register 2145 */ 2146 } 2147 2148 void 2149 ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev) 2150 { 2151 struct ixgbe_hw *hw = 2152 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2153 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; 2154 uint32_t ctrl; 2155 uint16_t i; 2156 struct ixgbe_rx_queue *rxq; 2157 bool on; 2158 2159 PMD_INIT_FUNC_TRACE(); 2160 2161 if (hw->mac.type == ixgbe_mac_82598EB) { 2162 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) { 2163 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2164 ctrl |= IXGBE_VLNCTRL_VME; 2165 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 2166 } else { 2167 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2168 ctrl &= ~IXGBE_VLNCTRL_VME; 2169 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 2170 } 2171 } else { 2172 /* 2173 * Other 10G NIC, the VLAN strip can be setup 2174 * per queue in RXDCTL 2175 */ 2176 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2177 rxq = dev->data->rx_queues[i]; 2178 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); 2179 if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) { 2180 ctrl |= IXGBE_RXDCTL_VME; 2181 on = TRUE; 2182 } else { 2183 ctrl &= ~IXGBE_RXDCTL_VME; 2184 on = FALSE; 2185 } 2186 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl); 2187 2188 /* record those setting for HW strip per queue */ 2189 ixgbe_vlan_hw_strip_bitmap_set(dev, i, on); 2190 } 2191 } 2192 } 2193 2194 static void 2195 ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask) 2196 { 2197 uint16_t i; 2198 struct rte_eth_rxmode *rxmode; 2199 struct ixgbe_rx_queue *rxq; 2200 2201 if (mask & RTE_ETH_VLAN_STRIP_MASK) { 2202 rxmode = &dev->data->dev_conf.rxmode; 2203 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 2204 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2205 rxq = dev->data->rx_queues[i]; 2206 rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 2207 } 2208 else 2209 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2210 rxq = dev->data->rx_queues[i]; 2211 rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 2212 } 2213 } 2214 } 2215 2216 static int 2217 ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) 2218 { 2219 struct rte_eth_rxmode *rxmode; 2220 rxmode = &dev->data->dev_conf.rxmode; 2221 2222 if (mask & RTE_ETH_VLAN_STRIP_MASK) 2223 ixgbe_vlan_hw_strip_config(dev); 2224 2225 if (mask & RTE_ETH_VLAN_FILTER_MASK) { 2226 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 2227 ixgbe_vlan_hw_filter_enable(dev); 2228 else 2229 ixgbe_vlan_hw_filter_disable(dev); 2230 } 2231 2232 if (mask & RTE_ETH_VLAN_EXTEND_MASK) { 2233 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 2234 ixgbe_vlan_hw_extend_enable(dev); 2235 else 2236 ixgbe_vlan_hw_extend_disable(dev); 2237 } 2238 2239 return 0; 2240 } 2241 2242 static int 2243 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) 2244 { 2245 ixgbe_config_vlan_strip_on_all_queues(dev, mask); 2246 2247 ixgbe_vlan_offload_config(dev, mask); 2248 2249 return 0; 2250 } 2251 2252 static void 2253 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) 2254 { 2255 struct ixgbe_hw *hw = 2256 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2257 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ 2258 uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2259 2260 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ 2261 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); 2262 } 2263 2264 static int 2265 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q) 2266 { 2267 struct rte_pci_device *pci_dev 
= RTE_ETH_DEV_TO_PCI(dev); 2268 2269 switch (nb_rx_q) { 2270 case 1: 2271 case 2: 2272 RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS; 2273 break; 2274 case 4: 2275 RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS; 2276 break; 2277 default: 2278 return -EINVAL; 2279 } 2280 2281 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 2282 IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; 2283 RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = 2284 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; 2285 return 0; 2286 } 2287 2288 static int 2289 ixgbe_check_mq_mode(struct rte_eth_dev *dev) 2290 { 2291 struct rte_eth_conf *dev_conf = &dev->data->dev_conf; 2292 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2293 uint16_t nb_rx_q = dev->data->nb_rx_queues; 2294 uint16_t nb_tx_q = dev->data->nb_tx_queues; 2295 2296 if (RTE_ETH_DEV_SRIOV(dev).active != 0) { 2297 /* check multi-queue mode */ 2298 switch (dev_conf->rxmode.mq_mode) { 2299 case RTE_ETH_MQ_RX_VMDQ_DCB: 2300 PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV"); 2301 break; 2302 case RTE_ETH_MQ_RX_VMDQ_DCB_RSS: 2303 /* DCB/RSS VMDQ in SRIOV mode, not implement yet */ 2304 PMD_INIT_LOG(ERR, "SRIOV active," 2305 " unsupported mq_mode rx %d.", 2306 dev_conf->rxmode.mq_mode); 2307 return -EINVAL; 2308 case RTE_ETH_MQ_RX_RSS: 2309 case RTE_ETH_MQ_RX_VMDQ_RSS: 2310 dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS; 2311 if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) 2312 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) { 2313 PMD_INIT_LOG(ERR, "SRIOV is active," 2314 " invalid queue number" 2315 " for VMDQ RSS, allowed" 2316 " value are 1, 2 or 4."); 2317 return -EINVAL; 2318 } 2319 break; 2320 case RTE_ETH_MQ_RX_VMDQ_ONLY: 2321 case RTE_ETH_MQ_RX_NONE: 2322 /* if nothing mq mode configure, use default scheme */ 2323 dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY; 2324 break; 2325 default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB*/ 2326 /* SRIOV only works in VMDq enable mode */ 2327 PMD_INIT_LOG(ERR, "SRIOV is active," 2328 " wrong mq_mode rx %d.", 2329 dev_conf->rxmode.mq_mode); 2330 return -EINVAL; 2331 } 2332 2333 switch (dev_conf->txmode.mq_mode) { 2334 case RTE_ETH_MQ_TX_VMDQ_DCB: 2335 PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV"); 2336 dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB; 2337 break; 2338 default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */ 2339 dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_ONLY; 2340 break; 2341 } 2342 2343 /* check valid queue number */ 2344 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) || 2345 (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) { 2346 PMD_INIT_LOG(ERR, "SRIOV is active," 2347 " nb_rx_q=%d nb_tx_q=%d queue number" 2348 " must be less than or equal to %d.", 2349 nb_rx_q, nb_tx_q, 2350 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool); 2351 return -EINVAL; 2352 } 2353 } else { 2354 if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) { 2355 PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is" 2356 " not supported."); 2357 return -EINVAL; 2358 } 2359 /* check configuration for vmdb+dcb mode */ 2360 if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) { 2361 const struct rte_eth_vmdq_dcb_conf *conf; 2362 2363 if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { 2364 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.", 2365 IXGBE_VMDQ_DCB_NB_QUEUES); 2366 return -EINVAL; 2367 } 2368 conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf; 2369 if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS || 2370 
conf->nb_queue_pools == RTE_ETH_32_POOLS)) { 2371 PMD_INIT_LOG(ERR, "VMDQ+DCB selected," 2372 " nb_queue_pools must be %d or %d.", 2373 RTE_ETH_16_POOLS, RTE_ETH_32_POOLS); 2374 return -EINVAL; 2375 } 2376 } 2377 if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) { 2378 const struct rte_eth_vmdq_dcb_tx_conf *conf; 2379 2380 if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { 2381 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d", 2382 IXGBE_VMDQ_DCB_NB_QUEUES); 2383 return -EINVAL; 2384 } 2385 conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf; 2386 if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS || 2387 conf->nb_queue_pools == RTE_ETH_32_POOLS)) { 2388 PMD_INIT_LOG(ERR, "VMDQ+DCB selected," 2389 " nb_queue_pools != %d and" 2390 " nb_queue_pools != %d.", 2391 RTE_ETH_16_POOLS, RTE_ETH_32_POOLS); 2392 return -EINVAL; 2393 } 2394 } 2395 2396 /* For DCB mode check our configuration before we go further */ 2397 if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) { 2398 const struct rte_eth_dcb_rx_conf *conf; 2399 2400 conf = &dev_conf->rx_adv_conf.dcb_rx_conf; 2401 if (!(conf->nb_tcs == RTE_ETH_4_TCS || 2402 conf->nb_tcs == RTE_ETH_8_TCS)) { 2403 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" 2404 " and nb_tcs != %d.", 2405 RTE_ETH_4_TCS, RTE_ETH_8_TCS); 2406 return -EINVAL; 2407 } 2408 } 2409 2410 if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) { 2411 const struct rte_eth_dcb_tx_conf *conf; 2412 2413 conf = &dev_conf->tx_adv_conf.dcb_tx_conf; 2414 if (!(conf->nb_tcs == RTE_ETH_4_TCS || 2415 conf->nb_tcs == RTE_ETH_8_TCS)) { 2416 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" 2417 " and nb_tcs != %d.", 2418 RTE_ETH_4_TCS, RTE_ETH_8_TCS); 2419 return -EINVAL; 2420 } 2421 } 2422 2423 /* 2424 * When DCB/VT is off, maximum number of queues changes, 2425 * except for 82598EB, which remains constant. 2426 */ 2427 if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE && 2428 hw->mac.type != ixgbe_mac_82598EB) { 2429 if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) { 2430 PMD_INIT_LOG(ERR, 2431 "Neither VT nor DCB are enabled, " 2432 "nb_tx_q > %d.", 2433 IXGBE_NONE_MODE_TX_NB_QUEUES); 2434 return -EINVAL; 2435 } 2436 } 2437 } 2438 return 0; 2439 } 2440 2441 static int 2442 ixgbe_dev_configure(struct rte_eth_dev *dev) 2443 { 2444 struct ixgbe_interrupt *intr = 2445 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2446 struct ixgbe_adapter *adapter = dev->data->dev_private; 2447 int ret; 2448 2449 PMD_INIT_FUNC_TRACE(); 2450 2451 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) 2452 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 2453 2454 /* multiple queue mode checking */ 2455 ret = ixgbe_check_mq_mode(dev); 2456 if (ret != 0) { 2457 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.", 2458 ret); 2459 return ret; 2460 } 2461 2462 /* set flag to update link status after init */ 2463 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 2464 2465 /* 2466 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk 2467 * allocation or vector Rx preconditions we will reset it. 
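     * (The Rx queue setup code clears these flags again if, for instance, a
     * queue's descriptor count or rx_free_thresh rules out the
     * bulk-allocation or vector receive paths.)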
2468 */ 2469 adapter->rx_bulk_alloc_allowed = true; 2470 adapter->rx_vec_allowed = true; 2471 2472 return 0; 2473 } 2474 2475 static void 2476 ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev) 2477 { 2478 struct ixgbe_hw *hw = 2479 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2480 struct ixgbe_interrupt *intr = 2481 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2482 uint32_t gpie; 2483 2484 /* only set up it on X550EM_X */ 2485 if (hw->mac.type == ixgbe_mac_X550EM_x) { 2486 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 2487 gpie |= IXGBE_SDP0_GPIEN_X550EM_x; 2488 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 2489 if (hw->phy.type == ixgbe_phy_x550em_ext_t) 2490 intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x; 2491 } 2492 } 2493 2494 int 2495 ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, 2496 uint32_t tx_rate, uint64_t q_msk) 2497 { 2498 struct ixgbe_hw *hw; 2499 struct ixgbe_vf_info *vfinfo; 2500 struct rte_eth_link link; 2501 uint8_t nb_q_per_pool; 2502 uint32_t queue_stride; 2503 uint32_t queue_idx, idx = 0, vf_idx; 2504 uint32_t queue_end; 2505 uint16_t total_rate = 0; 2506 struct rte_pci_device *pci_dev; 2507 int ret; 2508 2509 pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2510 ret = rte_eth_link_get_nowait(dev->data->port_id, &link); 2511 if (ret < 0) 2512 return ret; 2513 2514 if (vf >= pci_dev->max_vfs) 2515 return -EINVAL; 2516 2517 if (tx_rate > link.link_speed) 2518 return -EINVAL; 2519 2520 if (q_msk == 0) 2521 return 0; 2522 2523 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2524 vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); 2525 nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; 2526 queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; 2527 queue_idx = vf * queue_stride; 2528 queue_end = queue_idx + nb_q_per_pool - 1; 2529 if (queue_end >= hw->mac.max_tx_queues) 2530 return -EINVAL; 2531 2532 if (vfinfo) { 2533 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) { 2534 if (vf_idx == vf) 2535 continue; 2536 for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate); 2537 idx++) 2538 total_rate += vfinfo[vf_idx].tx_rate[idx]; 2539 } 2540 } else { 2541 return -EINVAL; 2542 } 2543 2544 /* Store tx_rate for this vf. */ 2545 for (idx = 0; idx < nb_q_per_pool; idx++) { 2546 if (((uint64_t)0x1 << idx) & q_msk) { 2547 if (vfinfo[vf].tx_rate[idx] != tx_rate) 2548 vfinfo[vf].tx_rate[idx] = tx_rate; 2549 total_rate += tx_rate; 2550 } 2551 } 2552 2553 if (total_rate > dev->data->dev_link.link_speed) { 2554 /* Reset stored TX rate of the VF if it causes exceed 2555 * link speed. 
2556 */ 2557 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate)); 2558 return -EINVAL; 2559 } 2560 2561 /* Set RTTBCNRC of each queue/pool for vf X */ 2562 for (; queue_idx <= queue_end; queue_idx++) { 2563 if (0x1 & q_msk) 2564 ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate); 2565 q_msk = q_msk >> 1; 2566 } 2567 2568 return 0; 2569 } 2570 2571 static int 2572 ixgbe_flow_ctrl_enable(struct rte_eth_dev *dev, struct ixgbe_hw *hw) 2573 { 2574 struct ixgbe_adapter *adapter = dev->data->dev_private; 2575 int err; 2576 uint32_t mflcn; 2577 2578 ixgbe_setup_fc(hw); 2579 2580 err = ixgbe_fc_enable(hw); 2581 2582 /* Not negotiated is not an error case */ 2583 if (err == IXGBE_SUCCESS || err == IXGBE_ERR_FC_NOT_NEGOTIATED) { 2584 /* 2585 *check if we want to forward MAC frames - driver doesn't 2586 *have native capability to do that, 2587 *so we'll write the registers ourselves 2588 */ 2589 2590 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); 2591 2592 /* set or clear MFLCN.PMCF bit depending on configuration */ 2593 if (adapter->mac_ctrl_frame_fwd != 0) 2594 mflcn |= IXGBE_MFLCN_PMCF; 2595 else 2596 mflcn &= ~IXGBE_MFLCN_PMCF; 2597 2598 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn); 2599 IXGBE_WRITE_FLUSH(hw); 2600 2601 return 0; 2602 } 2603 return err; 2604 } 2605 2606 /* 2607 * Configure device link speed and setup link. 2608 * It returns 0 on success. 2609 */ 2610 static int 2611 ixgbe_dev_start(struct rte_eth_dev *dev) 2612 { 2613 struct ixgbe_hw *hw = 2614 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2615 struct ixgbe_vf_info *vfinfo = 2616 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); 2617 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2618 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 2619 uint32_t intr_vector = 0; 2620 int err; 2621 bool link_up = false, negotiate = 0; 2622 uint32_t speed = 0; 2623 uint32_t allowed_speeds = 0; 2624 int mask = 0; 2625 int status; 2626 uint16_t vf, idx; 2627 uint32_t *link_speeds; 2628 struct ixgbe_tm_conf *tm_conf = 2629 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); 2630 struct ixgbe_macsec_setting *macsec_setting = 2631 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private); 2632 2633 PMD_INIT_FUNC_TRACE(); 2634 2635 /* Stop the link setup handler before resetting the HW. 
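     * (An earlier link update may still have a link-setup thread running in
     * the background; the wait below keeps this reset from racing with it.)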
*/ 2636 ixgbe_dev_wait_setup_link_complete(dev, 0); 2637 2638 /* disable uio/vfio intr/eventfd mapping */ 2639 rte_intr_disable(intr_handle); 2640 2641 /* stop adapter */ 2642 hw->adapter_stopped = 0; 2643 ixgbe_stop_adapter(hw); 2644 2645 /* reinitialize adapter 2646 * this calls reset and start 2647 */ 2648 status = ixgbe_pf_reset_hw(hw); 2649 if (status != 0) 2650 return -1; 2651 hw->mac.ops.start_hw(hw); 2652 hw->mac.get_link_status = true; 2653 2654 /* configure PF module if SRIOV enabled */ 2655 ixgbe_pf_host_configure(dev); 2656 2657 ixgbe_dev_phy_intr_setup(dev); 2658 2659 /* check and configure queue intr-vector mapping */ 2660 if ((rte_intr_cap_multiple(intr_handle) || 2661 !RTE_ETH_DEV_SRIOV(dev).active) && 2662 dev->data->dev_conf.intr_conf.rxq != 0) { 2663 intr_vector = dev->data->nb_rx_queues; 2664 if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) { 2665 PMD_INIT_LOG(ERR, "At most %d intr queues supported", 2666 IXGBE_MAX_INTR_QUEUE_NUM); 2667 return -ENOTSUP; 2668 } 2669 if (rte_intr_efd_enable(intr_handle, intr_vector)) 2670 return -1; 2671 } 2672 2673 if (rte_intr_dp_is_en(intr_handle)) { 2674 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", 2675 dev->data->nb_rx_queues)) { 2676 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 2677 " intr_vec", dev->data->nb_rx_queues); 2678 return -ENOMEM; 2679 } 2680 } 2681 2682 /* configure MSI-X for sleep until Rx interrupt */ 2683 ixgbe_configure_msix(dev); 2684 2685 /* initialize transmission unit */ 2686 ixgbe_dev_tx_init(dev); 2687 2688 /* This can fail when allocating mbufs for descriptor rings */ 2689 err = ixgbe_dev_rx_init(dev); 2690 if (err) { 2691 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); 2692 goto error; 2693 } 2694 2695 mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK | 2696 RTE_ETH_VLAN_EXTEND_MASK; 2697 err = ixgbe_vlan_offload_config(dev, mask); 2698 if (err) { 2699 PMD_INIT_LOG(ERR, "Unable to set VLAN offload"); 2700 goto error; 2701 } 2702 2703 if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) { 2704 /* Enable vlan filtering for VMDq */ 2705 ixgbe_vmdq_vlan_hw_filter_enable(dev); 2706 } 2707 2708 /* Configure DCB hw */ 2709 ixgbe_configure_dcb(dev); 2710 2711 if (IXGBE_DEV_FDIR_CONF(dev)->mode != RTE_FDIR_MODE_NONE) { 2712 err = ixgbe_fdir_configure(dev); 2713 if (err) 2714 goto error; 2715 } 2716 2717 /* Restore vf rate limit */ 2718 if (vfinfo != NULL) { 2719 for (vf = 0; vf < pci_dev->max_vfs; vf++) 2720 for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++) 2721 if (vfinfo[vf].tx_rate[idx] != 0) 2722 ixgbe_set_vf_rate_limit( 2723 dev, vf, 2724 vfinfo[vf].tx_rate[idx], 2725 1 << idx); 2726 } 2727 2728 ixgbe_restore_statistics_mapping(dev); 2729 2730 err = ixgbe_flow_ctrl_enable(dev, hw); 2731 if (err < 0) { 2732 PMD_INIT_LOG(ERR, "enable flow ctrl err"); 2733 goto error; 2734 } 2735 2736 err = ixgbe_dev_rxtx_start(dev); 2737 if (err < 0) { 2738 PMD_INIT_LOG(ERR, "Unable to start rxtx queues"); 2739 goto error; 2740 } 2741 2742 /* Skip link setup if loopback mode is enabled. 
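     * (The driver only supports its internal Tx->Rx loopback, and only on
     * some MAC types; ixgbe_check_supported_loopback_mode() below rejects
     * anything else.)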
*/ 2743 if (dev->data->dev_conf.lpbk_mode != 0) { 2744 err = ixgbe_check_supported_loopback_mode(dev); 2745 if (err < 0) { 2746 PMD_INIT_LOG(ERR, "Unsupported loopback mode"); 2747 goto error; 2748 } else { 2749 goto skip_link_setup; 2750 } 2751 } 2752 2753 if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) { 2754 err = hw->mac.ops.setup_sfp(hw); 2755 if (err) 2756 goto error; 2757 } 2758 2759 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2760 /* Turn on the copper */ 2761 ixgbe_set_phy_power(hw, true); 2762 } else { 2763 /* Turn on the laser */ 2764 ixgbe_enable_tx_laser(hw); 2765 } 2766 2767 err = ixgbe_check_link(hw, &speed, &link_up, 0); 2768 if (err) 2769 goto error; 2770 dev->data->dev_link.link_status = link_up; 2771 2772 err = ixgbe_get_link_capabilities(hw, &speed, &negotiate); 2773 if (err) 2774 goto error; 2775 2776 switch (hw->mac.type) { 2777 case ixgbe_mac_X550: 2778 case ixgbe_mac_X550EM_x: 2779 case ixgbe_mac_X550EM_a: 2780 allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G | 2781 RTE_ETH_LINK_SPEED_2_5G | RTE_ETH_LINK_SPEED_5G | 2782 RTE_ETH_LINK_SPEED_10G; 2783 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || 2784 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) 2785 allowed_speeds = RTE_ETH_LINK_SPEED_10M | 2786 RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G; 2787 break; 2788 default: 2789 allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G | 2790 RTE_ETH_LINK_SPEED_10G; 2791 } 2792 2793 link_speeds = &dev->data->dev_conf.link_speeds; 2794 2795 /* Ignore autoneg flag bit and check the validity of 2796 * link_speed 2797 */ 2798 if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) { 2799 PMD_INIT_LOG(ERR, "Invalid link setting"); 2800 goto error; 2801 } 2802 2803 speed = 0x0; 2804 if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) { 2805 switch (hw->mac.type) { 2806 case ixgbe_mac_82598EB: 2807 speed = IXGBE_LINK_SPEED_82598_AUTONEG; 2808 break; 2809 case ixgbe_mac_82599EB: 2810 case ixgbe_mac_X540: 2811 speed = IXGBE_LINK_SPEED_82599_AUTONEG; 2812 break; 2813 case ixgbe_mac_X550: 2814 case ixgbe_mac_X550EM_x: 2815 case ixgbe_mac_X550EM_a: 2816 speed = IXGBE_LINK_SPEED_X550_AUTONEG; 2817 break; 2818 default: 2819 speed = IXGBE_LINK_SPEED_82599_AUTONEG; 2820 } 2821 } else { 2822 if (*link_speeds & RTE_ETH_LINK_SPEED_10G) 2823 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2824 if (*link_speeds & RTE_ETH_LINK_SPEED_5G) 2825 speed |= IXGBE_LINK_SPEED_5GB_FULL; 2826 if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G) 2827 speed |= IXGBE_LINK_SPEED_2_5GB_FULL; 2828 if (*link_speeds & RTE_ETH_LINK_SPEED_1G) 2829 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2830 if (*link_speeds & RTE_ETH_LINK_SPEED_100M) 2831 speed |= IXGBE_LINK_SPEED_100_FULL; 2832 if (*link_speeds & RTE_ETH_LINK_SPEED_10M) 2833 speed |= IXGBE_LINK_SPEED_10_FULL; 2834 } 2835 2836 err = ixgbe_setup_link(hw, speed, link_up); 2837 if (err) 2838 goto error; 2839 2840 skip_link_setup: 2841 2842 if (rte_intr_allow_others(intr_handle)) { 2843 /* check if lsc interrupt is enabled */ 2844 if (dev->data->dev_conf.intr_conf.lsc != 0) 2845 ixgbe_dev_lsc_interrupt_setup(dev, TRUE); 2846 else 2847 ixgbe_dev_lsc_interrupt_setup(dev, FALSE); 2848 ixgbe_dev_macsec_interrupt_setup(dev); 2849 } else { 2850 rte_intr_callback_unregister(intr_handle, 2851 ixgbe_dev_interrupt_handler, dev); 2852 if (dev->data->dev_conf.intr_conf.lsc != 0) 2853 PMD_INIT_LOG(INFO, "lsc won't enable because of" 2854 " no intr multiplex"); 2855 } 2856 2857 /* check if rxq interrupt is enabled */ 2858 if (dev->data->dev_conf.intr_conf.rxq 
!= 0 && 2859 rte_intr_dp_is_en(intr_handle)) 2860 ixgbe_dev_rxq_interrupt_setup(dev); 2861 2862 /* enable uio/vfio intr/eventfd mapping */ 2863 rte_intr_enable(intr_handle); 2864 2865 /* resume enabled intr since hw reset */ 2866 ixgbe_enable_intr(dev); 2867 ixgbe_l2_tunnel_conf(dev); 2868 ixgbe_filter_restore(dev); 2869 2870 if (tm_conf->root && !tm_conf->committed) 2871 PMD_DRV_LOG(WARNING, 2872 "please call hierarchy_commit() " 2873 "before starting the port"); 2874 2875 /* wait for the controller to acquire link */ 2876 err = ixgbe_wait_for_link_up(hw); 2877 if (err) 2878 goto error; 2879 2880 /* 2881 * Update link status right before return, because it may 2882 * start link configuration process in a separate thread. 2883 */ 2884 ixgbe_dev_link_update(dev, 0); 2885 2886 /* setup the macsec setting register */ 2887 if (macsec_setting->offload_en) 2888 ixgbe_dev_macsec_register_enable(dev, macsec_setting); 2889 2890 return 0; 2891 2892 error: 2893 PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err); 2894 ixgbe_dev_clear_queues(dev); 2895 return -EIO; 2896 } 2897 2898 /* 2899 * Stop device: disable rx and tx functions to allow for reconfiguring. 2900 */ 2901 static int 2902 ixgbe_dev_stop(struct rte_eth_dev *dev) 2903 { 2904 struct rte_eth_link link; 2905 struct ixgbe_adapter *adapter = dev->data->dev_private; 2906 struct ixgbe_hw *hw = 2907 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2908 struct ixgbe_vf_info *vfinfo = 2909 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); 2910 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2911 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 2912 int vf; 2913 struct ixgbe_tm_conf *tm_conf = 2914 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); 2915 2916 if (hw->adapter_stopped) 2917 return 0; 2918 2919 PMD_INIT_FUNC_TRACE(); 2920 2921 ixgbe_dev_wait_setup_link_complete(dev, 0); 2922 2923 /* disable interrupts */ 2924 ixgbe_disable_intr(hw); 2925 2926 /* reset the NIC */ 2927 ixgbe_pf_reset_hw(hw); 2928 hw->adapter_stopped = 0; 2929 2930 /* stop adapter */ 2931 ixgbe_stop_adapter(hw); 2932 2933 for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++) 2934 vfinfo[vf].clear_to_send = false; 2935 2936 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2937 /* Turn off the copper */ 2938 ixgbe_set_phy_power(hw, false); 2939 } else { 2940 /* Turn off the laser */ 2941 ixgbe_disable_tx_laser(hw); 2942 } 2943 2944 ixgbe_dev_clear_queues(dev); 2945 2946 /* Clear stored conf */ 2947 dev->data->scattered_rx = 0; 2948 dev->data->lro = 0; 2949 2950 /* Clear recorded link status */ 2951 memset(&link, 0, sizeof(link)); 2952 rte_eth_linkstatus_set(dev, &link); 2953 2954 if (!rte_intr_allow_others(intr_handle)) 2955 /* resume to the default handler */ 2956 rte_intr_callback_register(intr_handle, 2957 ixgbe_dev_interrupt_handler, 2958 (void *)dev); 2959 2960 /* Clean datapath event and queue/vec mapping */ 2961 rte_intr_efd_disable(intr_handle); 2962 rte_intr_vec_list_free(intr_handle); 2963 2964 /* reset hierarchy commit */ 2965 tm_conf->committed = false; 2966 2967 adapter->rss_reta_updated = 0; 2968 2969 hw->adapter_stopped = true; 2970 dev->data->dev_started = 0; 2971 2972 return 0; 2973 } 2974 2975 /* 2976 * Set device link up: enable tx. 
2977 */ 2978 static int 2979 ixgbe_dev_set_link_up(struct rte_eth_dev *dev) 2980 { 2981 struct ixgbe_hw *hw = 2982 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2983 if (hw->mac.type == ixgbe_mac_82599EB) { 2984 #ifdef RTE_LIBRTE_IXGBE_BYPASS 2985 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { 2986 /* Not supported in bypass mode */ 2987 PMD_INIT_LOG(ERR, "Set link up is not supported " 2988 "by device id 0x%x", hw->device_id); 2989 return -ENOTSUP; 2990 } 2991 #endif 2992 } 2993 2994 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2995 /* Turn on the copper */ 2996 ixgbe_set_phy_power(hw, true); 2997 } else { 2998 /* Turn on the laser */ 2999 ixgbe_enable_tx_laser(hw); 3000 ixgbe_dev_link_update(dev, 0); 3001 } 3002 3003 return 0; 3004 } 3005 3006 /* 3007 * Set device link down: disable tx. 3008 */ 3009 static int 3010 ixgbe_dev_set_link_down(struct rte_eth_dev *dev) 3011 { 3012 struct ixgbe_hw *hw = 3013 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3014 if (hw->mac.type == ixgbe_mac_82599EB) { 3015 #ifdef RTE_LIBRTE_IXGBE_BYPASS 3016 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { 3017 /* Not supported in bypass mode */ 3018 PMD_INIT_LOG(ERR, "Set link down is not supported " 3019 "by device id 0x%x", hw->device_id); 3020 return -ENOTSUP; 3021 } 3022 #endif 3023 } 3024 3025 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 3026 /* Turn off the copper */ 3027 ixgbe_set_phy_power(hw, false); 3028 } else { 3029 /* Turn off the laser */ 3030 ixgbe_disable_tx_laser(hw); 3031 ixgbe_dev_link_update(dev, 0); 3032 } 3033 3034 return 0; 3035 } 3036 3037 /* 3038 * Reset and stop device. 3039 */ 3040 static int 3041 ixgbe_dev_close(struct rte_eth_dev *dev) 3042 { 3043 struct ixgbe_hw *hw = 3044 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3045 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3046 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 3047 int retries = 0; 3048 int ret; 3049 3050 PMD_INIT_FUNC_TRACE(); 3051 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 3052 return 0; 3053 3054 ixgbe_pf_reset_hw(hw); 3055 3056 ret = ixgbe_dev_stop(dev); 3057 3058 ixgbe_dev_free_queues(dev); 3059 3060 ixgbe_disable_pcie_primary(hw); 3061 3062 /* reprogram the RAR[0] in case user changed it. 
*/ 3063 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 3064 3065 /* Unlock any pending hardware semaphore */ 3066 ixgbe_swfw_lock_reset(hw); 3067 3068 /* disable uio intr before callback unregister */ 3069 rte_intr_disable(intr_handle); 3070 3071 do { 3072 ret = rte_intr_callback_unregister(intr_handle, 3073 ixgbe_dev_interrupt_handler, dev); 3074 if (ret >= 0 || ret == -ENOENT) { 3075 break; 3076 } else if (ret != -EAGAIN) { 3077 PMD_INIT_LOG(ERR, 3078 "intr callback unregister failed: %d", 3079 ret); 3080 } 3081 rte_delay_ms(100); 3082 } while (retries++ < (10 + IXGBE_LINK_UP_TIME)); 3083 3084 /* cancel the delay handler before remove dev */ 3085 rte_eal_alarm_cancel(ixgbe_dev_interrupt_delayed_handler, dev); 3086 3087 /* uninitialize PF if max_vfs not zero */ 3088 ixgbe_pf_host_uninit(dev); 3089 3090 /* remove all the fdir filters & hash */ 3091 ixgbe_fdir_filter_uninit(dev); 3092 3093 /* remove all the L2 tunnel filters & hash */ 3094 ixgbe_l2_tn_filter_uninit(dev); 3095 3096 /* Remove all ntuple filters of the device */ 3097 ixgbe_ntuple_filter_uninit(dev); 3098 3099 /* clear all the filters list */ 3100 ixgbe_filterlist_flush(); 3101 3102 /* Remove all Traffic Manager configuration */ 3103 ixgbe_tm_conf_uninit(dev); 3104 3105 #ifdef RTE_LIB_SECURITY 3106 rte_free(dev->security_ctx); 3107 dev->security_ctx = NULL; 3108 #endif 3109 3110 return ret; 3111 } 3112 3113 /* 3114 * Reset PF device. 3115 */ 3116 static int 3117 ixgbe_dev_reset(struct rte_eth_dev *dev) 3118 { 3119 int ret; 3120 3121 /* When a DPDK PMD PF begin to reset PF port, it should notify all 3122 * its VF to make them align with it. The detailed notification 3123 * mechanism is PMD specific. As to ixgbe PF, it is rather complex. 3124 * To avoid unexpected behavior in VF, currently reset of PF with 3125 * SR-IOV activation is not supported. It might be supported later. 3126 */ 3127 if (dev->data->sriov.active) 3128 return -ENOTSUP; 3129 3130 ret = eth_ixgbe_dev_uninit(dev); 3131 if (ret) 3132 return ret; 3133 3134 ret = eth_ixgbe_dev_init(dev, NULL); 3135 3136 return ret; 3137 } 3138 3139 static void 3140 ixgbe_read_stats_registers(struct ixgbe_hw *hw, 3141 struct ixgbe_hw_stats *hw_stats, 3142 struct ixgbe_macsec_stats *macsec_stats, 3143 uint64_t *total_missed_rx, uint64_t *total_qbrc, 3144 uint64_t *total_qprc, uint64_t *total_qprdc) 3145 { 3146 uint32_t bprc, lxon, lxoff, total; 3147 uint32_t delta_gprc = 0; 3148 unsigned i; 3149 /* Workaround for RX byte count not including CRC bytes when CRC 3150 * strip is enabled. CRC bytes are removed from counters when crc_strip 3151 * is disabled. 
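     * (With stripping disabled each received frame adds RTE_ETHER_CRC_LEN (4)
     * bytes to the hardware byte counters, which the code below subtracts
     * again per packet.)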
3152 */ 3153 int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) & 3154 IXGBE_HLREG0_RXCRCSTRP); 3155 3156 hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 3157 hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); 3158 hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); 3159 hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); 3160 3161 for (i = 0; i < 8; i++) { 3162 uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); 3163 3164 /* global total per queue */ 3165 hw_stats->mpc[i] += mp; 3166 /* Running comprehensive total for stats display */ 3167 *total_missed_rx += hw_stats->mpc[i]; 3168 if (hw->mac.type == ixgbe_mac_82598EB) { 3169 hw_stats->rnbc[i] += 3170 IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 3171 hw_stats->pxonrxc[i] += 3172 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); 3173 hw_stats->pxoffrxc[i] += 3174 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 3175 } else { 3176 hw_stats->pxonrxc[i] += 3177 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); 3178 hw_stats->pxoffrxc[i] += 3179 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); 3180 hw_stats->pxon2offc[i] += 3181 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); 3182 } 3183 hw_stats->pxontxc[i] += 3184 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 3185 hw_stats->pxofftxc[i] += 3186 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 3187 } 3188 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { 3189 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 3190 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 3191 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 3192 3193 delta_gprc += delta_qprc; 3194 3195 hw_stats->qprc[i] += delta_qprc; 3196 hw_stats->qptc[i] += delta_qptc; 3197 3198 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); 3199 hw_stats->qbrc[i] += 3200 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32); 3201 if (crc_strip == 0) 3202 hw_stats->qbrc[i] -= delta_qprc * RTE_ETHER_CRC_LEN; 3203 3204 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); 3205 hw_stats->qbtc[i] += 3206 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32); 3207 3208 hw_stats->qprdc[i] += delta_qprdc; 3209 *total_qprdc += hw_stats->qprdc[i]; 3210 3211 *total_qprc += hw_stats->qprc[i]; 3212 *total_qbrc += hw_stats->qbrc[i]; 3213 } 3214 hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC); 3215 hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); 3216 hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 3217 3218 /* 3219 * An errata states that gprc actually counts good + missed packets: 3220 * Workaround to set gprc to summated queue packet receives 3221 */ 3222 hw_stats->gprc = *total_qprc; 3223 3224 if (hw->mac.type != ixgbe_mac_82598EB) { 3225 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 3226 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); 3227 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 3228 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32); 3229 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); 3230 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); 3231 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 3232 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 3233 } else { 3234 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 3235 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 3236 /* 82598 only has a counter in the high register */ 3237 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 3238 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 3239 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); 3240 } 3241 uint64_t old_tpr = hw_stats->tpr; 3242 3243 hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 
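    /*
     * old_tpr, captured above, provides this interval's packet delta; it is
     * used further down to strip the CRC bytes back out of the total octets
     * received (TOR) counter.
     */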
3244 hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); 3245 3246 if (crc_strip == 0) 3247 hw_stats->gorc -= delta_gprc * RTE_ETHER_CRC_LEN; 3248 3249 uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC); 3250 hw_stats->gptc += delta_gptc; 3251 hw_stats->gotc -= delta_gptc * RTE_ETHER_CRC_LEN; 3252 hw_stats->tor -= (hw_stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN; 3253 3254 /* 3255 * Workaround: mprc hardware is incorrectly counting 3256 * broadcasts, so for now we subtract those. 3257 */ 3258 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 3259 hw_stats->bprc += bprc; 3260 hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 3261 if (hw->mac.type == ixgbe_mac_82598EB) 3262 hw_stats->mprc -= bprc; 3263 3264 hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 3265 hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 3266 hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); 3267 hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 3268 hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 3269 hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 3270 3271 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 3272 hw_stats->lxontxc += lxon; 3273 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 3274 hw_stats->lxofftxc += lxoff; 3275 total = lxon + lxoff; 3276 3277 hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); 3278 hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 3279 hw_stats->gptc -= total; 3280 hw_stats->mptc -= total; 3281 hw_stats->ptc64 -= total; 3282 hw_stats->gotc -= total * RTE_ETHER_MIN_LEN; 3283 3284 hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 3285 hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 3286 hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); 3287 hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 3288 hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); 3289 hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); 3290 hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); 3291 hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 3292 hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 3293 hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 3294 hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 3295 hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); 3296 hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 3297 hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC); 3298 hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); 3299 hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); 3300 /* Only read FCOE on 82599 */ 3301 if (hw->mac.type != ixgbe_mac_82598EB) { 3302 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 3303 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 3304 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 3305 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 3306 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 3307 } 3308 3309 /* Flow Director Stats registers */ 3310 if (hw->mac.type != ixgbe_mac_82598EB) { 3311 hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 3312 hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); 3313 hw_stats->fdirustat_add += IXGBE_READ_REG(hw, 3314 IXGBE_FDIRUSTAT) & 0xFFFF; 3315 hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw, 3316 IXGBE_FDIRUSTAT) >> 16) & 0xFFFF; 3317 hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw, 3318 IXGBE_FDIRFSTAT) & 0xFFFF; 3319 hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw, 3320 IXGBE_FDIRFSTAT) >> 16) & 0xFFFF; 3321 } 3322 /* MACsec Stats registers */ 3323 macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT); 3324 macsec_stats->out_pkts_encrypted += 
3325 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE); 3326 macsec_stats->out_pkts_protected += 3327 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP); 3328 macsec_stats->out_octets_encrypted += 3329 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE); 3330 macsec_stats->out_octets_protected += 3331 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP); 3332 macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT); 3333 macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD); 3334 macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI); 3335 macsec_stats->in_pkts_unknownsci += 3336 IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI); 3337 macsec_stats->in_octets_decrypted += 3338 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD); 3339 macsec_stats->in_octets_validated += 3340 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV); 3341 macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH); 3342 macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY); 3343 macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE); 3344 for (i = 0; i < 2; i++) { 3345 macsec_stats->in_pkts_ok += 3346 IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i)); 3347 macsec_stats->in_pkts_invalid += 3348 IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i)); 3349 macsec_stats->in_pkts_notvalid += 3350 IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i)); 3351 } 3352 macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA); 3353 macsec_stats->in_pkts_notusingsa += 3354 IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA); 3355 } 3356 3357 /* 3358 * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c 3359 */ 3360 static int 3361 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 3362 { 3363 struct ixgbe_hw *hw = 3364 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3365 struct ixgbe_hw_stats *hw_stats = 3366 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3367 struct ixgbe_macsec_stats *macsec_stats = 3368 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3369 dev->data->dev_private); 3370 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3371 unsigned i; 3372 3373 total_missed_rx = 0; 3374 total_qbrc = 0; 3375 total_qprc = 0; 3376 total_qprdc = 0; 3377 3378 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, 3379 &total_qbrc, &total_qprc, &total_qprdc); 3380 3381 if (stats == NULL) 3382 return -EINVAL; 3383 3384 /* Fill out the rte_eth_stats statistics structure */ 3385 stats->ipackets = total_qprc; 3386 stats->ibytes = total_qbrc; 3387 stats->opackets = hw_stats->gptc; 3388 stats->obytes = hw_stats->gotc; 3389 3390 for (i = 0; i < RTE_MIN_T(IXGBE_QUEUE_STAT_COUNTERS, 3391 RTE_ETHDEV_QUEUE_STAT_CNTRS, typeof(i)); i++) { 3392 stats->q_ipackets[i] = hw_stats->qprc[i]; 3393 stats->q_opackets[i] = hw_stats->qptc[i]; 3394 stats->q_ibytes[i] = hw_stats->qbrc[i]; 3395 stats->q_obytes[i] = hw_stats->qbtc[i]; 3396 stats->q_errors[i] = hw_stats->qprdc[i]; 3397 } 3398 3399 /* Rx Errors */ 3400 stats->imissed = total_missed_rx; 3401 stats->ierrors = hw_stats->crcerrs + 3402 hw_stats->mspdc + 3403 hw_stats->rlec + 3404 hw_stats->ruc + 3405 hw_stats->roc + 3406 hw_stats->illerrc + 3407 hw_stats->errbc + 3408 hw_stats->rfc + 3409 hw_stats->fccrc + 3410 hw_stats->fclast; 3411 3412 /* 3413 * 82599 errata, UDP frames with a 0 checksum can be marked as checksum 3414 * errors. 
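 * Because of that erratum, the XEC (checksum error) counter below is only
 * added into ierrors for MAC types other than 82599EB.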
3415 */ 3416 if (hw->mac.type != ixgbe_mac_82599EB) 3417 stats->ierrors += hw_stats->xec; 3418 3419 /* Tx Errors */ 3420 stats->oerrors = 0; 3421 return 0; 3422 } 3423 3424 static int 3425 ixgbe_dev_stats_reset(struct rte_eth_dev *dev) 3426 { 3427 struct ixgbe_hw_stats *stats = 3428 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3429 3430 /* HW registers are cleared on read */ 3431 ixgbe_dev_stats_get(dev, NULL); 3432 3433 /* Reset software totals */ 3434 memset(stats, 0, sizeof(*stats)); 3435 3436 return 0; 3437 } 3438 3439 /* This function calculates the number of xstats based on the current config */ 3440 static unsigned 3441 ixgbe_xstats_calc_num(void) { 3442 return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS + 3443 (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) + 3444 (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES); 3445 } 3446 3447 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 3448 struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size) 3449 { 3450 const unsigned cnt_stats = ixgbe_xstats_calc_num(); 3451 unsigned stat, i, count; 3452 3453 if (xstats_names != NULL) { 3454 count = 0; 3455 3456 /* Note: limit >= cnt_stats checked upstream 3457 * in rte_eth_xstats_names() 3458 */ 3459 3460 /* Extended stats from ixgbe_hw_stats */ 3461 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3462 strlcpy(xstats_names[count].name, 3463 rte_ixgbe_stats_strings[i].name, 3464 sizeof(xstats_names[count].name)); 3465 count++; 3466 } 3467 3468 /* MACsec Stats */ 3469 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3470 strlcpy(xstats_names[count].name, 3471 rte_ixgbe_macsec_strings[i].name, 3472 sizeof(xstats_names[count].name)); 3473 count++; 3474 } 3475 3476 /* RX Priority Stats */ 3477 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3478 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3479 snprintf(xstats_names[count].name, 3480 sizeof(xstats_names[count].name), 3481 "rx_priority%u_%s", i, 3482 rte_ixgbe_rxq_strings[stat].name); 3483 count++; 3484 } 3485 } 3486 3487 /* TX Priority Stats */ 3488 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3489 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3490 snprintf(xstats_names[count].name, 3491 sizeof(xstats_names[count].name), 3492 "tx_priority%u_%s", i, 3493 rte_ixgbe_txq_strings[stat].name); 3494 count++; 3495 } 3496 } 3497 } 3498 return cnt_stats; 3499 } 3500 3501 static int ixgbe_dev_xstats_get_names_by_id( 3502 struct rte_eth_dev *dev, 3503 const uint64_t *ids, 3504 struct rte_eth_xstat_name *xstats_names, 3505 unsigned int limit) 3506 { 3507 if (!ids) { 3508 const unsigned int cnt_stats = ixgbe_xstats_calc_num(); 3509 unsigned int stat, i, count; 3510 3511 if (xstats_names != NULL) { 3512 count = 0; 3513 3514 /* Note: limit >= cnt_stats checked upstream 3515 * in rte_eth_xstats_names() 3516 */ 3517 3518 /* Extended stats from ixgbe_hw_stats */ 3519 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3520 strlcpy(xstats_names[count].name, 3521 rte_ixgbe_stats_strings[i].name, 3522 sizeof(xstats_names[count].name)); 3523 count++; 3524 } 3525 3526 /* MACsec Stats */ 3527 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3528 strlcpy(xstats_names[count].name, 3529 rte_ixgbe_macsec_strings[i].name, 3530 sizeof(xstats_names[count].name)); 3531 count++; 3532 } 3533 3534 /* RX Priority Stats */ 3535 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3536 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3537 snprintf(xstats_names[count].name, 3538 sizeof(xstats_names[count].name), 3539 
"rx_priority%u_%s", i, 3540 rte_ixgbe_rxq_strings[stat].name); 3541 count++; 3542 } 3543 } 3544 3545 /* TX Priority Stats */ 3546 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3547 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3548 snprintf(xstats_names[count].name, 3549 sizeof(xstats_names[count].name), 3550 "tx_priority%u_%s", i, 3551 rte_ixgbe_txq_strings[stat].name); 3552 count++; 3553 } 3554 } 3555 } 3556 return cnt_stats; 3557 } 3558 3559 uint16_t i; 3560 uint16_t size = ixgbe_xstats_calc_num(); 3561 struct rte_eth_xstat_name xstats_names_copy[size]; 3562 3563 ixgbe_dev_xstats_get_names_by_id(dev, NULL, xstats_names_copy, 3564 size); 3565 3566 for (i = 0; i < limit; i++) { 3567 if (ids[i] >= size) { 3568 PMD_INIT_LOG(ERR, "id value isn't valid"); 3569 return -1; 3570 } 3571 strcpy(xstats_names[i].name, 3572 xstats_names_copy[ids[i]].name); 3573 } 3574 return limit; 3575 } 3576 3577 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 3578 struct rte_eth_xstat_name *xstats_names, unsigned limit) 3579 { 3580 unsigned i; 3581 3582 if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL) 3583 return -ENOMEM; 3584 3585 if (xstats_names != NULL) 3586 for (i = 0; i < IXGBEVF_NB_XSTATS; i++) 3587 strlcpy(xstats_names[i].name, 3588 rte_ixgbevf_stats_strings[i].name, 3589 sizeof(xstats_names[i].name)); 3590 return IXGBEVF_NB_XSTATS; 3591 } 3592 3593 static int 3594 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 3595 unsigned n) 3596 { 3597 struct ixgbe_hw *hw = 3598 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3599 struct ixgbe_hw_stats *hw_stats = 3600 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3601 struct ixgbe_macsec_stats *macsec_stats = 3602 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3603 dev->data->dev_private); 3604 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3605 unsigned i, stat, count = 0; 3606 3607 count = ixgbe_xstats_calc_num(); 3608 3609 if (n < count) 3610 return count; 3611 3612 total_missed_rx = 0; 3613 total_qbrc = 0; 3614 total_qprc = 0; 3615 total_qprdc = 0; 3616 3617 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, 3618 &total_qbrc, &total_qprc, &total_qprdc); 3619 3620 /* If this is a reset xstats is NULL, and we have cleared the 3621 * registers by reading them. 
3622 */ 3623 if (!xstats) 3624 return 0; 3625 3626 /* Extended stats from ixgbe_hw_stats */ 3627 count = 0; 3628 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3629 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3630 rte_ixgbe_stats_strings[i].offset); 3631 xstats[count].id = count; 3632 count++; 3633 } 3634 3635 /* MACsec Stats */ 3636 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3637 xstats[count].value = *(uint64_t *)(((char *)macsec_stats) + 3638 rte_ixgbe_macsec_strings[i].offset); 3639 xstats[count].id = count; 3640 count++; 3641 } 3642 3643 /* RX Priority Stats */ 3644 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3645 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3646 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3647 rte_ixgbe_rxq_strings[stat].offset + 3648 (sizeof(uint64_t) * i)); 3649 xstats[count].id = count; 3650 count++; 3651 } 3652 } 3653 3654 /* TX Priority Stats */ 3655 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3656 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3657 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3658 rte_ixgbe_txq_strings[stat].offset + 3659 (sizeof(uint64_t) * i)); 3660 xstats[count].id = count; 3661 count++; 3662 } 3663 } 3664 return count; 3665 } 3666 3667 static int 3668 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 3669 uint64_t *values, unsigned int n) 3670 { 3671 if (!ids) { 3672 struct ixgbe_hw *hw = 3673 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3674 struct ixgbe_hw_stats *hw_stats = 3675 IXGBE_DEV_PRIVATE_TO_STATS( 3676 dev->data->dev_private); 3677 struct ixgbe_macsec_stats *macsec_stats = 3678 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3679 dev->data->dev_private); 3680 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3681 unsigned int i, stat, count = 0; 3682 3683 count = ixgbe_xstats_calc_num(); 3684 3685 if (!ids && n < count) 3686 return count; 3687 3688 total_missed_rx = 0; 3689 total_qbrc = 0; 3690 total_qprc = 0; 3691 total_qprdc = 0; 3692 3693 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, 3694 &total_missed_rx, &total_qbrc, &total_qprc, 3695 &total_qprdc); 3696 3697 /* If this is a reset xstats is NULL, and we have cleared the 3698 * registers by reading them. 
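 * When ids are supplied, the caller normally drives this path through the
 * ethdev API: fetch the names once with rte_eth_xstats_get_names_by_id()
 * and then read the selected counters, e.g. (illustrative sketch, port_id
 * and the id values are placeholders):
 *
 *     uint64_t ids[] = { 0, 5 };
 *     uint64_t vals[2];
 *     rte_eth_xstats_get_by_id(port_id, ids, vals, 2);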
3699 */ 3700 if (!ids && !values) 3701 return 0; 3702 3703 /* Extended stats from ixgbe_hw_stats */ 3704 count = 0; 3705 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3706 values[count] = *(uint64_t *)(((char *)hw_stats) + 3707 rte_ixgbe_stats_strings[i].offset); 3708 count++; 3709 } 3710 3711 /* MACsec Stats */ 3712 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3713 values[count] = *(uint64_t *)(((char *)macsec_stats) + 3714 rte_ixgbe_macsec_strings[i].offset); 3715 count++; 3716 } 3717 3718 /* RX Priority Stats */ 3719 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3720 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3721 values[count] = 3722 *(uint64_t *)(((char *)hw_stats) + 3723 rte_ixgbe_rxq_strings[stat].offset + 3724 (sizeof(uint64_t) * i)); 3725 count++; 3726 } 3727 } 3728 3729 /* TX Priority Stats */ 3730 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3731 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3732 values[count] = 3733 *(uint64_t *)(((char *)hw_stats) + 3734 rte_ixgbe_txq_strings[stat].offset + 3735 (sizeof(uint64_t) * i)); 3736 count++; 3737 } 3738 } 3739 return count; 3740 } 3741 3742 uint16_t i; 3743 uint16_t size = ixgbe_xstats_calc_num(); 3744 uint64_t values_copy[size]; 3745 3746 ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size); 3747 3748 for (i = 0; i < n; i++) { 3749 if (ids[i] >= size) { 3750 PMD_INIT_LOG(ERR, "id value isn't valid"); 3751 return -1; 3752 } 3753 values[i] = values_copy[ids[i]]; 3754 } 3755 return n; 3756 } 3757 3758 static int 3759 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev) 3760 { 3761 struct ixgbe_hw_stats *stats = 3762 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3763 struct ixgbe_macsec_stats *macsec_stats = 3764 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3765 dev->data->dev_private); 3766 3767 unsigned count = ixgbe_xstats_calc_num(); 3768 3769 /* HW registers are cleared on read */ 3770 ixgbe_dev_xstats_get(dev, NULL, count); 3771 3772 /* Reset software totals */ 3773 memset(stats, 0, sizeof(*stats)); 3774 memset(macsec_stats, 0, sizeof(*macsec_stats)); 3775 3776 return 0; 3777 } 3778 3779 static void 3780 ixgbevf_update_stats(struct rte_eth_dev *dev) 3781 { 3782 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3783 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3784 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3785 3786 /* Good Rx packet, include VF loopback */ 3787 UPDATE_VF_STAT(IXGBE_VFGPRC, 3788 hw_stats->last_vfgprc, hw_stats->vfgprc); 3789 3790 /* Good Rx octets, include VF loopback */ 3791 UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 3792 hw_stats->last_vfgorc, hw_stats->vfgorc); 3793 3794 /* Good Tx packet, include VF loopback */ 3795 UPDATE_VF_STAT(IXGBE_VFGPTC, 3796 hw_stats->last_vfgptc, hw_stats->vfgptc); 3797 3798 /* Good Tx octets, include VF loopback */ 3799 UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 3800 hw_stats->last_vfgotc, hw_stats->vfgotc); 3801 3802 /* Rx Multicst Packet */ 3803 UPDATE_VF_STAT(IXGBE_VFMPRC, 3804 hw_stats->last_vfmprc, hw_stats->vfmprc); 3805 } 3806 3807 static int 3808 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 3809 unsigned n) 3810 { 3811 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3812 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3813 unsigned i; 3814 3815 if (n < IXGBEVF_NB_XSTATS) 3816 return IXGBEVF_NB_XSTATS; 3817 3818 ixgbevf_update_stats(dev); 3819 3820 if (!xstats) 3821 return 0; 3822 3823 /* Extended stats */ 3824 for (i = 0; i < 
IXGBEVF_NB_XSTATS; i++) { 3825 xstats[i].id = i; 3826 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + 3827 rte_ixgbevf_stats_strings[i].offset); 3828 } 3829 3830 return IXGBEVF_NB_XSTATS; 3831 } 3832 3833 static int 3834 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 3835 { 3836 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3837 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3838 3839 ixgbevf_update_stats(dev); 3840 3841 if (stats == NULL) 3842 return -EINVAL; 3843 3844 stats->ipackets = hw_stats->vfgprc; 3845 stats->ibytes = hw_stats->vfgorc; 3846 stats->opackets = hw_stats->vfgptc; 3847 stats->obytes = hw_stats->vfgotc; 3848 return 0; 3849 } 3850 3851 static int 3852 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) 3853 { 3854 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3855 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3856 3857 /* Sync HW register to the last stats */ 3858 ixgbevf_dev_stats_get(dev, NULL); 3859 3860 /* reset HW current stats*/ 3861 hw_stats->vfgprc = 0; 3862 hw_stats->vfgorc = 0; 3863 hw_stats->vfgptc = 0; 3864 hw_stats->vfgotc = 0; 3865 hw_stats->vfmprc = 0; 3866 3867 return 0; 3868 } 3869 3870 static int 3871 ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 3872 { 3873 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3874 struct ixgbe_nvm_version nvm_ver; 3875 int ret; 3876 3877 ixgbe_get_oem_prod_version(hw, &nvm_ver); 3878 if (nvm_ver.oem_valid) { 3879 snprintf(fw_version, fw_size, "%x.%x.%x", 3880 nvm_ver.oem_major, nvm_ver.oem_minor, 3881 nvm_ver.oem_release); 3882 return 0; 3883 } 3884 3885 ixgbe_get_etk_id(hw, &nvm_ver); 3886 ixgbe_get_orom_version(hw, &nvm_ver); 3887 3888 if (nvm_ver.or_valid) { 3889 snprintf(fw_version, fw_size, "0x%08x, %d.%d.%d", 3890 nvm_ver.etk_id, nvm_ver.or_major, 3891 nvm_ver.or_build, nvm_ver.or_patch); 3892 return 0; 3893 } 3894 3895 ret = snprintf(fw_version, fw_size, "0x%08x", nvm_ver.etk_id); 3896 if (ret < 0) 3897 return -EINVAL; 3898 3899 return (fw_size < (size_t)ret++) ? ret : 0; 3900 } 3901 3902 static int 3903 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 3904 { 3905 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3906 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3907 struct rte_eth_conf *dev_conf = &dev->data->dev_conf; 3908 3909 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 3910 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 3911 if (RTE_ETH_DEV_SRIOV(dev).active == 0) { 3912 /* 3913 * When DCB/VT is off, maximum number of queues changes, 3914 * except for 82598EB, which remains constant. 
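 * In practice: when SR-IOV is inactive and txmode.mq_mode is
 * RTE_ETH_MQ_TX_NONE, MACs other than 82598EB report
 * IXGBE_NONE_MODE_TX_NB_QUEUES Tx queues below instead of the
 * hw->mac.max_tx_queues value filled in above.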
3915 */ 3916 if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE && 3917 hw->mac.type != ixgbe_mac_82598EB) 3918 dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES; 3919 } 3920 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */ 3921 dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */ 3922 dev_info->max_mac_addrs = hw->mac.num_rar_entries; 3923 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; 3924 dev_info->max_vfs = pci_dev->max_vfs; 3925 if (hw->mac.type == ixgbe_mac_82598EB) 3926 dev_info->max_vmdq_pools = RTE_ETH_16_POOLS; 3927 else 3928 dev_info->max_vmdq_pools = RTE_ETH_64_POOLS; 3929 dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD; 3930 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 3931 dev_info->vmdq_queue_num = dev_info->max_rx_queues; 3932 dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); 3933 dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | 3934 dev_info->rx_queue_offload_capa); 3935 dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); 3936 dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); 3937 3938 dev_info->default_rxconf = (struct rte_eth_rxconf) { 3939 .rx_thresh = { 3940 .pthresh = IXGBE_DEFAULT_RX_PTHRESH, 3941 .hthresh = IXGBE_DEFAULT_RX_HTHRESH, 3942 .wthresh = IXGBE_DEFAULT_RX_WTHRESH, 3943 }, 3944 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, 3945 .rx_drop_en = 0, 3946 .offloads = 0, 3947 }; 3948 3949 dev_info->default_txconf = (struct rte_eth_txconf) { 3950 .tx_thresh = { 3951 .pthresh = IXGBE_DEFAULT_TX_PTHRESH, 3952 .hthresh = IXGBE_DEFAULT_TX_HTHRESH, 3953 .wthresh = IXGBE_DEFAULT_TX_WTHRESH, 3954 }, 3955 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, 3956 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, 3957 .offloads = 0, 3958 }; 3959 3960 dev_info->rx_desc_lim = rx_desc_lim; 3961 dev_info->tx_desc_lim = tx_desc_lim; 3962 3963 dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); 3964 dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); 3965 dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; 3966 3967 dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G; 3968 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || 3969 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) 3970 dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M | 3971 RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G; 3972 3973 if (hw->mac.type == ixgbe_mac_X540 || 3974 hw->mac.type == ixgbe_mac_X540_vf || 3975 hw->mac.type == ixgbe_mac_X550 || 3976 hw->mac.type == ixgbe_mac_X550_vf) { 3977 dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M; 3978 } 3979 if (hw->mac.type == ixgbe_mac_X550) { 3980 dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G; 3981 dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G; 3982 } 3983 3984 /* Driver-preferred Rx/Tx parameters */ 3985 dev_info->default_rxportconf.burst_size = 32; 3986 dev_info->default_txportconf.burst_size = 32; 3987 dev_info->default_rxportconf.nb_queues = 1; 3988 dev_info->default_txportconf.nb_queues = 1; 3989 dev_info->default_rxportconf.ring_size = 256; 3990 dev_info->default_txportconf.ring_size = 256; 3991 3992 return 0; 3993 } 3994 3995 static const uint32_t * 3996 ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements) 3997 { 3998 static const uint32_t ptypes[] = { 3999 /* For non-vec functions, 4000 * refers to ixgbe_rxd_pkt_info_to_pkt_type(); 4001 * for vec functions, 4002 * refers to _recv_raw_pkts_vec(). 
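 * The same table is returned for the scalar receive paths checked first
 * below and, on x86 and Arm NEON builds, for the vector receive paths.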
4003 */ 4004 RTE_PTYPE_L2_ETHER, 4005 RTE_PTYPE_L3_IPV4, 4006 RTE_PTYPE_L3_IPV4_EXT, 4007 RTE_PTYPE_L3_IPV6, 4008 RTE_PTYPE_L3_IPV6_EXT, 4009 RTE_PTYPE_L4_SCTP, 4010 RTE_PTYPE_L4_TCP, 4011 RTE_PTYPE_L4_UDP, 4012 RTE_PTYPE_TUNNEL_IP, 4013 RTE_PTYPE_INNER_L3_IPV6, 4014 RTE_PTYPE_INNER_L3_IPV6_EXT, 4015 RTE_PTYPE_INNER_L4_TCP, 4016 RTE_PTYPE_INNER_L4_UDP, 4017 }; 4018 4019 if (dev->rx_pkt_burst == ixgbe_recv_pkts || 4020 dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc || 4021 dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc || 4022 dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc) { 4023 *no_of_elements = RTE_DIM(ptypes); 4024 return ptypes; 4025 } 4026 4027 #if defined(RTE_ARCH_X86) || defined(__ARM_NEON) 4028 if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec || 4029 dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec) { 4030 *no_of_elements = RTE_DIM(ptypes); 4031 return ptypes; 4032 } 4033 #endif 4034 return NULL; 4035 } 4036 4037 static int 4038 ixgbevf_dev_info_get(struct rte_eth_dev *dev, 4039 struct rte_eth_dev_info *dev_info) 4040 { 4041 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 4042 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4043 4044 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 4045 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 4046 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */ 4047 dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */ 4048 dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD; 4049 dev_info->max_mac_addrs = hw->mac.num_rar_entries; 4050 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; 4051 dev_info->max_vfs = pci_dev->max_vfs; 4052 if (hw->mac.type == ixgbe_mac_82598EB) 4053 dev_info->max_vmdq_pools = RTE_ETH_16_POOLS; 4054 else 4055 dev_info->max_vmdq_pools = RTE_ETH_64_POOLS; 4056 dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); 4057 dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | 4058 dev_info->rx_queue_offload_capa); 4059 dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); 4060 dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); 4061 dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); 4062 dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); 4063 dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; 4064 4065 dev_info->default_rxconf = (struct rte_eth_rxconf) { 4066 .rx_thresh = { 4067 .pthresh = IXGBE_DEFAULT_RX_PTHRESH, 4068 .hthresh = IXGBE_DEFAULT_RX_HTHRESH, 4069 .wthresh = IXGBE_DEFAULT_RX_WTHRESH, 4070 }, 4071 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, 4072 .rx_drop_en = 0, 4073 .offloads = 0, 4074 }; 4075 4076 dev_info->default_txconf = (struct rte_eth_txconf) { 4077 .tx_thresh = { 4078 .pthresh = IXGBE_DEFAULT_TX_PTHRESH, 4079 .hthresh = IXGBE_DEFAULT_TX_HTHRESH, 4080 .wthresh = IXGBE_DEFAULT_TX_WTHRESH, 4081 }, 4082 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, 4083 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, 4084 .offloads = 0, 4085 }; 4086 4087 dev_info->rx_desc_lim = rx_desc_lim; 4088 dev_info->tx_desc_lim = tx_desc_lim; 4089 4090 dev_info->err_handle_mode = RTE_ETH_ERROR_HANDLE_MODE_PASSIVE; 4091 4092 return 0; 4093 } 4094 4095 static int 4096 ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 4097 bool *link_up, int wait_to_complete) 4098 { 4099 struct ixgbe_adapter *adapter = container_of(hw, 4100 struct ixgbe_adapter, hw); 4101 struct ixgbe_mbx_info *mbx = &hw->mbx; 4102 struct ixgbe_mac_info *mac = &hw->mac; 4103 
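	/*
	 * The VF link check below proceeds in stages: detect a PF reset via
	 * the mailbox, sample IXGBE_VFLINKS for link state and speed, and
	 * then, unless the fast path applies (no wait requested and
	 * pflink_fullchk disabled), confirm over the mailbox that the PF is
	 * still responsive before reporting the link as up.
	 */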
uint32_t links_reg, in_msg; 4104 int ret_val = 0; 4105 4106 /* If we were hit with a reset drop the link */ 4107 if (!mbx->ops[0].check_for_rst(hw, 0) || !mbx->timeout) 4108 mac->get_link_status = true; 4109 4110 if (!mac->get_link_status) 4111 goto out; 4112 4113 /* if link status is down no point in checking to see if pf is up */ 4114 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); 4115 if (!(links_reg & IXGBE_LINKS_UP)) 4116 goto out; 4117 4118 /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs 4119 * before the link status is correct 4120 */ 4121 if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) { 4122 int i; 4123 4124 for (i = 0; i < 5; i++) { 4125 rte_delay_us(100); 4126 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); 4127 4128 if (!(links_reg & IXGBE_LINKS_UP)) 4129 goto out; 4130 } 4131 } 4132 4133 switch (links_reg & IXGBE_LINKS_SPEED_82599) { 4134 case IXGBE_LINKS_SPEED_10G_82599: 4135 *speed = IXGBE_LINK_SPEED_10GB_FULL; 4136 if (hw->mac.type >= ixgbe_mac_X550) { 4137 if (links_reg & IXGBE_LINKS_SPEED_NON_STD) 4138 *speed = IXGBE_LINK_SPEED_2_5GB_FULL; 4139 } 4140 break; 4141 case IXGBE_LINKS_SPEED_1G_82599: 4142 *speed = IXGBE_LINK_SPEED_1GB_FULL; 4143 break; 4144 case IXGBE_LINKS_SPEED_100_82599: 4145 *speed = IXGBE_LINK_SPEED_100_FULL; 4146 if (hw->mac.type == ixgbe_mac_X550) { 4147 if (links_reg & IXGBE_LINKS_SPEED_NON_STD) 4148 *speed = IXGBE_LINK_SPEED_5GB_FULL; 4149 } 4150 break; 4151 case IXGBE_LINKS_SPEED_10_X550EM_A: 4152 *speed = IXGBE_LINK_SPEED_UNKNOWN; 4153 /* Since Reserved in older MAC's */ 4154 if (hw->mac.type >= ixgbe_mac_X550) 4155 *speed = IXGBE_LINK_SPEED_10_FULL; 4156 break; 4157 default: 4158 *speed = IXGBE_LINK_SPEED_UNKNOWN; 4159 } 4160 4161 if (wait_to_complete == 0 && adapter->pflink_fullchk == 0) { 4162 if (*speed == IXGBE_LINK_SPEED_UNKNOWN) 4163 mac->get_link_status = true; 4164 else 4165 mac->get_link_status = false; 4166 4167 goto out; 4168 } 4169 4170 /* if the read failed it could just be a mailbox collision, best wait 4171 * until we are called again and don't report an error 4172 */ 4173 4174 /* 4175 * on MinGW, the read op call is interpreted as call into read() macro, 4176 * so avoid calling it directly. 4177 */ 4178 if ((mbx->ops[0].read)(hw, &in_msg, 1, 0)) 4179 goto out; 4180 4181 if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) { 4182 /* msg is not CTS and is FAILURE we must have lost CTS status */ 4183 if (in_msg & IXGBE_VT_MSGTYPE_FAILURE) 4184 mac->get_link_status = false; 4185 goto out; 4186 } 4187 4188 /* the pf is talking, if we timed out in the past we reinit */ 4189 if (!mbx->timeout) { 4190 ret_val = -1; 4191 goto out; 4192 } 4193 4194 /* if we passed all the tests above then the link is up and we no 4195 * longer need to check for link 4196 */ 4197 mac->get_link_status = false; 4198 4199 out: 4200 *link_up = !mac->get_link_status; 4201 return ret_val; 4202 } 4203 4204 /* 4205 * If @timeout_ms was 0, it means that it will not return until link complete. 4206 * It returns 1 on complete, return 0 on timeout. 4207 */ 4208 static int 4209 ixgbe_dev_wait_setup_link_complete(struct rte_eth_dev *dev, uint32_t timeout_ms) 4210 { 4211 #define WARNING_TIMEOUT 9000 /* 9s in total */ 4212 struct ixgbe_adapter *ad = dev->data->dev_private; 4213 uint32_t timeout = timeout_ms ? 
timeout_ms : WARNING_TIMEOUT; 4214 4215 /* NOTE: review for potential ordering optimization */ 4216 while (rte_atomic_load_explicit(&ad->link_thread_running, rte_memory_order_seq_cst)) { 4217 msec_delay(1); 4218 timeout--; 4219 4220 if (timeout_ms) { 4221 if (!timeout) 4222 return 0; 4223 } else if (!timeout) { 4224 /* It will not return until link complete */ 4225 timeout = WARNING_TIMEOUT; 4226 PMD_DRV_LOG(ERR, "IXGBE link thread not complete too long time!"); 4227 } 4228 } 4229 4230 return 1; 4231 } 4232 4233 static uint32_t 4234 ixgbe_dev_setup_link_thread_handler(void *param) 4235 { 4236 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 4237 struct ixgbe_adapter *ad = dev->data->dev_private; 4238 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4239 struct ixgbe_interrupt *intr = 4240 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4241 u32 speed; 4242 bool autoneg = false; 4243 4244 rte_thread_detach(rte_thread_self()); 4245 speed = hw->phy.autoneg_advertised; 4246 if (!speed) 4247 ixgbe_get_link_capabilities(hw, &speed, &autoneg); 4248 4249 ixgbe_setup_link(hw, speed, true); 4250 4251 intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; 4252 /* NOTE: review for potential ordering optimization */ 4253 rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst); 4254 return 0; 4255 } 4256 4257 /* 4258 * In freebsd environment, nic_uio drivers do not support interrupts, 4259 * rte_intr_callback_register() will fail to register interrupts. 4260 * We can not make link status to change from down to up by interrupt 4261 * callback. So we need to wait for the controller to acquire link 4262 * when ports start. 4263 * It returns 0 on link up. 4264 */ 4265 static int 4266 ixgbe_wait_for_link_up(struct ixgbe_hw *hw) 4267 { 4268 #ifdef RTE_EXEC_ENV_FREEBSD 4269 int err, i; 4270 bool link_up = false; 4271 uint32_t speed = 0; 4272 const int nb_iter = 25; 4273 4274 for (i = 0; i < nb_iter; i++) { 4275 err = ixgbe_check_link(hw, &speed, &link_up, 0); 4276 if (err) 4277 return err; 4278 if (link_up) 4279 return 0; 4280 msec_delay(200); 4281 } 4282 4283 return 0; 4284 #else 4285 RTE_SET_USED(hw); 4286 return 0; 4287 #endif 4288 } 4289 4290 /* return 0 means link status changed, -1 means not changed */ 4291 int 4292 ixgbe_dev_link_update_share(struct rte_eth_dev *dev, 4293 int wait_to_complete, int vf) 4294 { 4295 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4296 struct ixgbe_adapter *ad = dev->data->dev_private; 4297 struct rte_eth_link link; 4298 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; 4299 struct ixgbe_interrupt *intr = 4300 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4301 bool link_up; 4302 int diag; 4303 int wait = 1; 4304 u32 esdp_reg; 4305 4306 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 4307 return -1; 4308 4309 memset(&link, 0, sizeof(link)); 4310 link.link_status = RTE_ETH_LINK_DOWN; 4311 link.link_speed = RTE_ETH_SPEED_NUM_NONE; 4312 link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX; 4313 link.link_autoneg = !(dev->data->dev_conf.link_speeds & 4314 RTE_ETH_LINK_SPEED_FIXED); 4315 4316 hw->mac.get_link_status = true; 4317 4318 if (intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) 4319 return rte_eth_linkstatus_set(dev, &link); 4320 4321 /* check if it needs to wait to complete, if lsc interrupt is enabled */ 4322 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) 4323 wait = 0; 4324 4325 if (vf) 4326 diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait); 4327 else 4328 diag = 
ixgbe_check_link(hw, &link_speed, &link_up, wait); 4329 4330 if (diag != 0) { 4331 link.link_speed = RTE_ETH_SPEED_NUM_100M; 4332 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 4333 return rte_eth_linkstatus_set(dev, &link); 4334 } 4335 4336 if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber && 4337 !ad->sdp3_no_tx_disable) { 4338 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 4339 if ((esdp_reg & IXGBE_ESDP_SDP3)) 4340 link_up = 0; 4341 } 4342 4343 if (link_up == 0) { 4344 if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) { 4345 ixgbe_dev_wait_setup_link_complete(dev, 0); 4346 /* NOTE: review for potential ordering optimization */ 4347 if (!rte_atomic_exchange_explicit(&ad->link_thread_running, 1, 4348 rte_memory_order_seq_cst)) { 4349 /* To avoid race condition between threads, set 4350 * the IXGBE_FLAG_NEED_LINK_CONFIG flag only 4351 * when there is no link thread running. 4352 */ 4353 intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; 4354 if (rte_thread_create_internal_control(&ad->link_thread_tid, 4355 "ixgbe-link", 4356 ixgbe_dev_setup_link_thread_handler, dev) < 0) { 4357 PMD_DRV_LOG(ERR, 4358 "Create link thread failed!"); 4359 /* NOTE: review for potential ordering optimization */ 4360 rte_atomic_store_explicit(&ad->link_thread_running, 0, 4361 rte_memory_order_seq_cst); 4362 } 4363 } else { 4364 PMD_DRV_LOG(ERR, 4365 "Other link thread is running now!"); 4366 } 4367 } 4368 return rte_eth_linkstatus_set(dev, &link); 4369 } 4370 4371 link.link_status = RTE_ETH_LINK_UP; 4372 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 4373 4374 switch (link_speed) { 4375 default: 4376 case IXGBE_LINK_SPEED_UNKNOWN: 4377 link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN; 4378 break; 4379 4380 case IXGBE_LINK_SPEED_10_FULL: 4381 link.link_speed = RTE_ETH_SPEED_NUM_10M; 4382 break; 4383 4384 case IXGBE_LINK_SPEED_100_FULL: 4385 link.link_speed = RTE_ETH_SPEED_NUM_100M; 4386 break; 4387 4388 case IXGBE_LINK_SPEED_1GB_FULL: 4389 link.link_speed = RTE_ETH_SPEED_NUM_1G; 4390 break; 4391 4392 case IXGBE_LINK_SPEED_2_5GB_FULL: 4393 link.link_speed = RTE_ETH_SPEED_NUM_2_5G; 4394 break; 4395 4396 case IXGBE_LINK_SPEED_5GB_FULL: 4397 link.link_speed = RTE_ETH_SPEED_NUM_5G; 4398 break; 4399 4400 case IXGBE_LINK_SPEED_10GB_FULL: 4401 link.link_speed = RTE_ETH_SPEED_NUM_10G; 4402 break; 4403 } 4404 4405 return rte_eth_linkstatus_set(dev, &link); 4406 } 4407 4408 static int 4409 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 4410 { 4411 return ixgbe_dev_link_update_share(dev, wait_to_complete, 0); 4412 } 4413 4414 static int 4415 ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 4416 { 4417 return ixgbe_dev_link_update_share(dev, wait_to_complete, 1); 4418 } 4419 4420 static int 4421 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) 4422 { 4423 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4424 uint32_t fctrl; 4425 4426 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4427 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 4428 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4429 4430 return 0; 4431 } 4432 4433 static int 4434 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev) 4435 { 4436 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4437 uint32_t fctrl; 4438 4439 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4440 fctrl &= (~IXGBE_FCTRL_UPE); 4441 if (dev->data->all_multicast == 1) 4442 fctrl |= IXGBE_FCTRL_MPE; 4443 else 4444 fctrl &= (~IXGBE_FCTRL_MPE); 4445 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4446 4447 return 0; 4448 } 4449 4450 static int 
4451 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
4452 {
4453 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4454 	uint32_t fctrl;
4455 
4456 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4457 	fctrl |= IXGBE_FCTRL_MPE;
4458 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4459 
4460 	return 0;
4461 }
4462 
4463 static int
4464 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
4465 {
4466 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4467 	uint32_t fctrl;
4468 
4469 	if (dev->data->promiscuous == 1)
4470 		return 0; /* must remain in all_multicast mode */
4471 
4472 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4473 	fctrl &= (~IXGBE_FCTRL_MPE);
4474 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4475 
4476 	return 0;
4477 }
4478 
4479 /**
4480  * It enables or disables the link status change (LSC) interrupt cause
4481  * in the interrupt mask. It is called only once, during NIC initialization.
4482  *
4483  * @param dev
4484  *  Pointer to struct rte_eth_dev.
4485  * @param on
4486  *  Nonzero to enable, zero to disable.
4487  *
4488  * @return
4489  *  - On success, zero.
4490  *  - On failure, a negative value.
4491  */
4492 static int
4493 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
4494 {
4495 	struct ixgbe_interrupt *intr =
4496 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4497 
4498 	ixgbe_dev_link_status_print(dev);
4499 	if (on)
4500 		intr->mask |= IXGBE_EICR_LSC;
4501 	else
4502 		intr->mask &= ~IXGBE_EICR_LSC;
4503 
4504 	return 0;
4505 }
4506 
4507 /**
4508  * It enables the Rx/Tx queue interrupt causes in the interrupt mask.
4509  * It is called only once, during NIC initialization.
4510  *
4511  * @param dev
4512  *  Pointer to struct rte_eth_dev.
4513  *
4514  * @return
4515  *  - On success, zero.
4516  *  - On failure, a negative value.
4517  */
4518 static int
4519 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
4520 {
4521 	struct ixgbe_interrupt *intr =
4522 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4523 
4524 	intr->mask |= IXGBE_EICR_RTX_QUEUE;
4525 
4526 	return 0;
4527 }
4528 
4529 /**
4530  * It enables the MACsec (LinkSec) interrupt cause in the interrupt mask.
4531  * It is called only once, during NIC initialization.
4532  *
4533  * @param dev
4534  *  Pointer to struct rte_eth_dev.
4535  *
4536  * @return
4537  *  - On success, zero.
4538  *  - On failure, a negative value.
4539  */
4540 static int
4541 ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
4542 {
4543 	struct ixgbe_interrupt *intr =
4544 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4545 
4546 	intr->mask |= IXGBE_EICR_LINKSEC;
4547 
4548 	return 0;
4549 }
4550 
4551 /*
4552  * It reads ICR and sets the flag (IXGBE_EICR_LSC) for the link_update.
4553  *
4554  * @param dev
4555  *  Pointer to struct rte_eth_dev.
4556  *
4557  * @return
4558  *  - On success, zero.
4559  *  - On failure, a negative value.
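 *
 * IXGBE_EICR is read-on-clear, so the read below both records and
 * acknowledges the pending interrupt causes before they are translated
 * into intr->flags.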
4560  */
4561 static int
4562 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
4563 {
4564 	uint32_t eicr;
4565 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4566 	struct ixgbe_interrupt *intr =
4567 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4568 
4569 	/* mask all interrupt causes */
4570 	ixgbe_disable_intr(hw);
4571 
4572 	/* read the read-on-clear NIC registers here */
4573 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4574 	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
4575 
4576 	intr->flags = 0;
4577 
4578 	/* set flag for async link update */
4579 	if (eicr & IXGBE_EICR_LSC)
4580 		intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
4581 
4582 	if (eicr & IXGBE_EICR_MAILBOX)
4583 		intr->flags |= IXGBE_FLAG_MAILBOX;
4584 
4585 	if (eicr & IXGBE_EICR_LINKSEC)
4586 		intr->flags |= IXGBE_FLAG_MACSEC;
4587 
4588 	if (hw->mac.type == ixgbe_mac_X550EM_x &&
4589 	    hw->phy.type == ixgbe_phy_x550em_ext_t &&
4590 	    (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
4591 		intr->flags |= IXGBE_FLAG_PHY_INTERRUPT;
4592 
4593 	return 0;
4594 }
4595 
4596 /**
4597  * It reads the current link status and logs it.
4598  *
4599  * @param dev
4600  *  Pointer to struct rte_eth_dev.
4601  *
4602  * @return
4603  *  void; this helper only logs the link status and the PCI address of
4604  *  the port.
4605  */
4606 static void
4607 ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
4608 {
4609 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4610 	struct rte_eth_link link;
4611 
4612 	rte_eth_linkstatus_get(dev, &link);
4613 
4614 	if (link.link_status) {
4615 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
4616 					(int)(dev->data->port_id),
4617 					(unsigned)link.link_speed,
4618 			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
4619 					"full-duplex" : "half-duplex");
4620 	} else {
4621 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
4622 				(int)(dev->data->port_id));
4623 	}
4624 	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
4625 				pci_dev->addr.domain,
4626 				pci_dev->addr.bus,
4627 				pci_dev->addr.devid,
4628 				pci_dev->addr.function);
4629 }
4630 
4631 /*
4632  * It executes link_update after an interrupt has been received.
4633  *
4634  * @param dev
4635  *  Pointer to struct rte_eth_dev.
4636  *
4637  * @return
4638  *  - On success, zero.
4639  *  - On failure, a negative value.
4640  */
4641 static int
4642 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
4643 {
4644 	struct ixgbe_interrupt *intr =
4645 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4646 	int64_t timeout;
4647 	struct ixgbe_hw *hw =
4648 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4649 
4650 	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
4651 
4652 	if (intr->flags & IXGBE_FLAG_MAILBOX) {
4653 		ixgbe_pf_mbx_process(dev);
4654 		intr->flags &= ~IXGBE_FLAG_MAILBOX;
4655 	}
4656 
4657 	if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4658 		ixgbe_handle_lasi(hw);
4659 		intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4660 	}
4661 
4662 	if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4663 		struct rte_eth_link link;
4664 
4665 		/* remember the link status before the update to pick a delay below */
4666 		rte_eth_linkstatus_get(dev, &link);
4667 
4668 		ixgbe_dev_link_update(dev, 0);
4669 
4670 		/* link was down, so it is likely coming up */
4671 		if (!link.link_status)
4672 			/* re-check 1 second later so the state can stabilize */
4673 			timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
4674 		/* link was up, so it is likely going down */
4675 		else
4676 			/* re-check 4 seconds later so the state can stabilize */
4677 			timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
4678 
4679 		ixgbe_dev_link_status_print(dev);
4680 
4681 		/* Don't program the delayed handler if the LSC interrupt is
4682 		 * disabled; that means one is already scheduled.
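		 * When the delayed handler eventually runs it restores intr->mask
		 * from intr->mask_original, which re-enables LSC reporting.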
4683 */ 4684 if (intr->mask & IXGBE_EIMS_LSC) { 4685 if (rte_eal_alarm_set(timeout * 1000, 4686 ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0) 4687 PMD_DRV_LOG(ERR, "Error setting alarm"); 4688 else { 4689 /* remember original mask */ 4690 intr->mask_original = intr->mask; 4691 /* only disable lsc interrupt */ 4692 intr->mask &= ~IXGBE_EIMS_LSC; 4693 } 4694 } 4695 } 4696 4697 PMD_DRV_LOG(DEBUG, "enable intr immediately"); 4698 ixgbe_enable_intr(dev); 4699 4700 return 0; 4701 } 4702 4703 /** 4704 * Interrupt handler which shall be registered for alarm callback for delayed 4705 * handling specific interrupt to wait for the stable nic state. As the 4706 * NIC interrupt state is not stable for ixgbe after link is just down, 4707 * it needs to wait 4 seconds to get the stable status. 4708 * 4709 * @param handle 4710 * Pointer to interrupt handle. 4711 * @param param 4712 * The address of parameter (struct rte_eth_dev *) registered before. 4713 * 4714 * @return 4715 * void 4716 */ 4717 static void 4718 ixgbe_dev_interrupt_delayed_handler(void *param) 4719 { 4720 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 4721 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 4722 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 4723 struct ixgbe_interrupt *intr = 4724 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4725 struct ixgbe_hw *hw = 4726 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4727 uint32_t eicr; 4728 4729 ixgbe_disable_intr(hw); 4730 4731 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4732 if (eicr & IXGBE_EICR_MAILBOX) 4733 ixgbe_pf_mbx_process(dev); 4734 4735 if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { 4736 ixgbe_handle_lasi(hw); 4737 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; 4738 } 4739 4740 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { 4741 ixgbe_dev_link_update(dev, 0); 4742 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; 4743 ixgbe_dev_link_status_print(dev); 4744 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); 4745 } 4746 4747 if (intr->flags & IXGBE_FLAG_MACSEC) { 4748 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC, NULL); 4749 intr->flags &= ~IXGBE_FLAG_MACSEC; 4750 } 4751 4752 /* restore original mask */ 4753 intr->mask = intr->mask_original; 4754 intr->mask_original = 0; 4755 4756 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr); 4757 ixgbe_enable_intr(dev); 4758 rte_intr_ack(intr_handle); 4759 } 4760 4761 /** 4762 * Interrupt handler triggered by NIC for handling 4763 * specific interrupt. 4764 * 4765 * @param handle 4766 * Pointer to interrupt handle. 4767 * @param param 4768 * The address of parameter (struct rte_eth_dev *) registered before. 4769 * 4770 * @return 4771 * void 4772 */ 4773 static void 4774 ixgbe_dev_interrupt_handler(void *param) 4775 { 4776 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 4777 4778 ixgbe_dev_interrupt_get_status(dev); 4779 ixgbe_dev_interrupt_action(dev); 4780 } 4781 4782 static int 4783 ixgbe_dev_led_on(struct rte_eth_dev *dev) 4784 { 4785 struct ixgbe_hw *hw; 4786 4787 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4788 return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; 4789 } 4790 4791 static int 4792 ixgbe_dev_led_off(struct rte_eth_dev *dev) 4793 { 4794 struct ixgbe_hw *hw; 4795 4796 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4797 return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 
0 : -ENOTSUP; 4798 } 4799 4800 static int 4801 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 4802 { 4803 struct ixgbe_hw *hw; 4804 uint32_t mflcn_reg; 4805 uint32_t fccfg_reg; 4806 int rx_pause; 4807 int tx_pause; 4808 4809 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4810 4811 fc_conf->pause_time = hw->fc.pause_time; 4812 fc_conf->high_water = hw->fc.high_water[0]; 4813 fc_conf->low_water = hw->fc.low_water[0]; 4814 fc_conf->send_xon = hw->fc.send_xon; 4815 fc_conf->autoneg = !hw->fc.disable_fc_autoneg; 4816 4817 /* 4818 * Return rx_pause status according to actual setting of 4819 * MFLCN register. 4820 */ 4821 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 4822 if (mflcn_reg & IXGBE_MFLCN_PMCF) 4823 fc_conf->mac_ctrl_frame_fwd = 1; 4824 else 4825 fc_conf->mac_ctrl_frame_fwd = 0; 4826 4827 if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE)) 4828 rx_pause = 1; 4829 else 4830 rx_pause = 0; 4831 4832 /* 4833 * Return tx_pause status according to actual setting of 4834 * FCCFG register. 4835 */ 4836 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 4837 if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY)) 4838 tx_pause = 1; 4839 else 4840 tx_pause = 0; 4841 4842 if (rx_pause && tx_pause) 4843 fc_conf->mode = RTE_ETH_FC_FULL; 4844 else if (rx_pause) 4845 fc_conf->mode = RTE_ETH_FC_RX_PAUSE; 4846 else if (tx_pause) 4847 fc_conf->mode = RTE_ETH_FC_TX_PAUSE; 4848 else 4849 fc_conf->mode = RTE_ETH_FC_NONE; 4850 4851 return 0; 4852 } 4853 4854 static int 4855 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 4856 { 4857 struct ixgbe_hw *hw; 4858 struct ixgbe_adapter *adapter = dev->data->dev_private; 4859 int err; 4860 uint32_t rx_buf_size; 4861 uint32_t max_high_water; 4862 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { 4863 ixgbe_fc_none, 4864 ixgbe_fc_rx_pause, 4865 ixgbe_fc_tx_pause, 4866 ixgbe_fc_full 4867 }; 4868 4869 PMD_INIT_FUNC_TRACE(); 4870 4871 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4872 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)); 4873 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 4874 4875 /* 4876 * At least reserve one Ethernet frame for watermark 4877 * high_water/low_water in kilo bytes for ixgbe 4878 */ 4879 max_high_water = (rx_buf_size - 4880 RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; 4881 if ((fc_conf->high_water > max_high_water) || 4882 (fc_conf->high_water < fc_conf->low_water)) { 4883 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 4884 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 4885 return -EINVAL; 4886 } 4887 4888 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode]; 4889 hw->fc.pause_time = fc_conf->pause_time; 4890 hw->fc.high_water[0] = fc_conf->high_water; 4891 hw->fc.low_water[0] = fc_conf->low_water; 4892 hw->fc.send_xon = fc_conf->send_xon; 4893 hw->fc.disable_fc_autoneg = !fc_conf->autoneg; 4894 adapter->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd; 4895 4896 err = ixgbe_flow_ctrl_enable(dev, hw); 4897 if (err < 0) { 4898 PMD_INIT_LOG(ERR, "ixgbe_flow_ctrl_enable = 0x%x", err); 4899 return -EIO; 4900 } 4901 return err; 4902 } 4903 4904 /** 4905 * ixgbe_pfc_enable_generic - Enable flow control 4906 * @hw: pointer to hardware structure 4907 * @tc_num: traffic class number 4908 * Enable flow control according to the current settings. 
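 * Returns 0 on success, IXGBE_ERR_INVALID_LINK_SETTINGS when the pause time
 * or water marks are invalid, or IXGBE_ERR_CONFIG for an unrecognized
 * flow control mode.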
4909 */ 4910 static int 4911 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num) 4912 { 4913 int ret_val = 0; 4914 uint32_t mflcn_reg, fccfg_reg; 4915 uint32_t reg; 4916 uint32_t fcrtl, fcrth; 4917 uint8_t i; 4918 uint8_t nb_rx_en; 4919 4920 /* Validate the water mark configuration */ 4921 if (!hw->fc.pause_time) { 4922 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 4923 goto out; 4924 } 4925 4926 /* Low water mark of zero causes XOFF floods */ 4927 if (hw->fc.current_mode & ixgbe_fc_tx_pause) { 4928 /* High/Low water can not be 0 */ 4929 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) { 4930 PMD_INIT_LOG(ERR, "Invalid water mark configuration"); 4931 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 4932 goto out; 4933 } 4934 4935 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) { 4936 PMD_INIT_LOG(ERR, "Invalid water mark configuration"); 4937 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 4938 goto out; 4939 } 4940 } 4941 /* Negotiate the fc mode to use */ 4942 ixgbe_fc_autoneg(hw); 4943 4944 /* Disable any previous flow control settings */ 4945 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 4946 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE); 4947 4948 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 4949 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); 4950 4951 switch (hw->fc.current_mode) { 4952 case ixgbe_fc_none: 4953 /* 4954 * If the count of enabled RX Priority Flow control >1, 4955 * and the TX pause can not be disabled 4956 */ 4957 nb_rx_en = 0; 4958 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 4959 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 4960 if (reg & IXGBE_FCRTH_FCEN) 4961 nb_rx_en++; 4962 } 4963 if (nb_rx_en > 1) 4964 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4965 break; 4966 case ixgbe_fc_rx_pause: 4967 /* 4968 * Rx Flow control is enabled and Tx Flow control is 4969 * disabled by software override. Since there really 4970 * isn't a way to advertise that we are capable of RX 4971 * Pause ONLY, we will advertise that we support both 4972 * symmetric and asymmetric Rx PAUSE. Later, we will 4973 * disable the adapter's ability to send PAUSE frames. 4974 */ 4975 mflcn_reg |= IXGBE_MFLCN_RPFCE; 4976 /* 4977 * If the count of enabled RX Priority Flow control >1, 4978 * and the TX pause can not be disabled 4979 */ 4980 nb_rx_en = 0; 4981 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 4982 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 4983 if (reg & IXGBE_FCRTH_FCEN) 4984 nb_rx_en++; 4985 } 4986 if (nb_rx_en > 1) 4987 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4988 break; 4989 case ixgbe_fc_tx_pause: 4990 /* 4991 * Tx Flow control is enabled, and Rx Flow control is 4992 * disabled by software override. 4993 */ 4994 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4995 break; 4996 case ixgbe_fc_full: 4997 /* Flow control (both Rx and Tx) is enabled by SW override. */ 4998 mflcn_reg |= IXGBE_MFLCN_RPFCE; 4999 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 5000 break; 5001 default: 5002 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly"); 5003 ret_val = IXGBE_ERR_CONFIG; 5004 goto out; 5005 } 5006 5007 /* Set 802.3x based flow control settings. */ 5008 mflcn_reg |= IXGBE_MFLCN_DPF; 5009 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 5010 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 5011 5012 /* Set up and enable Rx high/low water mark thresholds, enable XON. 
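 * The water marks are configured in KB, so they are shifted left by 10
 * (multiplied by 1024) below before being programmed into FCRTL/FCRTH.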
*/ 5013 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 5014 hw->fc.high_water[tc_num]) { 5015 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE; 5016 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl); 5017 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN; 5018 } else { 5019 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0); 5020 /* 5021 * In order to prevent Tx hangs when the internal Tx 5022 * switch is enabled we must set the high water mark 5023 * to the maximum FCRTH value. This allows the Tx 5024 * switch to function even under heavy Rx workloads. 5025 */ 5026 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32; 5027 } 5028 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth); 5029 5030 /* Configure pause time (2 TCs per register) */ 5031 reg = hw->fc.pause_time * 0x00010001; 5032 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) 5033 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); 5034 5035 /* Configure flow control refresh threshold value */ 5036 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); 5037 5038 out: 5039 return ret_val; 5040 } 5041 5042 static int 5043 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num) 5044 { 5045 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5046 int32_t ret_val = IXGBE_NOT_IMPLEMENTED; 5047 5048 if (hw->mac.type != ixgbe_mac_82598EB) { 5049 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num); 5050 } 5051 return ret_val; 5052 } 5053 5054 static int 5055 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf) 5056 { 5057 int err; 5058 uint32_t rx_buf_size; 5059 uint32_t max_high_water; 5060 uint8_t tc_num; 5061 uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; 5062 struct ixgbe_hw *hw = 5063 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5064 struct ixgbe_dcb_config *dcb_config = 5065 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); 5066 5067 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { 5068 ixgbe_fc_none, 5069 ixgbe_fc_rx_pause, 5070 ixgbe_fc_tx_pause, 5071 ixgbe_fc_full 5072 }; 5073 5074 PMD_INIT_FUNC_TRACE(); 5075 5076 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map); 5077 tc_num = map[pfc_conf->priority]; 5078 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)); 5079 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 5080 /* 5081 * At least reserve one Ethernet frame for watermark 5082 * high_water/low_water in kilo bytes for ixgbe 5083 */ 5084 max_high_water = (rx_buf_size - 5085 RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; 5086 if ((pfc_conf->fc.high_water > max_high_water) || 5087 (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) { 5088 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 5089 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 5090 return -EINVAL; 5091 } 5092 5093 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode]; 5094 hw->fc.pause_time = pfc_conf->fc.pause_time; 5095 hw->fc.send_xon = pfc_conf->fc.send_xon; 5096 hw->fc.low_water[tc_num] = pfc_conf->fc.low_water; 5097 hw->fc.high_water[tc_num] = pfc_conf->fc.high_water; 5098 5099 err = ixgbe_dcb_pfc_enable(dev, tc_num); 5100 5101 /* Not negotiated is not an error case */ 5102 if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) 5103 return 0; 5104 5105 PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err); 5106 return -EIO; 5107 } 5108 5109 static int 5110 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, 5111 struct rte_eth_rss_reta_entry64 *reta_conf, 5112 uint16_t 
reta_size) 5113 { 5114 uint16_t i, sp_reta_size; 5115 uint8_t j, mask; 5116 uint32_t reta, r; 5117 uint16_t idx, shift; 5118 struct ixgbe_adapter *adapter = dev->data->dev_private; 5119 struct rte_eth_dev_data *dev_data = dev->data; 5120 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5121 uint32_t reta_reg; 5122 5123 PMD_INIT_FUNC_TRACE(); 5124 5125 if (!dev_data->dev_started) { 5126 PMD_DRV_LOG(ERR, 5127 "port %d must be started before rss reta update", 5128 dev_data->port_id); 5129 return -EIO; 5130 } 5131 5132 if (!ixgbe_rss_update_sp(hw->mac.type)) { 5133 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this " 5134 "NIC."); 5135 return -ENOTSUP; 5136 } 5137 5138 sp_reta_size = ixgbe_reta_size_get(hw->mac.type); 5139 if (reta_size != sp_reta_size) { 5140 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 5141 "(%d) doesn't match the number hardware can supported " 5142 "(%d)", reta_size, sp_reta_size); 5143 return -EINVAL; 5144 } 5145 5146 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { 5147 idx = i / RTE_ETH_RETA_GROUP_SIZE; 5148 shift = i % RTE_ETH_RETA_GROUP_SIZE; 5149 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 5150 IXGBE_4_BIT_MASK); 5151 if (!mask) 5152 continue; 5153 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); 5154 if (mask == IXGBE_4_BIT_MASK) 5155 r = 0; 5156 else 5157 r = IXGBE_READ_REG(hw, reta_reg); 5158 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) { 5159 if (mask & (0x1 << j)) 5160 reta |= reta_conf[idx].reta[shift + j] << 5161 (CHAR_BIT * j); 5162 else 5163 reta |= r & (IXGBE_8_BIT_MASK << 5164 (CHAR_BIT * j)); 5165 } 5166 IXGBE_WRITE_REG(hw, reta_reg, reta); 5167 } 5168 adapter->rss_reta_updated = 1; 5169 5170 return 0; 5171 } 5172 5173 static int 5174 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, 5175 struct rte_eth_rss_reta_entry64 *reta_conf, 5176 uint16_t reta_size) 5177 { 5178 uint16_t i, sp_reta_size; 5179 uint8_t j, mask; 5180 uint32_t reta; 5181 uint16_t idx, shift; 5182 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5183 uint32_t reta_reg; 5184 5185 PMD_INIT_FUNC_TRACE(); 5186 sp_reta_size = ixgbe_reta_size_get(hw->mac.type); 5187 if (reta_size != sp_reta_size) { 5188 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 5189 "(%d) doesn't match the number hardware can supported " 5190 "(%d)", reta_size, sp_reta_size); 5191 return -EINVAL; 5192 } 5193 5194 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { 5195 idx = i / RTE_ETH_RETA_GROUP_SIZE; 5196 shift = i % RTE_ETH_RETA_GROUP_SIZE; 5197 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 5198 IXGBE_4_BIT_MASK); 5199 if (!mask) 5200 continue; 5201 5202 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); 5203 reta = IXGBE_READ_REG(hw, reta_reg); 5204 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) { 5205 if (mask & (0x1 << j)) 5206 reta_conf[idx].reta[shift + j] = 5207 ((reta >> (CHAR_BIT * j)) & 5208 IXGBE_8_BIT_MASK); 5209 } 5210 } 5211 5212 return 0; 5213 } 5214 5215 static int 5216 ixgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 5217 uint32_t index, uint32_t pool) 5218 { 5219 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5220 uint32_t enable_addr = 1; 5221 5222 return ixgbe_set_rar(hw, index, mac_addr->addr_bytes, 5223 pool, enable_addr); 5224 } 5225 5226 static void 5227 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index) 5228 { 5229 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5230 5231 ixgbe_clear_rar(hw, index); 5232 } 5233 5234 static int 
5235 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr) 5236 { 5237 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5238 5239 ixgbe_remove_rar(dev, 0); 5240 ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs); 5241 5242 return 0; 5243 } 5244 5245 static bool 5246 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) 5247 { 5248 if (strcmp(dev->device->driver->name, drv->driver.name)) 5249 return false; 5250 5251 return true; 5252 } 5253 5254 bool 5255 is_ixgbe_supported(struct rte_eth_dev *dev) 5256 { 5257 return is_device_supported(dev, &rte_ixgbe_pmd); 5258 } 5259 5260 static int 5261 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 5262 { 5263 uint32_t hlreg0; 5264 uint32_t maxfrs; 5265 struct ixgbe_hw *hw; 5266 struct rte_eth_dev_info dev_info; 5267 uint32_t frame_size = mtu + IXGBE_ETH_OVERHEAD; 5268 int ret; 5269 5270 ret = ixgbe_dev_info_get(dev, &dev_info); 5271 if (ret != 0) 5272 return ret; 5273 5274 /* check that mtu is within the allowed range */ 5275 if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) 5276 return -EINVAL; 5277 5278 /* If device is started, refuse mtu that requires the support of 5279 * scattered packets when this feature has not been enabled before. 5280 */ 5281 if (dev->data->dev_started && !dev->data->scattered_rx && 5282 frame_size + 2 * RTE_VLAN_HLEN > 5283 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) { 5284 PMD_INIT_LOG(ERR, "Stop port first."); 5285 return -EINVAL; 5286 } 5287 5288 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5289 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 5290 5291 /* switch to jumbo mode if needed */ 5292 if (mtu > RTE_ETHER_MTU) 5293 hlreg0 |= IXGBE_HLREG0_JUMBOEN; 5294 else 5295 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; 5296 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 5297 5298 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); 5299 maxfrs &= 0x0000FFFF; 5300 maxfrs |= (frame_size << 16); 5301 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs); 5302 5303 return 0; 5304 } 5305 5306 /* 5307 * Virtual Function operations 5308 */ 5309 static void 5310 ixgbevf_intr_disable(struct rte_eth_dev *dev) 5311 { 5312 struct ixgbe_interrupt *intr = 5313 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5314 struct ixgbe_hw *hw = 5315 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5316 5317 PMD_INIT_FUNC_TRACE(); 5318 5319 /* Clear interrupt mask to stop from interrupts being generated */ 5320 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); 5321 5322 IXGBE_WRITE_FLUSH(hw); 5323 5324 /* Clear mask value. */ 5325 intr->mask = 0; 5326 } 5327 5328 static void 5329 ixgbevf_intr_enable(struct rte_eth_dev *dev) 5330 { 5331 struct ixgbe_interrupt *intr = 5332 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5333 struct ixgbe_hw *hw = 5334 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5335 5336 PMD_INIT_FUNC_TRACE(); 5337 5338 /* VF enable interrupt autoclean */ 5339 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK); 5340 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK); 5341 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK); 5342 5343 IXGBE_WRITE_FLUSH(hw); 5344 5345 /* Save IXGBE_VTEIMS value to mask. 
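* The Rx queue interrupt enable/disable callbacks update this cached mask before rewriting VTEIMS.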
*/ 5346 intr->mask = IXGBE_VF_IRQ_ENABLE_MASK; 5347 } 5348 5349 static int 5350 ixgbevf_dev_configure(struct rte_eth_dev *dev) 5351 { 5352 struct rte_eth_conf *conf = &dev->data->dev_conf; 5353 struct ixgbe_adapter *adapter = dev->data->dev_private; 5354 5355 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", 5356 dev->data->port_id); 5357 5358 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) 5359 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 5360 5361 /* 5362 * VF has no ability to enable/disable HW CRC 5363 * Keep the persistent behavior the same as Host PF 5364 */ 5365 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC 5366 if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) { 5367 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); 5368 conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC; 5369 } 5370 #else 5371 if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) { 5372 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); 5373 conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC; 5374 } 5375 #endif 5376 5377 /* 5378 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk 5379 * allocation or vector Rx preconditions we will reset it. 5380 */ 5381 adapter->rx_bulk_alloc_allowed = true; 5382 adapter->rx_vec_allowed = true; 5383 5384 return 0; 5385 } 5386 5387 static int 5388 ixgbevf_dev_start(struct rte_eth_dev *dev) 5389 { 5390 struct ixgbe_hw *hw = 5391 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5392 uint32_t intr_vector = 0; 5393 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5394 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5395 5396 int err, mask = 0; 5397 5398 PMD_INIT_FUNC_TRACE(); 5399 5400 /* Stop the link setup handler before resetting the HW. */ 5401 ixgbe_dev_wait_setup_link_complete(dev, 0); 5402 5403 err = hw->mac.ops.reset_hw(hw); 5404 5405 /** 5406 * In this case, reuses the MAC address assigned by VF 5407 * initialization. 5408 */ 5409 if (err != IXGBE_SUCCESS && err != IXGBE_ERR_INVALID_MAC_ADDR) { 5410 PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err); 5411 return err; 5412 } 5413 5414 hw->mac.get_link_status = true; 5415 5416 /* negotiate mailbox API version to use with the PF. 
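* The negotiated version determines which mailbox requests are available later (see the MTU handling in ixgbevf_dev_set_mtu()).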
*/ 5417 ixgbevf_negotiate_api(hw); 5418 5419 ixgbevf_dev_tx_init(dev); 5420 5421 /* This can fail when allocating mbufs for descriptor rings */ 5422 err = ixgbevf_dev_rx_init(dev); 5423 if (err) { 5424 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err); 5425 ixgbe_dev_clear_queues(dev); 5426 return err; 5427 } 5428 5429 /* Set vfta */ 5430 ixgbevf_set_vfta_all(dev, 1); 5431 5432 /* Set HW strip */ 5433 mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK | 5434 RTE_ETH_VLAN_EXTEND_MASK; 5435 err = ixgbevf_vlan_offload_config(dev, mask); 5436 if (err) { 5437 PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err); 5438 ixgbe_dev_clear_queues(dev); 5439 return err; 5440 } 5441 5442 ixgbevf_dev_rxtx_start(dev); 5443 5444 /* check and configure queue intr-vector mapping */ 5445 if (rte_intr_cap_multiple(intr_handle) && 5446 dev->data->dev_conf.intr_conf.rxq) { 5447 /* According to datasheet, only vector 0/1/2 can be used, 5448 * now only one vector is used for Rx queue 5449 */ 5450 intr_vector = 1; 5451 if (rte_intr_efd_enable(intr_handle, intr_vector)) { 5452 ixgbe_dev_clear_queues(dev); 5453 return -1; 5454 } 5455 } 5456 5457 if (rte_intr_dp_is_en(intr_handle)) { 5458 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", 5459 dev->data->nb_rx_queues)) { 5460 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 5461 " intr_vec", dev->data->nb_rx_queues); 5462 ixgbe_dev_clear_queues(dev); 5463 return -ENOMEM; 5464 } 5465 } 5466 ixgbevf_configure_msix(dev); 5467 5468 /* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt 5469 * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init( ). 5470 * If previous VFIO interrupt mapping setting in eth_ixgbevf_dev_init( ) 5471 * is not cleared, it will fail when following rte_intr_enable( ) tries 5472 * to map Rx queue interrupt to other VFIO vectors. 5473 * So clear uio/vfio intr/eventfd first to avoid failure. 5474 */ 5475 rte_intr_disable(intr_handle); 5476 5477 rte_intr_enable(intr_handle); 5478 5479 /* Re-enable interrupt for VF */ 5480 ixgbevf_intr_enable(dev); 5481 5482 /* 5483 * Update link status right before return, because it may 5484 * start link configuration process in a separate thread.
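* The call below uses wait_to_complete = 0, so it only reports the current state and does not wait for the link to come up.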
5485 */ 5486 ixgbevf_dev_link_update(dev, 0); 5487 5488 hw->adapter_stopped = false; 5489 5490 return 0; 5491 } 5492 5493 static int 5494 ixgbevf_dev_stop(struct rte_eth_dev *dev) 5495 { 5496 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5497 struct ixgbe_adapter *adapter = dev->data->dev_private; 5498 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5499 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5500 5501 if (hw->adapter_stopped) 5502 return 0; 5503 5504 PMD_INIT_FUNC_TRACE(); 5505 5506 ixgbe_dev_wait_setup_link_complete(dev, 0); 5507 5508 ixgbevf_intr_disable(dev); 5509 5510 dev->data->dev_started = 0; 5511 hw->adapter_stopped = 1; 5512 ixgbe_stop_adapter(hw); 5513 5514 /* 5515 * Clear what we set, but we still keep shadow_vfta to 5516 * restore after device starts 5517 */ 5518 ixgbevf_set_vfta_all(dev, 0); 5519 5520 /* Clear stored conf */ 5521 dev->data->scattered_rx = 0; 5522 5523 ixgbe_dev_clear_queues(dev); 5524 5525 /* Clean datapath event and queue/vec mapping */ 5526 rte_intr_efd_disable(intr_handle); 5527 rte_intr_vec_list_free(intr_handle); 5528 5529 adapter->rss_reta_updated = 0; 5530 5531 return 0; 5532 } 5533 5534 static int 5535 ixgbevf_dev_close(struct rte_eth_dev *dev) 5536 { 5537 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5538 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5539 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5540 int ret; 5541 5542 PMD_INIT_FUNC_TRACE(); 5543 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 5544 return 0; 5545 5546 ixgbe_reset_hw(hw); 5547 5548 ret = ixgbevf_dev_stop(dev); 5549 5550 ixgbe_dev_free_queues(dev); 5551 5552 /** 5553 * Remove the VF MAC address ro ensure 5554 * that the VF traffic goes to the PF 5555 * after stop, close and detach of the VF 5556 **/ 5557 ixgbevf_remove_mac_addr(dev, 0); 5558 5559 rte_intr_disable(intr_handle); 5560 rte_intr_callback_unregister(intr_handle, 5561 ixgbevf_dev_interrupt_handler, dev); 5562 5563 return ret; 5564 } 5565 5566 /* 5567 * Reset VF device 5568 */ 5569 static int 5570 ixgbevf_dev_reset(struct rte_eth_dev *dev) 5571 { 5572 int ret; 5573 5574 ret = eth_ixgbevf_dev_uninit(dev); 5575 if (ret) 5576 return ret; 5577 5578 ret = eth_ixgbevf_dev_init(dev); 5579 5580 return ret; 5581 } 5582 5583 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on) 5584 { 5585 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5586 struct ixgbe_vfta *shadow_vfta = 5587 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 5588 int i = 0, j = 0, vfta = 0, mask = 1; 5589 5590 for (i = 0; i < IXGBE_VFTA_SIZE; i++) { 5591 vfta = shadow_vfta->vfta[i]; 5592 if (vfta) { 5593 mask = 1; 5594 for (j = 0; j < 32; j++) { 5595 if (vfta & mask) 5596 ixgbe_set_vfta(hw, (i<<5)+j, 0, 5597 on, false); 5598 mask <<= 1; 5599 } 5600 } 5601 } 5602 5603 } 5604 5605 static int 5606 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 5607 { 5608 struct ixgbe_hw *hw = 5609 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5610 struct ixgbe_vfta *shadow_vfta = 5611 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 5612 uint32_t vid_idx = 0; 5613 uint32_t vid_bit = 0; 5614 int ret = 0; 5615 5616 PMD_INIT_FUNC_TRACE(); 5617 5618 /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */ 5619 ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false); 5620 if (ret) { 5621 PMD_INIT_LOG(ERR, "Unable to set VF vlan"); 5622 return ret; 5623 } 5624 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 
5625 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 5626 5627 /* Save what we set and restore it after device reset */ 5628 if (on) 5629 shadow_vfta->vfta[vid_idx] |= vid_bit; 5630 else 5631 shadow_vfta->vfta[vid_idx] &= ~vid_bit; 5632 5633 return 0; 5634 } 5635 5636 static void 5637 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) 5638 { 5639 struct ixgbe_hw *hw = 5640 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5641 uint32_t ctrl; 5642 5643 PMD_INIT_FUNC_TRACE(); 5644 5645 if (queue >= hw->mac.max_rx_queues) 5646 return; 5647 5648 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 5649 if (on) 5650 ctrl |= IXGBE_RXDCTL_VME; 5651 else 5652 ctrl &= ~IXGBE_RXDCTL_VME; 5653 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 5654 5655 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on); 5656 } 5657 5658 static int 5659 ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask) 5660 { 5661 struct ixgbe_rx_queue *rxq; 5662 uint16_t i; 5663 int on = 0; 5664 5665 /* VF function only supports hw strip feature, others are not supported */ 5666 if (mask & RTE_ETH_VLAN_STRIP_MASK) { 5667 for (i = 0; i < dev->data->nb_rx_queues; i++) { 5668 rxq = dev->data->rx_queues[i]; 5669 on = !!(rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP); 5670 ixgbevf_vlan_strip_queue_set(dev, i, on); 5671 } 5672 } 5673 5674 return 0; 5675 } 5676 5677 static int 5678 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) 5679 { 5680 ixgbe_config_vlan_strip_on_all_queues(dev, mask); 5681 5682 ixgbevf_vlan_offload_config(dev, mask); 5683 5684 return 0; 5685 } 5686 5687 int 5688 ixgbe_vt_check(struct ixgbe_hw *hw) 5689 { 5690 uint32_t reg_val; 5691 5692 /* if Virtualization Technology is enabled */ 5693 reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 5694 if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) { 5695 PMD_INIT_LOG(ERR, "VT must be enabled for this setting"); 5696 return -1; 5697 } 5698 5699 return 0; 5700 } 5701 5702 static uint32_t 5703 ixgbe_uta_vector(struct ixgbe_hw *hw, struct rte_ether_addr *uc_addr) 5704 { 5705 uint32_t vector = 0; 5706 5707 switch (hw->mac.mc_filter_type) { 5708 case 0: /* use bits [47:36] of the address */ 5709 vector = ((uc_addr->addr_bytes[4] >> 4) | 5710 (((uint16_t)uc_addr->addr_bytes[5]) << 4)); 5711 break; 5712 case 1: /* use bits [46:35] of the address */ 5713 vector = ((uc_addr->addr_bytes[4] >> 3) | 5714 (((uint16_t)uc_addr->addr_bytes[5]) << 5)); 5715 break; 5716 case 2: /* use bits [45:34] of the address */ 5717 vector = ((uc_addr->addr_bytes[4] >> 2) | 5718 (((uint16_t)uc_addr->addr_bytes[5]) << 6)); 5719 break; 5720 case 3: /* use bits [43:32] of the address */ 5721 vector = ((uc_addr->addr_bytes[4]) | 5722 (((uint16_t)uc_addr->addr_bytes[5]) << 8)); 5723 break; 5724 default: /* Invalid mc_filter_type */ 5725 break; 5726 } 5727 5728 /* vector can only be 12-bits or boundary will be exceeded */ 5729 vector &= 0xFFF; 5730 return vector; 5731 } 5732 5733 static int 5734 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, 5735 struct rte_ether_addr *mac_addr, uint8_t on) 5736 { 5737 uint32_t vector; 5738 uint32_t uta_idx; 5739 uint32_t reg_val; 5740 uint32_t uta_shift; 5741 uint32_t rc; 5742 const uint32_t ixgbe_uta_idx_mask = 0x7F; 5743 const uint32_t ixgbe_uta_bit_shift = 5; 5744 const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1; 5745 const uint32_t bit1 = 0x1; 5746 5747 struct ixgbe_hw *hw = 5748 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5749 struct ixgbe_uta_info *uta_info = 5750 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); 5751 5752 /*
The UTA table only exists on 82599 hardware and newer */ 5753 if (hw->mac.type < ixgbe_mac_82599EB) 5754 return -ENOTSUP; 5755 5756 vector = ixgbe_uta_vector(hw, mac_addr); 5757 uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask; 5758 uta_shift = vector & ixgbe_uta_bit_mask; 5759 5760 rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0); 5761 if (rc == on) 5762 return 0; 5763 5764 reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx)); 5765 if (on) { 5766 uta_info->uta_in_use++; 5767 reg_val |= (bit1 << uta_shift); 5768 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift); 5769 } else { 5770 uta_info->uta_in_use--; 5771 reg_val &= ~(bit1 << uta_shift); 5772 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift); 5773 } 5774 5775 IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val); 5776 5777 if (uta_info->uta_in_use > 0) 5778 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 5779 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 5780 else 5781 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 5782 5783 return 0; 5784 } 5785 5786 static int 5787 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on) 5788 { 5789 int i; 5790 struct ixgbe_hw *hw = 5791 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5792 struct ixgbe_uta_info *uta_info = 5793 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); 5794 5795 /* The UTA table only exists on 82599 hardware and newer */ 5796 if (hw->mac.type < ixgbe_mac_82599EB) 5797 return -ENOTSUP; 5798 5799 if (on) { 5800 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 5801 uta_info->uta_shadow[i] = ~0; 5802 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); 5803 } 5804 } else { 5805 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 5806 uta_info->uta_shadow[i] = 0; 5807 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); 5808 } 5809 } 5810 return 0; 5811 5812 } 5813 5814 uint32_t 5815 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val) 5816 { 5817 uint32_t new_val = orig_val; 5818 5819 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG) 5820 new_val |= IXGBE_VMOLR_AUPE; 5821 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC) 5822 new_val |= IXGBE_VMOLR_ROMPE; 5823 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC) 5824 new_val |= IXGBE_VMOLR_ROPE; 5825 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST) 5826 new_val |= IXGBE_VMOLR_BAM; 5827 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST) 5828 new_val |= IXGBE_VMOLR_MPE; 5829 5830 return new_val; 5831 } 5832 5833 static int 5834 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 5835 { 5836 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5837 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5838 struct ixgbe_interrupt *intr = 5839 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5840 struct ixgbe_hw *hw = 5841 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5842 uint32_t vec = IXGBE_MISC_VEC_ID; 5843 5844 if (rte_intr_allow_others(intr_handle)) 5845 vec = IXGBE_RX_VEC_START; 5846 intr->mask |= (1 << vec); 5847 RTE_SET_USED(queue_id); 5848 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); 5849 5850 rte_intr_ack(intr_handle); 5851 5852 return 0; 5853 } 5854 5855 static int 5856 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 5857 { 5858 struct ixgbe_interrupt *intr = 5859 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5860 struct ixgbe_hw *hw = 5861 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5862 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5863 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5864 uint32_t vec = 
IXGBE_MISC_VEC_ID; 5865 5866 if (rte_intr_allow_others(intr_handle)) 5867 vec = IXGBE_RX_VEC_START; 5868 intr->mask &= ~(1 << vec); 5869 RTE_SET_USED(queue_id); 5870 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); 5871 5872 return 0; 5873 } 5874 5875 static int 5876 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 5877 { 5878 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5879 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 5880 uint32_t mask; 5881 struct ixgbe_hw *hw = 5882 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5883 struct ixgbe_interrupt *intr = 5884 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5885 5886 if (queue_id < 16) { 5887 ixgbe_disable_intr(hw); 5888 intr->mask |= (1 << queue_id); 5889 ixgbe_enable_intr(dev); 5890 } else if (queue_id < 32) { 5891 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); 5892 mask &= (1 << queue_id); 5893 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 5894 } else if (queue_id < 64) { 5895 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); 5896 mask &= (1 << (queue_id - 32)); 5897 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 5898 } 5899 rte_intr_ack(intr_handle); 5900 5901 return 0; 5902 } 5903 5904 static int 5905 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 5906 { 5907 uint32_t mask; 5908 struct ixgbe_hw *hw = 5909 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5910 struct ixgbe_interrupt *intr = 5911 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5912 5913 if (queue_id < 16) { 5914 ixgbe_disable_intr(hw); 5915 intr->mask &= ~(1 << queue_id); 5916 ixgbe_enable_intr(dev); 5917 } else if (queue_id < 32) { 5918 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); 5919 mask &= ~(1 << queue_id); 5920 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 5921 } else if (queue_id < 64) { 5922 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); 5923 mask &= ~(1 << (queue_id - 32)); 5924 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 5925 } 5926 5927 return 0; 5928 } 5929 5930 static void 5931 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 5932 uint8_t queue, uint8_t msix_vector) 5933 { 5934 uint32_t tmp, idx; 5935 5936 if (direction == -1) { 5937 /* other causes */ 5938 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5939 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 5940 tmp &= ~0xFF; 5941 tmp |= msix_vector; 5942 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp); 5943 } else { 5944 /* rx or tx cause */ 5945 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5946 idx = ((16 * (queue & 1)) + (8 * direction)); 5947 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); 5948 tmp &= ~(0xFF << idx); 5949 tmp |= (msix_vector << idx); 5950 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp); 5951 } 5952 } 5953 5954 /** 5955 * set the IVAR registers, mapping interrupt causes to vectors 5956 * @param hw 5957 * pointer to ixgbe_hw struct 5958 * @direction 5959 * 0 for Rx, 1 for Tx, -1 for other causes 5960 * @queue 5961 * queue to map the corresponding interrupt to 5962 * @msix_vector 5963 * the vector to map to the corresponding queue 5964 */ 5965 static void 5966 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 5967 uint8_t queue, uint8_t msix_vector) 5968 { 5969 uint32_t tmp, idx; 5970 5971 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5972 if (hw->mac.type == ixgbe_mac_82598EB) { 5973 if (direction == -1) 5974 direction = 0; 5975 idx = (((direction * 64) + queue) >> 2) & 0x1F; 5976 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx)); 5977 tmp &= ~(0xFF << (8 * (queue & 0x3))); 5978 tmp |= (msix_vector << (8 * (queue & 0x3))); 5979 
IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp); 5980 } else if ((hw->mac.type == ixgbe_mac_82599EB) || 5981 (hw->mac.type == ixgbe_mac_X540) || 5982 (hw->mac.type == ixgbe_mac_X550) || 5983 (hw->mac.type == ixgbe_mac_X550EM_a) || 5984 (hw->mac.type == ixgbe_mac_X550EM_x) || 5985 (hw->mac.type == ixgbe_mac_E610)) { 5986 if (direction == -1) { 5987 /* other causes */ 5988 idx = ((queue & 1) * 8); 5989 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 5990 tmp &= ~(0xFF << idx); 5991 tmp |= (msix_vector << idx); 5992 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp); 5993 } else { 5994 /* rx or tx causes */ 5995 idx = ((16 * (queue & 1)) + (8 * direction)); 5996 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); 5997 tmp &= ~(0xFF << idx); 5998 tmp |= (msix_vector << idx); 5999 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp); 6000 } 6001 } 6002 } 6003 6004 static void 6005 ixgbevf_configure_msix(struct rte_eth_dev *dev) 6006 { 6007 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 6008 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 6009 struct ixgbe_hw *hw = 6010 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6011 uint32_t q_idx; 6012 uint32_t vector_idx = IXGBE_MISC_VEC_ID; 6013 uint32_t base = IXGBE_MISC_VEC_ID; 6014 6015 /* Configure VF other cause ivar */ 6016 ixgbevf_set_ivar_map(hw, -1, 1, vector_idx); 6017 6018 /* won't configure msix register if no mapping is done 6019 * between intr vector and event fd. 6020 */ 6021 if (!rte_intr_dp_is_en(intr_handle)) 6022 return; 6023 6024 if (rte_intr_allow_others(intr_handle)) { 6025 base = IXGBE_RX_VEC_START; 6026 vector_idx = IXGBE_RX_VEC_START; 6027 } 6028 6029 /* Configure all RX queues of VF */ 6030 for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) { 6031 /* Force all queues to use vector 0, 6032 * as IXGBE_VF_MAXMSIVECTOR = 1 6033 */ 6034 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx); 6035 rte_intr_vec_list_index_set(intr_handle, q_idx, 6036 vector_idx); 6037 if (vector_idx < base + rte_intr_nb_efd_get(intr_handle) 6038 - 1) 6039 vector_idx++; 6040 } 6041 6042 /* As the RX queue setting above shows, all queues use vector 0. 6043 * Set only the ITR value of IXGBE_MISC_VEC_ID. 6044 */ 6045 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID), 6046 IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) 6047 | IXGBE_EITR_CNT_WDIS); 6048 } 6049 6050 /** 6051 * Sets up the hardware to properly generate MSI-X interrupts 6052 * @hw 6053 * board private structure 6054 */ 6055 static void 6056 ixgbe_configure_msix(struct rte_eth_dev *dev) 6057 { 6058 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 6059 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 6060 struct ixgbe_hw *hw = 6061 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6062 uint32_t queue_id, base = IXGBE_MISC_VEC_ID; 6063 uint32_t vec = IXGBE_MISC_VEC_ID; 6064 uint32_t mask; 6065 uint32_t gpie; 6066 6067 /* won't configure msix register if no mapping is done 6068 * between intr vector and event fd 6069 * but if msix has been enabled already, need to configure 6070 * auto clean, auto mask and throttling.
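* The GPIE read below is what detects whether MSI-X mode was already enabled.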
6071 */ 6072 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 6073 if (!rte_intr_dp_is_en(intr_handle) && 6074 !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT))) 6075 return; 6076 6077 if (rte_intr_allow_others(intr_handle)) 6078 vec = base = IXGBE_RX_VEC_START; 6079 6080 /* setup GPIE for MSI-x mode */ 6081 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 6082 gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | 6083 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME; 6084 /* auto clearing and auto setting corresponding bits in EIMS 6085 * when MSI-X interrupt is triggered 6086 */ 6087 if (hw->mac.type == ixgbe_mac_82598EB) { 6088 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 6089 } else { 6090 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 6091 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 6092 } 6093 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 6094 6095 /* Populate the IVAR table and set the ITR values to the 6096 * corresponding register. 6097 */ 6098 if (rte_intr_dp_is_en(intr_handle)) { 6099 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; 6100 queue_id++) { 6101 /* by default, 1:1 mapping */ 6102 ixgbe_set_ivar_map(hw, 0, queue_id, vec); 6103 rte_intr_vec_list_index_set(intr_handle, 6104 queue_id, vec); 6105 if (vec < base + rte_intr_nb_efd_get(intr_handle) 6106 - 1) 6107 vec++; 6108 } 6109 6110 switch (hw->mac.type) { 6111 case ixgbe_mac_82598EB: 6112 ixgbe_set_ivar_map(hw, -1, 6113 IXGBE_IVAR_OTHER_CAUSES_INDEX, 6114 IXGBE_MISC_VEC_ID); 6115 break; 6116 case ixgbe_mac_82599EB: 6117 case ixgbe_mac_X540: 6118 case ixgbe_mac_X550: 6119 case ixgbe_mac_X550EM_x: 6120 case ixgbe_mac_E610: 6121 ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); 6122 break; 6123 default: 6124 break; 6125 } 6126 } 6127 IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID), 6128 IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) 6129 | IXGBE_EITR_CNT_WDIS); 6130 6131 /* set up to autoclear timer, and the vectors */ 6132 mask = IXGBE_EIMS_ENABLE_MASK; 6133 mask &= ~(IXGBE_EIMS_OTHER | 6134 IXGBE_EIMS_MAILBOX | 6135 IXGBE_EIMS_LSC); 6136 6137 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); 6138 } 6139 6140 int 6141 ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, 6142 uint16_t queue_idx, uint32_t tx_rate) 6143 { 6144 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6145 uint32_t rf_dec, rf_int; 6146 uint32_t bcnrc_val; 6147 uint16_t link_speed = dev->data->dev_link.link_speed; 6148 6149 if (queue_idx >= hw->mac.max_tx_queues) 6150 return -EINVAL; 6151 6152 if (tx_rate != 0) { 6153 /* Calculate the rate factor values to set */ 6154 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate; 6155 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate; 6156 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate; 6157 6158 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; 6159 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) & 6160 IXGBE_RTTBCNRC_RF_INT_MASK_M); 6161 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); 6162 } else { 6163 bcnrc_val = 0; 6164 } 6165 6166 /* 6167 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM 6168 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise 6169 * set as 0x4. 
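* These two values are written below as IXGBE_MMW_SIZE_JUMBO_FRAME and IXGBE_MMW_SIZE_DEFAULT respectively.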
6170 */ 6171 if (dev->data->mtu + IXGBE_ETH_OVERHEAD >= IXGBE_MAX_JUMBO_FRAME_SIZE) 6172 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, IXGBE_MMW_SIZE_JUMBO_FRAME); 6173 else 6174 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, IXGBE_MMW_SIZE_DEFAULT); 6175 6176 /* Set RTTBCNRC of queue X */ 6177 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx); 6178 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); 6179 IXGBE_WRITE_FLUSH(hw); 6180 6181 return 0; 6182 } 6183 6184 static int 6185 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 6186 __rte_unused uint32_t index, 6187 __rte_unused uint32_t pool) 6188 { 6189 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6190 int diag; 6191 6192 /* 6193 * On a 82599 VF, adding again the same MAC addr is not an idempotent 6194 * operation. Trap this case to avoid exhausting the [very limited] 6195 * set of PF resources used to store VF MAC addresses. 6196 */ 6197 if (memcmp(hw->mac.perm_addr, mac_addr, 6198 sizeof(struct rte_ether_addr)) == 0) 6199 return -1; 6200 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); 6201 if (diag != 0) 6202 PMD_DRV_LOG(ERR, "Unable to add MAC address " 6203 RTE_ETHER_ADDR_PRT_FMT " - diag=%d", 6204 RTE_ETHER_ADDR_BYTES(mac_addr), diag); 6205 return diag; 6206 } 6207 6208 static void 6209 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) 6210 { 6211 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6212 struct rte_ether_addr *perm_addr = 6213 (struct rte_ether_addr *)hw->mac.perm_addr; 6214 struct rte_ether_addr *mac_addr; 6215 uint32_t i; 6216 int diag; 6217 6218 /* 6219 * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does 6220 * not support the deletion of a given MAC address. 6221 * Instead, it imposes to delete all MAC addresses, then to add again 6222 * all MAC addresses with the exception of the one to be deleted. 6223 */ 6224 (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL); 6225 6226 /* 6227 * Add again all MAC addresses, with the exception of the deleted one 6228 * and of the permanent MAC address. 
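* Zero (unused) entries in dev->data->mac_addrs are skipped as well.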
6229 */ 6230 for (i = 0, mac_addr = dev->data->mac_addrs; 6231 i < hw->mac.num_rar_entries; i++, mac_addr++) { 6232 /* Skip the deleted MAC address */ 6233 if (i == index) 6234 continue; 6235 /* Skip NULL MAC addresses */ 6236 if (rte_is_zero_ether_addr(mac_addr)) 6237 continue; 6238 /* Skip the permanent MAC address */ 6239 if (memcmp(perm_addr, mac_addr, 6240 sizeof(struct rte_ether_addr)) == 0) 6241 continue; 6242 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); 6243 if (diag != 0) 6244 PMD_DRV_LOG(ERR, 6245 "Adding again MAC address " 6246 RTE_ETHER_ADDR_PRT_FMT " failed " 6247 "diag=%d", RTE_ETHER_ADDR_BYTES(mac_addr), 6248 diag); 6249 } 6250 } 6251 6252 static int 6253 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, 6254 struct rte_ether_addr *addr) 6255 { 6256 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6257 6258 hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0); 6259 6260 return 0; 6261 } 6262 6263 int 6264 ixgbe_syn_filter_set(struct rte_eth_dev *dev, 6265 struct rte_eth_syn_filter *filter, 6266 bool add) 6267 { 6268 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6269 struct ixgbe_filter_info *filter_info = 6270 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6271 uint32_t syn_info; 6272 uint32_t synqf; 6273 6274 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) 6275 return -EINVAL; 6276 6277 syn_info = filter_info->syn_info; 6278 6279 if (add) { 6280 if (syn_info & IXGBE_SYN_FILTER_ENABLE) 6281 return -EINVAL; 6282 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) & 6283 IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE); 6284 6285 if (filter->hig_pri) 6286 synqf |= IXGBE_SYN_FILTER_SYNQFP; 6287 else 6288 synqf &= ~IXGBE_SYN_FILTER_SYNQFP; 6289 } else { 6290 synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); 6291 if (!(syn_info & IXGBE_SYN_FILTER_ENABLE)) 6292 return -ENOENT; 6293 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE); 6294 } 6295 6296 filter_info->syn_info = synqf; 6297 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); 6298 IXGBE_WRITE_FLUSH(hw); 6299 return 0; 6300 } 6301 6302 6303 static inline enum ixgbe_5tuple_protocol 6304 convert_protocol_type(uint8_t protocol_value) 6305 { 6306 if (protocol_value == IPPROTO_TCP) 6307 return IXGBE_FILTER_PROTOCOL_TCP; 6308 else if (protocol_value == IPPROTO_UDP) 6309 return IXGBE_FILTER_PROTOCOL_UDP; 6310 else if (protocol_value == IPPROTO_SCTP) 6311 return IXGBE_FILTER_PROTOCOL_SCTP; 6312 else 6313 return IXGBE_FILTER_PROTOCOL_NONE; 6314 } 6315 6316 /* inject a 5-tuple filter to HW */ 6317 static inline void 6318 ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev, 6319 struct ixgbe_5tuple_filter *filter) 6320 { 6321 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6322 int i; 6323 uint32_t ftqf, sdpqf; 6324 uint32_t l34timir = 0; 6325 uint8_t mask = 0xff; 6326 6327 i = filter->index; 6328 6329 sdpqf = (uint32_t)(filter->filter_info.dst_port << 6330 IXGBE_SDPQF_DSTPORT_SHIFT); 6331 sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT); 6332 6333 ftqf = (uint32_t)(filter->filter_info.proto & 6334 IXGBE_FTQF_PROTOCOL_MASK); 6335 ftqf |= (uint32_t)((filter->filter_info.priority & 6336 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT); 6337 if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. 
*/ 6338 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK; 6339 if (filter->filter_info.dst_ip_mask == 0) 6340 mask &= IXGBE_FTQF_DEST_ADDR_MASK; 6341 if (filter->filter_info.src_port_mask == 0) 6342 mask &= IXGBE_FTQF_SOURCE_PORT_MASK; 6343 if (filter->filter_info.dst_port_mask == 0) 6344 mask &= IXGBE_FTQF_DEST_PORT_MASK; 6345 if (filter->filter_info.proto_mask == 0) 6346 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK; 6347 ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT; 6348 ftqf |= IXGBE_FTQF_POOL_MASK_EN; 6349 ftqf |= IXGBE_FTQF_QUEUE_ENABLE; 6350 6351 IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip); 6352 IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip); 6353 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf); 6354 IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf); 6355 6356 l34timir |= IXGBE_L34T_IMIR_RESERVE; 6357 l34timir |= (uint32_t)(filter->queue << 6358 IXGBE_L34T_IMIR_QUEUE_SHIFT); 6359 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir); 6360 } 6361 6362 /* 6363 * add a 5tuple filter 6364 * 6365 * @param 6366 * dev: Pointer to struct rte_eth_dev. 6367 * index: the index the filter allocates. 6368 * filter: pointer to the filter that will be added. 6369 * rx_queue: the queue id the filter assigned to. 6370 * 6371 * @return 6372 * - On success, zero. 6373 * - On failure, a negative value. 6374 */ 6375 static int 6376 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, 6377 struct ixgbe_5tuple_filter *filter) 6378 { 6379 struct ixgbe_filter_info *filter_info = 6380 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6381 int i, idx, shift; 6382 6383 /* 6384 * look for an unused 5tuple filter index, 6385 * and insert the filter to list. 6386 */ 6387 for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) { 6388 idx = i / (sizeof(uint32_t) * NBBY); 6389 shift = i % (sizeof(uint32_t) * NBBY); 6390 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) { 6391 filter_info->fivetuple_mask[idx] |= 1 << shift; 6392 filter->index = i; 6393 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, 6394 filter, 6395 entries); 6396 break; 6397 } 6398 } 6399 if (i >= IXGBE_MAX_FTQF_FILTERS) { 6400 PMD_DRV_LOG(ERR, "5tuple filters are full."); 6401 return -ENOSYS; 6402 } 6403 6404 ixgbe_inject_5tuple_filter(dev, filter); 6405 6406 return 0; 6407 } 6408 6409 /* 6410 * remove a 5tuple filter 6411 * 6412 * @param 6413 * dev: Pointer to struct rte_eth_dev. 6414 * filter: the pointer of the filter will be removed. 
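* The matching DAQF/SAQF/SDPQF/FTQF/L34T_IMIR registers are cleared as well.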
6415 */ 6416 static void 6417 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, 6418 struct ixgbe_5tuple_filter *filter) 6419 { 6420 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6421 struct ixgbe_filter_info *filter_info = 6422 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6423 uint16_t index = filter->index; 6424 6425 filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &= 6426 ~(1 << (index % (sizeof(uint32_t) * NBBY))); 6427 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); 6428 rte_free(filter); 6429 6430 IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0); 6431 IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0); 6432 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0); 6433 IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0); 6434 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0); 6435 } 6436 6437 static int 6438 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 6439 { 6440 struct ixgbe_hw *hw; 6441 uint32_t max_frame = mtu + IXGBE_ETH_OVERHEAD; 6442 struct rte_eth_dev_data *dev_data = dev->data; 6443 6444 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6445 6446 if (mtu < RTE_ETHER_MIN_MTU || max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN) 6447 return -EINVAL; 6448 6449 /* If device is started, refuse mtu that requires the support of 6450 * scattered packets when this feature has not been enabled before. 6451 */ 6452 if (dev_data->dev_started && !dev_data->scattered_rx && 6453 (max_frame + 2 * RTE_VLAN_HLEN > 6454 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 6455 PMD_INIT_LOG(ERR, "Stop port first."); 6456 return -EINVAL; 6457 } 6458 6459 /* 6460 * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU 6461 * request of the version 2.0 of the mailbox API. 6462 * For now, use the IXGBE_VF_SET_LPE request of the version 1.0 6463 * of the mailbox API. 
6464 * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers 6465 * prior to 3.11.33 which contains the following change: 6466 * "ixgbe: Enable jumbo frames support w/ SR-IOV" 6467 */ 6468 if (ixgbevf_rlpml_set_vf(hw, max_frame)) 6469 return -EINVAL; 6470 6471 return 0; 6472 } 6473 6474 static inline struct ixgbe_5tuple_filter * 6475 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list, 6476 struct ixgbe_5tuple_filter_info *key) 6477 { 6478 struct ixgbe_5tuple_filter *it; 6479 6480 TAILQ_FOREACH(it, filter_list, entries) { 6481 if (memcmp(key, &it->filter_info, 6482 sizeof(struct ixgbe_5tuple_filter_info)) == 0) { 6483 return it; 6484 } 6485 } 6486 return NULL; 6487 } 6488 6489 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info*/ 6490 static inline int 6491 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, 6492 struct ixgbe_5tuple_filter_info *filter_info) 6493 { 6494 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM || 6495 filter->priority > IXGBE_5TUPLE_MAX_PRI || 6496 filter->priority < IXGBE_5TUPLE_MIN_PRI) 6497 return -EINVAL; 6498 6499 switch (filter->dst_ip_mask) { 6500 case UINT32_MAX: 6501 filter_info->dst_ip_mask = 0; 6502 filter_info->dst_ip = filter->dst_ip; 6503 break; 6504 case 0: 6505 filter_info->dst_ip_mask = 1; 6506 break; 6507 default: 6508 PMD_DRV_LOG(ERR, "invalid dst_ip mask."); 6509 return -EINVAL; 6510 } 6511 6512 switch (filter->src_ip_mask) { 6513 case UINT32_MAX: 6514 filter_info->src_ip_mask = 0; 6515 filter_info->src_ip = filter->src_ip; 6516 break; 6517 case 0: 6518 filter_info->src_ip_mask = 1; 6519 break; 6520 default: 6521 PMD_DRV_LOG(ERR, "invalid src_ip mask."); 6522 return -EINVAL; 6523 } 6524 6525 switch (filter->dst_port_mask) { 6526 case UINT16_MAX: 6527 filter_info->dst_port_mask = 0; 6528 filter_info->dst_port = filter->dst_port; 6529 break; 6530 case 0: 6531 filter_info->dst_port_mask = 1; 6532 break; 6533 default: 6534 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 6535 return -EINVAL; 6536 } 6537 6538 switch (filter->src_port_mask) { 6539 case UINT16_MAX: 6540 filter_info->src_port_mask = 0; 6541 filter_info->src_port = filter->src_port; 6542 break; 6543 case 0: 6544 filter_info->src_port_mask = 1; 6545 break; 6546 default: 6547 PMD_DRV_LOG(ERR, "invalid src_port mask."); 6548 return -EINVAL; 6549 } 6550 6551 switch (filter->proto_mask) { 6552 case UINT8_MAX: 6553 filter_info->proto_mask = 0; 6554 filter_info->proto = 6555 convert_protocol_type(filter->proto); 6556 break; 6557 case 0: 6558 filter_info->proto_mask = 1; 6559 break; 6560 default: 6561 PMD_DRV_LOG(ERR, "invalid protocol mask."); 6562 return -EINVAL; 6563 } 6564 6565 filter_info->priority = (uint8_t)filter->priority; 6566 return 0; 6567 } 6568 6569 /* 6570 * add or delete a ntuple filter 6571 * 6572 * @param 6573 * dev: Pointer to struct rte_eth_dev. 6574 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 6575 * add: if true, add filter, if false, remove filter 6576 * 6577 * @return 6578 * - On success, zero. 6579 * - On failure, a negative value. 
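* -EEXIST is returned for a duplicate add and -ENOENT for a delete of a missing filter.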
6580 */ 6581 int 6582 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, 6583 struct rte_eth_ntuple_filter *ntuple_filter, 6584 bool add) 6585 { 6586 struct ixgbe_filter_info *filter_info = 6587 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6588 struct ixgbe_5tuple_filter_info filter_5tuple; 6589 struct ixgbe_5tuple_filter *filter; 6590 int ret; 6591 6592 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { 6593 PMD_DRV_LOG(ERR, "only 5tuple is supported."); 6594 return -EINVAL; 6595 } 6596 6597 memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); 6598 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); 6599 if (ret < 0) 6600 return ret; 6601 6602 filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, 6603 &filter_5tuple); 6604 if (filter != NULL && add) { 6605 PMD_DRV_LOG(ERR, "filter exists."); 6606 return -EEXIST; 6607 } 6608 if (filter == NULL && !add) { 6609 PMD_DRV_LOG(ERR, "filter doesn't exist."); 6610 return -ENOENT; 6611 } 6612 6613 if (add) { 6614 filter = rte_zmalloc("ixgbe_5tuple_filter", 6615 sizeof(struct ixgbe_5tuple_filter), 0); 6616 if (filter == NULL) 6617 return -ENOMEM; 6618 rte_memcpy(&filter->filter_info, 6619 &filter_5tuple, 6620 sizeof(struct ixgbe_5tuple_filter_info)); 6621 filter->queue = ntuple_filter->queue; 6622 ret = ixgbe_add_5tuple_filter(dev, filter); 6623 if (ret < 0) { 6624 rte_free(filter); 6625 return ret; 6626 } 6627 } else 6628 ixgbe_remove_5tuple_filter(dev, filter); 6629 6630 return 0; 6631 } 6632 6633 int 6634 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, 6635 struct rte_eth_ethertype_filter *filter, 6636 bool add) 6637 { 6638 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6639 struct ixgbe_filter_info *filter_info = 6640 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6641 uint32_t etqf = 0; 6642 uint32_t etqs = 0; 6643 int ret; 6644 struct ixgbe_ethertype_filter ethertype_filter; 6645 6646 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) 6647 return -EINVAL; 6648 6649 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || 6650 filter->ether_type == RTE_ETHER_TYPE_IPV6) { 6651 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" 6652 " ethertype filter.", filter->ether_type); 6653 return -EINVAL; 6654 } 6655 6656 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { 6657 PMD_DRV_LOG(ERR, "mac compare is unsupported."); 6658 return -EINVAL; 6659 } 6660 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { 6661 PMD_DRV_LOG(ERR, "drop option is unsupported."); 6662 return -EINVAL; 6663 } 6664 6665 ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); 6666 if (ret >= 0 && add) { 6667 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", 6668 filter->ether_type); 6669 return -EEXIST; 6670 } 6671 if (ret < 0 && !add) { 6672 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 6673 filter->ether_type); 6674 return -ENOENT; 6675 } 6676 6677 if (add) { 6678 etqf = IXGBE_ETQF_FILTER_EN; 6679 etqf |= (uint32_t)filter->ether_type; 6680 etqs |= (uint32_t)((filter->queue << 6681 IXGBE_ETQS_RX_QUEUE_SHIFT) & 6682 IXGBE_ETQS_RX_QUEUE); 6683 etqs |= IXGBE_ETQS_QUEUE_EN; 6684 6685 ethertype_filter.ethertype = filter->ether_type; 6686 ethertype_filter.etqf = etqf; 6687 ethertype_filter.etqs = etqs; 6688 ethertype_filter.conf = FALSE; 6689 ret = ixgbe_ethertype_filter_insert(filter_info, 6690 ðertype_filter); 6691 if (ret < 0) { 6692 PMD_DRV_LOG(ERR, "ethertype filters are full."); 6693 return -ENOSPC; 6694 } 6695 } else { 6696 ret = ixgbe_ethertype_filter_remove(filter_info, 
(uint8_t)ret); 6697 if (ret < 0) 6698 return -ENOSYS; 6699 } 6700 IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf); 6701 IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs); 6702 IXGBE_WRITE_FLUSH(hw); 6703 6704 return 0; 6705 } 6706 6707 static int 6708 ixgbe_dev_flow_ops_get(__rte_unused struct rte_eth_dev *dev, 6709 const struct rte_flow_ops **ops) 6710 { 6711 *ops = &ixgbe_flow_ops; 6712 return 0; 6713 } 6714 6715 static u8 * 6716 ixgbe_dev_addr_list_itr(__rte_unused struct ixgbe_hw *hw, 6717 u8 **mc_addr_ptr, u32 *vmdq) 6718 { 6719 u8 *mc_addr; 6720 6721 *vmdq = 0; 6722 mc_addr = *mc_addr_ptr; 6723 *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr)); 6724 return mc_addr; 6725 } 6726 6727 static int 6728 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 6729 struct rte_ether_addr *mc_addr_set, 6730 uint32_t nb_mc_addr) 6731 { 6732 struct ixgbe_hw *hw; 6733 u8 *mc_addr_list; 6734 6735 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6736 mc_addr_list = (u8 *)mc_addr_set; 6737 return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr, 6738 ixgbe_dev_addr_list_itr, TRUE); 6739 } 6740 6741 static uint64_t 6742 ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev) 6743 { 6744 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6745 uint64_t systime_cycles; 6746 6747 switch (hw->mac.type) { 6748 case ixgbe_mac_X550: 6749 case ixgbe_mac_X550EM_x: 6750 case ixgbe_mac_X550EM_a: 6751 case ixgbe_mac_E610: 6752 /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */ 6753 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); 6754 systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) 6755 * NSEC_PER_SEC; 6756 break; 6757 default: 6758 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); 6759 systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) 6760 << 32; 6761 } 6762 6763 return systime_cycles; 6764 } 6765 6766 static uint64_t 6767 ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) 6768 { 6769 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6770 uint64_t rx_tstamp_cycles; 6771 6772 switch (hw->mac.type) { 6773 case ixgbe_mac_X550: 6774 case ixgbe_mac_X550EM_x: 6775 case ixgbe_mac_X550EM_a: 6776 case ixgbe_mac_E610: 6777 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 6778 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 6779 rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) 6780 * NSEC_PER_SEC; 6781 break; 6782 default: 6783 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 6784 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 6785 rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) 6786 << 32; 6787 } 6788 6789 return rx_tstamp_cycles; 6790 } 6791 6792 static uint64_t 6793 ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) 6794 { 6795 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6796 uint64_t tx_tstamp_cycles; 6797 6798 switch (hw->mac.type) { 6799 case ixgbe_mac_X550: 6800 case ixgbe_mac_X550EM_x: 6801 case ixgbe_mac_X550EM_a: 6802 case ixgbe_mac_E610: 6803 /* TXSTMPL stores ns and TXSTMPH stores seconds. */ 6804 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); 6805 tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) 6806 * NSEC_PER_SEC; 6807 break; 6808 default: 6809 /* TXSTMPL stores ns and TXSTMPH stores seconds. 
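* On these MACs the combined 64-bit value is converted to nanoseconds later through the tx_tstamp timecounter.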
*/ 6810 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); 6811 tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) 6812 << 32; 6813 } 6814 6815 return tx_tstamp_cycles; 6816 } 6817 6818 static void 6819 ixgbe_start_timecounters(struct rte_eth_dev *dev) 6820 { 6821 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6822 struct ixgbe_adapter *adapter = dev->data->dev_private; 6823 struct rte_eth_link link; 6824 uint32_t incval = 0; 6825 uint32_t shift = 0; 6826 6827 /* Get current link speed. */ 6828 ixgbe_dev_link_update(dev, 1); 6829 rte_eth_linkstatus_get(dev, &link); 6830 6831 switch (link.link_speed) { 6832 case RTE_ETH_SPEED_NUM_100M: 6833 incval = IXGBE_INCVAL_100; 6834 shift = IXGBE_INCVAL_SHIFT_100; 6835 break; 6836 case RTE_ETH_SPEED_NUM_1G: 6837 incval = IXGBE_INCVAL_1GB; 6838 shift = IXGBE_INCVAL_SHIFT_1GB; 6839 break; 6840 case RTE_ETH_SPEED_NUM_10G: 6841 default: 6842 incval = IXGBE_INCVAL_10GB; 6843 shift = IXGBE_INCVAL_SHIFT_10GB; 6844 break; 6845 } 6846 6847 switch (hw->mac.type) { 6848 case ixgbe_mac_X550: 6849 case ixgbe_mac_X550EM_x: 6850 case ixgbe_mac_X550EM_a: 6851 case ixgbe_mac_E610: 6852 /* Independent of link speed. */ 6853 incval = 1; 6854 /* Cycles read will be interpreted as ns. */ 6855 shift = 0; 6856 /* Fall-through */ 6857 case ixgbe_mac_X540: 6858 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); 6859 break; 6860 case ixgbe_mac_82599EB: 6861 incval >>= IXGBE_INCVAL_SHIFT_82599; 6862 shift -= IXGBE_INCVAL_SHIFT_82599; 6863 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 6864 (1 << IXGBE_INCPER_SHIFT_82599) | incval); 6865 break; 6866 default: 6867 /* Not supported. */ 6868 return; 6869 } 6870 6871 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); 6872 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 6873 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 6874 6875 adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6876 adapter->systime_tc.cc_shift = shift; 6877 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; 6878 6879 adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6880 adapter->rx_tstamp_tc.cc_shift = shift; 6881 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 6882 6883 adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6884 adapter->tx_tstamp_tc.cc_shift = shift; 6885 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 6886 } 6887 6888 static int 6889 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 6890 { 6891 struct ixgbe_adapter *adapter = dev->data->dev_private; 6892 6893 adapter->systime_tc.nsec += delta; 6894 adapter->rx_tstamp_tc.nsec += delta; 6895 adapter->tx_tstamp_tc.nsec += delta; 6896 6897 return 0; 6898 } 6899 6900 static int 6901 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 6902 { 6903 uint64_t ns; 6904 struct ixgbe_adapter *adapter = dev->data->dev_private; 6905 6906 ns = rte_timespec_to_ns(ts); 6907 /* Set the timecounters to a new value. 
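* Only the software timecounter state is updated; the SYSTIM registers themselves are left untouched.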
*/ 6908 adapter->systime_tc.nsec = ns; 6909 adapter->rx_tstamp_tc.nsec = ns; 6910 adapter->tx_tstamp_tc.nsec = ns; 6911 6912 return 0; 6913 } 6914 6915 static int 6916 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 6917 { 6918 uint64_t ns, systime_cycles; 6919 struct ixgbe_adapter *adapter = dev->data->dev_private; 6920 6921 systime_cycles = ixgbe_read_systime_cyclecounter(dev); 6922 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); 6923 *ts = rte_ns_to_timespec(ns); 6924 6925 return 0; 6926 } 6927 6928 static int 6929 ixgbe_timesync_enable(struct rte_eth_dev *dev) 6930 { 6931 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6932 uint32_t tsync_ctl; 6933 uint32_t tsauxc; 6934 struct timespec ts; 6935 6936 memset(&ts, 0, sizeof(struct timespec)); 6937 6938 /* get current system time */ 6939 clock_gettime(CLOCK_REALTIME, &ts); 6940 6941 /* Stop the timesync system time. */ 6942 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0); 6943 /* Reset the timesync system time value. */ 6944 IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0); 6945 IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0); 6946 6947 /* Enable system time for platforms where it isn't on by default. */ 6948 tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); 6949 tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME; 6950 IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); 6951 6952 ixgbe_start_timecounters(dev); 6953 6954 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 6955 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 6956 (RTE_ETHER_TYPE_1588 | 6957 IXGBE_ETQF_FILTER_EN | 6958 IXGBE_ETQF_1588)); 6959 6960 /* Enable timestamping of received PTP packets. */ 6961 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 6962 tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED; 6963 IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); 6964 6965 /* Enable timestamping of transmitted PTP packets. */ 6966 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 6967 tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED; 6968 IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); 6969 6970 IXGBE_WRITE_FLUSH(hw); 6971 6972 /* ixgbe uses zero-based timestamping so only adjust timecounter */ 6973 ixgbe_timesync_write_time(dev, &ts); 6974 6975 return 0; 6976 } 6977 6978 static int 6979 ixgbe_timesync_disable(struct rte_eth_dev *dev) 6980 { 6981 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6982 uint32_t tsync_ctl; 6983 6984 /* Disable timestamping of transmitted PTP packets. */ 6985 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 6986 tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED; 6987 IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); 6988 6989 /* Disable timestamping of received PTP packets. */ 6990 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 6991 tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED; 6992 IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); 6993 6994 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 6995 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0); 6996 6997 /* Stop incrementing the System Time registers. 
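* Writing 0 to TIMINCA sets a zero increment, so SYSTIM stops advancing.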
*/ 6998 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0); 6999 7000 return 0; 7001 } 7002 7003 static int 7004 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 7005 struct timespec *timestamp, 7006 uint32_t flags __rte_unused) 7007 { 7008 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7009 struct ixgbe_adapter *adapter = dev->data->dev_private; 7010 uint32_t tsync_rxctl; 7011 uint64_t rx_tstamp_cycles; 7012 uint64_t ns; 7013 7014 tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 7015 if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0) 7016 return -EINVAL; 7017 7018 rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev); 7019 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); 7020 *timestamp = rte_ns_to_timespec(ns); 7021 7022 return 0; 7023 } 7024 7025 static int 7026 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 7027 struct timespec *timestamp) 7028 { 7029 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7030 struct ixgbe_adapter *adapter = dev->data->dev_private; 7031 uint32_t tsync_txctl; 7032 uint64_t tx_tstamp_cycles; 7033 uint64_t ns; 7034 7035 tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 7036 if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0) 7037 return -EINVAL; 7038 7039 tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev); 7040 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); 7041 *timestamp = rte_ns_to_timespec(ns); 7042 7043 return 0; 7044 } 7045 7046 static int 7047 ixgbe_get_reg_length(struct rte_eth_dev *dev) 7048 { 7049 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7050 int count = 0; 7051 int g_ind = 0; 7052 const struct reg_info *reg_group; 7053 const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 7054 ixgbe_regs_mac_82598EB : ixgbe_regs_others; 7055 7056 while ((reg_group = reg_set[g_ind++])) 7057 count += ixgbe_regs_group_count(reg_group); 7058 7059 return count; 7060 } 7061 7062 static int 7063 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused) 7064 { 7065 int count = 0; 7066 int g_ind = 0; 7067 const struct reg_info *reg_group; 7068 7069 while ((reg_group = ixgbevf_regs[g_ind++])) 7070 count += ixgbe_regs_group_count(reg_group); 7071 7072 return count; 7073 } 7074 7075 static int 7076 ixgbe_get_regs(struct rte_eth_dev *dev, 7077 struct rte_dev_reg_info *regs) 7078 { 7079 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7080 uint32_t *data = regs->data; 7081 int g_ind = 0; 7082 int count = 0; 7083 const struct reg_info *reg_group; 7084 const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 
7085 ixgbe_regs_mac_82598EB : ixgbe_regs_others; 7086 7087 if (data == NULL) { 7088 regs->length = ixgbe_get_reg_length(dev); 7089 regs->width = sizeof(uint32_t); 7090 return 0; 7091 } 7092 7093 /* Support only full register dump */ 7094 if ((regs->length == 0) || 7095 (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) { 7096 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 7097 hw->device_id; 7098 while ((reg_group = reg_set[g_ind++])) 7099 count += ixgbe_read_regs_group(dev, &data[count], 7100 reg_group); 7101 return 0; 7102 } 7103 7104 return -ENOTSUP; 7105 } 7106 7107 static int 7108 ixgbevf_get_regs(struct rte_eth_dev *dev, 7109 struct rte_dev_reg_info *regs) 7110 { 7111 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7112 uint32_t *data = regs->data; 7113 int g_ind = 0; 7114 int count = 0; 7115 const struct reg_info *reg_group; 7116 7117 if (data == NULL) { 7118 regs->length = ixgbevf_get_reg_length(dev); 7119 regs->width = sizeof(uint32_t); 7120 return 0; 7121 } 7122 7123 /* Support only full register dump */ 7124 if ((regs->length == 0) || 7125 (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) { 7126 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 7127 hw->device_id; 7128 while ((reg_group = ixgbevf_regs[g_ind++])) 7129 count += ixgbe_read_regs_group(dev, &data[count], 7130 reg_group); 7131 return 0; 7132 } 7133 7134 return -ENOTSUP; 7135 } 7136 7137 static int 7138 ixgbe_get_eeprom_length(struct rte_eth_dev *dev) 7139 { 7140 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7141 7142 /* Return unit is byte count */ 7143 return hw->eeprom.word_size * 2; 7144 } 7145 7146 static int 7147 ixgbe_get_eeprom(struct rte_eth_dev *dev, 7148 struct rte_dev_eeprom_info *in_eeprom) 7149 { 7150 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7151 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 7152 uint16_t *data = in_eeprom->data; 7153 int first, length; 7154 7155 first = in_eeprom->offset >> 1; 7156 length = in_eeprom->length >> 1; 7157 if ((first > hw->eeprom.word_size) || 7158 ((first + length) > hw->eeprom.word_size)) 7159 return -EINVAL; 7160 7161 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 7162 7163 return eeprom->ops.read_buffer(hw, first, length, data); 7164 } 7165 7166 static int 7167 ixgbe_set_eeprom(struct rte_eth_dev *dev, 7168 struct rte_dev_eeprom_info *in_eeprom) 7169 { 7170 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7171 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 7172 uint16_t *data = in_eeprom->data; 7173 int first, length; 7174 7175 first = in_eeprom->offset >> 1; 7176 length = in_eeprom->length >> 1; 7177 if ((first > hw->eeprom.word_size) || 7178 ((first + length) > hw->eeprom.word_size)) 7179 return -EINVAL; 7180 7181 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 7182 7183 return eeprom->ops.write_buffer(hw, first, length, data); 7184 } 7185 7186 static int 7187 ixgbe_get_module_info(struct rte_eth_dev *dev, 7188 struct rte_eth_dev_module_info *modinfo) 7189 { 7190 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7191 uint32_t status; 7192 uint8_t sff8472_rev, addr_mode; 7193 bool page_swap = false; 7194 7195 /* Check whether we support SFF-8472 or not */ 7196 status = hw->phy.ops.read_i2c_eeprom(hw, 7197 IXGBE_SFF_SFF_8472_COMP, 7198 &sff8472_rev); 7199 if (status != 0) 7200 return -EIO; 7201 7202 /* addressing mode is not supported */ 7203 status = hw->phy.ops.read_i2c_eeprom(hw, 7204 
IXGBE_SFF_SFF_8472_SWAP, 7205 &addr_mode); 7206 if (status != 0) 7207 return -EIO; 7208 7209 if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { 7210 PMD_DRV_LOG(ERR, 7211 "Address change required to access page 0xA2, " 7212 "but not supported. Please report the module " 7213 "type to the driver maintainers."); 7214 page_swap = true; 7215 } 7216 7217 if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) { 7218 /* We have a SFP, but it does not support SFF-8472 */ 7219 modinfo->type = RTE_ETH_MODULE_SFF_8079; 7220 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; 7221 } else { 7222 /* We have a SFP which supports a revision of SFF-8472. */ 7223 modinfo->type = RTE_ETH_MODULE_SFF_8472; 7224 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 7225 } 7226 7227 return 0; 7228 } 7229 7230 static int 7231 ixgbe_get_module_eeprom(struct rte_eth_dev *dev, 7232 struct rte_dev_eeprom_info *info) 7233 { 7234 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7235 uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID; 7236 uint8_t databyte = 0xFF; 7237 uint8_t *data = info->data; 7238 uint32_t i = 0; 7239 7240 for (i = info->offset; i < info->offset + info->length; i++) { 7241 if (i < RTE_ETH_MODULE_SFF_8079_LEN) 7242 status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); 7243 else 7244 status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); 7245 7246 if (status != 0) 7247 return -EIO; 7248 7249 data[i - info->offset] = databyte; 7250 } 7251 7252 return 0; 7253 } 7254 7255 uint16_t 7256 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) { 7257 switch (mac_type) { 7258 case ixgbe_mac_X550: 7259 case ixgbe_mac_X550EM_x: 7260 case ixgbe_mac_X550EM_a: 7261 case ixgbe_mac_E610: 7262 return RTE_ETH_RSS_RETA_SIZE_512; 7263 case ixgbe_mac_X550_vf: 7264 case ixgbe_mac_X550EM_x_vf: 7265 case ixgbe_mac_X550EM_a_vf: 7266 case ixgbe_mac_E610_vf: 7267 return RTE_ETH_RSS_RETA_SIZE_64; 7268 case ixgbe_mac_X540_vf: 7269 case ixgbe_mac_82599_vf: 7270 return 0; 7271 default: 7272 return RTE_ETH_RSS_RETA_SIZE_128; 7273 } 7274 } 7275 7276 uint32_t 7277 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) { 7278 switch (mac_type) { 7279 case ixgbe_mac_X550: 7280 case ixgbe_mac_X550EM_x: 7281 case ixgbe_mac_X550EM_a: 7282 case ixgbe_mac_E610: 7283 if (reta_idx < RTE_ETH_RSS_RETA_SIZE_128) 7284 return IXGBE_RETA(reta_idx >> 2); 7285 else 7286 return IXGBE_ERETA((reta_idx - RTE_ETH_RSS_RETA_SIZE_128) >> 2); 7287 case ixgbe_mac_X550_vf: 7288 case ixgbe_mac_X550EM_x_vf: 7289 case ixgbe_mac_X550EM_a_vf: 7290 case ixgbe_mac_E610_vf: 7291 return IXGBE_VFRETA(reta_idx >> 2); 7292 default: 7293 return IXGBE_RETA(reta_idx >> 2); 7294 } 7295 } 7296 7297 uint32_t 7298 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) { 7299 switch (mac_type) { 7300 case ixgbe_mac_X550_vf: 7301 case ixgbe_mac_X550EM_x_vf: 7302 case ixgbe_mac_X550EM_a_vf: 7303 return IXGBE_VFMRQC; 7304 default: 7305 return IXGBE_MRQC; 7306 } 7307 } 7308 7309 uint32_t 7310 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) { 7311 switch (mac_type) { 7312 case ixgbe_mac_X550_vf: 7313 case ixgbe_mac_X550EM_x_vf: 7314 case ixgbe_mac_X550EM_a_vf: 7315 return IXGBE_VFRSSRK(i); 7316 default: 7317 return IXGBE_RSSRK(i); 7318 } 7319 } 7320 7321 bool 7322 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) { 7323 switch (mac_type) { 7324 case ixgbe_mac_82599_vf: 7325 case ixgbe_mac_X540_vf: 7326 return 0; 7327 default: 7328 return 1; 7329 } 7330 } 7331 7332 static int 7333 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, 7334 struct rte_eth_dcb_info 
*dcb_info) 7335 { 7336 struct ixgbe_dcb_config *dcb_config = 7337 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); 7338 struct ixgbe_dcb_tc_config *tc; 7339 struct rte_eth_dcb_tc_queue_mapping *tc_queue; 7340 uint8_t nb_tcs; 7341 uint8_t i, j; 7342 7343 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) 7344 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs; 7345 else 7346 dcb_info->nb_tcs = 1; 7347 7348 tc_queue = &dcb_info->tc_queue; 7349 nb_tcs = dcb_info->nb_tcs; 7350 7351 if (dcb_config->vt_mode) { /* vt is enabled*/ 7352 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 7353 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; 7354 for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) 7355 dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i]; 7356 if (RTE_ETH_DEV_SRIOV(dev).active > 0) { 7357 for (j = 0; j < nb_tcs; j++) { 7358 tc_queue->tc_rxq[0][j].base = j; 7359 tc_queue->tc_rxq[0][j].nb_queue = 1; 7360 tc_queue->tc_txq[0][j].base = j; 7361 tc_queue->tc_txq[0][j].nb_queue = 1; 7362 } 7363 } else { 7364 for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) { 7365 for (j = 0; j < nb_tcs; j++) { 7366 tc_queue->tc_rxq[i][j].base = 7367 i * nb_tcs + j; 7368 tc_queue->tc_rxq[i][j].nb_queue = 1; 7369 tc_queue->tc_txq[i][j].base = 7370 i * nb_tcs + j; 7371 tc_queue->tc_txq[i][j].nb_queue = 1; 7372 } 7373 } 7374 } 7375 } else { /* vt is disabled*/ 7376 struct rte_eth_dcb_rx_conf *rx_conf = 7377 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; 7378 for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) 7379 dcb_info->prio_tc[i] = rx_conf->dcb_tc[i]; 7380 if (dcb_info->nb_tcs == RTE_ETH_4_TCS) { 7381 for (i = 0; i < dcb_info->nb_tcs; i++) { 7382 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32; 7383 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; 7384 } 7385 dcb_info->tc_queue.tc_txq[0][0].base = 0; 7386 dcb_info->tc_queue.tc_txq[0][1].base = 64; 7387 dcb_info->tc_queue.tc_txq[0][2].base = 96; 7388 dcb_info->tc_queue.tc_txq[0][3].base = 112; 7389 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64; 7390 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; 7391 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; 7392 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; 7393 } else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) { 7394 for (i = 0; i < dcb_info->nb_tcs; i++) { 7395 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16; 7396 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; 7397 } 7398 dcb_info->tc_queue.tc_txq[0][0].base = 0; 7399 dcb_info->tc_queue.tc_txq[0][1].base = 32; 7400 dcb_info->tc_queue.tc_txq[0][2].base = 64; 7401 dcb_info->tc_queue.tc_txq[0][3].base = 80; 7402 dcb_info->tc_queue.tc_txq[0][4].base = 96; 7403 dcb_info->tc_queue.tc_txq[0][5].base = 104; 7404 dcb_info->tc_queue.tc_txq[0][6].base = 112; 7405 dcb_info->tc_queue.tc_txq[0][7].base = 120; 7406 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32; 7407 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; 7408 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; 7409 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; 7410 dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8; 7411 dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8; 7412 dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8; 7413 dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8; 7414 } 7415 } 7416 for (i = 0; i < dcb_info->nb_tcs; i++) { 7417 tc = &dcb_config->tc_config[i]; 7418 dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent; 7419 } 7420 return 0; 7421 } 7422 7423 /* Update e-tag ether type */ 7424 static int 7425 ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw, 7426 uint16_t ether_type) 7427 { 7428 uint32_t etag_etype; 7429 7430 
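	/*
	 * E-tag offload is implemented only on the X550/X550EM MAC families;
	 * the check below rejects every other MAC type with -ENOTSUP before
	 * the ETAG_ETYPE register is touched.
	 */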
if (hw->mac.type != ixgbe_mac_X550 && 7431 hw->mac.type != ixgbe_mac_X550EM_x && 7432 hw->mac.type != ixgbe_mac_X550EM_a) { 7433 return -ENOTSUP; 7434 } 7435 7436 etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); 7437 etag_etype &= ~IXGBE_ETAG_ETYPE_MASK; 7438 etag_etype |= ether_type; 7439 IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); 7440 IXGBE_WRITE_FLUSH(hw); 7441 7442 return 0; 7443 } 7444 7445 /* Enable e-tag tunnel */ 7446 static int 7447 ixgbe_e_tag_enable(struct ixgbe_hw *hw) 7448 { 7449 uint32_t etag_etype; 7450 7451 if (hw->mac.type != ixgbe_mac_X550 && 7452 hw->mac.type != ixgbe_mac_X550EM_x && 7453 hw->mac.type != ixgbe_mac_X550EM_a) { 7454 return -ENOTSUP; 7455 } 7456 7457 etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); 7458 etag_etype |= IXGBE_ETAG_ETYPE_VALID; 7459 IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); 7460 IXGBE_WRITE_FLUSH(hw); 7461 7462 return 0; 7463 } 7464 7465 static int 7466 ixgbe_e_tag_filter_del(struct rte_eth_dev *dev, 7467 struct ixgbe_l2_tunnel_conf *l2_tunnel) 7468 { 7469 int ret = 0; 7470 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7471 uint32_t i, rar_entries; 7472 uint32_t rar_low, rar_high; 7473 7474 if (hw->mac.type != ixgbe_mac_X550 && 7475 hw->mac.type != ixgbe_mac_X550EM_x && 7476 hw->mac.type != ixgbe_mac_X550EM_a) { 7477 return -ENOTSUP; 7478 } 7479 7480 rar_entries = ixgbe_get_num_rx_addrs(hw); 7481 7482 for (i = 1; i < rar_entries; i++) { 7483 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 7484 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(i)); 7485 if ((rar_high & IXGBE_RAH_AV) && 7486 (rar_high & IXGBE_RAH_ADTYPE) && 7487 ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) == 7488 l2_tunnel->tunnel_id)) { 7489 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); 7490 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); 7491 7492 ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL); 7493 7494 return ret; 7495 } 7496 } 7497 7498 return ret; 7499 } 7500 7501 static int 7502 ixgbe_e_tag_filter_add(struct rte_eth_dev *dev, 7503 struct ixgbe_l2_tunnel_conf *l2_tunnel) 7504 { 7505 int ret = 0; 7506 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7507 uint32_t i, rar_entries; 7508 uint32_t rar_low, rar_high; 7509 7510 if (hw->mac.type != ixgbe_mac_X550 && 7511 hw->mac.type != ixgbe_mac_X550EM_x && 7512 hw->mac.type != ixgbe_mac_X550EM_a) { 7513 return -ENOTSUP; 7514 } 7515 7516 /* One entry for one tunnel. Try to remove potential existing entry. */ 7517 ixgbe_e_tag_filter_del(dev, l2_tunnel); 7518 7519 rar_entries = ixgbe_get_num_rx_addrs(hw); 7520 7521 for (i = 1; i < rar_entries; i++) { 7522 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 7523 if (rar_high & IXGBE_RAH_AV) { 7524 continue; 7525 } else { 7526 ixgbe_set_vmdq(hw, i, l2_tunnel->pool); 7527 rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE; 7528 rar_low = l2_tunnel->tunnel_id; 7529 7530 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low); 7531 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high); 7532 7533 return ret; 7534 } 7535 } 7536 7537 PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full." 
7538 " Please remove a rule before adding a new one."); 7539 return -EINVAL; 7540 } 7541 7542 static inline struct ixgbe_l2_tn_filter * 7543 ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info, 7544 struct ixgbe_l2_tn_key *key) 7545 { 7546 int ret; 7547 7548 ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key); 7549 if (ret < 0) 7550 return NULL; 7551 7552 return l2_tn_info->hash_map[ret]; 7553 } 7554 7555 static inline int 7556 ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, 7557 struct ixgbe_l2_tn_filter *l2_tn_filter) 7558 { 7559 int ret; 7560 7561 ret = rte_hash_add_key(l2_tn_info->hash_handle, 7562 &l2_tn_filter->key); 7563 7564 if (ret < 0) { 7565 PMD_DRV_LOG(ERR, 7566 "Failed to insert L2 tunnel filter" 7567 " to hash table %d!", 7568 ret); 7569 return ret; 7570 } 7571 7572 l2_tn_info->hash_map[ret] = l2_tn_filter; 7573 7574 TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); 7575 7576 return 0; 7577 } 7578 7579 static inline int 7580 ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, 7581 struct ixgbe_l2_tn_key *key) 7582 { 7583 int ret; 7584 struct ixgbe_l2_tn_filter *l2_tn_filter; 7585 7586 ret = rte_hash_del_key(l2_tn_info->hash_handle, key); 7587 7588 if (ret < 0) { 7589 PMD_DRV_LOG(ERR, 7590 "No such L2 tunnel filter to delete %d!", 7591 ret); 7592 return ret; 7593 } 7594 7595 l2_tn_filter = l2_tn_info->hash_map[ret]; 7596 l2_tn_info->hash_map[ret] = NULL; 7597 7598 TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); 7599 rte_free(l2_tn_filter); 7600 7601 return 0; 7602 } 7603 7604 /* Add l2 tunnel filter */ 7605 int 7606 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, 7607 struct ixgbe_l2_tunnel_conf *l2_tunnel, 7608 bool restore) 7609 { 7610 int ret; 7611 struct ixgbe_l2_tn_info *l2_tn_info = 7612 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7613 struct ixgbe_l2_tn_key key; 7614 struct ixgbe_l2_tn_filter *node; 7615 7616 if (!restore) { 7617 key.l2_tn_type = l2_tunnel->l2_tunnel_type; 7618 key.tn_id = l2_tunnel->tunnel_id; 7619 7620 node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key); 7621 7622 if (node) { 7623 PMD_DRV_LOG(ERR, 7624 "The L2 tunnel filter already exists!"); 7625 return -EINVAL; 7626 } 7627 7628 node = rte_zmalloc("ixgbe_l2_tn", 7629 sizeof(struct ixgbe_l2_tn_filter), 7630 0); 7631 if (!node) 7632 return -ENOMEM; 7633 7634 rte_memcpy(&node->key, 7635 &key, 7636 sizeof(struct ixgbe_l2_tn_key)); 7637 node->pool = l2_tunnel->pool; 7638 ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node); 7639 if (ret < 0) { 7640 rte_free(node); 7641 return ret; 7642 } 7643 } 7644 7645 switch (l2_tunnel->l2_tunnel_type) { 7646 case RTE_ETH_L2_TUNNEL_TYPE_E_TAG: 7647 ret = ixgbe_e_tag_filter_add(dev, l2_tunnel); 7648 break; 7649 default: 7650 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7651 ret = -EINVAL; 7652 break; 7653 } 7654 7655 if ((!restore) && (ret < 0)) 7656 (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key); 7657 7658 return ret; 7659 } 7660 7661 /* Delete l2 tunnel filter */ 7662 int 7663 ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, 7664 struct ixgbe_l2_tunnel_conf *l2_tunnel) 7665 { 7666 int ret; 7667 struct ixgbe_l2_tn_info *l2_tn_info = 7668 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7669 struct ixgbe_l2_tn_key key; 7670 7671 key.l2_tn_type = l2_tunnel->l2_tunnel_type; 7672 key.tn_id = l2_tunnel->tunnel_id; 7673 ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key); 7674 if (ret < 0) 7675 return ret; 7676 7677 switch (l2_tunnel->l2_tunnel_type) { 7678 case 
RTE_ETH_L2_TUNNEL_TYPE_E_TAG: 7679 ret = ixgbe_e_tag_filter_del(dev, l2_tunnel); 7680 break; 7681 default: 7682 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7683 ret = -EINVAL; 7684 break; 7685 } 7686 7687 return ret; 7688 } 7689 7690 static int 7691 ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en) 7692 { 7693 int ret = 0; 7694 uint32_t ctrl; 7695 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7696 7697 if (hw->mac.type != ixgbe_mac_X550 && 7698 hw->mac.type != ixgbe_mac_X550EM_x && 7699 hw->mac.type != ixgbe_mac_X550EM_a) { 7700 return -ENOTSUP; 7701 } 7702 7703 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 7704 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; 7705 if (en) 7706 ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG; 7707 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); 7708 7709 return ret; 7710 } 7711 7712 static int 7713 ixgbe_update_vxlan_port(struct ixgbe_hw *hw, 7714 uint16_t port) 7715 { 7716 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port); 7717 IXGBE_WRITE_FLUSH(hw); 7718 7719 return 0; 7720 } 7721 7722 /* There is only one register for the VxLAN UDP port. 7723 * So we cannot add several ports; setting a new port simply overwrites the current one. 7724 */ 7725 static int 7726 ixgbe_add_vxlan_port(struct ixgbe_hw *hw, 7727 uint16_t port) 7728 { 7729 if (port == 0) { 7730 PMD_DRV_LOG(ERR, "Adding VxLAN port 0 is not allowed."); 7731 return -EINVAL; 7732 } 7733 7734 return ixgbe_update_vxlan_port(hw, port); 7735 } 7736 7737 /* The VxLAN UDP port cannot really be deleted, because the register 7738 * must always hold a value. 7739 * So deleting a port resets the register to its original value, 0. 7740 */ 7741 static int 7742 ixgbe_del_vxlan_port(struct ixgbe_hw *hw, 7743 uint16_t port) 7744 { 7745 uint16_t cur_port; 7746 7747 cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL); 7748 7749 if (cur_port != port) { 7750 PMD_DRV_LOG(ERR, "Port %u does not exist.", port); 7751 return -EINVAL; 7752 } 7753 7754 return ixgbe_update_vxlan_port(hw, 0); 7755 } 7756 7757 /* Add UDP tunneling port */ 7758 static int 7759 ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 7760 struct rte_eth_udp_tunnel *udp_tunnel) 7761 { 7762 int ret = 0; 7763 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7764 7765 if (hw->mac.type != ixgbe_mac_X550 && 7766 hw->mac.type != ixgbe_mac_X550EM_x && 7767 hw->mac.type != ixgbe_mac_X550EM_a) { 7768 return -ENOTSUP; 7769 } 7770 7771 if (udp_tunnel == NULL) 7772 return -EINVAL; 7773 7774 switch (udp_tunnel->prot_type) { 7775 case RTE_ETH_TUNNEL_TYPE_VXLAN: 7776 ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port); 7777 break; 7778 7779 case RTE_ETH_TUNNEL_TYPE_GENEVE: 7780 case RTE_ETH_TUNNEL_TYPE_TEREDO: 7781 PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); 7782 ret = -EINVAL; 7783 break; 7784 7785 default: 7786 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7787 ret = -EINVAL; 7788 break; 7789 } 7790 7791 return ret; 7792 } 7793 7794 /* Remove UDP tunneling port */ 7795 static int 7796 ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 7797 struct rte_eth_udp_tunnel *udp_tunnel) 7798 { 7799 int ret = 0; 7800 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7801 7802 if (hw->mac.type != ixgbe_mac_X550 && 7803 hw->mac.type != ixgbe_mac_X550EM_x && 7804 hw->mac.type != ixgbe_mac_X550EM_a) { 7805 return -ENOTSUP; 7806 } 7807 7808 if (udp_tunnel == NULL) 7809 return -EINVAL; 7810 7811 switch (udp_tunnel->prot_type) { 7812 case RTE_ETH_TUNNEL_TYPE_VXLAN: 7813 ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port); 7814 break; 7815 case RTE_ETH_TUNNEL_TYPE_GENEVE: 7816 case
RTE_ETH_TUNNEL_TYPE_TEREDO: 7817 PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); 7818 ret = -EINVAL; 7819 break; 7820 default: 7821 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7822 ret = -EINVAL; 7823 break; 7824 } 7825 7826 return ret; 7827 } 7828 7829 static int 7830 ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev) 7831 { 7832 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7833 int ret; 7834 7835 switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_PROMISC)) { 7836 case IXGBE_SUCCESS: 7837 ret = 0; 7838 break; 7839 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 7840 ret = -ENOTSUP; 7841 break; 7842 default: 7843 ret = -EAGAIN; 7844 break; 7845 } 7846 7847 return ret; 7848 } 7849 7850 static int 7851 ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev) 7852 { 7853 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7854 int mode = IXGBEVF_XCAST_MODE_NONE; 7855 int ret; 7856 7857 if (dev->data->all_multicast) 7858 mode = IXGBEVF_XCAST_MODE_ALLMULTI; 7859 7860 switch (hw->mac.ops.update_xcast_mode(hw, mode)) { 7861 case IXGBE_SUCCESS: 7862 ret = 0; 7863 break; 7864 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 7865 ret = -ENOTSUP; 7866 break; 7867 default: 7868 ret = -EAGAIN; 7869 break; 7870 } 7871 7872 return ret; 7873 } 7874 7875 static int 7876 ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev) 7877 { 7878 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7879 int ret; 7880 int mode = IXGBEVF_XCAST_MODE_ALLMULTI; 7881 7882 if (dev->data->promiscuous) 7883 return 0; 7884 7885 switch (hw->mac.ops.update_xcast_mode(hw, mode)) { 7886 case IXGBE_SUCCESS: 7887 ret = 0; 7888 break; 7889 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 7890 ret = -ENOTSUP; 7891 break; 7892 default: 7893 ret = -EAGAIN; 7894 break; 7895 } 7896 7897 return ret; 7898 } 7899 7900 static int 7901 ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev) 7902 { 7903 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7904 int ret; 7905 7906 if (dev->data->promiscuous) 7907 return 0; 7908 7909 switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI)) { 7910 case IXGBE_SUCCESS: 7911 ret = 0; 7912 break; 7913 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 7914 ret = -ENOTSUP; 7915 break; 7916 default: 7917 ret = -EAGAIN; 7918 break; 7919 } 7920 7921 return ret; 7922 } 7923 7924 static void ixgbevf_mbx_process(struct rte_eth_dev *dev) 7925 { 7926 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7927 u32 in_msg = 0; 7928 7929 /* peek the message first */ 7930 in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM); 7931 7932 /* PF reset VF event */ 7933 if (in_msg == IXGBE_PF_CONTROL_MSG) { 7934 /* dummy mbx read to ack pf */ 7935 if (ixgbe_read_mbx(hw, &in_msg, 1, 0)) 7936 return; 7937 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 7938 NULL); 7939 } 7940 } 7941 7942 static int 7943 ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) 7944 { 7945 uint32_t eicr; 7946 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7947 struct ixgbe_interrupt *intr = 7948 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 7949 ixgbevf_intr_disable(dev); 7950 7951 /* read-on-clear nic registers here */ 7952 eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR); 7953 intr->flags = 0; 7954 7955 /* only one misc vector supported - mailbox */ 7956 eicr &= IXGBE_VTEICR_MASK; 7957 if (eicr == IXGBE_MISC_VEC_ID) 7958 intr->flags |= IXGBE_FLAG_MAILBOX; 7959 7960 return 0; 7961 } 7962 7963 static int 7964 
ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev) 7965 { 7966 struct ixgbe_interrupt *intr = 7967 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 7968 7969 if (intr->flags & IXGBE_FLAG_MAILBOX) { 7970 ixgbevf_mbx_process(dev); 7971 intr->flags &= ~IXGBE_FLAG_MAILBOX; 7972 } 7973 7974 ixgbevf_intr_enable(dev); 7975 7976 return 0; 7977 } 7978 7979 static void 7980 ixgbevf_dev_interrupt_handler(void *param) 7981 { 7982 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 7983 7984 ixgbevf_dev_interrupt_get_status(dev); 7985 ixgbevf_dev_interrupt_action(dev); 7986 } 7987 7988 /** 7989 * ixgbe_disable_sec_tx_path_generic - Stops the transmit data path 7990 * @hw: pointer to hardware structure 7991 * 7992 * Stops the transmit data path and waits for the HW to internally empty 7993 * the Tx security block 7994 **/ 7995 int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw) 7996 { 7997 #define IXGBE_MAX_SECTX_POLL 40 7998 7999 int i; 8000 int sectxreg; 8001 8002 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 8003 sectxreg |= IXGBE_SECTXCTRL_TX_DIS; 8004 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); 8005 for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) { 8006 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT); 8007 if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY) 8008 break; 8009 /* Use interrupt-safe sleep just in case */ 8010 usec_delay(1000); 8011 } 8012 8013 /* For informational purposes only */ 8014 if (i >= IXGBE_MAX_SECTX_POLL) 8015 PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security " 8016 "path fully disabled. Continuing with init."); 8017 8018 return IXGBE_SUCCESS; 8019 } 8020 8021 /** 8022 * ixgbe_enable_sec_tx_path_generic - Enables the transmit data path 8023 * @hw: pointer to hardware structure 8024 * 8025 * Enables the transmit data path. 
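 *
 * Counterpart of ixgbe_disable_sec_tx_path_generic(): it clears the
 * IXGBE_SECTXCTRL_TX_DIS bit so traffic can flow through the Tx security
 * block again.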
8026 **/ 8027 int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw) 8028 { 8029 uint32_t sectxreg; 8030 8031 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 8032 sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS; 8033 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); 8034 IXGBE_WRITE_FLUSH(hw); 8035 8036 return IXGBE_SUCCESS; 8037 } 8038 8039 /* restore n-tuple filter */ 8040 static inline void 8041 ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev) 8042 { 8043 struct ixgbe_filter_info *filter_info = 8044 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8045 struct ixgbe_5tuple_filter *node; 8046 8047 TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) { 8048 ixgbe_inject_5tuple_filter(dev, node); 8049 } 8050 } 8051 8052 /* restore ethernet type filter */ 8053 static inline void 8054 ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev) 8055 { 8056 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8057 struct ixgbe_filter_info *filter_info = 8058 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8059 int i; 8060 8061 for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { 8062 if (filter_info->ethertype_mask & (1 << i)) { 8063 IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 8064 filter_info->ethertype_filters[i].etqf); 8065 IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 8066 filter_info->ethertype_filters[i].etqs); 8067 IXGBE_WRITE_FLUSH(hw); 8068 } 8069 } 8070 } 8071 8072 /* restore SYN filter */ 8073 static inline void 8074 ixgbe_syn_filter_restore(struct rte_eth_dev *dev) 8075 { 8076 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8077 struct ixgbe_filter_info *filter_info = 8078 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8079 uint32_t synqf; 8080 8081 synqf = filter_info->syn_info; 8082 8083 if (synqf & IXGBE_SYN_FILTER_ENABLE) { 8084 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); 8085 IXGBE_WRITE_FLUSH(hw); 8086 } 8087 } 8088 8089 /* restore L2 tunnel filter */ 8090 static inline void 8091 ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev) 8092 { 8093 struct ixgbe_l2_tn_info *l2_tn_info = 8094 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8095 struct ixgbe_l2_tn_filter *node; 8096 struct ixgbe_l2_tunnel_conf l2_tn_conf; 8097 8098 TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) { 8099 l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type; 8100 l2_tn_conf.tunnel_id = node->key.tn_id; 8101 l2_tn_conf.pool = node->pool; 8102 (void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE); 8103 } 8104 } 8105 8106 /* restore rss filter */ 8107 static inline void 8108 ixgbe_rss_filter_restore(struct rte_eth_dev *dev) 8109 { 8110 struct ixgbe_filter_info *filter_info = 8111 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8112 8113 if (filter_info->rss_info.conf.queue_num) 8114 ixgbe_config_rss_filter(dev, 8115 &filter_info->rss_info, TRUE); 8116 } 8117 8118 static int 8119 ixgbe_filter_restore(struct rte_eth_dev *dev) 8120 { 8121 ixgbe_ntuple_filter_restore(dev); 8122 ixgbe_ethertype_filter_restore(dev); 8123 ixgbe_syn_filter_restore(dev); 8124 ixgbe_fdir_filter_restore(dev); 8125 ixgbe_l2_tn_filter_restore(dev); 8126 ixgbe_rss_filter_restore(dev); 8127 8128 return 0; 8129 } 8130 8131 static void 8132 ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev) 8133 { 8134 struct ixgbe_l2_tn_info *l2_tn_info = 8135 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8136 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8137 8138 if (l2_tn_info->e_tag_en) 8139 (void)ixgbe_e_tag_enable(hw); 8140 8141 if 
(l2_tn_info->e_tag_fwd_en) 8142 (void)ixgbe_e_tag_forwarding_en_dis(dev, 1); 8143 8144 (void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type); 8145 } 8146 8147 /* remove all the n-tuple filters */ 8148 void 8149 ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev) 8150 { 8151 struct ixgbe_filter_info *filter_info = 8152 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8153 struct ixgbe_5tuple_filter *p_5tuple; 8154 8155 while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) 8156 ixgbe_remove_5tuple_filter(dev, p_5tuple); 8157 } 8158 8159 /* remove all the ether type filters */ 8160 void 8161 ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev) 8162 { 8163 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8164 struct ixgbe_filter_info *filter_info = 8165 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8166 int i; 8167 8168 for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { 8169 if (filter_info->ethertype_mask & (1 << i) && 8170 !filter_info->ethertype_filters[i].conf) { 8171 (void)ixgbe_ethertype_filter_remove(filter_info, 8172 (uint8_t)i); 8173 IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0); 8174 IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0); 8175 IXGBE_WRITE_FLUSH(hw); 8176 } 8177 } 8178 } 8179 8180 /* remove the SYN filter */ 8181 void 8182 ixgbe_clear_syn_filter(struct rte_eth_dev *dev) 8183 { 8184 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8185 struct ixgbe_filter_info *filter_info = 8186 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8187 8188 if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) { 8189 filter_info->syn_info = 0; 8190 8191 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0); 8192 IXGBE_WRITE_FLUSH(hw); 8193 } 8194 } 8195 8196 /* remove all the L2 tunnel filters */ 8197 int 8198 ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev) 8199 { 8200 struct ixgbe_l2_tn_info *l2_tn_info = 8201 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8202 struct ixgbe_l2_tn_filter *l2_tn_filter; 8203 struct ixgbe_l2_tunnel_conf l2_tn_conf; 8204 int ret = 0; 8205 8206 while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) { 8207 l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type; 8208 l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id; 8209 l2_tn_conf.pool = l2_tn_filter->pool; 8210 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf); 8211 if (ret < 0) 8212 return ret; 8213 } 8214 8215 return 0; 8216 } 8217 8218 void 8219 ixgbe_dev_macsec_setting_save(struct rte_eth_dev *dev, 8220 struct ixgbe_macsec_setting *macsec_setting) 8221 { 8222 struct ixgbe_macsec_setting *macsec = 8223 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private); 8224 8225 macsec->offload_en = macsec_setting->offload_en; 8226 macsec->encrypt_en = macsec_setting->encrypt_en; 8227 macsec->replayprotect_en = macsec_setting->replayprotect_en; 8228 } 8229 8230 void 8231 ixgbe_dev_macsec_setting_reset(struct rte_eth_dev *dev) 8232 { 8233 struct ixgbe_macsec_setting *macsec = 8234 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private); 8235 8236 macsec->offload_en = 0; 8237 macsec->encrypt_en = 0; 8238 macsec->replayprotect_en = 0; 8239 } 8240 8241 void 8242 ixgbe_dev_macsec_register_enable(struct rte_eth_dev *dev, 8243 struct ixgbe_macsec_setting *macsec_setting) 8244 { 8245 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8246 uint32_t ctrl; 8247 uint8_t en = macsec_setting->encrypt_en; 8248 uint8_t rp = macsec_setting->replayprotect_en; 8249 8250 /** 8251 * Workaround: 8252 * As no 
ixgbe_disable_sec_rx_path equivalent is 8253 * implemented for tx in the base code, and we are 8254 * not allowed to modify the base code in DPDK, so 8255 * just call the hand-written one directly for now. 8256 * The hardware support has been checked by 8257 * ixgbe_disable_sec_rx_path(). 8258 */ 8259 ixgbe_disable_sec_tx_path_generic(hw); 8260 8261 /* Enable Ethernet CRC (required by MACsec offload) */ 8262 ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0); 8263 ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP; 8264 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl); 8265 8266 /* Enable the TX and RX crypto engines */ 8267 ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 8268 ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS; 8269 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl); 8270 8271 ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 8272 ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS; 8273 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl); 8274 8275 ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); 8276 ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK; 8277 ctrl |= 0x3; 8278 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl); 8279 8280 /* Enable SA lookup */ 8281 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL); 8282 ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK; 8283 ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT : 8284 IXGBE_LSECTXCTRL_AUTH; 8285 ctrl |= IXGBE_LSECTXCTRL_AISCI; 8286 ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK; 8287 ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK; 8288 IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl); 8289 8290 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL); 8291 ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK; 8292 ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT; 8293 ctrl &= ~IXGBE_LSECRXCTRL_PLSH; 8294 if (rp) 8295 ctrl |= IXGBE_LSECRXCTRL_RP; 8296 else 8297 ctrl &= ~IXGBE_LSECRXCTRL_RP; 8298 IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl); 8299 8300 /* Start the data paths */ 8301 ixgbe_enable_sec_rx_path(hw); 8302 /** 8303 * Workaround: 8304 * As no ixgbe_enable_sec_rx_path equivalent is 8305 * implemented for tx in the base code, and we are 8306 * not allowed to modify the base code in DPDK, so 8307 * just call the hand-written one directly for now. 8308 */ 8309 ixgbe_enable_sec_tx_path_generic(hw); 8310 } 8311 8312 void 8313 ixgbe_dev_macsec_register_disable(struct rte_eth_dev *dev) 8314 { 8315 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8316 uint32_t ctrl; 8317 8318 /** 8319 * Workaround: 8320 * As no ixgbe_disable_sec_rx_path equivalent is 8321 * implemented for tx in the base code, and we are 8322 * not allowed to modify the base code in DPDK, so 8323 * just call the hand-written one directly for now. 8324 * The hardware support has been checked by 8325 * ixgbe_disable_sec_rx_path(). 
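 * The sequence below mirrors ixgbe_dev_macsec_register_enable(): stop the
 * Tx security path, set the SECTXCTRL/SECRXCTRL disable bits and disable
 * SA lookup, then restart both data paths.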
8326 */ 8327 ixgbe_disable_sec_tx_path_generic(hw); 8328 8329 /* Disable the TX and RX crypto engines */ 8330 ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 8331 ctrl |= IXGBE_SECTXCTRL_SECTX_DIS; 8332 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl); 8333 8334 ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 8335 ctrl |= IXGBE_SECRXCTRL_SECRX_DIS; 8336 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl); 8337 8338 /* Disable SA lookup */ 8339 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL); 8340 ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK; 8341 ctrl |= IXGBE_LSECTXCTRL_DISABLE; 8342 IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl); 8343 8344 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL); 8345 ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK; 8346 ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT; 8347 IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl); 8348 8349 /* Start the data paths */ 8350 ixgbe_enable_sec_rx_path(hw); 8351 /** 8352 * Workaround: 8353 * As no ixgbe_enable_sec_rx_path equivalent is 8354 * implemented for tx in the base code, and we are 8355 * not allowed to modify the base code in DPDK, so 8356 * just call the hand-written one directly for now. 8357 */ 8358 ixgbe_enable_sec_tx_path_generic(hw); 8359 } 8360 8361 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd); 8362 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map); 8363 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci"); 8364 RTE_PMD_REGISTER_PARAM_STRING(net_ixgbe, 8365 IXGBE_DEVARG_FIBER_SDP3_NOT_TX_DISABLE "=<0|1>"); 8366 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd); 8367 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map); 8368 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci"); 8369 RTE_PMD_REGISTER_PARAM_STRING(net_ixgbe_vf, 8370 IXGBEVF_DEVARG_PFLINK_FULLCHK "=<0|1>"); 8371 8372 RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_init, init, NOTICE); 8373 RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_driver, driver, NOTICE); 8374 8375 #ifdef RTE_ETHDEV_DEBUG_RX 8376 RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_rx, rx, DEBUG); 8377 #endif 8378 #ifdef RTE_ETHDEV_DEBUG_TX 8379 RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_tx, tx, DEBUG); 8380 #endif 8381
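/*
 * Usage note (illustrative only, not part of the driver logic): the
 * parameters registered with RTE_PMD_REGISTER_PARAM_STRING() above are
 * passed per device as EAL devargs. A minimal sketch, assuming a
 * hypothetical VF at PCI address 0000:01:10.0:
 *
 *   dpdk-testpmd -a 0000:01:10.0,pflink_fullchk=1 -- -i
 *
 * which requests the fuller PF link-status check during VF link updates.
 */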