/* $NetBSD: ixgbe_82599.c,v 1.22 2019/12/23 09:36:17 msaitoh Exp $ */

/******************************************************************************
  SPDX-License-Identifier: BSD-3-Clause

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82599.c 331224 2018-03-19 20:55:05Z erj $*/

#include "ixgbe_type.h"
#include "ixgbe_82599.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"

#define IXGBE_82599_MAX_TX_QUEUES 128
#define IXGBE_82599_MAX_RX_QUEUES 128
#define IXGBE_82599_RAR_ENTRIES   128
#define IXGBE_82599_MC_TBL_SIZE   128
#define IXGBE_82599_VFT_TBL_SIZE  128
#define IXGBE_82599_RX_PB_SIZE    512

static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
				   u16 offset, u16 *data);
static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
					  u16 words, u16 *data);
static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
				     u8 dev_addr, u8 *data);
static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
				      u8 dev_addr, u8 data);

void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");

	/*
	 * Enable the laser control functions for SFP+ fiber
	 * when MNG is not enabled.
	 */
	if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
	    !ixgbe_mng_enabled(hw)) {
		mac->ops.disable_tx_laser =
		    ixgbe_disable_tx_laser_multispeed_fiber;
		mac->ops.enable_tx_laser =
		    ixgbe_enable_tx_laser_multispeed_fiber;
		mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber;

	} else {
		mac->ops.disable_tx_laser = NULL;
		mac->ops.enable_tx_laser = NULL;
		mac->ops.flap_tx_laser = NULL;
	}

	if (hw->phy.multispeed_fiber) {
		/* Set up dual speed SFP+ support */
		mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
		mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
		mac->ops.set_rate_select_speed =
		    ixgbe_set_hard_rate_select_speed;
		if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed)
			mac->ops.set_rate_select_speed =
			    ixgbe_set_soft_rate_select_speed;
	} else {
		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
		    (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
		     hw->phy.smart_speed == ixgbe_smart_speed_on) &&
		    !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
			mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed;
		} else {
			mac->ops.setup_link = ixgbe_setup_mac_link_82599;
		}
	}
}

/**
 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
 * @hw: pointer to hardware structure
 *
 * Initialize any function pointers that were not able to be
 * set during init_shared_code because the PHY/SFP type was
 * not known.  Perform the SFP init if necessary.
 *
 **/
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;
	u32 esdp;

	DEBUGFUNC("ixgbe_init_phy_ops_82599");

	if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
		/* Store flag indicating I2C bus access control unit. */
		hw->phy.qsfp_shared_i2c_bus = TRUE;

		/* Initialize access to QSFP+ I2C bus */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp |= IXGBE_ESDP_SDP0_DIR;
		esdp &= ~IXGBE_ESDP_SDP1_DIR;
		esdp &= ~IXGBE_ESDP_SDP0;
		esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
		esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);

		phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599;
		phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599;
	}
	/* Identify the PHY or SFP module */
	ret_val = phy->ops.identify(hw);
	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto init_phy_ops_out;

	/* Setup function pointers based on detected SFP module and speeds */
	ixgbe_init_mac_link_ops_82599(hw);
	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
		hw->phy.ops.reset = NULL;

	/* If copper media, overwrite with copper function pointers */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = ixgbe_setup_copper_link_82599;
		mac->ops.get_link_capabilities =
		    ixgbe_get_copper_link_capabilities_generic;
	}

	/* Set necessary function pointers based on PHY type */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
		    ixgbe_get_phy_firmware_version_tnx;
		break;
	default:
		break;
	}
init_phy_ops_out:
	return ret_val;
}

s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset, data_value;

	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");

	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
		ixgbe_init_mac_link_ops_82599(hw);

		hw->phy.ops.reset = NULL;

		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS)
			goto setup_sfp_out;

		/* PHY config will finish before releasing the semaphore */
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto setup_sfp_out;
		}

		if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
			goto setup_sfp_err;
		while (data_value != 0xffff) {
			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
			IXGBE_WRITE_FLUSH(hw);
			if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
				goto setup_sfp_err;
		}

		/* Release the semaphore */
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		/* Delay obtaining semaphore again to allow FW access;
		 * prot_autoc_write uses the semaphore too.
		 */
		msec_delay(hw->eeprom.semaphore_delay);

		/* Restart DSP and set SFI mode */
		ret_val = hw->mac.ops.prot_autoc_write(hw,
			hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
			FALSE);

		if (ret_val) {
			DEBUGOUT("sfp module setup not complete\n");
			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
			goto setup_sfp_out;
		}

	}

setup_sfp_out:
	return ret_val;

setup_sfp_err:
	/* Release the semaphore */
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
	/* Delay obtaining semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", data_offset);
	return IXGBE_ERR_PHY;
}

/**
 * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
 * @hw: pointer to hardware structure
 * @locked: Return whether we locked for this read.
 * @reg_val: Value we read from AUTOC
 *
 * For this part (82599) we need to wrap read-modify-writes with a possible
 * FW/SW lock.  It is assumed this lock will be freed with the next
 * prot_autoc_write_82599().
 */
s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
{
	s32 ret_val;

	*locked = FALSE;
	/* If LESM is on then we need to hold the SW/FW semaphore. */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS)
			return IXGBE_ERR_SWFW_SYNC;

		*locked = TRUE;
	}

	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	return IXGBE_SUCCESS;
}

/**
 * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
 * @hw: pointer to hardware structure
 * @autoc: value to write to AUTOC
 * @locked: bool to indicate whether the SW/FW lock was already taken by
 *	    previous prot_autoc_read_82599.
 *
 * This part (82599) may need to hold the SW/FW lock around all writes to
 * AUTOC.  Likewise after a write we need to do a pipeline reset.
 */
s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
{
	s32 ret_val = IXGBE_SUCCESS;

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		goto out;

	/* We only need to get the lock if:
	 *  - We didn't do it already (in the read part of a read-modify-write)
	 *  - LESM is enabled.
	 */
	if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS)
			return IXGBE_ERR_SWFW_SYNC;

		locked = TRUE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	ret_val = ixgbe_reset_pipeline_82599(hw);

out:
	/* Free the SW/FW semaphore as we either grabbed it here or
	 * already had it when this function was called.
	 */
	if (locked)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	return ret_val;
}
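
/*
 * Editorial note (not upstream code): a minimal sketch of how a caller is
 * expected to pair the two hooks above for a protected read-modify-write of
 * AUTOC.  The OR-ed in IXGBE_AUTOC_AN_RESTART bit is only an illustrative
 * modification; the point is that the "locked" state returned by the read
 * is handed to the write, which then releases the semaphore.
 *
 *	bool locked;
 *	u32 autoc;
 *
 *	if (hw->mac.ops.prot_autoc_read(hw, &locked, &autoc) ==
 *	    IXGBE_SUCCESS) {
 *		autoc |= IXGBE_AUTOC_AN_RESTART;
 *		(void)hw->mac.ops.prot_autoc_write(hw, autoc, locked);
 *	}
 */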

/**
 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
 * @hw: pointer to hardware structure
 *
 * Initialize the function pointers and assign the MAC type for 82599.
 * Does not touch the hardware.
 **/

s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82599");

	ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.identify = ixgbe_identify_phy_82599;
	phy->ops.init = ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = ixgbe_reset_hw_82599;
	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
	mac->ops.get_media_type = ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
	    ixgbe_get_supported_physical_layer_82599;
	mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
	mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = ixgbe_start_hw_82599;
	mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
	mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
	mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
	mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
	mac->ops.prot_autoc_read = prot_autoc_read_82599;
	mac->ops.prot_autoc_write = prot_autoc_write_82599;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
	mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
	mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = ixgbe_set_vfta_generic;
	mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
	mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
	mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599;
	mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
	mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;

	/* Link */
	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599;
	mac->ops.check_link = ixgbe_check_mac_link_generic;
	mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
	ixgbe_init_mac_link_ops_82599(hw);

	mac->mcft_size		= IXGBE_82599_MC_TBL_SIZE;
	mac->vft_size		= IXGBE_82599_VFT_TBL_SIZE;
	mac->num_rar_entries	= IXGBE_82599_RAR_ENTRIES;
	mac->rx_pb_size		= IXGBE_82599_RX_PB_SIZE;
	mac->max_rx_queues	= IXGBE_82599_MAX_RX_QUEUES;
	mac->max_tx_queues	= IXGBE_82599_MAX_TX_QUEUES;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);

	mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
				      & IXGBE_FWSM_MODE_MASK);

	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;

	/* EEPROM */
	eeprom->ops.read = ixgbe_read_eeprom_82599;
	eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;

	mac->ops.bypass_rw = ixgbe_bypass_rw_generic;
	mac->ops.bypass_valid_rd = ixgbe_bypass_valid_rd_generic;
	mac->ops.bypass_set = ixgbe_bypass_set_generic;
	mac->ops.bypass_rd_eep = ixgbe_bypass_rd_eep_generic;

	mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;

	return ret_val;
}

/**
 * ixgbe_get_link_capabilities_82599 - Determines link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @autoneg: TRUE when autoneg or autotry is enabled
 *
 * Determines the link capabilities by reading the AUTOC register.
 **/
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed,
				      bool *autoneg)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = 0;

	DEBUGFUNC("ixgbe_get_link_capabilities_82599");


	/* Check if 1G SFP module. */
	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		goto out;
	}

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not
	 * been stored, use the current register values.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_10G_SERIAL:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
		*autoneg = FALSE;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
		break;
	}

	if (hw->phy.multispeed_fiber) {
		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
			  IXGBE_LINK_SPEED_1GB_FULL;

		/* QSFP must not enable full auto-negotiation;
		 * limited autoneg is enabled at 1G
		 */
		if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
			*autoneg = FALSE;
		else
			*autoneg = TRUE;
	}

out:
	return status;
}
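
/*
 * Editorial note (not upstream code): a minimal usage sketch for the
 * capability query above.  @speed comes back as a bit mask, so callers test
 * individual IXGBE_LINK_SPEED_* bits rather than comparing for equality.
 *
 *	ixgbe_link_speed speed;
 *	bool autoneg;
 *
 *	if (ixgbe_get_link_capabilities_82599(hw, &speed, &autoneg) ==
 *	    IXGBE_SUCCESS && (speed & IXGBE_LINK_SPEED_10GB_FULL)) {
 *		... 10GbE is attainable on this configuration ...
 *	}
 */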

/**
 * ixgbe_get_media_type_82599 - Get media type
 * @hw: pointer to hardware structure
 *
 * Returns the media type (fiber, copper, backplane)
 **/
enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
{
	enum ixgbe_media_type media_type;

	DEBUGFUNC("ixgbe_get_media_type_82599");

	/* Detect if there is a copper PHY attached. */
	switch (hw->phy.type) {
	case ixgbe_phy_cu_unknown:
	case ixgbe_phy_tn:
		media_type = ixgbe_media_type_copper;
		goto out;
	default:
		break;
	}

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
	case IXGBE_DEV_ID_82599_XAUI_LOM:
		/* Default device ID is mezzanine card KX/KX4 */
		media_type = ixgbe_media_type_backplane;
		break;
	case IXGBE_DEV_ID_82599_SFP:
	case IXGBE_DEV_ID_82599_SFP_FCOE:
	case IXGBE_DEV_ID_82599_SFP_EM:
	case IXGBE_DEV_ID_82599_SFP_SF2:
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599EN_SFP:
		media_type = ixgbe_media_type_fiber;
		break;
	case IXGBE_DEV_ID_82599_CX4:
		media_type = ixgbe_media_type_cx4;
		break;
	case IXGBE_DEV_ID_82599_T3_LOM:
		media_type = ixgbe_media_type_copper;
		break;
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		media_type = ixgbe_media_type_fiber_qsfp;
		break;
	case IXGBE_DEV_ID_82599_BYPASS:
		media_type = ixgbe_media_type_fiber_fixed;
		hw->phy.multispeed_fiber = TRUE;
		break;
	default:
		media_type = ixgbe_media_type_unknown;
		break;
	}
out:
	return media_type;
}

/**
 * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
 * @hw: pointer to hardware structure
 *
 * Disables link during D3 power down sequence.
 *
 **/
void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
{
	u32 autoc2_reg;
	u16 ee_ctrl_2 = 0;

	DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
	ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);

	if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
	    ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
		autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
		autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
	}
}

/**
 * ixgbe_start_mac_link_82599 - Setup MAC link settings
 * @hw: pointer to hardware structure
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Configures link settings based on values in the ixgbe_hw struct.
 * Restarts the link.  Performs autonegotiation if needed.
 **/
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
			       bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;
	bool got_lock = FALSE;

	DEBUGFUNC("ixgbe_start_mac_link_82599");


	/* reset_pipeline requires us to hold this lock as it writes to
	 * AUTOC.
	 */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		status = hw->mac.ops.acquire_swfw_sync(hw,
						       IXGBE_GSSR_MAC_CSR_SM);
		if (status != IXGBE_SUCCESS)
			goto out;

		got_lock = TRUE;
	}

	/* Restart link */
	ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autoneg did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noise during initial link setup */
	msec_delay(50);

out:
	return status;
}

/**
 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
 * @hw: pointer to hardware structure
 *
 * The base drivers may require better control over SFP+ module
 * PHY states.  This includes selectively shutting down the Tx
 * laser on the PHY, effectively halting physical link.
 **/
void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return;

	/* Disable Tx laser; allow 100us to go dark per spec */
	esdp_reg |= IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(100);
}

/**
 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
 * @hw: pointer to hardware structure
 *
 * The base drivers may require better control over SFP+ module
 * PHY states.  This includes selectively turning on the Tx
 * laser on the PHY, effectively starting physical link.
 **/
void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Enable Tx laser; allow 100ms to light up */
	esdp_reg &= ~IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(100);
}

/**
 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
 * @hw: pointer to hardware structure
 *
 * When the driver changes the link speeds that it can support,
 * it sets autotry_restart to TRUE to indicate that we need to
 * initiate a new autotry session with the link partner.  To do
 * so, we set the speed then disable and re-enable the Tx laser, to
 * alert the link partner that it also needs to restart autotry on its
 * end.  This is consistent with TRUE clause 37 autoneg, which also
 * involves a loss of signal.
 **/
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return;

	if (hw->mac.autotry_restart) {
		ixgbe_disable_tx_laser_multispeed_fiber(hw);
		ixgbe_enable_tx_laser_multispeed_fiber(hw);
		hw->mac.autotry_restart = FALSE;
	}
}

/**
 * ixgbe_set_hard_rate_select_speed - Set module link speed
 * @hw: pointer to hardware structure
 * @speed: link speed to set
 *
 * Set module link speed via RS0/RS1 rate select pins.
 */
void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	switch (speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		esdp_reg &= ~IXGBE_ESDP_SDP5;
		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
		break;
	default:
		DEBUGOUT("Invalid fixed module speed\n");
		return;
	}

	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Implements the Intel SmartSpeed algorithm.
 **/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
				    ixgbe_link_speed speed,
				    bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 i, j;
	bool link_up = FALSE;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");

	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate.  This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = FALSE;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed, &link_up,
						  FALSE);
			if (status != IXGBE_SUCCESS)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link.  If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = TRUE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g.  This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Turn SmartSpeed back off. */
	hw->phy.smart_speed_active = FALSE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);

out:
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		DEBUGOUT("Smartspeed has downgraded the link speed "
			 "from the maximum advertised\n");
	return status;
}

/**
 * ixgbe_setup_mac_link_82599 - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 **/
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
			       ixgbe_link_speed speed,
			       bool autoneg_wait_to_complete)
{
	bool autoneg = FALSE;
	s32 status = IXGBE_SUCCESS;
	u32 pma_pmd_1g, link_mode;
	/* holds the value of the AUTOC register at this point in time */
	u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	/* holds the cached value of the AUTOC register */
	u32 orig_autoc = 0;
	/* temporary variable used for comparison purposes */
	u32 autoc = current_autoc;
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 links_reg;
	u32 i;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;

	DEBUGFUNC("ixgbe_setup_mac_link_82599");

	/* Check to see if speed passed in is supported. */
	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	if (status)
		goto out;

	speed &= link_capabilities;

	if (speed == 0) {
		ixgbe_disable_tx_laser(hw);	/* For fiber */
		ixgbe_set_phy_power(hw, false);	/* For copper */
	} else {
		/* In case previous media setting was none (link down) */
		ixgbe_enable_tx_laser(hw);	/* For fiber */
		ixgbe_set_phy_power(hw, true);	/* For copper */
	}

	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
	if (hw->mac.orig_link_settings_stored)
		orig_autoc = hw->mac.orig_autoc;
	else
		orig_autoc = autoc;

	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;

	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
		/* Set KX4/KX/KR support according to speed requested */
		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
				autoc |= IXGBE_AUTOC_KX4_SUPP;
			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
			    (hw->phy.smart_speed_active == FALSE))
				autoc |= IXGBE_AUTOC_KR_SUPP;
		}
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
		/* Switch from 1G SFI to 10G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
		}
	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
		/* Switch from 10G SFI to 1G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel)
				autoc |= IXGBE_AUTOC_LMS_1G_AN;
			else
				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
		}
	}

	if (autoc != current_autoc) {
		/* Restart link */
		status = hw->mac.ops.prot_autoc_write(hw, autoc, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		/* Only poll for autoneg to complete if specified to do so */
		if (autoneg_wait_to_complete) {
			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
				links_reg = 0; /* Just in case Autoneg time=0 */
				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
					links_reg =
					    IXGBE_READ_REG(hw, IXGBE_LINKS);
					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
						break;
					msec_delay(100);
				}
				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
					status =
					    IXGBE_ERR_AUTONEG_NOT_COMPLETE;
					DEBUGOUT("Autoneg did not complete.\n");
				}
			}
		}

		/* Add delay to filter out noise during initial link setup */
		msec_delay(50);
	}

out:
	return status;
}
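
/*
 * Editorial note (not upstream code): @speed is a bit mask, so a caller that
 * wants "10G preferred, 1G acceptable" advertises both bits and lets the
 * routine above intersect them with what the configuration supports, e.g.:
 *
 *	(void)hw->mac.ops.setup_link(hw,
 *	    IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL, TRUE);
 */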
/**
 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
 *
 * Restarts link on PHY and MAC based on settings passed in.
 **/
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete)
{
	s32 status;

	DEBUGFUNC("ixgbe_setup_copper_link_82599");

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed,
					      autoneg_wait_to_complete);
	/* Set up MAC */
	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);

	return status;
}

/**
 * ixgbe_reset_hw_82599 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
 * reset.
 **/
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
	ixgbe_link_speed link_speed;
	s32 status;
	s32 phy_status = IXGBE_SUCCESS;
	u32 ctrl = 0;
	u32 i, autoc, autoc2;
	u32 curr_lms;
	bool link_up = FALSE;

	DEBUGFUNC("ixgbe_reset_hw_82599");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

	/* PHY ops must be identified and initialized prior to reset */

	/* Identify PHY and related function pointers */
	phy_status = hw->phy.ops.init(hw);

	if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto mac_reset_top;

	/* Setup SFP module if there is one present. */
	if (hw->phy.sfp_setup_needed) {
		phy_status = hw->mac.ops.setup_sfp(hw);
		hw->phy.sfp_setup_needed = FALSE;
	}

	if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto mac_reset_top;

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
		hw->phy.ops.reset(hw);

mac_reset_top:
	/* remember AUTOC from before we reset */
	curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;

mac_reset_retry:
	/*
	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
	 * If link reset is used when link is up, it might reset the PHY when
	 * mng is using it.  If link is down or the flag to force full link
	 * reset is set, then perform link reset.
	 */
	ctrl = IXGBE_CTRL_LNK_RST;
	if (!hw->force_full_reset) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
		if (link_up)
			ctrl = IXGBE_CTRL_RST;
	}

	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear meaning reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	if (ctrl & IXGBE_CTRL_RST_MASK) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to
	 * allow time for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_retry;
	}

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet.  Otherwise restore the stored original
	 * values since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);

	/* Enable link if disabled in NVM */
	if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
		autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		IXGBE_WRITE_FLUSH(hw);
	}

	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = TRUE;
	} else {

		/* If MNG FW is running on a multi-speed device that
		 * doesn't autoneg without driver support, we need to
		 * leave LMS in the state it was before the MAC reset.
		 * Likewise if we support WoL we don't want to change
		 * the LMS state.
		 */
		if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
		    hw->wol_enabled)
			hw->mac.orig_autoc =
			    (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
			    curr_lms;

		if (autoc != hw->mac.orig_autoc) {
			status = hw->mac.ops.prot_autoc_write(hw,
							hw->mac.orig_autoc,
							FALSE);
			if (status != IXGBE_SUCCESS)
				goto reset_hw_out;
		}

		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
				   IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		/* Save the SAN MAC RAR index */
		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;

		hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for this RAR */
		hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
				       IXGBE_CLEAR_VMDQ_ALL);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

reset_hw_out:
	if (phy_status != IXGBE_SUCCESS)
		status = phy_status;

	return status;
}

/**
 * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
 * @hw: pointer to hardware structure
 * @fdircmd: current value of FDIRCMD register
 */
static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
{
	int i;

	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
		*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
		if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
			return IXGBE_SUCCESS;
		usec_delay(10);
	}

	return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
}

/**
 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
 * @hw: pointer to hardware structure
 **/
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
	s32 err;
	int i;
	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	u32 fdircmd;
	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;

	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");

	/*
	 * Before starting reinitialization process,
	 * FDIRCMD.CMD must be zero.
	 */
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director previous command did not complete, "
			 "aborting table re-initialization.\n");
		return err;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * 82599 adapters' flow director init flow cannot be restarted.
	 * Work around this silicon erratum by performing the following steps
	 * before re-writing the FDIRCTRL control register with the same value:
	 * - write 1 to bit 8 of FDIRCMD register &
	 * - write 0 to bit 8 of FDIRCMD register
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * Clear FDIR Hash register to clear any leftover hashes
	 * waiting to be programmed.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll init-done after we write FDIRCTRL register */
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}
	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	/* Clear FDIR statistics registers (read to clear) */
	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register
 **/
static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	int i;

	DEBUGFUNC("ixgbe_fdir_enable_82599");

	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);

	/*
	 * Poll init-done after we write the register.  Estimated times:
	 *	10G: PBALLOC = 11b, timing is 60us
	 *	 1G: PBALLOC = 11b, timing is 600us
	 *	100M: PBALLOC = 11b, timing is 6ms
	 *
	 *	Multiply these timings by 4 if under full Rx load
	 *
	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	 * for now.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}

	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
		DEBUGOUT("Flow Director poll time exceeded!\n");
}

/**
 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register, initially
 *	      contains just the value of the Rx packet buffer allocation
 **/
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	DEBUGFUNC("ixgbe_init_fdir_signature_82599");

	/*
	 * Continue setup of fdirctrl register bits:
	 *  Move the flexible bytes to use the ethertype - shift 6 words
	 *  Set the maximum length per hash bucket to 0xA filters
	 *  Send interrupt when 64 filters are left
	 */
	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register, initially
 *	      contains just the value of the Rx packet buffer allocation
 * @cloud_mode: TRUE - cloud mode, FALSE - other mode
 **/
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
				  bool cloud_mode)
{
	UNREFERENCED_1PARAMETER(cloud_mode);
	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");

	/*
	 * Continue setup of fdirctrl register bits:
	 *  Turn perfect match filtering on
	 *  Report hash in RSS field of Rx wb descriptor
	 *  Initialize the drop queue to queue 127
	 *  Move the flexible bytes to use the ethertype - shift 6 words
	 *  Set the maximum length per hash bucket to 0xA filters
	 *  Send interrupt when 64 (0x4 * 16) filters are left
	 */
	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
		    IXGBE_FDIRCTRL_REPORT_STATUS |
		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	if (cloud_mode)
		fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD <<
			     IXGBE_FDIRCTRL_FILTERMODE_SHIFT);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);

	return IXGBE_SUCCESS;
}
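
/*
 * Editorial note (not upstream code): a minimal sketch of the order in which
 * the perfect-filter entry points in this file are typically exercised.
 * "pbsize", "mask" and "input", plus the soft_id of 1 and queue 0, are
 * illustrative placeholders only.
 *
 *	(void)ixgbe_init_fdir_perfect_82599(hw, pbsize, FALSE);
 *	(void)ixgbe_fdir_set_input_mask_82599(hw, &mask, FALSE);
 *	(void)ixgbe_fdir_write_perfect_filter_82599(hw, &input, 1, 0, FALSE);
 */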

/**
 * ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue
 * @hw: pointer to hardware structure
 * @dropqueue: Rx queue index used for the dropped packets
 **/
void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue)
{
	u32 fdirctrl;

	DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599");
	/* Clear init done bit and drop queue field */
	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	fdirctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE);

	/* Set drop queue */
	fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
	if ((hw->mac.type == ixgbe_mac_X550) ||
	    (hw->mac.type == ixgbe_mac_X550EM_x) ||
	    (hw->mac.type == ixgbe_mac_X550EM_a))
		fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);
}

/*
 * These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15
 */
#define IXGBE_ATR_COMMON_HASH_KEY \
		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
		common_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)
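
/*
 * Editorial note (not upstream code): for a single 32-bit key, the macro
 * above unrolls what is conceptually this loop (shown here for the bucket
 * hash; the common/signature variants differ only in which key selects the
 * bit and, for the signature hash, the shift direction):
 *
 *	u32 n, hash = 0;
 *
 *	for (n = 0; n < 16; n++) {
 *		if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n))
 *			hash ^= lo_hash_dword >> n;
 *		if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16)))
 *			hash ^= hi_hash_dword >> n;
 *	}
 */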

/**
 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
 * @input: input bitstream to compute the hash on
 * @common: compressed common input dword
 *
 * This function is almost identical to the function above but contains
 * several optimizations such as unwinding all of the loops, letting the
 * compiler work out all of the conditional ifs since the keys are static
 * defines, and computing two keys at once since the hashed dword stream
 * will be the same for both keys.
 **/
u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
				     union ixgbe_atr_hash_dword common)
{
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input.dword);

	/* generate common hash dword */
	hi_hash_dword = IXGBE_NTOHL(common.dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the VLAN until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process remaining 30 bits of the key */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);

	/* combine common_hash result with signature and bucket hashes */
	bucket_hash ^= common_hash;
	bucket_hash &= IXGBE_ATR_HASH_MASK;

	sig_hash ^= common_hash << 16;
	sig_hash &= IXGBE_ATR_HASH_MASK << 16;

	/* return completed signature hash */
	return sig_hash ^ bucket_hash;
}
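
/*
 * Editorial note (not upstream code): a minimal sketch of feeding the
 * signature hash for a TCP/IPv4 flow, assuming the ixgbe_atr_hash_dword
 * layout from ixgbe_type.h (flow type carried in @input, the port pair
 * folded into @common).  The port numbers are illustrative only.
 *
 *	union ixgbe_atr_hash_dword atr_input = { .dword = 0 };
 *	union ixgbe_atr_hash_dword common = { .dword = 0 };
 *	u32 sig;
 *
 *	atr_input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
 *	common.port.src = htons(1024);
 *	common.port.dst = htons(80);
 *	sig = ixgbe_atr_compute_sig_hash_82599(atr_input, common);
 */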

/**
 * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
 * @hw: pointer to hardware structure
 * @input: unique input dword
 * @common: compressed common input dword
 * @queue: queue index to direct traffic to
 *
 * Note that the tunnel bit in input must not be set when the hardware
 * tunneling support does not exist.
 **/
void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
					   union ixgbe_atr_hash_dword input,
					   union ixgbe_atr_hash_dword common,
					   u8 queue)
{
	u64 fdirhashcmd;
	u8 flow_type;
	bool tunnel;
	u32 fdircmd;

	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");

	/*
	 * Get the flow_type in order to program FDIRCMD properly.
	 * The lowest 2 bits are FDIRCMD.L4TYPE, the third lowest bit is
	 * FDIRCMD.IPV6, and the fifth is FDIRCMD.TUNNEL_FILTER.
	 */
	tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
	flow_type = input.formatted.flow_type &
		    (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
	switch (flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
	case IXGBE_ATR_FLOW_TYPE_TCPV6:
	case IXGBE_ATR_FLOW_TYPE_UDPV6:
	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
		break;
	default:
		DEBUGOUT(" Error on flow type input\n");
		return;
	}

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	if (tunnel)
		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;

	/*
	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
	 */
	fdirhashcmd = (u64)fdircmd << 32;
	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);

	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);

	return;
}

#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0)

/**
 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
 * @input: input bitstream to compute the hash on
 * @input_mask: mask for the input bitstream
 *
 * This function serves two main purposes.  First it applies the input_mask
 * to the atr_input resulting in a cleaned up atr_input data stream.
 * Secondly it computes the hash and stores it in the bkt_hash field at
 * the end of the input byte stream.  This way it will be available for
 * future use without needing to recompute the hash.
 **/
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
					  union ixgbe_atr_input *input_mask)
{

	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 bucket_hash = 0;
	u32 hi_dword = 0;
	u32 i = 0;

	/* Apply masks to input data */
	for (i = 0; i < 14; i++)
		input->dword_stream[i] &= input_mask->dword_stream[i];

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);

	/* generate common hash dword */
	for (i = 1; i <= 13; i++)
		hi_dword ^= input->dword_stream[i];
	hi_hash_dword = IXGBE_NTOHL(hi_dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the VLAN until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process remaining 30 bits of the key */
	for (i = 1; i <= 15; i++)
		IXGBE_COMPUTE_BKT_HASH_ITERATION(i);

	/*
	 * Limit hash to 13 bits since max bucket count is 8K.
	 * Store result at the end of the input stream.
	 */
	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
}

/**
 * ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks
 * @input_mask: mask to be bit swapped
 *
 * The source and destination port masks for flow director are bit swapped
 * in that bit 15 affects bit 0, 14 affects 1, 13 affects 2, etc.  In order to
 * generate a correctly swapped value we need to bit swap the mask and that
 * is what is accomplished by this function.
 **/
static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
{
	u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
	mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
}
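
/*
 * Editorial note (not upstream code): the swap sequence above reverses the
 * bit order within each 16-bit half of the combined mask (it never swaps the
 * two halves themselves), so each port mask stays in its half but is
 * mirrored.  A worked example for one half:
 *
 *	0x8000 (only bit 15 set)  ->  0x0001 (only bit 0 set)
 *	0xFF00 (bits 15..8 set)   ->  0x00FF (bits 7..0 set)
 */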
 */
#define IXGBE_STORE_AS_BE32(_value) \
	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))

#define IXGBE_WRITE_REG_BE32(a, reg, value) \
	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))

#define IXGBE_STORE_AS_BE16(_value) \
	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))

s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask,
				    bool cloud_mode)
{
	/* mask IPv6 since it is currently not supported */
	u32 fdirm = IXGBE_FDIRM_DIPv6;
	u32 fdirtcpm;
	u32 fdirip6m;
	UNREFERENCED_1PARAMETER(cloud_mode);
	DEBUGFUNC("ixgbe_fdir_set_input_mask_82599");

	/*
	 * Program the relevant mask registers.  If src/dst_port or
	 * src/dst_addr are zero, then assume a full mask for that field.
	 * Also assume that a VLAN of 0 is unspecified, so mask that out
	 * as well.  L4type cannot be masked out in this implementation.
	 *
	 * This also assumes IPv4 only.  IPv6 masking isn't supported at
	 * this point in time.
	 */

	/* verify bucket hash is cleared on hash generation */
	if (input_mask->formatted.bkt_hash)
		DEBUGOUT(" bucket hash should always be 0 in mask\n");

	/* Program FDIRM and verify partial masks */
	switch (input_mask->formatted.vm_pool & 0x7F) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_POOL;
		/* fall through */
	case 0x7F:
		break;
	default:
		DEBUGOUT(" Error on vm pool mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_L4P;
		if (input_mask->formatted.dst_port ||
		    input_mask->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port mask\n");
			return IXGBE_ERR_CONFIG;
		}
		/* fall through */
	case IXGBE_ATR_L4TYPE_MASK:
		break;
	default:
		DEBUGOUT(" Error on flow type mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
	case 0x0000:
		/* mask VLAN ID */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0x0FFF:
		/* mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANP;
		break;
	case 0xE000:
		/* mask VLAN ID only */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* fall through */
	case 0xEFFF:
		/* no VLAN fields masked */
		break;
	default:
		DEBUGOUT(" Error on VLAN mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
	case 0x0000:
		/* Mask Flex Bytes */
		fdirm |= IXGBE_FDIRM_FLEX;
		/* fall through */
	case 0xFFFF:
		break;
	default:
		DEBUGOUT(" Error on flexible byte mask\n");
		return IXGBE_ERR_CONFIG;
	}

	if (cloud_mode) {
		fdirm |= IXGBE_FDIRM_L3P;
		fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
		fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;

		switch (input_mask->formatted.inner_mac[0] & 0xFF) {
		case 0x00:
			/* Mask inner MAC */
			fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC;
			/* fall through */
		case 0xFF:
			break;
		default:
			DEBUGOUT(" Error on inner_mac byte mask\n");
			return IXGBE_ERR_CONFIG;
		}

		switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) {
		case 0x0:
			/* Mask VXLAN id */
			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI;
			break;
		case 0x00FFFFFF:
			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
			break;
		case 0xFFFFFFFF:
			break;
		default:
			DEBUGOUT(" Error on TNI/VNI byte mask\n");
			return IXGBE_ERR_CONFIG;
		}

		switch (input_mask->formatted.tunnel_type & 0xFFFF) {
		case 0x0:
			/* Mask tunnel type */
			fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
			/* fall through */
		case 0xFFFF:
			break;
		default:
			DEBUGOUT(" Error on tunnel type byte mask\n");
			return IXGBE_ERR_CONFIG;
		}
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);

		/* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSCTPM,
		 * FDIRSIP4M and FDIRDIP4M in cloud mode to allow
		 * L3/L4 packets to tunnel.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
		switch (hw->mac.type) {
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
			break;
		default:
			break;
		}
	}

	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	if (!cloud_mode) {
		/* store the TCP/UDP port masks, bit reversed from port
		 * layout */
		fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);

		/* write both the same so that UDP and TCP use the same mask */
		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
		/* also use it for SCTP */
		switch (hw->mac.type) {
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
			break;
		default:
			break;
		}

		/* store source and destination IP masks (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
				     ~input_mask->formatted.src_ip[0]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
				     ~input_mask->formatted.dst_ip[0]);
	}
	return IXGBE_SUCCESS;
}

s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue,
					  bool cloud_mode)
{
	u32 fdirport, fdirvlan, fdirhash, fdircmd;
	u32 addr_low, addr_high;
	u32 cloud_type = 0;
	s32 err;
	UNREFERENCED_1PARAMETER(cloud_mode);

	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
	if (!cloud_mode) {
		/* currently IPv6 is not supported, must be programmed with 0 */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
				     input->formatted.src_ip[0]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
				     input->formatted.src_ip[1]);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
				     input->formatted.src_ip[2]);

		/* record the source address (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA,
				     input->formatted.src_ip[0]);

		/* record the first 32 bits of the destination address
		 * (big-endian) */
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA,
				     input->formatted.dst_ip[0]);

		/* record source and destination port (little-endian) */
		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
		fdirport |= IXGBE_NTOHS(input->formatted.src_port);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
	}

	/* record VLAN (little-endian) and flex_bytes (big-endian) */
	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
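	/*
	 * FDIRVLAN packs the (byte-swapped) flex bytes into its upper
	 * 16 bits via the shift below, with the VLAN ID in the lower
	 * 16 bits.
	 */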
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	if (cloud_mode) {
		if (input->formatted.tunnel_type != 0)
			cloud_type = 0x80000000;

		addr_low = ((u32)input->formatted.inner_mac[0] |
			    ((u32)input->formatted.inner_mac[1] << 8) |
			    ((u32)input->formatted.inner_mac[2] << 16) |
			    ((u32)input->formatted.inner_mac[3] << 24));
		addr_high = ((u32)input->formatted.inner_mac[4] |
			     ((u32)input->formatted.inner_mac[5] << 8));
		cloud_type |= addr_high;
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type);
		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
				     input->formatted.tni_vni);
	}

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	if (queue == IXGBE_FDIR_DROP_QUEUE)
		fdircmd |= IXGBE_FDIRCMD_DROP;
	if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK)
		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director command did not complete!\n");
		return err;
	}

	return IXGBE_SUCCESS;
}

s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id)
{
	u32 fdirhash;
	u32 fdircmd;
	s32 err;

	DEBUGFUNC("ixgbe_fdir_erase_perfect_filter_82599");

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/* flush hash to HW */
	IXGBE_WRITE_FLUSH(hw);

	/* Query if filter is present */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director command did not complete!\n");
		return err;
	}

	/* if filter exists in hardware then remove it */
	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
		IXGBE_WRITE_FLUSH(hw);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
 * @hw: pointer to hardware structure
 * @input: input bitstream
 * @input_mask: mask for the input bitstream
 * @soft_id: software index for the filters
 * @queue: queue index to direct traffic to
 * @cloud_mode: unused
 *
 * Note that the caller of this function must hold a lock before calling,
 * since the hardware writes must be protected from one another.
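 *
 * A minimal calling sketch (illustrative only; the lock primitives and
 * local names here are hypothetical, not part of this driver):
 *
 *	lock(&fdir_lock);
 *	err = ixgbe_fdir_add_perfect_filter_82599(hw, &input, &mask,
 *	    soft_id, rx_queue, FALSE);
 *	unlock(&fdir_lock);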
 **/
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
					union ixgbe_atr_input *input,
					union ixgbe_atr_input *input_mask,
					u16 soft_id, u8 queue, bool cloud_mode)
{
	s32 err = IXGBE_ERR_CONFIG;
	UNREFERENCED_1PARAMETER(cloud_mode);

	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");

	/*
	 * Check flow_type formatting, and bail out before we touch the
	 * hardware if there's a configuration issue
	 */
	switch (input->formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_IPV4:
	case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4:
		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
		if (input->formatted.dst_port || input->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port\n");
			return IXGBE_ERR_CONFIG;
		}
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
	case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4:
		if (input->formatted.dst_port || input->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port\n");
			return IXGBE_ERR_CONFIG;
		}
		/* fall through */
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
	case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4:
		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
						  IXGBE_ATR_L4TYPE_MASK;
		break;
	default:
		DEBUGOUT(" Error on flow type input\n");
		return err;
	}

	/* program input mask into the HW */
	err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode);
	if (err)
		return err;

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(input, input_mask);

	/* program filters to filter memory */
	return ixgbe_fdir_write_perfect_filter_82599(hw, input,
						     soft_id, queue,
						     cloud_mode);
}

/**
 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
 * @hw: pointer to hardware structure
 * @reg: analog register to read
 * @val: read value
 *
 * Performs read operation to Omer analog register specified.
 **/
s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32 core_ctl;

	DEBUGFUNC("ixgbe_read_analog_reg8_82599");

	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
			(reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);
	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
	*val = (u8)core_ctl;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
 * @hw: pointer to hardware structure
 * @reg: analog register to write
 * @val: value to write
 *
 * Performs write operation to Omer analog register specified.
 **/
s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
{
	u32 core_ctl;

	DEBUGFUNC("ixgbe_write_analog_reg8_82599");

	core_ctl = (reg << 8) | val;
	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware using the generic start_hw function
 * and the generation 2 start_hw function.
 * Then performs revision-specific operations, if any.
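 *
 * In summary, the call sequence below is: ixgbe_start_hw_generic(), then
 * ixgbe_start_hw_gen2(), then the 82599-specific firmware version check
 * (see ixgbe_verify_fw_version_82599() later in this file).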
 **/
s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_hw_82599");

	ret_val = ixgbe_start_hw_generic(hw);
	if (ret_val != IXGBE_SUCCESS)
		goto out;

	ret_val = ixgbe_start_hw_gen2(hw);
	if (ret_val != IXGBE_SUCCESS)
		goto out;

	/* We need to run link autotry after the driver loads */
	hw->mac.autotry_restart = TRUE;

	if (ret_val == IXGBE_SUCCESS)
		ret_val = ixgbe_verify_fw_version_82599(hw);
out:
	return ret_val;
}

/**
 * ixgbe_identify_phy_82599 - Get physical layer module
 * @hw: pointer to hardware structure
 *
 * Determines the physical layer module found on the current adapter.
 * If PHY already detected, maintains current PHY type in hw struct,
 * otherwise executes the PHY detection routine.
 **/
s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
{
	s32 status;

	DEBUGFUNC("ixgbe_identify_phy_82599");

	/* Detect the PHY - returns success if already detected. */
	status = ixgbe_identify_phy_generic(hw);
	if (status != IXGBE_SUCCESS) {
		/* 82599 10GBASE-T requires an external PHY */
		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
			return status;
		else
			status = ixgbe_identify_module_generic(hw);
	}

	/* Set PHY type none if no PHY detected */
	if (hw->phy.type == ixgbe_phy_unknown) {
		hw->phy.type = ixgbe_phy_none;
		return IXGBE_SUCCESS;
	}

	/* Return error if SFP module has been detected but is not supported */
	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
		return IXGBE_ERR_SFP_NOT_SUPPORTED;

	return status;
}

/**
 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
 * @hw: pointer to hardware structure
 *
 * Determines physical layer capabilities of the current configuration.
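 *
 * The return value is a bitmask of IXGBE_PHYSICAL_LAYER_* flags, so a
 * caller can test individual capabilities (an illustrative sketch only):
 *
 *	if (ixgbe_get_supported_physical_layer_82599(hw) &
 *	    IXGBE_PHYSICAL_LAYER_10GBASE_T)
 *		... 10GBASE-T is supported ...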
 **/
u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
{
	u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u16 ext_ability = 0;

	DEBUGFUNC("ixgbe_get_supported_physical_layer_82599");

	hw->phy.ops.identify(hw);

	switch (hw->phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_cu_unknown:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
		    IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
		goto out;
	default:
		break;
	}

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
			goto out;
		} else
			/* SFI mode so read SFP module */
			goto sfp_check;
		break;
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
		goto out;
		break;
	case IXGBE_AUTOC_LMS_10G_SERIAL:
		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
			goto out;
		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
			goto sfp_check;
		break;
	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
		goto out;
		break;
	default:
		goto out;
		break;
	}

sfp_check:
	/* SFP check must be done last since DA modules are sometimes used to
	 * test KR mode - we need to identify KR mode correctly before
	 * identifying an SFP module.  Call identify_sfp because the
	 * pluggable module may have changed. */
	physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
out:
	return physical_layer;
}

/**
 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
 * @hw: pointer to hardware structure
 * @regval: register value to write to RXCTRL
 *
 * Enables the Rx DMA unit for 82599
 **/
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
{

	DEBUGFUNC("ixgbe_enable_rx_dma_82599");

	/*
	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
	 * If traffic is incoming before we enable the Rx unit, it could hang
	 * the Rx DMA unit.
	 * Therefore, make sure the security engine is completely disabled
	 * prior to enabling the Rx unit.
	 */

	hw->mac.ops.disable_sec_rx_path(hw);

	if (regval & IXGBE_RXCTRL_RXEN)
		ixgbe_enable_rx(hw);
	else
		ixgbe_disable_rx(hw);

	hw->mac.ops.enable_sec_rx_path(hw);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_verify_fw_version_82599 - verify FW version for 82599
 * @hw: pointer to hardware structure
 *
 * Verifies that the installed firmware version is 0.6 or higher
 * for SFI devices.  All 82599 SFI devices should have version 0.6 or higher.
 *
 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
 * if the FW version is not supported.
 **/
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM_VERSION;
	u16 fw_offset, fw_ptp_cfg_offset;
	u16 fw_version;

	DEBUGFUNC("ixgbe_verify_fw_version_82599");

	/* firmware check is only necessary for SFI devices */
	if (hw->phy.media_type != ixgbe_media_type_fiber) {
		status = IXGBE_SUCCESS;
		goto fw_version_out;
	}

	/* get the offset to the Firmware Module block */
	if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			      "eeprom read at offset %d failed", IXGBE_FW_PTR);
		return IXGBE_ERR_EEPROM_VERSION;
	}

	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
		goto fw_version_out;

	/* get the offset to the Pass Through Patch Configuration block */
	if (hw->eeprom.ops.read(hw, (fw_offset +
				     IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
				&fw_ptp_cfg_offset)) {
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			      "eeprom read at offset %d failed",
			      fw_offset +
			      IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
		return IXGBE_ERR_EEPROM_VERSION;
	}

	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
		goto fw_version_out;

	/* get the firmware version */
	if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
				     IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			      "eeprom read at offset %d failed",
			      fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
		return IXGBE_ERR_EEPROM_VERSION;
	}

	if (fw_version > 0x5)
		status = IXGBE_SUCCESS;

fw_version_out:
	return status;
}

/**
 * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
 * @hw: pointer to hardware structure
 *
 * Returns TRUE if the LESM FW module is present and enabled.  Otherwise
 * returns FALSE.  Smart Speed must be disabled if LESM FW module is enabled.
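 *
 * The check walks an EEPROM pointer chain (a summary of the code below):
 * IXGBE_FW_PTR -> Firmware Module block -> LESM Parameters block ->
 * LESM state word, then tests IXGBE_FW_LESM_STATE_ENABLED in that word.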
 **/
bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
{
	bool lesm_enabled = FALSE;
	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
	s32 status;

	DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");

	/* get the offset to the Firmware Module block */
	status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);

	if ((status != IXGBE_SUCCESS) ||
	    (fw_offset == 0) || (fw_offset == 0xFFFF))
		goto out;

	/* get the offset to the LESM Parameters block */
	status = hw->eeprom.ops.read(hw, (fw_offset +
				     IXGBE_FW_LESM_PARAMETERS_PTR),
				     &fw_lesm_param_offset);

	if ((status != IXGBE_SUCCESS) ||
	    (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
		goto out;

	/* get the LESM state word */
	status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
				     IXGBE_FW_LESM_STATE_1),
				     &fw_lesm_state);

	if ((status == IXGBE_SUCCESS) &&
	    (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
		lesm_enabled = TRUE;

out:
	return lesm_enabled;
}

/**
 * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
 * fastest available method
 *
 * @hw: pointer to hardware structure
 * @offset: offset of word in EEPROM to read
 * @words: number of words
 * @data: word(s) read from the EEPROM
 *
 * Retrieves 16-bit word(s) from the EEPROM
 **/
static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
					  u16 words, u16 *data)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val = IXGBE_ERR_CONFIG;

	DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");

	/*
	 * If the EEPROM is detected and can be addressed using 14 bits,
	 * use EERD; otherwise, use bit-bang access.
	 */
	if ((eeprom->type == ixgbe_eeprom_spi) &&
	    (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
		ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
							 data);
	else
		ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
								    words,
								    data);

	return ret_val;
}

/**
 * ixgbe_read_eeprom_82599 - Read EEPROM word using
 * fastest available method
 *
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 *
 * Reads a 16-bit word from the EEPROM
 **/
static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
				   u16 offset, u16 *data)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val = IXGBE_ERR_CONFIG;

	DEBUGFUNC("ixgbe_read_eeprom_82599");

	/*
	 * If the EEPROM is detected and can be addressed using 14 bits,
	 * use EERD; otherwise, use bit-bang access.
	 */
	if ((eeprom->type == ixgbe_eeprom_spi) &&
	    (offset <= IXGBE_EERD_MAX_ADDR))
		ret_val = ixgbe_read_eerd_generic(hw, offset, data);
	else
		ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);

	return ret_val;
}

/**
 * ixgbe_reset_pipeline_82599 - perform pipeline reset
 *
 * @hw: pointer to hardware structure
 *
 * Reset pipeline by asserting Restart_AN together with LMS change to ensure
 * full pipeline reset.  This function assumes the SW/FW lock is held.
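 *
 * A sketch of the sequence implemented below: toggle AUTOC.LMS[2] while
 * setting Restart_AN, poll ANLP1 until the AN state machine leaves state 0
 * (up to 10 * 4 ms), then restore the original LMS field with Restart_AN.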
 **/
static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
{
	s32 ret_val;
	u32 anlp1_reg = 0;
	u32 i, autoc_reg, autoc2_reg;

	DEBUGFUNC("ixgbe_reset_pipeline_82599");

	/* Enable link if disabled in NVM */
	autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
		autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
		IXGBE_WRITE_FLUSH(hw);
	}

	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
			autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
	/* Wait for AN to leave state 0 */
	for (i = 0; i < 10; i++) {
		msec_delay(4);
		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
		if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
			break;
	}

	if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
		DEBUGOUT("auto negotiation not completed\n");
		ret_val = IXGBE_ERR_RESET_FAILED;
		goto reset_pipeline_out;
	}

	ret_val = IXGBE_SUCCESS;

reset_pipeline_out:
	/* Write AUTOC register with original LMS field and Restart_AN */
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
	IXGBE_WRITE_FLUSH(hw);

	return ret_val;
}

/**
 * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to read
 * @dev_addr: address to read from
 * @data: value read
 *
 * Performs byte read operation to SFP module's EEPROM over I2C interface at
 * a specified device address.
 **/
static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
				     u8 dev_addr, u8 *data)
{
	u32 esdp;
	s32 status;
	s32 timeout = 200;

	DEBUGFUNC("ixgbe_read_i2c_byte_82599");

	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
		/* Acquire I2C bus ownership. */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp |= IXGBE_ESDP_SDP0;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);

		while (timeout) {
			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
			if (esdp & IXGBE_ESDP_SDP1)
				break;

			msec_delay(5);
			timeout--;
		}

		if (!timeout) {
			DEBUGOUT("Driver can't access resource,"
				 " I2C bus acquisition timed out.\n");
			status = IXGBE_ERR_I2C;
			goto release_i2c_access;
		}
	}

	status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);

release_i2c_access:

	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
		/* Release I2C bus ownership. */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp &= ~IXGBE_ESDP_SDP0;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);
	}

	return status;
}

/**
 * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to write
 * @dev_addr: address to write to
 * @data: value to write
 *
 * Performs byte write operation to SFP module's EEPROM over I2C interface at
 * a specified device address.
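 *
 * On QSFP designs that share the I2C bus, ownership is requested by
 * raising SDP0 and granted when the hardware asserts SDP1 (polled for up
 * to 200 * 5 ms below); SDP0 is lowered again to release the bus.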
 **/
static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
				      u8 dev_addr, u8 data)
{
	u32 esdp;
	s32 status;
	s32 timeout = 200;

	DEBUGFUNC("ixgbe_write_i2c_byte_82599");

	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
		/* Acquire I2C bus ownership. */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp |= IXGBE_ESDP_SDP0;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);

		while (timeout) {
			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
			if (esdp & IXGBE_ESDP_SDP1)
				break;

			msec_delay(5);
			timeout--;
		}

		if (!timeout) {
			DEBUGOUT("Driver can't access resource,"
				 " I2C bus acquisition timed out.\n");
			status = IXGBE_ERR_I2C;
			goto release_i2c_access;
		}
	}

	status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);

release_i2c_access:

	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
		/* Release I2C bus ownership. */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp &= ~IXGBE_ESDP_SDP0;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);
	}

	return status;
}
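/*
 * Usage note (illustrative only, not part of the original file): a minimal
 * sketch of programming an ATR signature filter with the helpers above.
 * The 'rx_queue' variable is hypothetical; real callers pack the hash
 * dwords from a received packet before calling:
 *
 *	union ixgbe_atr_hash_dword input = { .dword = 0 };
 *	union ixgbe_atr_hash_dword common = { .dword = 0 };
 *
 *	input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
 *	ixgbe_fdir_add_signature_filter_82599(hw, input, common, rx_queue);
 */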