1 /****************************************************************************** 2 3 Copyright (c) 2001-2012, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82599.c 240155 2012-09-06 02:07:58Z kevlo $*/
/*$NetBSD: ixgbe_82599.c,v 1.8 2015/04/14 07:17:06 msaitoh Exp $*/

#include "ixgbe_type.h"
#include "ixgbe_82599.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"

/* Forward declarations for file-local helpers defined later in this file. */
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg,
					 bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
				   u16 offset, u16 *data);
static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
					  u16 words, u16 *data);

/**
 * ixgbe_init_mac_link_ops_82599 - Set MAC link function pointers
 * @hw: pointer to hardware structure
 *
 * Selects the link-setup routine (multispeed fiber, SmartSpeed, or the
 * plain 82599 MAC link setup) and installs or clears the Tx-laser control
 * hooks based on the detected media type.
 **/
void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");

	/* enable the laser control functions for SFP+ fiber */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
		mac->ops.disable_tx_laser =
		    &ixgbe_disable_tx_laser_multispeed_fiber;
		mac->ops.enable_tx_laser =
		    &ixgbe_enable_tx_laser_multispeed_fiber;
		mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;

	} else {
		mac->ops.disable_tx_laser = NULL;
		mac->ops.enable_tx_laser = NULL;
		mac->ops.flap_tx_laser = NULL;
	}

	if (hw->phy.multispeed_fiber) {
		/* Set up dual speed SFP+ support */
		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
	} else {
		/* SmartSpeed is used only on backplane media, and only when
		 * the LESM firmware agent does not own link configuration. */
		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
		    (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
		     hw->phy.smart_speed == ixgbe_smart_speed_on) &&
		    !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
			mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
		} else {
			mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
		}
	}
}

/**
 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
 * @hw: pointer to hardware structure
 *
 * Initialize any function pointers that were not able to be
 * set during init_shared_code because the PHY/SFP type was
 * not known.  Perform the SFP init if necessary.
 *
 * Returns IXGBE_SUCCESS, or IXGBE_ERR_SFP_NOT_SUPPORTED (and skips the
 * pointer setup) when PHY/SFP identification rejects the module.
 **/
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_init_phy_ops_82599");

	/* Identify the PHY or SFP module */
	ret_val = phy->ops.identify(hw);
	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto init_phy_ops_out;

	/* Setup function pointers based on detected SFP module and speeds */
	ixgbe_init_mac_link_ops_82599(hw);
	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
		hw->phy.ops.reset = NULL;

	/* If copper media, overwrite with copper function pointers */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
		mac->ops.get_link_capabilities =
		    &ixgbe_get_copper_link_capabilities_generic;
	}

	/* Set necessary function pointers based on phy type */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
		    &ixgbe_get_phy_firmware_version_tnx;
		break;
	default:
		break;
	}
init_phy_ops_out:
	return ret_val;
}

/**
 * ixgbe_setup_sfp_modules_82599 - Setup SFP module
 * @hw: pointer to hardware structure
 *
 * Plays the EEPROM-resident PHY init sequence into the CORECTL register
 * while holding the MAC_CSR software/firmware semaphore, restarts the DSP
 * (Restart_AN with LMS cleared), waits for autonegotiation to leave
 * state 0, then returns the link mode to 10G SFI.  A no-op when the SFP
 * type is still unknown.
 **/
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 reg_anlp1 = 0;
	u32 i = 0;
	u16 list_offset, data_offset, data_value;

	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");

	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
		ixgbe_init_mac_link_ops_82599(hw);

		hw->phy.ops.reset = NULL;

		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS)
			goto setup_sfp_out;

		/* PHY config will finish before releasing the semaphore */
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto setup_sfp_out;
		}

		/* NOTE(review): the EEPROM read return codes are ignored
		 * here; a failed read could leave data_value stale — confirm
		 * against the generic eeprom ops' failure behavior. */
		hw->eeprom.ops.read(hw, ++data_offset, &data_value);
		/* 0xffff terminates the init word list in the EEPROM */
		while (data_value != 0xffff) {
			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
			IXGBE_WRITE_FLUSH(hw);
			hw->eeprom.ops.read(hw, ++data_offset, &data_value);
		}

		/* Release the semaphore */
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		/* Delay obtaining semaphore again to allow FW access */
		msec_delay(hw->eeprom.semaphore_delay);

		/* Now restart DSP by setting Restart_AN and clearing LMS */
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
				IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
				IXGBE_AUTOC_AN_RESTART));

		/* Wait for AN to leave state 0 (up to 10 x 4ms) */
		for (i = 0; i < 10; i++) {
			msec_delay(4);
			reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
			if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
				break;
		}
		if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
			DEBUGOUT("sfp module setup not complete\n");
			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
			goto setup_sfp_out;
		}

		/* Restart DSP by setting Restart_AN and return to SFI mode */
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
				IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
				IXGBE_AUTOC_AN_RESTART));
	}

setup_sfp_out:
	return ret_val;
}

/**
 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
 * @hw: pointer to hardware structure
 *
 * Initialize the function pointers and assign the MAC type for 82599.
 * Does not touch the hardware.
 **/

s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82599");

	/* NOTE(review): the first call's status is overwritten by the
	 * second; only ixgbe_init_ops_generic()'s result is returned. */
	ret_val = ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.identify = &ixgbe_identify_phy_82599;
	phy->ops.init = &ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = &ixgbe_reset_hw_82599;
	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
	mac->ops.get_media_type = &ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
	    &ixgbe_get_supported_physical_layer_82599;
	mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
	mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = &ixgbe_start_hw_82599;
	mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
	mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
	mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
	mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
	mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = &ixgbe_set_vfta_generic;
	mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
	mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
	mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
	mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
	mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;

	/* Link */
	mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
	mac->ops.check_link = &ixgbe_check_mac_link_generic;
	mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
	ixgbe_init_mac_link_ops_82599(hw);

	/* 82599 hardware limits */
	mac->mcft_size = 128;
	mac->vft_size = 128;
	mac->num_rar_entries = 128;
	mac->rx_pb_size = 512;
	mac->max_tx_queues = 128;
	mac->max_rx_queues = 128;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);

	/* Any FWSM mode bit set => manageability (ARC) subsystem present */
	mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
				    IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;

	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;

	/* EEPROM */
	eeprom->ops.read = &ixgbe_read_eeprom_82599;
	eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;


	return ret_val;
}

/**
 * ixgbe_get_link_capabilities_82599 - Determines link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @negotiation: TRUE when autoneg or autotry is enabled
 *
 * Determines the link capabilities by reading the AUTOC register.
 **/
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed,
				      bool *negotiation)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = 0;

	DEBUGFUNC("ixgbe_get_link_capabilities_82599");


	/* Check if 1G SFP module. */
	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		goto out;
	}

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not
	 * been stored, use the current register values.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = FALSE;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*negotiation = FALSE;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		break;

	case IXGBE_AUTOC_LMS_10G_SERIAL:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*negotiation = FALSE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		/* KR and KX4 both contribute 10G; KX contributes 1G */
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
		break;

	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
		*speed = IXGBE_LINK_SPEED_1GB_FULL |
			 IXGBE_LINK_SPEED_100_FULL;
		*negotiation = FALSE;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
		break;
	}

	if (hw->phy.multispeed_fiber) {
		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
			  IXGBE_LINK_SPEED_1GB_FULL;
		*negotiation = TRUE;
	}

out:
	return status;
}

/**
 * ixgbe_get_media_type_82599 - Get media type
 * @hw: pointer to hardware structure
 *
 * Returns the media type (fiber, copper, backplane)
 **/
enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
{
	enum ixgbe_media_type media_type;

	DEBUGFUNC("ixgbe_get_media_type_82599");

	/* Detect if there is a copper PHY attached. */
	switch (hw->phy.type) {
	case ixgbe_phy_cu_unknown:
	case ixgbe_phy_tn:
		media_type = ixgbe_media_type_copper;
		goto out;
	default:
		break;
	}

	/* No copper PHY: classify by PCI device ID */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
	case IXGBE_DEV_ID_82599_XAUI_LOM:
		/* Default device ID is mezzanine card KX/KX4 */
		media_type = ixgbe_media_type_backplane;
		break;
	case IXGBE_DEV_ID_82599_SFP:
	case IXGBE_DEV_ID_82599_SFP_FCOE:
	case IXGBE_DEV_ID_82599_SFP_EM:
	case IXGBE_DEV_ID_82599_SFP_SF2:
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599EN_SFP:
		media_type = ixgbe_media_type_fiber;
		break;
	case IXGBE_DEV_ID_82599_CX4:
		media_type = ixgbe_media_type_cx4;
		break;
	case IXGBE_DEV_ID_82599_T3_LOM:
		media_type = ixgbe_media_type_copper;
		break;
	default:
		media_type = ixgbe_media_type_unknown;
		break;
	}
out:
	return media_type;
}

/**
 * ixgbe_start_mac_link_82599 - Setup MAC link settings
 * @hw: pointer to hardware structure
 * @autoneg_wait_to_complete: TRUE when waiting for completion is
 *			       needed
 *
 * Configures link settings based on values in the ixgbe_hw struct.
 * Restarts the link.  Performs autonegotiation if needed.
 **/
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
			       bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_mac_link_82599");


	/* Restart link */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		/* Polling only applies to the KX/KX4/KR backplane link
		 * modes, where clause-73 autonegotiation runs. */
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		    IXGBE_AUTOC_LMS_KX4_KX_KR ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		    IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		    IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autoneg did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noises during initial link setup */
	msec_delay(50);

	return status;
}

/**
 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
 * @hw: pointer to hardware structure
 *
 * The base drivers may require better control over SFP+ module
 * PHY states.  This includes selectively shutting down the Tx
 * laser on the PHY, effectively halting physical link.
505 **/ 506 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 507 { 508 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 509 510 /* Disable tx laser; allow 100us to go dark per spec */ 511 esdp_reg |= IXGBE_ESDP_SDP3; 512 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 513 IXGBE_WRITE_FLUSH(hw); 514 usec_delay(100); 515 } 516 517 /** 518 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser 519 * @hw: pointer to hardware structure 520 * 521 * The base drivers may require better control over SFP+ module 522 * PHY states. This includes selectively turning on the Tx 523 * laser on the PHY, effectively starting physical link. 524 **/ 525 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 526 { 527 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 528 529 /* Enable tx laser; allow 100ms to light up */ 530 esdp_reg &= ~IXGBE_ESDP_SDP3; 531 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 532 IXGBE_WRITE_FLUSH(hw); 533 msec_delay(100); 534 } 535 536 /** 537 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser 538 * @hw: pointer to hardware structure 539 * 540 * When the driver changes the link speeds that it can support, 541 * it sets autotry_restart to TRUE to indicate that we need to 542 * initiate a new autotry session with the link partner. To do 543 * so, we set the speed then disable and re-enable the tx laser, to 544 * alert the link partner that it also needs to restart autotry on its 545 * end. This is consistent with TRUE clause 37 autoneg, which also 546 * involves a loss of signal. 
 **/
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");

	/* Only flap once per requested autotry restart */
	if (hw->mac.autotry_restart) {
		ixgbe_disable_tx_laser_multispeed_fiber(hw);
		ixgbe_enable_tx_laser_multispeed_fiber(hw);
		hw->mac.autotry_restart = FALSE;
	}
}

/**
 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: TRUE if autonegotiation enabled
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 * Tries 10G first, then 1G, flapping the Tx laser between attempts; if
 * neither comes up and more than one speed was tried, recurses once with
 * only the highest requested speed.
 **/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					  ixgbe_link_speed speed, bool autoneg,
					  bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	u32 speedcnt = 0;
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
	u32 i = 0;
	bool link_up = FALSE;
	bool negotiation;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
	if (status != IXGBE_SUCCESS)
		return status;

	speed &= link_speed;

	/*
	 * Try each speed one by one, highest priority first.  We do this in
	 * software because 10gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
			goto out;

		/* Set the module link speed (SDP5 high selects 10G) */
		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
		IXGBE_WRITE_FLUSH(hw);

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link_82599(hw,
						    IXGBE_LINK_SPEED_10GB_FULL,
						    autoneg,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted.  82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
						  &link_up, FALSE);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
			goto out;

		/* Set the module link speed (SDP5 low selects 1G) */
		esdp_reg &= ~IXGBE_ESDP_SDP5;
		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
		IXGBE_WRITE_FLUSH(hw);

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link_82599(hw,
						    IXGBE_LINK_SPEED_1GB_FULL,
						    autoneg,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	/*
	 * We didn't get link.  Configure back to the highest speed we tried,
	 * (if there was more than one).  We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
			highest_link_speed, autoneg, autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}

/**
 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: TRUE if autonegotiation enabled
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Implements the Intel SmartSpeed algorithm.
 **/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
				    ixgbe_link_speed speed, bool autoneg,
				    bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 i, j;
	bool link_up = FALSE;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");

	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate.  This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = FALSE;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed, &link_up,
						  FALSE);
			if (status != IXGBE_SUCCESS)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link.  If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = TRUE;
	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
					    autoneg_wait_to_complete);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g. This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Turn SmartSpeed back off. */
	hw->phy.smart_speed_active = FALSE;
	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
					    autoneg_wait_to_complete);

out:
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		DEBUGOUT("Smartspeed has downgraded the link speed "
			 "from the maximum advertised\n");
	return status;
}

/**
 * ixgbe_setup_mac_link_82599 - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: TRUE if autonegotiation enabled
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 **/
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
			       ixgbe_link_speed speed, bool autoneg,
			       bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 start_autoc = autoc;
	u32 orig_autoc = 0;
	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 links_reg;
	u32 i;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;

	DEBUGFUNC("ixgbe_setup_mac_link_82599");

	/* Check to see if speed passed in is supported. */
	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	if (status != IXGBE_SUCCESS)
		goto out;

	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
	}

	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
	if (hw->mac.orig_link_settings_stored)
		orig_autoc = hw->mac.orig_autoc;
	else
		orig_autoc = autoc;

	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
		/* Set KX4/KX/KR support according to speed requested */
		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
				autoc |= IXGBE_AUTOC_KX4_SUPP;
			/* Active SmartSpeed suppresses KR advertisement */
			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
			    (hw->phy.smart_speed_active == FALSE))
				autoc |= IXGBE_AUTOC_KR_SUPP;
		}
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
		/* Switch from 1G SFI to 10G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
		}
	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
		/* Switch from 10G SFI to 1G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			if (autoneg)
				autoc |= IXGBE_AUTOC_LMS_1G_AN;
			else
				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
		}
	}

	if (autoc != start_autoc) {
		/* Restart link */
		autoc |= IXGBE_AUTOC_AN_RESTART;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);

		/* Only poll for autoneg to complete if specified to do so */
		if (autoneg_wait_to_complete) {
			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
				links_reg = 0; /*Just in case Autoneg time=0*/
				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
					links_reg =
					       IXGBE_READ_REG(hw, IXGBE_LINKS);
					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
						break;
					msec_delay(100);
				}
				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
					status =
						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
					DEBUGOUT("Autoneg did not complete.\n");
				}
			}
		}

		/* Add delay to filter out noises during initial link setup */
		msec_delay(50);
	}

out:
	return status;
}

/**
 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: TRUE if autonegotiation enabled
 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
 *
 * Restarts link on PHY and MAC based on settings passed in.
 **/
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg,
					 bool autoneg_wait_to_complete)
{
	s32 status;

	DEBUGFUNC("ixgbe_setup_copper_link_82599");

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
					      autoneg_wait_to_complete);
	/* Set up MAC */
	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);

	return status;
}

/**
 * ixgbe_reset_hw_82599 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
 * reset.
 **/
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
	ixgbe_link_speed link_speed;
	s32 status;
	u32 ctrl, i, autoc, autoc2;
	bool link_up = FALSE;

	DEBUGFUNC("ixgbe_reset_hw_82599");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

	/* PHY ops must be identified and initialized prior to reset */

	/* Identify PHY and related function pointers */
	status = hw->phy.ops.init(hw);

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Setup SFP module if there is one present. */
	if (hw->phy.sfp_setup_needed) {
		status = hw->mac.ops.setup_sfp(hw);
		/* clear the flag even on failure so we don't retry forever */
		hw->phy.sfp_setup_needed = FALSE;
	}

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
		hw->phy.ops.reset(hw);

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
	 * If link reset is used when link is up, it might reset the PHY when
	 * mng is using it.  If link is down or the flag to force full link
	 * reset is set, then perform link reset.
	 */
	ctrl = IXGBE_CTRL_LNK_RST;
	if (!hw->force_full_reset) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
		if (link_up)
			ctrl = IXGBE_CTRL_RST;
	}

	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	if (ctrl & IXGBE_CTRL_RST_MASK) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet.  Otherwise restore the stored original
	 * values since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = TRUE;
	} else {
		if (autoc != hw->mac.orig_autoc)
			/* restore link settings and kick autonegotiation */
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
					IXGBE_AUTOC_AN_RESTART));

		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
				   IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* Save the SAN MAC RAR index */
		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

reset_hw_out:
	return status;
}

/**
 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1115 * @hw: pointer to hardware structure 1116 **/ 1117 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) 1118 { 1119 int i; 1120 u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); 1121 fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; 1122 1123 DEBUGFUNC("ixgbe_reinit_fdir_tables_82599"); 1124 1125 /* 1126 * Before starting reinitialization process, 1127 * FDIRCMD.CMD must be zero. 1128 */ 1129 for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { 1130 if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & 1131 IXGBE_FDIRCMD_CMD_MASK)) 1132 break; 1133 usec_delay(10); 1134 } 1135 if (i >= IXGBE_FDIRCMD_CMD_POLL) { 1136 DEBUGOUT("Flow Director previous command isn't complete, " 1137 "aborting table re-initialization.\n"); 1138 return IXGBE_ERR_FDIR_REINIT_FAILED; 1139 } 1140 1141 IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0); 1142 IXGBE_WRITE_FLUSH(hw); 1143 /* 1144 * 82599 adapters flow director init flow cannot be restarted, 1145 * Workaround 82599 silicon errata by performing the following steps 1146 * before re-writing the FDIRCTRL control register with the same value. 1147 * - write 1 to bit 8 of FDIRCMD register & 1148 * - write 0 to bit 8 of FDIRCMD register 1149 */ 1150 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, 1151 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | 1152 IXGBE_FDIRCMD_CLEARHT)); 1153 IXGBE_WRITE_FLUSH(hw); 1154 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, 1155 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & 1156 ~IXGBE_FDIRCMD_CLEARHT)); 1157 IXGBE_WRITE_FLUSH(hw); 1158 /* 1159 * Clear FDIR Hash register to clear any leftover hashes 1160 * waiting to be programmed. 
1161 */ 1162 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00); 1163 IXGBE_WRITE_FLUSH(hw); 1164 1165 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); 1166 IXGBE_WRITE_FLUSH(hw); 1167 1168 /* Poll init-done after we write FDIRCTRL register */ 1169 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { 1170 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1171 IXGBE_FDIRCTRL_INIT_DONE) 1172 break; 1173 usec_delay(10); 1174 } 1175 if (i >= IXGBE_FDIR_INIT_DONE_POLL) { 1176 DEBUGOUT("Flow Director Signature poll time exceeded!\n"); 1177 return IXGBE_ERR_FDIR_REINIT_FAILED; 1178 } 1179 1180 /* Clear FDIR statistics registers (read to clear) */ 1181 IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT); 1182 IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT); 1183 IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 1184 IXGBE_READ_REG(hw, IXGBE_FDIRMISS); 1185 IXGBE_READ_REG(hw, IXGBE_FDIRLEN); 1186 1187 return IXGBE_SUCCESS; 1188 } 1189 1190 /** 1191 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers 1192 * @hw: pointer to hardware structure 1193 * @fdirctrl: value to write to flow director control register 1194 **/ 1195 static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) 1196 { 1197 int i; 1198 1199 DEBUGFUNC("ixgbe_fdir_enable_82599"); 1200 1201 /* Prime the keys for hashing */ 1202 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); 1203 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); 1204 1205 /* 1206 * Poll init-done after we write the register. Estimated times: 1207 * 10G: PBALLOC = 11b, timing is 60us 1208 * 1G: PBALLOC = 11b, timing is 600us 1209 * 100M: PBALLOC = 11b, timing is 6ms 1210 * 1211 * Multiple these timings by 4 if under full Rx load 1212 * 1213 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for 1214 * 1 msec per poll time. If we're at line rate and drop to 100M, then 1215 * this might not finish in our poll time, but we can live with that 1216 * for now. 
1217 */ 1218 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); 1219 IXGBE_WRITE_FLUSH(hw); 1220 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { 1221 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1222 IXGBE_FDIRCTRL_INIT_DONE) 1223 break; 1224 msec_delay(1); 1225 } 1226 1227 if (i >= IXGBE_FDIR_INIT_DONE_POLL) 1228 DEBUGOUT("Flow Director poll time exceeded!\n"); 1229 } 1230 1231 /** 1232 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters 1233 * @hw: pointer to hardware structure 1234 * @fdirctrl: value to write to flow director control register, initially 1235 * contains just the value of the Rx packet buffer allocation 1236 **/ 1237 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl) 1238 { 1239 DEBUGFUNC("ixgbe_init_fdir_signature_82599"); 1240 1241 /* 1242 * Continue setup of fdirctrl register bits: 1243 * Move the flexible bytes to use the ethertype - shift 6 words 1244 * Set the maximum length per hash bucket to 0xA filters 1245 * Send interrupt when 64 filters are left 1246 */ 1247 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | 1248 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | 1249 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); 1250 1251 /* write hashes and fdirctrl register, poll for completion */ 1252 ixgbe_fdir_enable_82599(hw, fdirctrl); 1253 1254 return IXGBE_SUCCESS; 1255 } 1256 1257 /** 1258 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters 1259 * @hw: pointer to hardware structure 1260 * @fdirctrl: value to write to flow director control register, initially 1261 * contains just the value of the Rx packet buffer allocation 1262 **/ 1263 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) 1264 { 1265 DEBUGFUNC("ixgbe_init_fdir_perfect_82599"); 1266 1267 /* 1268 * Continue setup of fdirctrl register bits: 1269 * Turn perfect match filtering on 1270 * Report hash in RSS field of Rx wb descriptor 1271 * Initialize the drop queue 1272 * Move the flexible bytes to use the ethertype 
- shift 6 words 1273 * Set the maximum length per hash bucket to 0xA filters 1274 * Send interrupt when 64 (0x4 * 16) filters are left 1275 */ 1276 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH | 1277 IXGBE_FDIRCTRL_REPORT_STATUS | 1278 (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) | 1279 (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | 1280 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | 1281 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); 1282 1283 /* write hashes and fdirctrl register, poll for completion */ 1284 ixgbe_fdir_enable_82599(hw, fdirctrl); 1285 1286 return IXGBE_SUCCESS; 1287 } 1288 1289 /* 1290 * These defines allow us to quickly generate all of the necessary instructions 1291 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION 1292 * for values 0 through 15 1293 */ 1294 #define IXGBE_ATR_COMMON_HASH_KEY \ 1295 (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY) 1296 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ 1297 do { \ 1298 u32 n = (_n); \ 1299 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ 1300 common_hash ^= lo_hash_dword >> n; \ 1301 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ 1302 bucket_hash ^= lo_hash_dword >> n; \ 1303 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ 1304 sig_hash ^= lo_hash_dword << (16 - n); \ 1305 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ 1306 common_hash ^= hi_hash_dword >> n; \ 1307 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ 1308 bucket_hash ^= hi_hash_dword >> n; \ 1309 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ 1310 sig_hash ^= hi_hash_dword << (16 - n); \ 1311 } while (0); 1312 1313 /** 1314 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash 1315 * @stream: input bitstream to compute the hash on 1316 * 1317 * This function is almost identical to the function above but contains 1318 * several optomizations such as unwinding all of the loops, letting the 1319 * compiler work out all of the conditional ifs since the keys are 
static 1320 * defines, and computing two keys at once since the hashed dword stream 1321 * will be the same for both keys. 1322 **/ 1323 u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, 1324 union ixgbe_atr_hash_dword common) 1325 { 1326 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; 1327 u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; 1328 1329 /* record the flow_vm_vlan bits as they are a key part to the hash */ 1330 flow_vm_vlan = IXGBE_NTOHL(input.dword); 1331 1332 /* generate common hash dword */ 1333 hi_hash_dword = IXGBE_NTOHL(common.dword); 1334 1335 /* low dword is word swapped version of common */ 1336 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); 1337 1338 /* apply flow ID/VM pool/VLAN ID bits to hash words */ 1339 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); 1340 1341 /* Process bits 0 and 16 */ 1342 IXGBE_COMPUTE_SIG_HASH_ITERATION(0); 1343 1344 /* 1345 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to 1346 * delay this because bit 0 of the stream should not be processed 1347 * so we do not add the vlan until after bit 0 was processed 1348 */ 1349 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); 1350 1351 /* Process remaining 30 bit of the key */ 1352 IXGBE_COMPUTE_SIG_HASH_ITERATION(1); 1353 IXGBE_COMPUTE_SIG_HASH_ITERATION(2); 1354 IXGBE_COMPUTE_SIG_HASH_ITERATION(3); 1355 IXGBE_COMPUTE_SIG_HASH_ITERATION(4); 1356 IXGBE_COMPUTE_SIG_HASH_ITERATION(5); 1357 IXGBE_COMPUTE_SIG_HASH_ITERATION(6); 1358 IXGBE_COMPUTE_SIG_HASH_ITERATION(7); 1359 IXGBE_COMPUTE_SIG_HASH_ITERATION(8); 1360 IXGBE_COMPUTE_SIG_HASH_ITERATION(9); 1361 IXGBE_COMPUTE_SIG_HASH_ITERATION(10); 1362 IXGBE_COMPUTE_SIG_HASH_ITERATION(11); 1363 IXGBE_COMPUTE_SIG_HASH_ITERATION(12); 1364 IXGBE_COMPUTE_SIG_HASH_ITERATION(13); 1365 IXGBE_COMPUTE_SIG_HASH_ITERATION(14); 1366 IXGBE_COMPUTE_SIG_HASH_ITERATION(15); 1367 1368 /* combine common_hash result with signature and bucket hashes */ 1369 bucket_hash ^= common_hash; 
1370 bucket_hash &= IXGBE_ATR_HASH_MASK; 1371 1372 sig_hash ^= common_hash << 16; 1373 sig_hash &= IXGBE_ATR_HASH_MASK << 16; 1374 1375 /* return completed signature hash */ 1376 return sig_hash ^ bucket_hash; 1377 } 1378 1379 /** 1380 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter 1381 * @hw: pointer to hardware structure 1382 * @input: unique input dword 1383 * @common: compressed common input dword 1384 * @queue: queue index to direct traffic to 1385 **/ 1386 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, 1387 union ixgbe_atr_hash_dword input, 1388 union ixgbe_atr_hash_dword common, 1389 u8 queue) 1390 { 1391 u64 fdirhashcmd; 1392 u32 fdircmd; 1393 1394 DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599"); 1395 1396 /* 1397 * Get the flow_type in order to program FDIRCMD properly 1398 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 1399 */ 1400 switch (input.formatted.flow_type) { 1401 case IXGBE_ATR_FLOW_TYPE_TCPV4: 1402 case IXGBE_ATR_FLOW_TYPE_UDPV4: 1403 case IXGBE_ATR_FLOW_TYPE_SCTPV4: 1404 case IXGBE_ATR_FLOW_TYPE_TCPV6: 1405 case IXGBE_ATR_FLOW_TYPE_UDPV6: 1406 case IXGBE_ATR_FLOW_TYPE_SCTPV6: 1407 break; 1408 default: 1409 DEBUGOUT(" Error on flow type input\n"); 1410 return IXGBE_ERR_CONFIG; 1411 } 1412 1413 /* configure FDIRCMD register */ 1414 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | 1415 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; 1416 fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; 1417 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; 1418 1419 /* 1420 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits 1421 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. 
1422 */ 1423 fdirhashcmd = (u64)fdircmd << 32; 1424 fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common); 1425 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); 1426 1427 DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); 1428 1429 return IXGBE_SUCCESS; 1430 } 1431 1432 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ 1433 do { \ 1434 u32 n = (_n); \ 1435 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ 1436 bucket_hash ^= lo_hash_dword >> n; \ 1437 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ 1438 bucket_hash ^= hi_hash_dword >> n; \ 1439 } while (0); 1440 1441 /** 1442 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash 1443 * @atr_input: input bitstream to compute the hash on 1444 * @input_mask: mask for the input bitstream 1445 * 1446 * This function serves two main purposes. First it applys the input_mask 1447 * to the atr_input resulting in a cleaned up atr_input data stream. 1448 * Secondly it computes the hash and stores it in the bkt_hash field at 1449 * the end of the input byte stream. This way it will be available for 1450 * future use without needing to recompute the hash. 
1451 **/ 1452 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, 1453 union ixgbe_atr_input *input_mask) 1454 { 1455 1456 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; 1457 u32 bucket_hash = 0; 1458 1459 /* Apply masks to input data */ 1460 input->dword_stream[0] &= input_mask->dword_stream[0]; 1461 input->dword_stream[1] &= input_mask->dword_stream[1]; 1462 input->dword_stream[2] &= input_mask->dword_stream[2]; 1463 input->dword_stream[3] &= input_mask->dword_stream[3]; 1464 input->dword_stream[4] &= input_mask->dword_stream[4]; 1465 input->dword_stream[5] &= input_mask->dword_stream[5]; 1466 input->dword_stream[6] &= input_mask->dword_stream[6]; 1467 input->dword_stream[7] &= input_mask->dword_stream[7]; 1468 input->dword_stream[8] &= input_mask->dword_stream[8]; 1469 input->dword_stream[9] &= input_mask->dword_stream[9]; 1470 input->dword_stream[10] &= input_mask->dword_stream[10]; 1471 1472 /* record the flow_vm_vlan bits as they are a key part to the hash */ 1473 flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]); 1474 1475 /* generate common hash dword */ 1476 hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^ 1477 input->dword_stream[2] ^ 1478 input->dword_stream[3] ^ 1479 input->dword_stream[4] ^ 1480 input->dword_stream[5] ^ 1481 input->dword_stream[6] ^ 1482 input->dword_stream[7] ^ 1483 input->dword_stream[8] ^ 1484 input->dword_stream[9] ^ 1485 input->dword_stream[10]); 1486 1487 /* low dword is word swapped version of common */ 1488 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); 1489 1490 /* apply flow ID/VM pool/VLAN ID bits to hash words */ 1491 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); 1492 1493 /* Process bits 0 and 16 */ 1494 IXGBE_COMPUTE_BKT_HASH_ITERATION(0); 1495 1496 /* 1497 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to 1498 * delay this because bit 0 of the stream should not be processed 1499 * so we do not add the vlan until after bit 0 was processed 1500 */ 1501 
lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); 1502 1503 /* Process remaining 30 bit of the key */ 1504 IXGBE_COMPUTE_BKT_HASH_ITERATION(1); 1505 IXGBE_COMPUTE_BKT_HASH_ITERATION(2); 1506 IXGBE_COMPUTE_BKT_HASH_ITERATION(3); 1507 IXGBE_COMPUTE_BKT_HASH_ITERATION(4); 1508 IXGBE_COMPUTE_BKT_HASH_ITERATION(5); 1509 IXGBE_COMPUTE_BKT_HASH_ITERATION(6); 1510 IXGBE_COMPUTE_BKT_HASH_ITERATION(7); 1511 IXGBE_COMPUTE_BKT_HASH_ITERATION(8); 1512 IXGBE_COMPUTE_BKT_HASH_ITERATION(9); 1513 IXGBE_COMPUTE_BKT_HASH_ITERATION(10); 1514 IXGBE_COMPUTE_BKT_HASH_ITERATION(11); 1515 IXGBE_COMPUTE_BKT_HASH_ITERATION(12); 1516 IXGBE_COMPUTE_BKT_HASH_ITERATION(13); 1517 IXGBE_COMPUTE_BKT_HASH_ITERATION(14); 1518 IXGBE_COMPUTE_BKT_HASH_ITERATION(15); 1519 1520 /* 1521 * Limit hash to 13 bits since max bucket count is 8K. 1522 * Store result at the end of the input stream. 1523 */ 1524 input->formatted.bkt_hash = bucket_hash & 0x1FFF; 1525 } 1526 1527 /** 1528 * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks 1529 * @input_mask: mask to be bit swapped 1530 * 1531 * The source and destination port masks for flow director are bit swapped 1532 * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to 1533 * generate a correctly swapped value we need to bit swap the mask and that 1534 * is what is accomplished by this function. 
 **/
static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
{
	/* dst port occupies the upper 16 bits, src port the lower 16 */
	u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
	mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
	/* classic O(log n) bit-reversal: swap adjacent bits, pairs, nibbles, bytes */
	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
}

/*
 * These two macros are meant to address the fact that we have registers
 * that are either all or in part big-endian.  As a result on big-endian
 * systems we will end up byte swapping the value to little-endian before
 * it is byte swapped again and written to the hardware in the original
 * big-endian format.
 */
#define IXGBE_STORE_AS_BE32(_value) \
	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))

#define IXGBE_WRITE_REG_BE32(a, reg, value) \
	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))

#define IXGBE_STORE_AS_BE16(_value) \
	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))

/*
 * ixgbe_fdir_set_input_mask_82599 - program the Flow Director mask registers
 * from @input_mask.  A zero field means "fully masked" (wildcard); partial
 * masks other than the recognized values are rejected with IXGBE_ERR_CONFIG.
 */
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask)
{
	/* mask IPv6 since it is currently not supported */
	u32 fdirm = IXGBE_FDIRM_DIPv6;
	u32 fdirtcpm;

	/* NOTE(review): string says "_set_atr_input_mask_" (historical name)
	 * while the function is "_set_input_mask_" - kept as-is since it is a
	 * runtime debug string */
	DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");

	/*
	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field.  Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
	 * cannot be masked out in this implementation.
	 *
	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
	 * point in time.
	 */

	/* verify bucket hash is cleared on hash generation */
	if (input_mask->formatted.bkt_hash)
		DEBUGOUT(" bucket hash should always be 0 in mask\n");

	/* Program FDIRM and verify partial masks */
	switch (input_mask->formatted.vm_pool & 0x7F) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_POOL;
		/* FALLTHROUGH */
	case 0x7F:
		break;
	default:
		DEBUGOUT(" Error on vm pool mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_L4P;
		/* masking L4 type while specifying ports is contradictory */
		if (input_mask->formatted.dst_port ||
		    input_mask->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port mask\n");
			return IXGBE_ERR_CONFIG;
		}
		/* FALLTHROUGH */
	case IXGBE_ATR_L4TYPE_MASK:
		break;
	default:
		DEBUGOUT(" Error on flow type mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
	case 0x0000:
		/* mask VLAN ID, fall through to mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* FALLTHROUGH */
	case 0x0FFF:
		/* mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANP;
		break;
	case 0xE000:
		/* mask VLAN ID only, fall through */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* FALLTHROUGH */
	case 0xEFFF:
		/* no VLAN fields masked */
		break;
	default:
		DEBUGOUT(" Error on VLAN mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
	case 0x0000:
		/* Mask Flex Bytes, fall through */
		fdirm |= IXGBE_FDIRM_FLEX;
		/* FALLTHROUGH */
	case 0xFFFF:
		break;
	default:
		DEBUGOUT(" Error on flexible byte mask\n");
		return IXGBE_ERR_CONFIG;
	}

	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	/* store the TCP/UDP port masks, bit reversed from port layout */
	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);

	/* write both the same so that UDP and TCP use the same mask */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);

	/* store source and destination IP masks (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
			     ~input_mask->formatted.src_ip[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
			     ~input_mask->formatted.dst_ip[0]);

	return IXGBE_SUCCESS;
}

/*
 * ixgbe_fdir_write_perfect_filter_82599 - program one perfect-match filter
 * into filter memory.  All match registers are written first, flushed, and
 * only then is FDIRCMD written so the hardware snapshots a consistent set.
 */
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue)
{
	u32 fdirport, fdirvlan, fdirhash, fdircmd;

	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");

	/* currently IPv6 is not supported, must be programmed with 0 */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
			     input->formatted.src_ip[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
			     input->formatted.src_ip[1]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
			     input->formatted.src_ip[2]);

	/* record the source address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);

	/* record the first 32 bits of the destination address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);

	/* record source and destination port (little-endian)*/
	fdirport = IXGBE_NTOHS(input->formatted.dst_port);
	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
	fdirport |= IXGBE_NTOHS(input->formatted.src_port);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);

	/* record vlan (little-endian) and flex_bytes(big-endian) */
	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	if (queue == IXGBE_FDIR_DROP_QUEUE)
		fdircmd |= IXGBE_FDIRCMD_DROP;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	return IXGBE_SUCCESS;
}

/*
 * ixgbe_fdir_erase_perfect_filter_82599 - query for and remove a perfect
 * filter identified by its bucket hash and @soft_id.  Returns
 * IXGBE_ERR_FDIR_REINIT_FAILED if the query command never completed.
 */
/* NOTE(review): no DEBUGFUNC trace here unlike sibling functions - kept
 * as-is since doc-only changes cannot add code */
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id)
{
	u32 fdirhash;
	u32 fdircmd = 0;
	u32 retry_count;
	s32 err = IXGBE_SUCCESS;

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/* flush hash to HW */
	IXGBE_WRITE_FLUSH(hw);

	/* Query if filter is present */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

	for (retry_count = 10; retry_count; retry_count--) {
		/* allow 10us for query to process */
		usec_delay(10);
		/* verify query completed successfully */
		fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
		if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
			break;
	}

	if (!retry_count)
		err = IXGBE_ERR_FDIR_REINIT_FAILED;

	/* if filter exists in hardware then remove it */
	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
		IXGBE_WRITE_FLUSH(hw);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
	}

	return err;
}

/**
 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
 * @hw: pointer to hardware structure
 * @input: input bitstream
 * @input_mask: mask for the input bitstream
 * @soft_id: software index for the filters
 * @queue: queue index to direct traffic to
 *
 * Note that the caller to this function must lock before calling, since the
 * hardware writes must be protected from one another.
 **/
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
					union ixgbe_atr_input *input,
					union ixgbe_atr_input *input_mask,
					u16 soft_id, u8 queue)
{
	s32 err = IXGBE_ERR_CONFIG;

	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");

	/*
	 * Check flow_type formatting, and bail out before we touch the hardware
	 * if there's a configuration issue
	 */
	switch (input->formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		/* raw IP flow: force the L4-protocol mask on, ports must be 0 */
		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
		if (input->formatted.dst_port || input->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port\n");
			return IXGBE_ERR_CONFIG;
		}
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		/* SCTP has no port matching support in this hardware */
		if (input->formatted.dst_port || input->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port\n");
			return IXGBE_ERR_CONFIG;
		}
		/* FALLTHROUGH */
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
						  IXGBE_ATR_L4TYPE_MASK;
		break;
	default:
		DEBUGOUT(" Error on flow type input\n");
		return err;
	}

	/* program input mask into the HW */
	err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
	if (err)
		return err;

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(input, input_mask);

	/* program filters to filter memory */
	return ixgbe_fdir_write_perfect_filter_82599(hw, input,
						     soft_id, queue);
}

/**
 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
 * @hw: pointer to hardware structure
 * @reg: analog register to read
 * @val: read value
 *
 * Performs read operation to Omer analog register specified.
 **/
s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32 core_ctl;

	DEBUGFUNC("ixgbe_read_analog_reg8_82599");

	/* issue the read command, then give the analog block 10us to respond */
	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
			(reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);
	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
	/* result is in the low byte of CORECTL */
	*val = (u8)core_ctl;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
 * @hw: pointer to hardware structure
 * @reg: atlas register to write
 * @val: value to write
 *
 * Performs write operation to Omer analog register specified.
 **/
s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
{
	u32 core_ctl;

	DEBUGFUNC("ixgbe_write_analog_reg8_82599");

	/* register index in bits 15:8, value in the low byte */
	core_ctl = (reg << 8) | val;
	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware using the generic start_hw function
 * and the generation start_hw function.
 * Then performs revision-specific operations, if any.
 **/
s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_hw_82599");

	ret_val = ixgbe_start_hw_generic(hw);
	if (ret_val != IXGBE_SUCCESS)
		goto out;

	ret_val = ixgbe_start_hw_gen2(hw);
	if (ret_val != IXGBE_SUCCESS)
		goto out;

	/* We need to run link autotry after the driver loads */
	hw->mac.autotry_restart = TRUE;

	/* NOTE(review): ret_val is necessarily IXGBE_SUCCESS here (both
	 * failure paths goto out), so this guard is redundant but harmless */
	if (ret_val == IXGBE_SUCCESS)
		ret_val = ixgbe_verify_fw_version_82599(hw);
out:
	return ret_val;
}

/**
 * ixgbe_identify_phy_82599 - Get physical layer module
 * @hw: pointer to hardware structure
 *
 * Determines the physical layer module found on the current adapter.
 * If PHY already detected, maintains current PHY type in hw struct,
 * otherwise executes the PHY detection routine.
 **/
s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
{
	/* initializer is immediately overwritten below; kept for safety */
	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;

	DEBUGFUNC("ixgbe_identify_phy_82599");

	/* Detect PHY if not unknown - returns success if already detected. */
	status = ixgbe_identify_phy_generic(hw);
	if (status != IXGBE_SUCCESS) {
		/* 82599 10GBASE-T requires an external PHY */
		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
			goto out;
		else
			status = ixgbe_identify_module_generic(hw);
	}

	/* Set PHY type none if no PHY detected */
	if (hw->phy.type == ixgbe_phy_unknown) {
		hw->phy.type = ixgbe_phy_none;
		status = IXGBE_SUCCESS;
	}

	/* Return error if SFP module has been detected but is not supported */
	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
		status = IXGBE_ERR_SFP_NOT_SUPPORTED;

out:
	return status;
}

/**
 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
 * @hw: pointer to hardware structure
 *
 * Determines physical layer capabilities of the current configuration.
 **/
u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
{
	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u16 ext_ability = 0;
	u8 comp_codes_10g = 0;
	u8 comp_codes_1g = 0;

	/* NOTE(review): trace string says "support" not "supported" - kept
	 * as-is since it is a runtime debug string */
	DEBUGFUNC("ixgbe_get_support_physical_layer_82599");

	hw->phy.ops.identify(hw);

	/* external copper PHY: query its extended-ability MDIO register */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_cu_unknown:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
				     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
		goto out;
	default:
		break;
	}

	/* otherwise derive the layer from the link-mode-select bits */
	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
					 IXGBE_PHYSICAL_LAYER_1000BASE_BX;
			goto out;
		} else
			/* SFI mode so read SFP module */
			goto sfp_check;
		break;
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
		goto out;
		break;
	case IXGBE_AUTOC_LMS_10G_SERIAL:
		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
			goto out;
		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
			goto sfp_check;
		break;
	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
		/* backplane modes can advertise several layers at once */
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
		goto out;
		break;
	default:
		goto out;
		break;
	}

sfp_check:
	/* SFP check must be done last since DA modules are sometimes used to
	 * test KR mode -  we need to id KR mode correctly before SFP module.
	 * Call identify_sfp because the pluggable module may have changed */
	hw->phy.ops.identify_sfp(hw);
	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
		goto out;

	switch (hw->phy.type) {
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
		break;
	case ixgbe_phy_sfp_ftl_active:
	case ixgbe_phy_sfp_active_unknown:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
		break;
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
		/* optical module: ask the SFP EEPROM what it supports */
		hw->phy.ops.read_i2c_eeprom(hw,
			IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
		hw->phy.ops.read_i2c_eeprom(hw,
			IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
		else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
		else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
		break;
	default:
		break;
	}

out:
	return physical_layer;
}

/**
 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
 * @hw: pointer to hardware structure
 * @regval: register value to write to RXCTRL
 *
 * Enables the Rx DMA unit for 82599
 **/
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
{

	DEBUGFUNC("ixgbe_enable_rx_dma_82599");

	/*
	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
	 * If traffic is incoming before we enable the Rx unit, it could hang
	 * the Rx DMA unit.  Therefore, make sure the security engine is
	 * completely disabled prior to enabling the Rx unit.
2078 */ 2079 2080 hw->mac.ops.disable_sec_rx_path(hw); 2081 2082 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); 2083 2084 hw->mac.ops.enable_sec_rx_path(hw); 2085 2086 return IXGBE_SUCCESS; 2087 } 2088 2089 /** 2090 * ixgbe_verify_fw_version_82599 - verify fw version for 82599 2091 * @hw: pointer to hardware structure 2092 * 2093 * Verifies that installed the firmware version is 0.6 or higher 2094 * for SFI devices. All 82599 SFI devices should have version 0.6 or higher. 2095 * 2096 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or 2097 * if the FW version is not supported. 2098 **/ 2099 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) 2100 { 2101 s32 status = IXGBE_ERR_EEPROM_VERSION; 2102 u16 fw_offset, fw_ptp_cfg_offset; 2103 u16 fw_version = 0; 2104 2105 DEBUGFUNC("ixgbe_verify_fw_version_82599"); 2106 2107 /* firmware check is only necessary for SFI devices */ 2108 if (hw->phy.media_type != ixgbe_media_type_fiber) { 2109 status = IXGBE_SUCCESS; 2110 goto fw_version_out; 2111 } 2112 2113 /* get the offset to the Firmware Module block */ 2114 hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); 2115 2116 if ((fw_offset == 0) || (fw_offset == 0xFFFF)) 2117 goto fw_version_out; 2118 2119 /* get the offset to the Pass Through Patch Configuration block */ 2120 hw->eeprom.ops.read(hw, (fw_offset + 2121 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), 2122 &fw_ptp_cfg_offset); 2123 2124 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) 2125 goto fw_version_out; 2126 2127 /* get the firmware version */ 2128 hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + 2129 IXGBE_FW_PATCH_VERSION_4), &fw_version); 2130 2131 if (fw_version > 0x5) 2132 status = IXGBE_SUCCESS; 2133 2134 fw_version_out: 2135 return status; 2136 } 2137 2138 /** 2139 * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state. 2140 * @hw: pointer to hardware structure 2141 * 2142 * Returns TRUE if the LESM FW module is present and enabled. Otherwise 2143 * returns FALSE. 
Smart Speed must be disabled if LESM FW module is enabled. 2144 **/ 2145 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) 2146 { 2147 bool lesm_enabled = FALSE; 2148 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; 2149 s32 status; 2150 2151 DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599"); 2152 2153 /* get the offset to the Firmware Module block */ 2154 status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); 2155 2156 if ((status != IXGBE_SUCCESS) || 2157 (fw_offset == 0) || (fw_offset == 0xFFFF)) 2158 goto out; 2159 2160 /* get the offset to the LESM Parameters block */ 2161 status = hw->eeprom.ops.read(hw, (fw_offset + 2162 IXGBE_FW_LESM_PARAMETERS_PTR), 2163 &fw_lesm_param_offset); 2164 2165 if ((status != IXGBE_SUCCESS) || 2166 (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF)) 2167 goto out; 2168 2169 /* get the lesm state word */ 2170 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset + 2171 IXGBE_FW_LESM_STATE_1), 2172 &fw_lesm_state); 2173 2174 if ((status == IXGBE_SUCCESS) && 2175 (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) 2176 lesm_enabled = TRUE; 2177 2178 out: 2179 return lesm_enabled; 2180 } 2181 2182 /** 2183 * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using 2184 * fastest available method 2185 * 2186 * @hw: pointer to hardware structure 2187 * @offset: offset of word in EEPROM to read 2188 * @words: number of words 2189 * @data: word(s) read from the EEPROM 2190 * 2191 * Retrieves 16 bit word(s) read from EEPROM 2192 **/ 2193 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, 2194 u16 words, u16 *data) 2195 { 2196 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 2197 s32 ret_val = IXGBE_ERR_CONFIG; 2198 2199 DEBUGFUNC("ixgbe_read_eeprom_buffer_82599"); 2200 2201 /* 2202 * If EEPROM is detected and can be addressed using 14 bits, 2203 * use EERD otherwise use bit bang 2204 */ 2205 if ((eeprom->type == ixgbe_eeprom_spi) && 2206 (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR)) 2207 
ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words, 2208 data); 2209 else 2210 ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, 2211 words, 2212 data); 2213 2214 return ret_val; 2215 } 2216 2217 /** 2218 * ixgbe_read_eeprom_82599 - Read EEPROM word using 2219 * fastest available method 2220 * 2221 * @hw: pointer to hardware structure 2222 * @offset: offset of word in the EEPROM to read 2223 * @data: word read from the EEPROM 2224 * 2225 * Reads a 16 bit word from the EEPROM 2226 **/ 2227 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, 2228 u16 offset, u16 *data) 2229 { 2230 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 2231 s32 ret_val = IXGBE_ERR_CONFIG; 2232 2233 DEBUGFUNC("ixgbe_read_eeprom_82599"); 2234 2235 /* 2236 * If EEPROM is detected and can be addressed using 14 bits, 2237 * use EERD otherwise use bit bang 2238 */ 2239 if ((eeprom->type == ixgbe_eeprom_spi) && 2240 (offset <= IXGBE_EERD_MAX_ADDR)) 2241 ret_val = ixgbe_read_eerd_generic(hw, offset, data); 2242 else 2243 ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data); 2244 2245 return ret_val; 2246 } 2247 2248 2249