1 /* $NetBSD: ixgbe_82598.c,v 1.12 2018/04/04 08:59:22 msaitoh Exp $ */ 2 3 /****************************************************************************** 4 SPDX-License-Identifier: BSD-3-Clause 5 6 Copyright (c) 2001-2017, Intel Corporation 7 All rights reserved. 8 9 Redistribution and use in source and binary forms, with or without 10 modification, are permitted provided that the following conditions are met: 11 12 1. Redistributions of source code must retain the above copyright notice, 13 this list of conditions and the following disclaimer. 14 15 2. Redistributions in binary form must reproduce the above copyright 16 notice, this list of conditions and the following disclaimer in the 17 documentation and/or other materials provided with the distribution. 18 19 3. Neither the name of the Intel Corporation nor the names of its 20 contributors may be used to endorse or promote products derived from 21 this software without specific prior written permission. 22 23 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 24 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 27 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82598.c 331224 2018-03-19 20:55:05Z erj $*/

#include "ixgbe_type.h"
#include "ixgbe_82598.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"

/* 82598-specific capability limits used to populate hw->mac below. */
#define IXGBE_82598_MAX_TX_QUEUES 32
#define IXGBE_82598_MAX_RX_QUEUES 64
#define IXGBE_82598_RAR_ENTRIES   16
#define IXGBE_82598_MC_TBL_SIZE   128
#define IXGBE_82598_VFT_TBL_SIZE  128
#define IXGBE_82598_RX_PB_SIZE	  512

/* Forward declarations for the MAC-ops callbacks defined in this file. */
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
					     ixgbe_link_speed *speed,
					     bool *autoneg);
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
				      bool autoneg_wait_to_complete);
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed, bool *link_up,
				      bool link_up_wait_to_complete);
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed,
				      bool autoneg_wait_to_complete);
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete);
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
				  u32 headroom, int strategy);
static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
					u8 *sff8472_data);

/**
 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
 * @hw: pointer to the HW structure
 *
 * The defaults for 82598 should be in the range of 50us to 50ms,
 * however the hardware default for these parts is 500us to 1ms which is less
 * than the 10ms recommended by the pci-e spec.  To address this we need to
 * increase the value to either 10ms to 250ms for capability version 1 config,
 * or 16ms to 55ms for version 2.
 **/
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
{
	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
	u16 pcie_devctl2;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
		goto out;

	/*
	 * if capabilities version is type 1 we can write the
	 * timeout of 10ms to 250ms through the GCR register
	 */
	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/*
	 * for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
	IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
out:
	/* disable completion timeout resend */
	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}

/**
 * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
 * @hw: pointer to hardware structure
 *
 * Initialize the function pointers and assign the MAC type for 82598.
 * Does not touch the hardware.
 **/
s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82598");

	/*
	 * NOTE(review): the status of ixgbe_init_phy_ops_generic() is
	 * immediately overwritten; only the result of ixgbe_init_ops_generic()
	 * reaches the caller.  Confirm this is intentional.
	 */
	ret_val = ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.init = ixgbe_init_phy_ops_82598;

	/* MAC */
	mac->ops.start_hw = ixgbe_start_hw_82598;
	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_82598;
	mac->ops.reset_hw = ixgbe_reset_hw_82598;
	mac->ops.get_media_type = ixgbe_get_media_type_82598;
	mac->ops.get_supported_physical_layer =
				    ixgbe_get_supported_physical_layer_82598;
	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598;
	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598;
	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598;
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82598;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = ixgbe_set_vmdq_82598;
	mac->ops.clear_vmdq = ixgbe_clear_vmdq_82598;
	mac->ops.set_vfta = ixgbe_set_vfta_82598;
	mac->ops.set_vlvf = NULL;	/* 82598 has no VLVF registers */
	mac->ops.clear_vfta = ixgbe_clear_vfta_82598;

	/* Flow Control */
	mac->ops.fc_enable = ixgbe_fc_enable_82598;

	/* Device capability limits (see defines at top of file) */
	mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
	mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
	mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
	mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE;
	mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
	mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);

	/* SFP+ Module */
	phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598;
	phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_82598;

	/* Link */
	mac->ops.check_link = ixgbe_check_mac_link_82598;
	mac->ops.setup_link = ixgbe_setup_mac_link_82598;
	mac->ops.flap_tx_laser = NULL;	/* not applicable to 82598 */
	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82598;
	mac->ops.setup_rxpba = ixgbe_set_rxpba_82598;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = NULL;

	mac->ops.get_rtrup2tc = NULL;

	return ret_val;
}

/**
 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
 * @hw: pointer to hardware structure
 *
 * Initialize any function pointers that were not able to be
 * set during init_shared_code because the PHY/SFP type was
 * not known.  Perform the SFP init if necessary.
 **/
s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset;

	DEBUGFUNC("ixgbe_init_phy_ops_82598");

	/* Identify the PHY */
	phy->ops.identify(hw);

	/* Overwrite the link function pointers if copper PHY */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = ixgbe_setup_copper_link_82598;
		mac->ops.get_link_capabilities =
				ixgbe_get_copper_link_capabilities_generic;
	}

	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		/* TNX copper PHY: use the tnx-specific PHY routines */
		phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
					ixgbe_get_phy_firmware_version_tnx;
		break;
	case ixgbe_phy_nl:
		phy->ops.reset = ixgbe_reset_phy_nl;

		/* Call SFP+ identify routine to get the SFP+ module type */
		ret_val = phy->ops.identify_sfp(hw);
		if (ret_val != IXGBE_SUCCESS)
			goto out;
		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}

		/* Check to see if SFP+ module is supported */
		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
							      &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}
		break;
	default:
		break;
	}

out:
	return ret_val;
}

/**
 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware using the generic start_hw function.
 * Disables relaxed ordering, then sets the PCIe completion timeout.
 **/
s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
	u32 regval;
	u32 i;
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_hw_82598");

	ret_val = ixgbe_start_hw_generic(hw);
	if (ret_val)
		return ret_val;

	/* Disable relaxed ordering on every Tx queue DCA control register */
	for (i = 0; ((i < hw->mac.max_tx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
	}

	/* ...and likewise for the Rx queues (both data and header WRO) */
	for (i = 0; ((i < hw->mac.max_rx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

	/* set the completion timeout for interface */
	ixgbe_set_pcie_completion_timeout(hw);

	return ret_val;
}

/**
 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @autoneg: boolean auto-negotiation value
 *
 * Determines the link capabilities by reading the AUTOC register.
 **/
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
					     ixgbe_link_speed *speed,
					     bool *autoneg)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = 0;

	DEBUGFUNC("ixgbe_get_link_capabilities_82598");

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not been
	 * stored, use the current register value.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		/* KX4/KX autoneg: speeds are ORed in from the SUPP bits */
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		break;
	}

	return status;
}

/**
 * ixgbe_get_media_type_82598 - Determines media type
 * @hw: pointer to hardware structure
 *
 * Returns the media type (fiber, copper, backplane)
 **/
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
{
	enum ixgbe_media_type media_type;

	DEBUGFUNC("ixgbe_get_media_type_82598");

	/* Detect if there is a copper PHY attached. */
	switch (hw->phy.type) {
	case ixgbe_phy_cu_unknown:
	case ixgbe_phy_tn:
		media_type = ixgbe_media_type_copper;
		goto out;
	default:
		break;
	}

	/* Media type for I82598 is based on device ID */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82598_BX:
		/* Default device ID is mezzanine card KX/KX4 */
		media_type = ixgbe_media_type_backplane;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
	case IXGBE_DEV_ID_82598EB_XF_LR:
	case IXGBE_DEV_ID_82598EB_SFP_LOM:
		media_type = ixgbe_media_type_fiber;
		break;
	case IXGBE_DEV_ID_82598EB_CX4:
	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
		media_type = ixgbe_media_type_cx4;
		break;
	case IXGBE_DEV_ID_82598AT:
	case IXGBE_DEV_ID_82598AT2:
		media_type = ixgbe_media_type_copper;
		break;
	default:
		media_type = ixgbe_media_type_unknown;
		break;
	}
out:
	return media_type;
}

/**
 * ixgbe_fc_enable_82598 - Enable flow control
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to the current settings.
 **/
s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 fctrl_reg;
	u32 rmcs_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	u32 link_speed = 0;
	int i;
	bool link_up;

	DEBUGFUNC("ixgbe_fc_enable_82598");

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				DEBUGOUT("Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/*
	 * On 82598 having Rx FC on causes resets while doing 1G
	 * so if it's on turn it off once we know link_speed.  For
	 * more details see 82598 Specification update.
	 */
	hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
		switch (hw->fc.requested_mode) {
		case ixgbe_fc_full:
			/* Drop the Rx half; keep Tx pause only */
			hw->fc.requested_mode = ixgbe_fc_tx_pause;
			break;
		case ixgbe_fc_rx_pause:
			hw->fc.requested_mode = ixgbe_fc_none;
			break;
		default:
			/* no change */
			break;
		}
	}

	/* Negotiate the fc mode to use */
	ixgbe_fc_autoneg(hw);

	/* Disable any previous flow control settings */
	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);

	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override.  Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	default:
		DEBUGOUT("Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;	/* NOTE(review): unreachable after the goto above */
	}

	/* Set 802.3x based flow control settings. */
	fctrl_reg |= IXGBE_FCTRL_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
		} else {
			/* Thresholds of zero disable XON/XOFF for this TC */
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
		}

	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}

/**
 * ixgbe_start_mac_link_82598 - Configures MAC link settings
 * @hw: pointer to hardware structure
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Configures link settings based on values in the ixgbe_hw struct.
 * Restarts the link.  Performs autonegotiation if needed.
 **/
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
				      bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_mac_link_82598");

	/* Restart link */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		/* Polling only applies to the KX4 autoneg link modes */
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autonegotiation did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noises during initial link setup */
	msec_delay(50);

	return status;
}

/**
 * ixgbe_validate_link_ready - Function looks for phy link
 * @hw: pointer to hardware structure
 *
 * Function indicates success when phy link is available.  If phy is not ready
 * within 5 seconds of MAC indicating link, the function returns error.
 **/
static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
{
	u32 timeout;
	u16 an_reg;

	/* Workaround applies only to the 82598AT2 device */
	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
		return IXGBE_SUCCESS;

	for (timeout = 0;
	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
		/* NOTE(review): the read_reg status is not checked here */
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);

		if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
		    (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
			break;

		msec_delay(100);
	}

	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
		DEBUGOUT("Link was indicated but link is down\n");
		return IXGBE_ERR_LINK_SETUP;
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_check_mac_link_82598 - Get link/speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: TRUE is link is up, FALSE otherwise
 * @link_up_wait_to_complete: bool used to wait for link up or not
 *
 * Reads the links register to determine if link is up and the current speed
 **/
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed, bool *link_up,
				      bool link_up_wait_to_complete)
{
	u32 links_reg;
	u32 i;
	u16 link_reg, adapt_comp_reg;

	DEBUGFUNC("ixgbe_check_mac_link_82598");

	/*
	 * SERDES PHY requires us to read link status from undocumented
	 * register 0xC79F.  Bit 0 set indicates link is up/ready; clear
	 * indicates link down.  0xC00C is read to check that the XAUI lanes
	 * are active.  Bit 0 clear indicates active; set indicates inactive.
	 */
	if (hw->phy.type == ixgbe_phy_nl) {
		/* Read twice: first read primes the latched status */
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
				     &adapt_comp_reg);
		if (link_up_wait_to_complete) {
			for (i = 0; i < hw->mac.max_link_up_time; i++) {
				if ((link_reg & 1) &&
				    ((adapt_comp_reg & 1) == 0)) {
					*link_up = TRUE;
					break;
				} else {
					*link_up = FALSE;
				}
				msec_delay(100);
				hw->phy.ops.read_reg(hw, 0xC79F,
						     IXGBE_TWINAX_DEV,
						     &link_reg);
				hw->phy.ops.read_reg(hw, 0xC00C,
						     IXGBE_TWINAX_DEV,
						     &adapt_comp_reg);
			}
		} else {
			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
				*link_up = TRUE;
			else
				*link_up = FALSE;
		}

		/* PHY link down overrides whatever the MAC reports */
		if (*link_up == FALSE)
			goto out;
	}

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (link_up_wait_to_complete) {
		for (i = 0; i < hw->mac.max_link_up_time; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}

	if (links_reg & IXGBE_LINKS_SPEED)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else
		*speed = IXGBE_LINK_SPEED_1GB_FULL;

	/* 82598AT2: also require the PHY to confirm link (see helper above) */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
	    (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
		*link_up = FALSE;

out:
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_setup_mac_link_82598 - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 **/
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed,
				      bool autoneg_wait_to_complete)
{
	bool autoneg = FALSE;
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
	u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc = curr_autoc;
	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;

	DEBUGFUNC("ixgbe_setup_mac_link_82598");

	/* Check to see if speed passed in is supported. */
	ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
		status = IXGBE_ERR_LINK_SETUP;

	/* Set KX4/KX support according to speed requested */
	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
		 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
			autoc |= IXGBE_AUTOC_KX4_SUPP;
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
		/* Only touch the register when the value actually changed */
		if (autoc != curr_autoc)
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	}

	if (status == IXGBE_SUCCESS) {
		/*
		 * Setup and restart the link based on the new values in
		 * ixgbe_hw.  This will write the AUTOC register based on the
		 * new stored values
		 */
		status = ixgbe_start_mac_link_82598(hw,
						    autoneg_wait_to_complete);
	}

	return status;
}


/**
 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
 *
 * Sets the link speed in the AUTOC register in the MAC and restarts link.
 **/
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete)
{
	s32 status;

	DEBUGFUNC("ixgbe_setup_copper_link_82598");

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed,
					      autoneg_wait_to_complete);
	/* Set up MAC */
	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);

	return status;
}

/**
 * ixgbe_reset_hw_82598 - Performs hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks and
 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
 * reset.
 **/
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	s32 phy_status = IXGBE_SUCCESS;
	u32 ctrl;
	u32 gheccr;
	u32 i;
	u32 autoc;
	u8 analog_val;

	DEBUGFUNC("ixgbe_reset_hw_82598");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/*
	 * Power up the Atlas Tx lanes if they are currently powered down.
	 * Atlas Tx lanes are powered down for MAC loopback tests, but
	 * they are not automatically restored on reset.
	 */
	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
		/* Enable Tx Atlas so packets can be transmitted again */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					      analog_val);
	}

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE) {
		/* PHY ops must be identified and initialized prior to reset */

		/* Init PHY and function pointers, perform SFP setup */
		phy_status = hw->phy.ops.init(hw);
		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
			goto reset_hw_out;
		if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
			goto mac_reset_top;

		hw->phy.ops.reset(hw);
	}

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  This needs to be a SW reset.
	 * If link reset is used, it might reset the MAC when mng is using it
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST))
			break;
	}
	if (ctrl & IXGBE_CTRL_RST) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.  The flag is cleared first
	 * so this loops at most once.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

	/*
	 * Store the original AUTOC value if it has not been
	 * stored off yet.  Otherwise restore the stored original
	 * AUTOC value since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_link_settings_stored = TRUE;
	} else if (autoc != hw->mac.orig_autoc) {
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table
	 */
	hw->mac.ops.init_rx_addrs(hw);

reset_hw_out:
	/* A PHY failure takes precedence over the MAC reset status */
	if (phy_status != IXGBE_SUCCESS)
		status = phy_status;

	return status;
}

/**
 * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to associate with a VMDq index
 * @vmdq: VMDq set index
 **/
s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_set_vmdq_82598");

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		/* NOTE(review): "%d" with a u32 argument; "%u" would match */
		DEBUGOUT1("RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	rar_high &= ~IXGBE_RAH_VIND_MASK;
	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to associate with a VMDq index
 * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
 **/
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	UNREFERENCED_1PARAMETER(vmdq);

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	/* Only write the register back if a VIND was actually set */
	if (rar_high & IXGBE_RAH_VIND_MASK) {
		rar_high &= ~IXGBE_RAH_VIND_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_set_vfta_82598 - Set VLAN filter table
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VFTA
 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
 * @vlvf_bypass: boolean flag - unused
 *
 * Turn on/off specified VLAN in the VLAN filter table.
 **/
s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			 bool vlan_on, bool vlvf_bypass)
{
	u32 regindex;
	u32 bitindex;
	u32 bits;
	u32 vftabyte;

	UNREFERENCED_1PARAMETER(vlvf_bypass);

	DEBUGFUNC("ixgbe_set_vfta_82598");

	/* VLAN IDs are 12 bits wide */
	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/* Determine 32-bit word position in array */
	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */

	/* Determine the location of the (VMD) queue index */
	vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */

	/* Set the nibble for VMD queue index */
	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
	bits &= (~(0x0F << bitindex));
	bits |= (vind << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);

	/* Determine the location of the bit for this VLAN id */
	bitindex = vlan & 0x1F;   /* lower five bits */

	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
	if (vlan_on)
		/* Turn on this VLAN id */
		bits |= (1 << bitindex);
	else
		/* Turn off this VLAN id */
		bits &= ~(1 << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_clear_vfta_82598 - Clear VLAN filter table
 * @hw: pointer to hardware structure
 *
 * Clears the VLAN filer table, and the VMDq index associated with the filter
 **/
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
	u32 offset;
	u32 vlanbyte;

	DEBUGFUNC("ixgbe_clear_vfta_82598");

	for (offset = 0; offset < hw->mac.vft_size; offset++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);

	/* Also clear all four VMDq-index byte arrays for every entry */
	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
		for (offset = 0; offset < hw->mac.vft_size; offset++)
			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
					0);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
 * @hw: pointer to hardware structure
 * @reg: analog register to read
 * @val: read value
 *
 * Performs read operation to Atlas analog register specified.
 **/
s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32 atlas_ctl;

	DEBUGFUNC("ixgbe_read_analog_reg8_82598");

	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	/* Give the analog interface time to complete before reading back */
	usec_delay(10);
	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
	*val = (u8)atlas_ctl;	/* value is in the low byte */

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
 * @hw: pointer to hardware structure
 * @reg: atlas register to write
 * @val: value to write
 *
 * Performs write operation to Atlas analog register specified.
 **/
s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
{
	u32 atlas_ctl;

	DEBUGFUNC("ixgbe_write_analog_reg8_82598");

	/* Register address goes in bits 15:8, data in the low byte */
	atlas_ctl = (reg << 8) | val;
	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
	IXGBE_WRITE_FLUSH(hw);	/* force the posted write out... */
	usec_delay(10);		/* ...then let the analog block settle */

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
 * @hw: pointer to hardware structure
 * @dev_addr: address to read from
 * @byte_offset: byte offset to read from dev_addr
 * @eeprom_data: value read
 *
 * Performs 8 bit read operation to SFP module's EEPROM over I2C
 * interface, proxied through the NetLogic PHY's MDIO-mapped SDA/SCL
 * registers.  Serializes against firmware with the per-port PHY
 * software/firmware semaphore.
 **/
static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
				    u8 byte_offset, u8 *eeprom_data)
{
	s32 status = IXGBE_SUCCESS;
	u16 sfp_addr = 0;
	u16 sfp_data = 0;
	u16 sfp_stat = 0;
	u16 gssr;
	u32 i;

	DEBUGFUNC("ixgbe_read_i2c_phy_82598");

	/* Pick the semaphore that matches this port's LAN ID */
	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
		gssr = IXGBE_GSSR_PHY1_SM;
	else
		gssr = IXGBE_GSSR_PHY0_SM;

	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
		return IXGBE_ERR_SWFW_SYNC;

	if (hw->phy.type == ixgbe_phy_nl) {
		/*
		 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
		 * 0xC30D. These registers are used to talk to the SFP+
		 * module's EEPROM through the SDA/SCL (I2C) interface.
		 */
		/* Device address in the high byte, offset in the low byte,
		 * plus the read-command flag */
		sfp_addr = (dev_addr << 8) + byte_offset;
		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
		hw->phy.ops.write_reg_mdi(hw,
					  IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
					  IXGBE_MDIO_PMA_PMD_DEV_TYPE,
					  sfp_addr);

		/* Poll status: up to 100 iterations, 10 ms apart, until the
		 * transaction leaves the in-progress state */
		for (i = 0; i < 100; i++) {
			hw->phy.ops.read_reg_mdi(hw,
						IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
						IXGBE_MDIO_PMA_PMD_DEV_TYPE,
						&sfp_stat);
			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
				break;
			msec_delay(10);
		}

		/* Anything other than PASS (including a timeout still in
		 * progress) is treated as "module not present" */
		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
			DEBUGOUT("EEPROM read did not pass.\n");
			status = IXGBE_ERR_SFP_NOT_PRESENT;
			goto out;
		}

		/* Read data */
		hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
					 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);

		/* Result byte is returned in the high byte of the register */
		*eeprom_data = (u8)(sfp_data >> 8);
	} else {
		/* Only the NetLogic PHY supports this I2C proxy path */
		status = IXGBE_ERR_PHY;
	}

out:
	hw->mac.ops.release_swfw_sync(hw, gssr);
	return status;
}

/**
 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
 * @hw: pointer to hardware structure
 * @byte_offset: EEPROM byte offset to read
 * @eeprom_data: value read
 *
 * Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
 **/
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
				u8 *eeprom_data)
{
	/* Thin wrapper: fixed device address selects the SFP module's
	 * identification EEPROM */
	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
					byte_offset, eeprom_data);
}

/**
 * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset at address 0xA2
 * @sff8472_data: value read
 *
 * Performs 8 bit read operation to SFP module's SFF-8472 data over I2C
 **/
static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
					u8 *sff8472_data)
{
	/* Same transport as the ID EEPROM, but addressed at the SFF-8472
	 * diagnostics device (second I2C device address) */
	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
					byte_offset, sff8472_data);
}

/**
 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
 * @hw: pointer to hardware structure
 *
 * Determines physical layer capabilities of the current configuration.
 * Result is built up in stages: copper PHY ability bits first (early
 * exit), then the AUTOC link-mode-select field, then NetLogic SFP type,
 * and finally a device-ID override for parts whose AUTOC setting is
 * ambiguous.
 **/
u64 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
{
	u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u16 ext_ability = 0;

	DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");

	hw->phy.ops.identify(hw);

	/* Copper PHY must be checked before AUTOC LMS to determine correct
	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_cu_unknown:
		/* Query the PHY's extended-ability register over MDIO and
		 * translate each ability bit to a physical-layer flag */
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
		    IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
		goto out;
	default:
		break;
	}

	/* Non-copper: infer the layer from the AUTOC link mode select */
	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		else
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
		break;
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		else /* XAUI */
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
		break;
	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		/* KX and KX4 support are independent bits; both may be set */
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		break;
	default:
		break;
	}

	/* A NetLogic PHY's SFP cage determines the actual media; this
	 * overrides whatever the AUTOC decode produced above */
	if (hw->phy.type == ixgbe_phy_nl) {
		hw->phy.ops.identify_sfp(hw);

		switch (hw->phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
			break;
		case ixgbe_sfp_type_sr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
			break;
		case ixgbe_sfp_type_lr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
			break;
		default:
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
			break;
		}
	}

	/* Final override: these device IDs have a fixed media type
	 * regardless of the AUTOC decode */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
		break;
	case IXGBE_DEV_ID_82598EB_XF_LR:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
		break;
	default:
		break;
	}

out:
	return physical_layer;
}

/**
 * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
 * port devices.
 * @hw: pointer to the HW structure
 *
 * Calls common function and corrects issue with some single port devices
 * that enable LAN1 but not LAN0.
 **/
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u16 pci_gen = 0;
	u16 pci_ctrl2 = 0;

	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");

	ixgbe_set_lan_id_multi_port_pcie(hw);

	/* check if LAN0 is disabled; a PCIe-general pointer of 0 or
	 * 0xFFFF means the EEPROM word is absent/blank, so skip */
	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {

		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);

		/* if LAN0 is completely disabled force function to 0 */
		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {

			bus->func = 0;
		}
	}
}

/**
 * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
 * @hw: pointer to hardware structure
 *
 * Sets the DCA TX/RX control write-relaxed-ordering enable bits on
 * every queue, capped at the 82598 DCA queue limit.
 **/
void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
{
	u32 regval;
	u32 i;

	DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");

	/* Enable relaxed ordering on TX descriptor writeback */
	for (i = 0; ((i < hw->mac.max_tx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
	}

	/* Likewise for RX data and header writes */
	for (i = 0; ((i < hw->mac.max_rx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

}

/**
 * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
 * @hw: pointer to hardware structure
 * @num_pb: number of packet buffers to allocate
 * @headroom: reserve n KB of headroom
 * @strategy: packet buffer allocation strategy
 *
 * Programs the per-TC RX packet buffer sizes (weighted: first four at
 * 80KB, rest at 48KB; equal: all at 64KB) and fixed 40KB TX buffers.
 **/
static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
				  u32 headroom, int strategy)
{
	u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
	u8 i = 0;
	UNREFERENCED_1PARAMETER(headroom);

	if (!num_pb)
		return;

	/* Setup Rx packet buffer sizes */
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
		/* Setup the first four at 80KB */
		rxpktsize = IXGBE_RXPBSIZE_80KB;
		for (; i < 4; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* Setup the last four at 48KB...don't re-init i */
		rxpktsize = IXGBE_RXPBSIZE_48KB;
		/* Fall Through */
	case PBA_STRATEGY_EQUAL:
	default:
		/* Divide the remaining Rx packet buffer evenly among the TCs
		 * (deliberate fall-through: i continues from 4 in the
		 * weighted case, from 0 otherwise) */
		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	}

	/* Setup Tx packet buffer sizes */
	for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
}

/**
 * ixgbe_enable_rx_dma_82598 - Enable the Rx DMA unit
 * @hw: pointer to hardware structure
 * @regval: register value to write to RXCTRL
 *
 * Enables the Rx DMA unit
 **/
s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval)
{
	DEBUGFUNC("ixgbe_enable_rx_dma_82598");

	/* Caller supplies the full RXCTRL value; write it straight through */
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);

	return IXGBE_SUCCESS;
}