1 /* $NetBSD: ixgbe_common.c,v 1.43 2022/06/06 02:16:37 msaitoh Exp $ */ 2 3 /****************************************************************************** 4 SPDX-License-Identifier: BSD-3-Clause 5 6 Copyright (c) 2001-2020, Intel Corporation 7 All rights reserved. 8 9 Redistribution and use in source and binary forms, with or without 10 modification, are permitted provided that the following conditions are met: 11 12 1. Redistributions of source code must retain the above copyright notice, 13 this list of conditions and the following disclaimer. 14 15 2. Redistributions in binary form must reproduce the above copyright 16 notice, this list of conditions and the following disclaimer in the 17 documentation and/or other materials provided with the distribution. 18 19 3. Neither the name of the Intel Corporation nor the names of its 20 contributors may be used to endorse or promote products derived from 21 this software without specific prior written permission. 22 23 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 24 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 27 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 POSSIBILITY OF SUCH DAMAGE. 
34 35 ******************************************************************************/ 36 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_common.c 331224 2018-03-19 20:55:05Z erj $*/ 37 38 #include <sys/cdefs.h> 39 __KERNEL_RCSID(0, "$NetBSD: ixgbe_common.c,v 1.43 2022/06/06 02:16:37 msaitoh Exp $"); 40 41 #include "ixgbe_common.h" 42 #include "ixgbe_phy.h" 43 #include "ixgbe_dcb.h" 44 #include "ixgbe_dcb_82599.h" 45 #include "ixgbe_api.h" 46 47 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); 48 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); 49 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); 50 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); 51 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); 52 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, 53 u16 count); 54 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); 55 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 56 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 57 static void ixgbe_release_eeprom(struct ixgbe_hw *hw); 58 59 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); 60 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, 61 u16 *san_mac_offset); 62 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 63 u16 words, u16 *data); 64 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 65 u16 words, u16 *data); 66 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, 67 u16 offset); 68 69 /** 70 * ixgbe_init_ops_generic - Inits function ptrs 71 * @hw: pointer to the hardware structure 72 * 73 * Initialize the function pointers. 
 **/
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	DEBUGFUNC("ixgbe_init_ops_generic");

	/* EEPROM */
	eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
	if (eec & IXGBE_EEC_PRES) {
		eeprom->ops.read = ixgbe_read_eerd_generic;
		eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
	} else {
		eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
		eeprom->ops.read_buffer =
		    ixgbe_read_eeprom_buffer_bit_bang_generic;
	}
	eeprom->ops.write = ixgbe_write_eeprom_generic;
	eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
	eeprom->ops.validate_checksum =
	    ixgbe_validate_eeprom_checksum_generic;
	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;

	/*
	 * MAC.  NULL entries are device-family-specific; presumably they are
	 * filled in by the MAC-specific init_ops routines — TODO confirm
	 * against the 82598/82599/X540/X550 init paths.
	 */
	mac->ops.init_hw = ixgbe_init_hw_generic;
	mac->ops.reset_hw = NULL;
	mac->ops.start_hw = ixgbe_start_hw_generic;
	mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
	mac->ops.get_media_type = NULL;
	mac->ops.get_supported_physical_layer = NULL;
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
	mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
	mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
	mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
	mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
	mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
	mac->ops.prot_autoc_read = prot_autoc_read_generic;
	mac->ops.prot_autoc_write = prot_autoc_write_generic;

	/* LEDs */
	mac->ops.led_on = ixgbe_led_on_generic;
	mac->ops.led_off = ixgbe_led_off_generic;
	mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
	mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
	mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic;

	/* RAR, Multicast, VLAN */
	mac->ops.set_rar = ixgbe_set_rar_generic;
	mac->ops.clear_rar = ixgbe_clear_rar_generic;
	mac->ops.insert_mac_addr = NULL;
	mac->ops.set_vmdq = NULL;
	mac->ops.clear_vmdq = NULL;
	mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
	mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
	mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
	mac->ops.enable_mc = ixgbe_enable_mc_generic;
	mac->ops.disable_mc = ixgbe_disable_mc_generic;
	mac->ops.clear_vfta = NULL;
	mac->ops.set_vfta = NULL;
	mac->ops.set_vlvf = NULL;
	mac->ops.init_uta_tables = NULL;
	mac->ops.enable_rx = ixgbe_enable_rx_generic;
	mac->ops.disable_rx = ixgbe_disable_rx_generic;
	mac->ops.toggle_txdctl = ixgbe_toggle_txdctl_generic;

	/* Flow Control */
	mac->ops.fc_enable = ixgbe_fc_enable_generic;
	mac->ops.setup_fc = ixgbe_setup_fc_generic;
	mac->ops.fc_autoneg = ixgbe_fc_autoneg;

	/* Link */
	mac->ops.get_link_capabilities = NULL;
	mac->ops.setup_link = NULL;
	mac->ops.check_link = NULL;
	mac->ops.dmac_config = NULL;
	mac->ops.dmac_update_tcs = NULL;
	mac->ops.dmac_config_tcs = NULL;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
 * of flow control
 * @hw: pointer to hardware structure
 *
 * This function returns TRUE if the device supports flow control
 * autonegotiation, and FALSE if it does not.
167 * 168 **/ 169 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) 170 { 171 bool supported = FALSE; 172 ixgbe_link_speed speed; 173 bool link_up; 174 175 DEBUGFUNC("ixgbe_device_supports_autoneg_fc"); 176 177 switch (hw->phy.media_type) { 178 case ixgbe_media_type_fiber_fixed: 179 case ixgbe_media_type_fiber_qsfp: 180 case ixgbe_media_type_fiber: 181 /* flow control autoneg black list */ 182 switch (hw->device_id) { 183 case IXGBE_DEV_ID_X550EM_A_SFP: 184 case IXGBE_DEV_ID_X550EM_A_SFP_N: 185 case IXGBE_DEV_ID_X550EM_A_QSFP: 186 case IXGBE_DEV_ID_X550EM_A_QSFP_N: 187 supported = FALSE; 188 break; 189 default: 190 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE); 191 /* if link is down, assume supported */ 192 if (link_up) 193 supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? 194 TRUE : FALSE; 195 else 196 supported = TRUE; 197 } 198 199 break; 200 case ixgbe_media_type_backplane: 201 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI) 202 supported = FALSE; 203 else 204 supported = TRUE; 205 break; 206 case ixgbe_media_type_copper: 207 /* only some copper devices support flow control autoneg */ 208 switch (hw->device_id) { 209 case IXGBE_DEV_ID_82599_T3_LOM: 210 case IXGBE_DEV_ID_X540T: 211 case IXGBE_DEV_ID_X540T1: 212 case IXGBE_DEV_ID_X540_BYPASS: 213 case IXGBE_DEV_ID_X550T: 214 case IXGBE_DEV_ID_X550T1: 215 case IXGBE_DEV_ID_X550EM_X_10G_T: 216 case IXGBE_DEV_ID_X550EM_A_10G_T: 217 case IXGBE_DEV_ID_X550EM_A_1G_T: 218 case IXGBE_DEV_ID_X550EM_A_1G_T_L: 219 supported = TRUE; 220 break; 221 default: 222 supported = FALSE; 223 } 224 default: 225 break; 226 } 227 228 return supported; 229 } 230 231 /** 232 * ixgbe_setup_fc_generic - Set up flow control 233 * @hw: pointer to hardware structure 234 * 235 * Called at init time to set up flow control. 
 **/
s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 reg = 0, reg_bp = 0;	/* PCS1GANA / AUTOC advertisement */
	u16 reg_cu = 0;			/* copper PHY MDIO advertisement */
	bool locked = FALSE;

	DEBUGFUNC("ixgbe_setup_fc_generic");

	/* Validate the requested mode */
	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
			"ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/*
	 * 10gig parts do not have a word in the EEPROM to determine the
	 * default flow control setting, so we explicitly set it to full.
	 */
	if (hw->fc.requested_mode == ixgbe_fc_default)
		hw->fc.requested_mode = ixgbe_fc_full;

	/*
	 * Set up the 1G and 10G flow control advertisement registers so the
	 * HW will be able to do fc autoneg once the cable is plugged in.  If
	 * we link at 10G, the 1G advertisement is harmless and vice versa.
	 */
	switch (hw->phy.media_type) {
	case ixgbe_media_type_backplane:
		/* some MAC's need RMW protection on AUTOC */
		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
		if (ret_val != IXGBE_SUCCESS)
			goto out;

		/* fall through - only backplane uses autoc */
	case ixgbe_media_type_fiber_fixed:
	case ixgbe_media_type_fiber_qsfp:
	case ixgbe_media_type_fiber:
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);

		break;
	case ixgbe_media_type_copper:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
		break;
	default:
		break;
	}

	/*
	 * The possible values of fc.requested_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.requested_mode) {
	case ixgbe_fc_none:
		/* Flow control completely disabled by software override. */
		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
				    IXGBE_AUTOC_ASM_PAUSE);
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.  ASM (asymmetric) only.
		 */
		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane) {
			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
			reg_cu |= IXGBE_TAF_ASM_PAUSE;
			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
		}
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override.  Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE, as such we fall
		 * through to the fc_full statement.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
				  IXGBE_AUTOC_ASM_PAUSE;
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
		break;
	default:
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
			      "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	if (hw->mac.type < ixgbe_mac_X540) {
		/*
		 * Enable auto-negotiation between the MAC & PHY;
		 * the MAC will advertise clause 37 flow control.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);

		/* Disable AN timeout */
		if (hw->fc.strict_ieee)
			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;

		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
	}

	/*
	 * AUTOC restart handles negotiation of 1G and 10G on backplane
	 * and copper.  There is no need to set the PCS1GCTL register.
	 */
	if (hw->phy.media_type == ixgbe_media_type_backplane) {
		reg_bp |= IXGBE_AUTOC_AN_RESTART;
		ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
		if (ret_val)
			goto out;
	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
		   (ixgbe_device_supports_autoneg_fc(hw))) {
		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
	}

	DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
out:
	return ret_val;
}

/**
 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware by filling the bus info structure and media type, clears
 * all on chip counters, initializes receive address registers, multicast
 * table, VLAN filter table, calls routine to set up link and flow control
 * settings, and leaves transmit and receive units disabled and uninitialized
 **/
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
	s32 ret_val;
	u32 ctrl_ext;
	u16 device_caps;

	DEBUGFUNC("ixgbe_start_hw_generic");

	/* Set the media type */
	hw->phy.media_type = hw->mac.ops.get_media_type(hw);

	/* PHY ops initialization must be done in reset_hw() */

	/* Clear the VLAN filter table */
	hw->mac.ops.clear_vfta(hw);

	/* Clear statistics registers */
	hw->mac.ops.clear_hw_cntrs(hw);

	/* Set No Snoop Disable */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	/* Setup flow control; NOT_IMPLEMENTED is tolerated by design */
	ret_val = ixgbe_setup_fc(hw);
	if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) {
		DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
		return ret_val;
	}

	/* Cache bit indicating need for crosstalk fix */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case
	     ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		hw->mac.ops.get_device_caps(hw, &device_caps);
		if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
			hw->need_crosstalk_fix = FALSE;
		else
			hw->need_crosstalk_fix = TRUE;
		break;
	default:
		hw->need_crosstalk_fix = FALSE;
		break;
	}

	/* Clear adapter stopped flag */
	hw->adapter_stopped = FALSE;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_start_hw_gen2 - Init sequence for common device family
 * @hw: pointer to hw structure
 *
 * Performs the init sequence common to the second generation
 * of 10 GbE devices.
 * Devices in the second generation:
 *	82599
 *	X540
 **/
void ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
	u32 i;
	u32 regval;

	DEBUGFUNC("ixgbe_start_hw_gen2");

	/* Clear the rate limiters (RTTDQSEL selects the queue to touch) */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
	}
	IXGBE_WRITE_FLUSH(hw);

	/* Disable relaxed ordering on Tx descriptor write-back */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
	}

	/* ... and on Rx data/header write-back */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}
}

/**
 * ixgbe_init_hw_generic - Generic hardware initialization
 * @hw: pointer to hardware structure
 *
 * Initialize the hardware by resetting the hardware, filling the bus info
 * structure and media type, clears all on chip counters, initializes receive
 * address registers, multicast table, VLAN filter table, calls routine to set
 * up link and flow control settings, and leaves transmit and receive units
 * disabled and uninitialized
 **/
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
{
	s32 status;

	DEBUGFUNC("ixgbe_init_hw_generic");

	/* Reset the hardware */
	status = hw->mac.ops.reset_hw(hw);

	/* A missing SFP module is not fatal; continue bring-up anyway */
	if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) {
		/* Start the HW */
		status = hw->mac.ops.start_hw(hw);
	}

	/* Initialize the LED link active for LED blink support */
	if (hw->mac.ops.init_led_link_act)
		hw->mac.ops.init_led_link_act(hw);

	if (status != IXGBE_SUCCESS)
		DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);

	return status;
}

/**
 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 * @hw: pointer to hardware structure
 *
 * Clears all hardware statistics counters by reading them from the hardware
 * Statistics counters are clear on read, so the read results are discarded.
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");

	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	if (hw->mac.type >= ixgbe_mac_X550)
		IXGBE_READ_REG(hw, IXGBE_MBSDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	/* 82599 and newer moved the link XON/XOFF Rx counters */
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* per-priority pause counters */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	/* packet-size histogram and good/broadcast/multicast counters */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	/* per-queue counters */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/* X540/X550 also keep CRC and LDPC error counters in the PHY */
	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			ixgbe_identify_phy(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.
 **/
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
				  u32 pba_num_size)
{
	s32 ret_val;
	u16 data;
	u16 pba_ptr;
	u16 offset;
	u16 length;

	DEBUGFUNC("ixgbe_read_pba_string_generic");

	if (pba_num == NULL) {
		DEBUGOUT("PBA string buffer was null\n");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	/*
	 * if data is not ptr guard the PBA must be in legacy format which
	 * means pba_ptr is actually our second data word for the PBA number
	 * and we can decode it into an ascii string
	 */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		DEBUGOUT("NVM PBA number is not stored as string\n");

		/* we will need 11 characters to store the PBA */
		if (pba_num_size < 11) {
			DEBUGOUT("PBA string buffer too small\n");
			return IXGBE_ERR_NO_SPACE;
		}

		/*
		 * extract hex string from data and pba_ptr; each byte is a
		 * raw nibble here, converted to an ASCII hex digit below
		 */
		pba_num[0] = (data >> 12) & 0xF;
		pba_num[1] = (data >> 8) & 0xF;
		pba_num[2] = (data >> 4) & 0xF;
		pba_num[3] = data & 0xF;
		pba_num[4] = (pba_ptr >> 12) & 0xF;
		pba_num[5] = (pba_ptr >> 8) & 0xF;
		pba_num[6] = '-';
		pba_num[7] = 0;
		pba_num[8] = (pba_ptr >> 4) & 0xF;
		pba_num[9] = pba_ptr & 0xF;

		/* put a null character on the end of our string */
		pba_num[10] = '\0';

		/*
		 * switch all the data but the '-' to hex char; '-' (0x2D)
		 * falls outside both ranges and is left untouched
		 */
		for (offset = 0; offset < 10; offset++) {
			if (pba_num[offset] < 0xA)
				pba_num[offset] += '0';
			else if (pba_num[offset] < 0x10)
				pba_num[offset] += 'A' - 0xA;
		}

		return IXGBE_SUCCESS;
	}

	/* string format: first word of the pointed-to block is its length */
	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0) {
		DEBUGOUT("NVM PBA number section invalid length\n");
		return IXGBE_ERR_PBA_SECTION;
	}

	/* check if pba_num buffer is big enough */
	if (pba_num_size < (((u32)length * 2) - 1)) {
		DEBUGOUT("PBA string buffer too small\n");
		return IXGBE_ERR_NO_SPACE;
	}

	/* trim pba length from start of string */
	pba_ptr++;
	length--;

	/* each EEPROM word holds two ASCII characters, big-endian */
	for (offset = 0; offset < length; offset++) {
		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
		if (ret_val) {
			DEBUGOUT("NVM Read Error\n");
			return ret_val;
		}
		pba_num[offset * 2] = (u8)(data >> 8);
		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
	}
	pba_num[offset * 2] = '\0';

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number from the EEPROM
 *
 * Reads the part number from the EEPROM.
756 **/ 757 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num) 758 { 759 s32 ret_val; 760 u16 data; 761 762 DEBUGFUNC("ixgbe_read_pba_num_generic"); 763 764 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); 765 if (ret_val) { 766 DEBUGOUT("NVM Read Error\n"); 767 return ret_val; 768 } else if (data == IXGBE_PBANUM_PTR_GUARD) { 769 DEBUGOUT("NVM Not supported\n"); 770 return IXGBE_NOT_IMPLEMENTED; 771 } 772 *pba_num = (u32)(data << 16); 773 774 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data); 775 if (ret_val) { 776 DEBUGOUT("NVM Read Error\n"); 777 return ret_val; 778 } 779 *pba_num |= (u32)data; 780 781 return IXGBE_SUCCESS; 782 } 783 784 /** 785 * ixgbe_read_pba_raw 786 * @hw: pointer to the HW structure 787 * @eeprom_buf: optional pointer to EEPROM image 788 * @eeprom_buf_size: size of EEPROM image in words 789 * @max_pba_block_size: PBA block size limit 790 * @pba: pointer to output PBA structure 791 * 792 * Reads PBA from EEPROM image when eeprom_buf is not NULL. 793 * Reads PBA from physical EEPROM device when eeprom_buf is NULL. 
 *
 **/
s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
		       u32 eeprom_buf_size, u16 max_pba_block_size,
		       struct ixgbe_pba *pba)
{
	s32 ret_val;
	u16 pba_block_size;

	if (pba == NULL)
		return IXGBE_ERR_PARAM;

	/* fetch the two PBA words from device or from the supplied image */
	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						     &pba->word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
			pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	/* guard word: word[1] is a pointer to a PBA block, not data */
	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
		if (pba->pba_block == NULL)
			return IXGBE_ERR_PARAM;

		ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
						   eeprom_buf_size,
						   &pba_block_size);
		if (ret_val)
			return ret_val;

		if (pba_block_size > max_pba_block_size)
			return IXGBE_ERR_PARAM;

		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
							     pba_block_size,
							     pba->pba_block);
			if (ret_val)
				return ret_val;
		} else {
			/* bounds-check the block against the image size */
			if (eeprom_buf_size > (u32)(pba->word[1] +
						    pba_block_size)) {
				memcpy(pba->pba_block,
				       &eeprom_buf[pba->word[1]],
				       pba_block_size * sizeof(u16));
			} else {
				return IXGBE_ERR_PARAM;
			}
		}
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_write_pba_raw
 * @hw: pointer to the HW structure
 * @eeprom_buf: optional pointer to EEPROM image
 * @eeprom_buf_size: size of EEPROM image in words
 * @pba: pointer to PBA structure
 *
 * Writes PBA to EEPROM image when eeprom_buf is not NULL.
 * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
 *
 **/
s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
			u32 eeprom_buf_size, struct ixgbe_pba *pba)
{
	s32 ret_val;

	if (pba == NULL)
		return IXGBE_ERR_PARAM;

	/* store the two PBA words to device or into the supplied image */
	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						      &pba->word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
			eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	/*
	 * guard word: also write the PBA block itself; pba_block[0] is the
	 * block length in words (see ixgbe_get_pba_block_size)
	 */
	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
		if (pba->pba_block == NULL)
			return IXGBE_ERR_PARAM;

		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
							      pba->pba_block[0],
							      pba->pba_block);
			if (ret_val)
				return ret_val;
		} else {
			if (eeprom_buf_size > (u32)(pba->word[1] +
						    pba->pba_block[0])) {
				memcpy(&eeprom_buf[pba->word[1]],
				       pba->pba_block,
				       pba->pba_block[0] * sizeof(u16));
			} else {
				return IXGBE_ERR_PARAM;
			}
		}
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_get_pba_block_size
 * @hw: pointer to the HW structure
 * @eeprom_buf: optional pointer to EEPROM image
 * @eeprom_buf_size: size of EEPROM image in words
 * @pba_data_size: pointer to output variable
 *
 * Returns the size of the PBA block in words.  Function operates on EEPROM
 * image if the eeprom_buf pointer is not NULL otherwise it accesses physical
 * EEPROM device.
 *
 **/
s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
			     u32 eeprom_buf_size, u16 *pba_block_size)
{
	s32 ret_val;
	u16 pba_word[2];
	u16 length;

	DEBUGFUNC("ixgbe_get_pba_block_size");

	/* fetch the two PBA words from device or from the supplied image */
	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						     &pba_word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
			pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
		/* first word of the PBA block holds its length in words */
		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
						      &length);
			if (ret_val)
				return ret_val;
		} else {
			if (eeprom_buf_size > pba_word[1])
				length = eeprom_buf[pba_word[1] + 0];
			else
				return IXGBE_ERR_PARAM;
		}

		/* 0xFFFF is an erased/uninitialized EEPROM word */
		if (length == 0xFFFF || length == 0)
			return IXGBE_ERR_PBA_SECTION;
	} else {
		/* PBA number in legacy format, there is no PBA Block.
		 */
		length = 0;
	}

	if (pba_block_size != NULL)
		*pba_block_size = length;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_get_mac_addr_generic - Generic get MAC address
 * @hw: pointer to hardware structure
 * @mac_addr: Adapter MAC address
 *
 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
 * A reset of the adapter must be performed prior to calling this function
 * in order for the MAC address to have been loaded from the EEPROM into RAR0
 **/
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
{
	u32 rar_high;
	u32 rar_low;
	u16 i;

	DEBUGFUNC("ixgbe_get_mac_addr_generic");

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));

	/* RAL holds the first 4 bytes, RAH the last 2, low byte first */
	for (i = 0; i < 4; i++)
		mac_addr[i] = (u8)(rar_low >> (i*8));

	for (i = 0; i < 2; i++)
		mac_addr[i+4] = (u8)(rar_high >> (i*8));

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_set_pci_config_data_generic - Generic store PCI bus info
 * @hw: pointer to hardware structure
 * @link_status: the link status returned by the PCI config space
 *
 * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
 **/
void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	if (hw->bus.type == ixgbe_bus_type_unknown)
		hw->bus.type = ixgbe_bus_type_pci_express;

	/* decode the negotiated PCIe link width */
	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
	case IXGBE_PCI_LINK_WIDTH_1:
		hw->bus.width = ixgbe_bus_width_pcie_x1;
		break;
	case IXGBE_PCI_LINK_WIDTH_2:
		hw->bus.width = ixgbe_bus_width_pcie_x2;
		break;
	case IXGBE_PCI_LINK_WIDTH_4:
		hw->bus.width = ixgbe_bus_width_pcie_x4;
		break;
	case IXGBE_PCI_LINK_WIDTH_8:
		hw->bus.width = ixgbe_bus_width_pcie_x8;
		break;
	default:
		hw->bus.width =
ixgbe_bus_width_unknown; 1031 break; 1032 } 1033 1034 switch (link_status & IXGBE_PCI_LINK_SPEED) { 1035 case IXGBE_PCI_LINK_SPEED_2500: 1036 hw->bus.speed = ixgbe_bus_speed_2500; 1037 break; 1038 case IXGBE_PCI_LINK_SPEED_5000: 1039 hw->bus.speed = ixgbe_bus_speed_5000; 1040 break; 1041 case IXGBE_PCI_LINK_SPEED_8000: 1042 hw->bus.speed = ixgbe_bus_speed_8000; 1043 break; 1044 default: 1045 hw->bus.speed = ixgbe_bus_speed_unknown; 1046 break; 1047 } 1048 1049 mac->ops.set_lan_id(hw); 1050 } 1051 1052 /** 1053 * ixgbe_get_bus_info_generic - Generic set PCI bus info 1054 * @hw: pointer to hardware structure 1055 * 1056 * Gets the PCI bus info (speed, width, type) then calls helper function to 1057 * store this data within the ixgbe_hw structure. 1058 **/ 1059 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) 1060 { 1061 u16 link_status; 1062 1063 DEBUGFUNC("ixgbe_get_bus_info_generic"); 1064 1065 /* Get the negotiated link width and speed from PCI config space */ 1066 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS); 1067 1068 ixgbe_set_pci_config_data_generic(hw, link_status); 1069 1070 return IXGBE_SUCCESS; 1071 } 1072 1073 /** 1074 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices 1075 * @hw: pointer to the HW structure 1076 * 1077 * Determines the LAN function id by reading memory-mapped registers and swaps 1078 * the port value if requested, and set MAC instance for devices that share 1079 * CS4227. 
1080 **/ 1081 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) 1082 { 1083 struct ixgbe_bus_info *bus = &hw->bus; 1084 u32 reg; 1085 u16 ee_ctrl_4; 1086 1087 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie"); 1088 1089 reg = IXGBE_READ_REG(hw, IXGBE_STATUS); 1090 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT; 1091 bus->lan_id = (u8)bus->func; 1092 1093 /* check for a port swap */ 1094 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw)); 1095 if (reg & IXGBE_FACTPS_LFS) 1096 bus->func ^= 0x1; 1097 1098 /* Get MAC instance from EEPROM for configuring CS4227 */ 1099 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) { 1100 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4); 1101 bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >> 1102 IXGBE_EE_CTRL_4_INST_ID_SHIFT; 1103 } 1104 } 1105 1106 /** 1107 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units 1108 * @hw: pointer to hardware structure 1109 * 1110 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, 1111 * disables transmit and receive units. The adapter_stopped flag is used by 1112 * the shared code and drivers to determine if the adapter is in a stopped 1113 * state and should not touch the hardware. 1114 **/ 1115 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) 1116 { 1117 u32 reg_val; 1118 u16 i; 1119 1120 DEBUGFUNC("ixgbe_stop_adapter_generic"); 1121 1122 /* 1123 * Set the adapter_stopped flag so other driver functions stop touching 1124 * the hardware 1125 */ 1126 hw->adapter_stopped = TRUE; 1127 1128 /* Disable the receive unit */ 1129 ixgbe_disable_rx(hw); 1130 1131 /* Clear interrupt mask to stop interrupts from being generated */ 1132 /* 1133 * XXX 1134 * This function is called in the state of both interrupt disabled 1135 * and interrupt enabled, e.g. 
1136 * + interrupt disabled case: 1137 * - ixgbe_stop_locked() 1138 * - ixgbe_disable_intr() // interrupt disabled here 1139 * - ixgbe_stop_adapter() 1140 * - hw->mac.ops.stop_adapter() 1141 * == this function 1142 * + interrupt enabled case: 1143 * - ixgbe_local_timer1() 1144 * - ixgbe_init_locked() 1145 * - ixgbe_stop_adapter() 1146 * - hw->mac.ops.stop_adapter() 1147 * == this function 1148 * Therefore, it causes nest status breaking to nest the status 1149 * (that is, que->im_nest++) at all times. So, this function must 1150 * use ixgbe_ensure_disabled_intr() instead of ixgbe_disable_intr(). 1151 */ 1152 ixgbe_ensure_disabled_intr(hw->back); 1153 1154 /* Clear any pending interrupts, flush previous writes */ 1155 IXGBE_READ_REG(hw, IXGBE_EICR); 1156 1157 /* Disable the transmit unit. Each queue must be disabled. */ 1158 for (i = 0; i < hw->mac.max_tx_queues; i++) 1159 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH); 1160 1161 /* Disable the receive unit by stopping each queue */ 1162 for (i = 0; i < hw->mac.max_rx_queues; i++) { 1163 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); 1164 reg_val &= ~IXGBE_RXDCTL_ENABLE; 1165 reg_val |= IXGBE_RXDCTL_SWFLSH; 1166 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val); 1167 } 1168 1169 /* flush all queues disables */ 1170 IXGBE_WRITE_FLUSH(hw); 1171 msec_delay(2); 1172 1173 /* 1174 * Prevent the PCI-E bus from hanging by disabling PCI-E master 1175 * access and verify no pending requests 1176 */ 1177 return ixgbe_disable_pcie_master(hw); 1178 } 1179 1180 /** 1181 * ixgbe_init_led_link_act_generic - Store the LED index link/activity. 1182 * @hw: pointer to hardware structure 1183 * 1184 * Store the index for the link active LED. This will be used to support 1185 * blinking the LED. 
1186 **/ 1187 s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw) 1188 { 1189 struct ixgbe_mac_info *mac = &hw->mac; 1190 u32 led_reg, led_mode; 1191 u8 i; 1192 1193 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 1194 1195 /* Get LED link active from the LEDCTL register */ 1196 for (i = 0; i < 4; i++) { 1197 led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i); 1198 1199 if ((led_mode & IXGBE_LED_MODE_MASK_BASE) == 1200 IXGBE_LED_LINK_ACTIVE) { 1201 mac->led_link_act = i; 1202 return IXGBE_SUCCESS; 1203 } 1204 } 1205 1206 /* 1207 * If LEDCTL register does not have the LED link active set, then use 1208 * known MAC defaults. 1209 */ 1210 switch (hw->mac.type) { 1211 case ixgbe_mac_X550EM_a: 1212 case ixgbe_mac_X550EM_x: 1213 mac->led_link_act = 1; 1214 break; 1215 default: 1216 mac->led_link_act = 2; 1217 } 1218 return IXGBE_SUCCESS; 1219 } 1220 1221 /** 1222 * ixgbe_led_on_generic - Turns on the software controllable LEDs. 1223 * @hw: pointer to hardware structure 1224 * @index: led number to turn on 1225 **/ 1226 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) 1227 { 1228 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 1229 1230 DEBUGFUNC("ixgbe_led_on_generic"); 1231 1232 if (index > 3) 1233 return IXGBE_ERR_PARAM; 1234 1235 /* To turn on the LED, set mode to ON. */ 1236 led_reg &= ~IXGBE_LED_MODE_MASK(index); 1237 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); 1238 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 1239 IXGBE_WRITE_FLUSH(hw); 1240 1241 return IXGBE_SUCCESS; 1242 } 1243 1244 /** 1245 * ixgbe_led_off_generic - Turns off the software controllable LEDs. 1246 * @hw: pointer to hardware structure 1247 * @index: led number to turn off 1248 **/ 1249 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) 1250 { 1251 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 1252 1253 DEBUGFUNC("ixgbe_led_off_generic"); 1254 1255 if (index > 3) 1256 return IXGBE_ERR_PARAM; 1257 1258 /* To turn off the LED, set mode to OFF. 
*/ 1259 led_reg &= ~IXGBE_LED_MODE_MASK(index); 1260 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); 1261 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 1262 IXGBE_WRITE_FLUSH(hw); 1263 1264 return IXGBE_SUCCESS; 1265 } 1266 1267 /** 1268 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params 1269 * @hw: pointer to hardware structure 1270 * 1271 * Initializes the EEPROM parameters ixgbe_eeprom_info within the 1272 * ixgbe_hw struct in order to set up EEPROM access. 1273 **/ 1274 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) 1275 { 1276 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 1277 u32 eec; 1278 u16 eeprom_size; 1279 1280 DEBUGFUNC("ixgbe_init_eeprom_params_generic"); 1281 1282 if (eeprom->type == ixgbe_eeprom_uninitialized) { 1283 eeprom->type = ixgbe_eeprom_none; 1284 /* Set default semaphore delay to 10ms which is a well 1285 * tested value */ 1286 eeprom->semaphore_delay = 10; 1287 /* Clear EEPROM page size, it will be initialized as needed */ 1288 eeprom->word_page_size = 0; 1289 1290 /* 1291 * Check for EEPROM present first. 1292 * If not present leave as none 1293 */ 1294 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); 1295 if (eec & IXGBE_EEC_PRES) { 1296 eeprom->type = ixgbe_eeprom_spi; 1297 1298 /* 1299 * SPI EEPROM is assumed here. This code would need to 1300 * change if a future EEPROM is not SPI. 
1301 */ 1302 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> 1303 IXGBE_EEC_SIZE_SHIFT); 1304 eeprom->word_size = 1 << (eeprom_size + 1305 IXGBE_EEPROM_WORD_SIZE_SHIFT); 1306 } 1307 1308 if (eec & IXGBE_EEC_ADDR_SIZE) 1309 eeprom->address_bits = 16; 1310 else 1311 eeprom->address_bits = 8; 1312 DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: " 1313 "%d\n", eeprom->type, eeprom->word_size, 1314 eeprom->address_bits); 1315 } 1316 1317 return IXGBE_SUCCESS; 1318 } 1319 1320 /** 1321 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang 1322 * @hw: pointer to hardware structure 1323 * @offset: offset within the EEPROM to write 1324 * @words: number of word(s) 1325 * @data: 16 bit word(s) to write to EEPROM 1326 * 1327 * Reads 16 bit word(s) from EEPROM through bit-bang method 1328 **/ 1329 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 1330 u16 words, u16 *data) 1331 { 1332 s32 status = IXGBE_SUCCESS; 1333 u16 i, count; 1334 1335 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic"); 1336 1337 hw->eeprom.ops.init_params(hw); 1338 1339 if (words == 0) { 1340 status = IXGBE_ERR_INVALID_ARGUMENT; 1341 goto out; 1342 } 1343 1344 if (offset + words > hw->eeprom.word_size) { 1345 status = IXGBE_ERR_EEPROM; 1346 goto out; 1347 } 1348 1349 /* 1350 * The EEPROM page size cannot be queried from the chip. We do lazy 1351 * initialization. It is worth to do that when we write large buffer. 1352 */ 1353 if ((hw->eeprom.word_page_size == 0) && 1354 (words > IXGBE_EEPROM_PAGE_SIZE_MAX)) 1355 ixgbe_detect_eeprom_page_size_generic(hw, offset); 1356 1357 /* 1358 * We cannot hold synchronization semaphores for too long 1359 * to avoid other entity starvation. However it is more efficient 1360 * to read in bursts than synchronizing access for each word. 1361 */ 1362 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { 1363 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? 
1364 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); 1365 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i, 1366 count, &data[i]); 1367 1368 if (status != IXGBE_SUCCESS) 1369 break; 1370 } 1371 1372 out: 1373 return status; 1374 } 1375 1376 /** 1377 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM 1378 * @hw: pointer to hardware structure 1379 * @offset: offset within the EEPROM to be written to 1380 * @words: number of word(s) 1381 * @data: 16 bit word(s) to be written to the EEPROM 1382 * 1383 * If ixgbe_eeprom_update_checksum is not called after this function, the 1384 * EEPROM will most likely contain an invalid checksum. 1385 **/ 1386 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 1387 u16 words, u16 *data) 1388 { 1389 s32 status; 1390 u16 word; 1391 u16 page_size; 1392 u16 i; 1393 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; 1394 1395 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang"); 1396 1397 /* Prepare the EEPROM for writing */ 1398 status = ixgbe_acquire_eeprom(hw); 1399 1400 if (status == IXGBE_SUCCESS) { 1401 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) { 1402 ixgbe_release_eeprom(hw); 1403 status = IXGBE_ERR_EEPROM; 1404 } 1405 } 1406 1407 if (status == IXGBE_SUCCESS) { 1408 for (i = 0; i < words; i++) { 1409 ixgbe_standby_eeprom(hw); 1410 1411 /* Send the WRITE ENABLE command (8 bit opcode ) */ 1412 ixgbe_shift_out_eeprom_bits(hw, 1413 IXGBE_EEPROM_WREN_OPCODE_SPI, 1414 IXGBE_EEPROM_OPCODE_BITS); 1415 1416 ixgbe_standby_eeprom(hw); 1417 1418 /* 1419 * Some SPI eeproms use the 8th address bit embedded 1420 * in the opcode 1421 */ 1422 if ((hw->eeprom.address_bits == 8) && 1423 ((offset + i) >= 128)) 1424 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; 1425 1426 /* Send the Write command (8-bit opcode + addr) */ 1427 ixgbe_shift_out_eeprom_bits(hw, write_opcode, 1428 IXGBE_EEPROM_OPCODE_BITS); 1429 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), 1430 hw->eeprom.address_bits); 1431 
1432 page_size = hw->eeprom.word_page_size; 1433 1434 /* Send the data in burst via SPI*/ 1435 do { 1436 word = data[i]; 1437 word = (word >> 8) | (word << 8); 1438 ixgbe_shift_out_eeprom_bits(hw, word, 16); 1439 1440 if (page_size == 0) 1441 break; 1442 1443 /* do not wrap around page */ 1444 if (((offset + i) & (page_size - 1)) == 1445 (page_size - 1)) 1446 break; 1447 } while (++i < words); 1448 1449 ixgbe_standby_eeprom(hw); 1450 msec_delay(10); 1451 } 1452 /* Done with writing - release the EEPROM */ 1453 ixgbe_release_eeprom(hw); 1454 } 1455 1456 return status; 1457 } 1458 1459 /** 1460 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM 1461 * @hw: pointer to hardware structure 1462 * @offset: offset within the EEPROM to be written to 1463 * @data: 16 bit word to be written to the EEPROM 1464 * 1465 * If ixgbe_eeprom_update_checksum is not called after this function, the 1466 * EEPROM will most likely contain an invalid checksum. 1467 **/ 1468 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) 1469 { 1470 s32 status; 1471 1472 DEBUGFUNC("ixgbe_write_eeprom_generic"); 1473 1474 hw->eeprom.ops.init_params(hw); 1475 1476 if (offset >= hw->eeprom.word_size) { 1477 status = IXGBE_ERR_EEPROM; 1478 goto out; 1479 } 1480 1481 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data); 1482 1483 out: 1484 return status; 1485 } 1486 1487 /** 1488 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang 1489 * @hw: pointer to hardware structure 1490 * @offset: offset within the EEPROM to be read 1491 * @data: read 16 bit words(s) from EEPROM 1492 * @words: number of word(s) 1493 * 1494 * Reads 16 bit word(s) from EEPROM through bit-bang method 1495 **/ 1496 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 1497 u16 words, u16 *data) 1498 { 1499 s32 status = IXGBE_SUCCESS; 1500 u16 i, count; 1501 1502 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic"); 1503 1504 
hw->eeprom.ops.init_params(hw); 1505 1506 if (words == 0) { 1507 status = IXGBE_ERR_INVALID_ARGUMENT; 1508 goto out; 1509 } 1510 1511 if (offset + words > hw->eeprom.word_size) { 1512 status = IXGBE_ERR_EEPROM; 1513 goto out; 1514 } 1515 1516 /* 1517 * We cannot hold synchronization semaphores for too long 1518 * to avoid other entity starvation. However it is more efficient 1519 * to read in bursts than synchronizing access for each word. 1520 */ 1521 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { 1522 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? 1523 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); 1524 1525 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i, 1526 count, &data[i]); 1527 1528 if (status != IXGBE_SUCCESS) 1529 break; 1530 } 1531 1532 out: 1533 return status; 1534 } 1535 1536 /** 1537 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang 1538 * @hw: pointer to hardware structure 1539 * @offset: offset within the EEPROM to be read 1540 * @words: number of word(s) 1541 * @data: read 16 bit word(s) from EEPROM 1542 * 1543 * Reads 16 bit word(s) from EEPROM through bit-bang method 1544 **/ 1545 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 1546 u16 words, u16 *data) 1547 { 1548 s32 status; 1549 u16 word_in; 1550 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; 1551 u16 i; 1552 1553 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang"); 1554 1555 /* Prepare the EEPROM for reading */ 1556 status = ixgbe_acquire_eeprom(hw); 1557 1558 if (status == IXGBE_SUCCESS) { 1559 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) { 1560 ixgbe_release_eeprom(hw); 1561 status = IXGBE_ERR_EEPROM; 1562 } 1563 } 1564 1565 if (status == IXGBE_SUCCESS) { 1566 for (i = 0; i < words; i++) { 1567 ixgbe_standby_eeprom(hw); 1568 /* 1569 * Some SPI eeproms use the 8th address bit embedded 1570 * in the opcode 1571 */ 1572 if ((hw->eeprom.address_bits == 8) && 1573 ((offset + i) >= 128)) 1574 read_opcode |= 
IXGBE_EEPROM_A8_OPCODE_SPI; 1575 1576 /* Send the READ command (opcode + addr) */ 1577 ixgbe_shift_out_eeprom_bits(hw, read_opcode, 1578 IXGBE_EEPROM_OPCODE_BITS); 1579 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), 1580 hw->eeprom.address_bits); 1581 1582 /* Read the data. */ 1583 word_in = ixgbe_shift_in_eeprom_bits(hw, 16); 1584 data[i] = (word_in >> 8) | (word_in << 8); 1585 } 1586 1587 /* End this read operation */ 1588 ixgbe_release_eeprom(hw); 1589 } 1590 1591 return status; 1592 } 1593 1594 /** 1595 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang 1596 * @hw: pointer to hardware structure 1597 * @offset: offset within the EEPROM to be read 1598 * @data: read 16 bit value from EEPROM 1599 * 1600 * Reads 16 bit value from EEPROM through bit-bang method 1601 **/ 1602 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 1603 u16 *data) 1604 { 1605 s32 status; 1606 1607 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic"); 1608 1609 hw->eeprom.ops.init_params(hw); 1610 1611 if (offset >= hw->eeprom.word_size) { 1612 status = IXGBE_ERR_EEPROM; 1613 goto out; 1614 } 1615 1616 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); 1617 1618 out: 1619 return status; 1620 } 1621 1622 /** 1623 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD 1624 * @hw: pointer to hardware structure 1625 * @offset: offset of word in the EEPROM to read 1626 * @words: number of word(s) 1627 * @data: 16 bit word(s) from the EEPROM 1628 * 1629 * Reads a 16 bit word(s) from the EEPROM using the EERD register. 
1630 **/ 1631 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, 1632 u16 words, u16 *data) 1633 { 1634 u32 eerd; 1635 s32 status = IXGBE_SUCCESS; 1636 u32 i; 1637 1638 DEBUGFUNC("ixgbe_read_eerd_buffer_generic"); 1639 1640 hw->eeprom.ops.init_params(hw); 1641 1642 if (words == 0) { 1643 status = IXGBE_ERR_INVALID_ARGUMENT; 1644 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words"); 1645 goto out; 1646 } 1647 1648 if (offset >= hw->eeprom.word_size) { 1649 status = IXGBE_ERR_EEPROM; 1650 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset"); 1651 goto out; 1652 } 1653 1654 for (i = 0; i < words; i++) { 1655 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | 1656 IXGBE_EEPROM_RW_REG_START; 1657 1658 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); 1659 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ); 1660 1661 if (status == IXGBE_SUCCESS) { 1662 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >> 1663 IXGBE_EEPROM_RW_REG_DATA); 1664 } else { 1665 DEBUGOUT("Eeprom read timed out\n"); 1666 goto out; 1667 } 1668 } 1669 out: 1670 return status; 1671 } 1672 1673 /** 1674 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size 1675 * @hw: pointer to hardware structure 1676 * @offset: offset within the EEPROM to be used as a scratch pad 1677 * 1678 * Discover EEPROM page size by writing marching data at given offset. 1679 * This function is called only when we are writing a new large buffer 1680 * at given offset so the data would be overwritten anyway. 
1681 **/ 1682 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, 1683 u16 offset) 1684 { 1685 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX]; 1686 s32 status = IXGBE_SUCCESS; 1687 u16 i; 1688 1689 DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic"); 1690 1691 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++) 1692 data[i] = i; 1693 1694 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX; 1695 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1696 IXGBE_EEPROM_PAGE_SIZE_MAX, data); 1697 hw->eeprom.word_page_size = 0; 1698 if (status != IXGBE_SUCCESS) 1699 goto out; 1700 1701 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); 1702 if (status != IXGBE_SUCCESS) 1703 goto out; 1704 1705 /* 1706 * When writing in burst more than the actual page size 1707 * EEPROM address wraps around current page. 1708 */ 1709 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0]; 1710 1711 DEBUGOUT1("Detected EEPROM page size = %d words.", 1712 hw->eeprom.word_page_size); 1713 out: 1714 return status; 1715 } 1716 1717 /** 1718 * ixgbe_read_eerd_generic - Read EEPROM word using EERD 1719 * @hw: pointer to hardware structure 1720 * @offset: offset of word in the EEPROM to read 1721 * @data: word read from the EEPROM 1722 * 1723 * Reads a 16 bit word from the EEPROM using the EERD register. 1724 **/ 1725 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) 1726 { 1727 return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data); 1728 } 1729 1730 /** 1731 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR 1732 * @hw: pointer to hardware structure 1733 * @offset: offset of word in the EEPROM to write 1734 * @words: number of word(s) 1735 * @data: word(s) write to the EEPROM 1736 * 1737 * Write a 16 bit word(s) to the EEPROM using the EEWR register. 
1738 **/ 1739 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, 1740 u16 words, u16 *data) 1741 { 1742 u32 eewr; 1743 s32 status = IXGBE_SUCCESS; 1744 u16 i; 1745 1746 DEBUGFUNC("ixgbe_write_eewr_generic"); 1747 1748 hw->eeprom.ops.init_params(hw); 1749 1750 if (words == 0) { 1751 status = IXGBE_ERR_INVALID_ARGUMENT; 1752 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words"); 1753 goto out; 1754 } 1755 1756 if (offset >= hw->eeprom.word_size) { 1757 status = IXGBE_ERR_EEPROM; 1758 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset"); 1759 goto out; 1760 } 1761 1762 for (i = 0; i < words; i++) { 1763 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | 1764 (data[i] << IXGBE_EEPROM_RW_REG_DATA) | 1765 IXGBE_EEPROM_RW_REG_START; 1766 1767 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); 1768 if (status != IXGBE_SUCCESS) { 1769 DEBUGOUT("Eeprom write EEWR timed out\n"); 1770 goto out; 1771 } 1772 1773 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); 1774 1775 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); 1776 if (status != IXGBE_SUCCESS) { 1777 DEBUGOUT("Eeprom write EEWR timed out\n"); 1778 goto out; 1779 } 1780 } 1781 1782 out: 1783 return status; 1784 } 1785 1786 /** 1787 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR 1788 * @hw: pointer to hardware structure 1789 * @offset: offset of word in the EEPROM to write 1790 * @data: word write to the EEPROM 1791 * 1792 * Write a 16 bit word to the EEPROM using the EEWR register. 1793 **/ 1794 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) 1795 { 1796 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data); 1797 } 1798 1799 /** 1800 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status 1801 * @hw: pointer to hardware structure 1802 * @ee_reg: EEPROM flag for polling 1803 * 1804 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the 1805 * read or write is done respectively. 
1806 **/ 1807 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) 1808 { 1809 u32 i; 1810 u32 reg; 1811 s32 status = IXGBE_ERR_EEPROM; 1812 1813 DEBUGFUNC("ixgbe_poll_eerd_eewr_done"); 1814 1815 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) { 1816 if (ee_reg == IXGBE_NVM_POLL_READ) 1817 reg = IXGBE_READ_REG(hw, IXGBE_EERD); 1818 else 1819 reg = IXGBE_READ_REG(hw, IXGBE_EEWR); 1820 1821 if (reg & IXGBE_EEPROM_RW_REG_DONE) { 1822 status = IXGBE_SUCCESS; 1823 break; 1824 } 1825 usec_delay(5); 1826 } 1827 1828 if (i == IXGBE_EERD_EEWR_ATTEMPTS) 1829 ERROR_REPORT1(IXGBE_ERROR_POLLING, 1830 "EEPROM read/write done polling timed out"); 1831 1832 return status; 1833 } 1834 1835 /** 1836 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang 1837 * @hw: pointer to hardware structure 1838 * 1839 * Prepares EEPROM for access using bit-bang method. This function should 1840 * be called before issuing a command to the EEPROM. 1841 **/ 1842 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) 1843 { 1844 s32 status = IXGBE_SUCCESS; 1845 u32 eec; 1846 u32 i; 1847 1848 DEBUGFUNC("ixgbe_acquire_eeprom"); 1849 1850 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) 1851 != IXGBE_SUCCESS) 1852 status = IXGBE_ERR_SWFW_SYNC; 1853 1854 if (status == IXGBE_SUCCESS) { 1855 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); 1856 1857 /* Request EEPROM Access */ 1858 eec |= IXGBE_EEC_REQ; 1859 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); 1860 1861 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { 1862 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); 1863 if (eec & IXGBE_EEC_GNT) 1864 break; 1865 usec_delay(5); 1866 } 1867 1868 /* Release if grant not acquired */ 1869 if (!(eec & IXGBE_EEC_GNT)) { 1870 eec &= ~IXGBE_EEC_REQ; 1871 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); 1872 DEBUGOUT("Could not acquire EEPROM grant\n"); 1873 1874 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 1875 status = IXGBE_ERR_EEPROM; 1876 } 1877 1878 /* Setup EEPROM for Read/Write */ 
1879 if (status == IXGBE_SUCCESS) { 1880 /* Clear CS and SK */ 1881 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); 1882 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); 1883 IXGBE_WRITE_FLUSH(hw); 1884 usec_delay(1); 1885 } 1886 } 1887 return status; 1888 } 1889 1890 /** 1891 * ixgbe_get_eeprom_semaphore - Get hardware semaphore 1892 * @hw: pointer to hardware structure 1893 * 1894 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method 1895 **/ 1896 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) 1897 { 1898 s32 status = IXGBE_ERR_EEPROM; 1899 u32 timeout = 2000; 1900 u32 i; 1901 u32 swsm; 1902 1903 DEBUGFUNC("ixgbe_get_eeprom_semaphore"); 1904 1905 1906 /* Get SMBI software semaphore between device drivers first */ 1907 for (i = 0; i < timeout; i++) { 1908 /* 1909 * If the SMBI bit is 0 when we read it, then the bit will be 1910 * set and we have the semaphore 1911 */ 1912 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); 1913 if (!(swsm & IXGBE_SWSM_SMBI)) { 1914 status = IXGBE_SUCCESS; 1915 break; 1916 } 1917 usec_delay(50); 1918 } 1919 1920 if (i == timeout) { 1921 DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore " 1922 "not granted.\n"); 1923 /* 1924 * this release is particularly important because our attempts 1925 * above to get the semaphore may have succeeded, and if there 1926 * was a timeout, we should unconditionally clear the semaphore 1927 * bits to free the driver to make progress 1928 */ 1929 ixgbe_release_eeprom_semaphore(hw); 1930 1931 usec_delay(50); 1932 /* 1933 * one last try 1934 * If the SMBI bit is 0 when we read it, then the bit will be 1935 * set and we have the semaphore 1936 */ 1937 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); 1938 if (!(swsm & IXGBE_SWSM_SMBI)) 1939 status = IXGBE_SUCCESS; 1940 } 1941 1942 /* Now get the semaphore between SW/FW through the SWESMBI bit */ 1943 if (status == IXGBE_SUCCESS) { 1944 for (i = 0; i < timeout; i++) { 1945 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); 
1946 1947 /* Set the SW EEPROM semaphore bit to request access */ 1948 swsm |= IXGBE_SWSM_SWESMBI; 1949 IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm); 1950 1951 /* 1952 * If we set the bit successfully then we got the 1953 * semaphore. 1954 */ 1955 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); 1956 if (swsm & IXGBE_SWSM_SWESMBI) 1957 break; 1958 1959 usec_delay(50); 1960 } 1961 1962 /* 1963 * Release semaphores and return error if SW EEPROM semaphore 1964 * was not granted because we don't have access to the EEPROM 1965 */ 1966 if (i >= timeout) { 1967 ERROR_REPORT1(IXGBE_ERROR_POLLING, 1968 "SWESMBI Software EEPROM semaphore not granted.\n"); 1969 ixgbe_release_eeprom_semaphore(hw); 1970 status = IXGBE_ERR_EEPROM; 1971 } 1972 } else { 1973 ERROR_REPORT1(IXGBE_ERROR_POLLING, 1974 "Software semaphore SMBI between device drivers " 1975 "not granted.\n"); 1976 } 1977 1978 return status; 1979 } 1980 1981 /** 1982 * ixgbe_release_eeprom_semaphore - Release hardware semaphore 1983 * @hw: pointer to hardware structure 1984 * 1985 * This function clears hardware semaphore bits. 1986 **/ 1987 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) 1988 { 1989 u32 swsm; 1990 1991 DEBUGFUNC("ixgbe_release_eeprom_semaphore"); 1992 1993 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); 1994 1995 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */ 1996 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI); 1997 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); 1998 IXGBE_WRITE_FLUSH(hw); 1999 } 2000 2001 /** 2002 * ixgbe_ready_eeprom - Polls for EEPROM ready 2003 * @hw: pointer to hardware structure 2004 **/ 2005 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) 2006 { 2007 s32 status = IXGBE_SUCCESS; 2008 u16 i; 2009 u8 spi_stat_reg; 2010 2011 DEBUGFUNC("ixgbe_ready_eeprom"); 2012 2013 /* 2014 * Read "Status Register" repeatedly until the LSB is cleared. 
The 2015 * EEPROM will signal that the command has been completed by clearing 2016 * bit 0 of the internal status register. If it's not cleared within 2017 * 5 milliseconds, then error out. 2018 */ 2019 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) { 2020 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI, 2021 IXGBE_EEPROM_OPCODE_BITS); 2022 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8); 2023 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) 2024 break; 2025 2026 usec_delay(5); 2027 ixgbe_standby_eeprom(hw); 2028 } 2029 2030 /* 2031 * On some parts, SPI write time could vary from 0-20mSec on 3.3V 2032 * devices (and only 0-5mSec on 5V devices) 2033 */ 2034 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { 2035 DEBUGOUT("SPI EEPROM Status error\n"); 2036 status = IXGBE_ERR_EEPROM; 2037 } 2038 2039 return status; 2040 } 2041 2042 /** 2043 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state 2044 * @hw: pointer to hardware structure 2045 **/ 2046 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) 2047 { 2048 u32 eec; 2049 2050 DEBUGFUNC("ixgbe_standby_eeprom"); 2051 2052 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); 2053 2054 /* Toggle CS to flush commands */ 2055 eec |= IXGBE_EEC_CS; 2056 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); 2057 IXGBE_WRITE_FLUSH(hw); 2058 usec_delay(1); 2059 eec &= ~IXGBE_EEC_CS; 2060 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); 2061 IXGBE_WRITE_FLUSH(hw); 2062 usec_delay(1); 2063 } 2064 2065 /** 2066 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM. 
 * @hw: pointer to hardware structure
 * @data: data to send to the EEPROM
 * @count: number of bits to shift out
 *
 * Bit-bangs @count bits of @data, MSB first, onto the SPI "DI" line,
 * clocking each bit with a raise/lower of "SK".  The write/flush/delay
 * sequence per bit is deliberate: writes to EEC are posted and the EEPROM
 * samples DI on the clock edge.
 **/
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count)
{
	u32 eec;
	u32 mask;
	u32 i;

	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	/*
	 * Mask is used to shift "count" bits of "data" out to the EEPROM
	 * one bit at a time.  Determine the starting bit based on count
	 */
	mask = 0x01 << (count - 1);

	for (i = 0; i < count; i++) {
		/*
		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
		 * "1", and then raising and then lowering the clock (the SK
		 * bit controls the clock input to the EEPROM).  A "0" is
		 * shifted out to the EEPROM by setting "DI" to "0" and then
		 * raising and then lowering the clock.
		 */
		if (data & mask)
			eec |= IXGBE_EEC_DI;
		else
			eec &= ~IXGBE_EEC_DI;

		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
		IXGBE_WRITE_FLUSH(hw);

		usec_delay(1);

		ixgbe_raise_eeprom_clk(hw, &eec);
		ixgbe_lower_eeprom_clk(hw, &eec);

		/*
		 * Shift mask to signify next bit of data to shift in to the
		 * EEPROM
		 */
		mask = mask >> 1;
	}

	/* We leave the "DI" bit set to "0" when we leave this routine. */
	eec &= ~IXGBE_EEC_DI;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
 * @hw: pointer to hardware structure
 * @count: number of bits to shift
 *
 * Returns up to 16 bits read from the SPI "DO" line, MSB first.  One bit
 * is sampled per SK clock cycle; "DI" is kept clear for the duration.
 **/
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
	u32 eec;
	u32 i;
	u16 data = 0;

	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");

	/*
	 * In order to read a register from the EEPROM, we need to shift
	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
	 * the clock input to the EEPROM (setting the SK bit), and then reading
	 * the value of the "DO" bit.  During this "shifting in" process the
	 * "DI" bit should always be clear.
	 */
	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);

	for (i = 0; i < count; i++) {
		data = data << 1;
		ixgbe_raise_eeprom_clk(hw, &eec);

		/* Re-read EEC so the DO bit reflects the EEPROM's output */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

		eec &= ~(IXGBE_EEC_DI);
		if (eec & IXGBE_EEC_DO)
			data |= 1;

		ixgbe_lower_eeprom_clk(hw, &eec);
	}

	return data;
}

/**
 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
 * @hw: pointer to hardware structure
 * @eec: EEC register's current value
 *
 * Sets SK in the caller's cached EEC value, writes it back, and waits 1 us
 * so the rising edge is seen by the EEPROM.  @eec is updated in place.
 **/
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	DEBUGFUNC("ixgbe_raise_eeprom_clk");

	/*
	 * Raise the clock input to the EEPROM
	 * (setting the SK bit), then delay
	 */
	*eec = *eec | IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
}

/**
 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
 * @hw: pointer to hardware structure
 * @eec: EEC's current value
 *
 * Clears SK in the caller's cached EEC value, writes it back, and waits
 * 1 us so the falling edge is seen by the EEPROM.  @eec is updated in place.
 **/
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	DEBUGFUNC("ixgbe_lower_eeprom_clk");

	/*
	 * Lower the clock input to the EEPROM (clearing the SK bit), then
	 * delay
	 */
	*eec = *eec & ~IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
}

/**
 * ixgbe_release_eeprom - Release EEPROM, release semaphores
 * @hw: pointer to hardware structure
 *
 * Deasserts the SPI lines (CS high, SK low), drops the hardware request
 * bit, and releases the software/firmware semaphore taken by
 * ixgbe_acquire_eeprom().
 **/
static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	DEBUGFUNC("ixgbe_release_eeprom");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	eec |= IXGBE_EEC_CS;  /* Pull CS high */
	eec &= ~IXGBE_EEC_SK; /* Lower SCK */

	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);

	usec_delay(1);

	/* Stop requesting EEPROM access */
	eec &= ~IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	/* Delay before attempt to obtain semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
}

/**
 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 *
 * Returns a negative error code on error, or the 16-bit checksum
 **/
s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;

	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}
		checksum +=
word;
	}

	/* Include all data from pointers except for the fw pointer */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		if (hw->eeprom.ops.read(hw, i, &pointer)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		/* If the pointer seems invalid */
		if (pointer == 0xFFFF || pointer == 0)
			continue;

		if (hw->eeprom.ops.read(hw, pointer, &length)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		if (length == 0xFFFF || length == 0)
			continue;

		/*
		 * NOTE(review): pointer + length is not range-checked here;
		 * a corrupt EEPROM where pointer + length wraps past 0xFFFF
		 * would make this u16 loop spin — presumably vendor sections
		 * are always well-formed, but worth confirming upstream.
		 */
		for (j = pointer + 1; j <= pointer + length; j++) {
			if (hw->eeprom.ops.read(hw, j, &word)) {
				DEBUGOUT("EEPROM read failed\n");
				return IXGBE_ERR_EEPROM;
			}
			checksum += word;
		}
	}

	/* Checksum word is chosen so that all words sum to IXGBE_EEPROM_SUM */
	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return (s32)checksum;
}

/**
 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum_val: calculated checksum
 *
 * Performs checksum calculation and validates the EEPROM checksum.  If the
 * caller does not need checksum_val, the value can be NULL.
 **/
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
					   u16 *checksum_val)
{
	s32 status;
	u16 checksum;
	u16 read_checksum = 0;

	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status) {
		DEBUGOUT("EEPROM read failed\n");
		return status;
	}

	/* calc_checksum returns the checksum (>= 0) or a negative error */
	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		return status;

	checksum = (u16)(status & 0xffff);

	status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
	if (status) {
		DEBUGOUT("EEPROM read failed\n");
		return status;
	}

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (read_checksum != checksum)
		status = IXGBE_ERR_EEPROM_CHECKSUM;

	/* If the user cares, return the calculated checksum */
	if (checksum_val)
		*checksum_val = checksum;

	return status;
}

/**
 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
 * @hw: pointer to hardware structure
 *
 * Recomputes the checksum over the EEPROM contents and writes it to the
 * checksum word.  Returns IXGBE_SUCCESS or a negative error code.
 **/
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	s32 status;
	u16 checksum;

	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status) {
		DEBUGOUT("EEPROM read failed\n");
		return status;
	}

	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		return status;

	checksum = (u16)(status & 0xffff);

	status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);

	return status;
}

/**
 * ixgbe_validate_mac_addr - Validate MAC address
 * @mac_addr: pointer to MAC address.
 *
 * Tests a MAC address to ensure it is a valid Individual Address.
2378 **/ 2379 s32 ixgbe_validate_mac_addr(u8 *mac_addr) 2380 { 2381 s32 status = IXGBE_SUCCESS; 2382 2383 DEBUGFUNC("ixgbe_validate_mac_addr"); 2384 2385 /* Make sure it is not a multicast address */ 2386 if (IXGBE_IS_MULTICAST(mac_addr)) { 2387 status = IXGBE_ERR_INVALID_MAC_ADDR; 2388 /* Not a broadcast address */ 2389 } else if (IXGBE_IS_BROADCAST(mac_addr)) { 2390 status = IXGBE_ERR_INVALID_MAC_ADDR; 2391 /* Reject the zero address */ 2392 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && 2393 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) { 2394 status = IXGBE_ERR_INVALID_MAC_ADDR; 2395 } 2396 return status; 2397 } 2398 2399 /** 2400 * ixgbe_set_rar_generic - Set Rx address register 2401 * @hw: pointer to hardware structure 2402 * @index: Receive address register to write 2403 * @addr: Address to put into receive address register 2404 * @vmdq: VMDq "set" or "pool" index 2405 * @enable_addr: set flag that address is active 2406 * 2407 * Puts an ethernet address into a receive address register. 
 **/
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
			  u32 enable_addr)
{
	u32 rar_low, rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_set_rar_generic");

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
			      "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* setup VMDq pool selection before this RAR gets enabled */
	hw->mac.ops.set_vmdq(hw, index, vmdq);

	/*
	 * HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] |
		   ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) |
		   ((u32)addr[3] << 24));
	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));

	/* AV (address valid) makes the filter active */
	if (enable_addr != 0)
		rar_high |= IXGBE_RAH_AV;

	/* Write RAL before RAH: setting AV in RAH activates the entry */
	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_clear_rar_generic - Remove Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 *
 * Clears an ethernet address from a receive address register.
 **/
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_clear_rar_generic");

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
			      "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);

	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	/* clear VMDq pool/queue selection for this RAR */
	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 * @hw: pointer to hardware structure
 *
 * Places the MAC address in receive address register 0 and clears the rest
 * of the receive address registers. Clears the multicast table.  Assumes
 * the receiver is in reset when the routine is called.
 **/
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_init_rx_addrs_generic");

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
	    IXGBE_ERR_INVALID_MAC_ADDR) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);
	} else {
		/* Setup the receive address. */
		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
	}

	/* clear VMDq pool/queue selection for RAR 0 */
	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);

	hw->addr_ctrl.overflow_promisc = 0;

	/* RAR 0 always holds the primary MAC address */
	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA */
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	DEBUGOUT(" Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	ixgbe_init_uta_tables(hw);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_add_uc_addr - Adds a secondary unicast address.
 * @hw: pointer to hardware structure
 * @addr: new address
 * @vmdq: VMDq "set" or "pool" index
 *
 * Adds it to unused receive address register or goes into promiscuous mode.
 **/
void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
	u32 rar_entries = hw->mac.num_rar_entries;
	u32 rar;

	DEBUGFUNC("ixgbe_add_uc_addr");

	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	/*
	 * Place this address in the RAR if there is room,
	 * else put the controller into promiscuous mode
	 */
	if (hw->addr_ctrl.rar_used_count < rar_entries) {
		rar = hw->addr_ctrl.rar_used_count;
		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
		hw->addr_ctrl.rar_used_count++;
	} else {
		/* Caller (update_uc_addr_list) checks this counter and
		 * enables unicast promiscuous mode when it is non-zero. */
		hw->addr_ctrl.overflow_promisc++;
	}

	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
}

/**
 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
 * @hw: pointer to hardware structure
 * @addr_list: the list of new addresses
 * @addr_count: number of addresses
 * @next: iterator function to walk the address list
 *
 * The given list replaces any existing list. Clears the secondary addrs from
 * receive address registers. Uses unused receive address registers for the
 * first secondary addresses, and falls back to promiscuous mode as needed.
 *
 * Drivers using secondary unicast addresses must set user_set_promisc when
 * manually putting the device into promiscuous mode.
2607 **/ 2608 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, 2609 u32 addr_count, ixgbe_mc_addr_itr next) 2610 { 2611 u8 *addr; 2612 u32 i; 2613 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; 2614 u32 uc_addr_in_use; 2615 u32 fctrl; 2616 u32 vmdq; 2617 2618 DEBUGFUNC("ixgbe_update_uc_addr_list_generic"); 2619 2620 /* 2621 * Clear accounting of old secondary address list, 2622 * don't count RAR[0] 2623 */ 2624 uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1; 2625 hw->addr_ctrl.rar_used_count -= uc_addr_in_use; 2626 hw->addr_ctrl.overflow_promisc = 0; 2627 2628 /* Zero out the other receive addresses */ 2629 DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1); 2630 for (i = 0; i < uc_addr_in_use; i++) { 2631 IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0); 2632 IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0); 2633 } 2634 2635 /* Add the new addresses */ 2636 for (i = 0; i < addr_count; i++) { 2637 DEBUGOUT(" Adding the secondary addresses:\n"); 2638 addr = next(hw, &addr_list, &vmdq); 2639 ixgbe_add_uc_addr(hw, addr, vmdq); 2640 } 2641 2642 if (hw->addr_ctrl.overflow_promisc) { 2643 /* enable promisc if not already in overflow or set by user */ 2644 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { 2645 DEBUGOUT(" Entering address overflow promisc mode\n"); 2646 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 2647 fctrl |= IXGBE_FCTRL_UPE; 2648 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 2649 } 2650 } else { 2651 /* only disable if set by overflow, not by user */ 2652 if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { 2653 DEBUGOUT(" Leaving address overflow promisc mode\n"); 2654 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 2655 fctrl &= ~IXGBE_FCTRL_UPE; 2656 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 2657 } 2658 } 2659 2660 DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n"); 2661 return IXGBE_SUCCESS; 2662 } 2663 2664 /** 2665 * ixgbe_mta_vector - Determines bit-vector in multicast table to set 2666 * @hw: pointer to hardware 
structure
 * @mc_addr: the multicast address
 *
 * Extracts the 12 bits, from a multicast address, to determine which
 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
 * incoming rx multicast addresses, to determine the bit-vector to check in
 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
 * by the MO field of the MCSTCTRL. The MO field is set during initialization
 * to mc_filter_type.
 **/
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector = 0;

	DEBUGFUNC("ixgbe_mta_vector");

	/* Each filter type selects a different 12-bit window from the top
	 * of the address (bytes 4-5, address in network byte order). */
	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		DEBUGOUT("MC filter type param set incorrectly\n");
		ASSERT(0);
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}

/**
 * ixgbe_set_mta - Set bit-vector in multicast table
 * @hw: pointer to hardware structure
 * @mc_addr: Multicast address
 *
 * Sets the bit-vector in the multicast table.
2712 **/ 2713 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) 2714 { 2715 u32 vector; 2716 u32 vector_bit; 2717 u32 vector_reg; 2718 2719 DEBUGFUNC("ixgbe_set_mta"); 2720 2721 hw->addr_ctrl.mta_in_use++; 2722 2723 vector = ixgbe_mta_vector(hw, mc_addr); 2724 DEBUGOUT1(" bit-vector = 0x%03X\n", vector); 2725 2726 /* 2727 * The MTA is a register array of 128 32-bit registers. It is treated 2728 * like an array of 4096 bits. We want to set bit 2729 * BitArray[vector_value]. So we figure out what register the bit is 2730 * in, read it, OR in the new bit, then write back the new value. The 2731 * register is determined by the upper 7 bits of the vector value and 2732 * the bit within that register are determined by the lower 5 bits of 2733 * the value. 2734 */ 2735 vector_reg = (vector >> 5) & 0x7F; 2736 vector_bit = vector & 0x1F; 2737 hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); 2738 } 2739 2740 /** 2741 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses 2742 * @hw: pointer to hardware structure 2743 * @mc_addr_list: the list of new multicast addresses 2744 * @mc_addr_count: number of addresses 2745 * @next: iterator function to walk the multicast address list 2746 * @clear: flag, when set clears the table beforehand 2747 * 2748 * When the clear flag is set, the given list replaces any existing list. 2749 * Hashes the given addresses into the multicast table. 2750 **/ 2751 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, 2752 u32 mc_addr_count, ixgbe_mc_addr_itr next, 2753 bool clear) 2754 { 2755 u32 i; 2756 u32 vmdq; 2757 2758 DEBUGFUNC("ixgbe_update_mc_addr_list_generic"); 2759 2760 /* 2761 * Set the new number of MC addresses that we are being requested to 2762 * use. 
	 */
	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
	hw->addr_ctrl.mta_in_use = 0;

	/* Clear mta_shadow */
	if (clear) {
		DEBUGOUT(" Clearing MTA\n");
		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
	}

	/* Update mta_shadow */
	for (i = 0; i < mc_addr_count; i++) {
		DEBUGOUT(" Adding the multicast addresses:\n");
		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
	}

	/* Enable mta: push the whole shadow table to the hardware MTA */
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
				      hw->mac.mta_shadow[i]);

	if (hw->addr_ctrl.mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);

	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_enable_mc_generic - Enable multicast address in RAR
 * @hw: pointer to hardware structure
 *
 * Enables multicast address in RAR and the use of the multicast hash table.
 **/
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;

	DEBUGFUNC("ixgbe_enable_mc_generic");

	/* Only set MFE when the shadow table actually has entries */
	if (a->mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
				hw->mac.mc_filter_type);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_disable_mc_generic - Disable multicast address in RAR
 * @hw: pointer to hardware structure
 *
 * Disables multicast address in RAR and the use of the multicast hash table.
 **/
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;

	DEBUGFUNC("ixgbe_disable_mc_generic");

	/* Writing MCSTCTRL without MFE turns the hash filter off */
	if (a->mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_fc_enable_generic - Enable flow control
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to the current settings.
 **/
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 mflcn_reg, fccfg_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	int i;

	DEBUGFUNC("ixgbe_fc_enable_generic");

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				DEBUGOUT("Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/* Negotiate the fc mode to use */
	hw->mac.ops.fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
2879 * 2: Tx flow control is enabled (we can send pause frames but 2880 * we do not support receiving pause frames). 2881 * 3: Both Rx and Tx flow control (symmetric) are enabled. 2882 * other: Invalid. 2883 */ 2884 switch (hw->fc.current_mode) { 2885 case ixgbe_fc_none: 2886 /* 2887 * Flow control is disabled by software override or autoneg. 2888 * The code below will actually disable it in the HW. 2889 */ 2890 break; 2891 case ixgbe_fc_rx_pause: 2892 /* 2893 * Rx Flow control is enabled and Tx Flow control is 2894 * disabled by software override. Since there really 2895 * isn't a way to advertise that we are capable of RX 2896 * Pause ONLY, we will advertise that we support both 2897 * symmetric and asymmetric Rx PAUSE. Later, we will 2898 * disable the adapter's ability to send PAUSE frames. 2899 */ 2900 mflcn_reg |= IXGBE_MFLCN_RFCE; 2901 break; 2902 case ixgbe_fc_tx_pause: 2903 /* 2904 * Tx Flow control is enabled, and Rx Flow control is 2905 * disabled by software override. 2906 */ 2907 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; 2908 break; 2909 case ixgbe_fc_full: 2910 /* Flow control (both Rx and Tx) is enabled by SW override. */ 2911 mflcn_reg |= IXGBE_MFLCN_RFCE; 2912 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; 2913 break; 2914 default: 2915 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, 2916 "Flow control param set incorrectly\n"); 2917 ret_val = IXGBE_ERR_CONFIG; 2918 goto out; 2919 break; 2920 } 2921 2922 /* Set 802.3x based flow control settings. */ 2923 mflcn_reg |= IXGBE_MFLCN_DPF; 2924 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 2925 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 2926 2927 2928 /* Set up and enable Rx high/low water mark thresholds, enable XON. 
	 */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			/* Water marks are stored in KB units; << 10 converts
			 * to the byte granularity the registers expect. */
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the Rx packet buffer size - 24KB.  This allows
			 * the Tx switch to function even under heavy Rx
			 * workloads.
			 */
			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	/* Configure pause time (2 TCs per register) */
	reg = (u32)hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}

/**
 * ixgbe_negotiate_fc - Negotiate flow control
 * @hw: pointer to hardware structure
 * @adv_reg: flow control advertised settings
 * @lp_reg: link partner's flow control settings
 * @adv_sym: symmetric pause bit in advertisement
 * @adv_asm: asymmetric pause bit in advertisement
 * @lp_sym: symmetric pause bit in link partner advertisement
 * @lp_asm: asymmetric pause bit in link partner advertisement
 *
 * Find the intersection between advertised settings and link partner's
 * advertised settings
 **/
s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
	if ((!(adv_reg)) || (!(lp_reg))) {
		ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
			      "Local or link partner's advertised flow
control "
			      "settings are NULL. Local: %x, link partner: %x\n",
			      adv_reg, lp_reg);
		return IXGBE_ERR_FC_NOT_NEGOTIATED;
	}

	/* IEEE 802.3 Annex 28B style resolution: the symmetric/asymmetric
	 * PAUSE bit pairs from both ends determine the resulting mode. */
	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
		/*
		 * Now we need to check if the user selected Rx ONLY
		 * of pause frames.  In this case, we had to advertise
		 * FULL flow control because we could not advertise RX
		 * ONLY. Hence, we must now check to see if we need to
		 * turn OFF the TRANSMISSION of PAUSE frames.
		 */
		if (hw->fc.requested_mode == ixgbe_fc_full) {
			hw->fc.current_mode = ixgbe_fc_full;
			DEBUGOUT("Flow Control = FULL.\n");
		} else {
			hw->fc.current_mode = ixgbe_fc_rx_pause;
			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
		}
	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		hw->fc.current_mode = ixgbe_fc_tx_pause;
		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		hw->fc.current_mode = ixgbe_fc_rx_pause;
		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
	} else {
		hw->fc.current_mode = ixgbe_fc_none;
		DEBUGOUT("Flow Control = NONE.\n");
	}
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
 * @hw: pointer to hardware structure
 *
 * Enable flow control by negotiation on 1 gig fiber.
 **/
static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
{
	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;

	/*
	 * On multispeed fiber at 1g, bail out if
	 * - link is up but AN did not complete, or if
	 * - link is up and AN completed but timed out
	 */

	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	/* The !! double-negation normalizes the masked bits to 0/1 */
	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
		DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
		goto out;
	}

	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);

	/* Both directions use the PCS1GANA pause bit definitions */
	ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
				     pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
				     IXGBE_PCS1GANA_ASM_PAUSE,
				     IXGBE_PCS1GANA_SYM_PAUSE,
				     IXGBE_PCS1GANA_ASM_PAUSE);

out:
	return ret_val;
}

/**
 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to IEEE clause 37.
 **/
static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
{
	u32 links2, anlp1_reg, autoc_reg, links;
	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;

	/*
	 * On backplane, bail out if
	 * - backplane autoneg was not completed, or if
	 * - we are 82599 and link partner is not AN enabled
	 */
	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
		DEBUGOUT("Auto-Negotiation did not complete\n");
		goto out;
	}

	/* 82599 exposes the partner's AN capability in LINKS2 */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
			DEBUGOUT("Link partner is not AN enabled\n");
			goto out;
		}
	}
	/*
	 * Read the 10g AN autoc and LP ability registers and resolve
	 * local flow control settings accordingly
	 */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);

	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);

out:
	return ret_val;
}

/**
 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to IEEE clause 37.
 **/
static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
{
	u16 technology_ability_reg = 0;
	u16 lp_technology_ability_reg = 0;

	/* NOTE(review): the read_reg return values are ignored; on MDIO
	 * failure both values stay 0 and ixgbe_negotiate_fc() reports
	 * IXGBE_ERR_FC_NOT_NEGOTIATED, which is the intended fallback. */
	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
			     &technology_ability_reg);
	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
			     &lp_technology_ability_reg);

	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
				  (u32)lp_technology_ability_reg,
				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
}

/**
 * ixgbe_fc_autoneg - Configure flow control
 * @hw: pointer to hardware structure
 *
 * Compares our advertised flow control capabilities to those advertised by
 * our link partner, and determines the proper flow control mode to use.
 **/
void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
	ixgbe_link_speed speed;
	bool link_up;

	DEBUGFUNC("ixgbe_fc_autoneg");

	/*
	 * AN should have completed when the cable was plugged in.
	 * Look for reasons to bail out.  Bail out if:
	 * - FC autoneg is disabled, or if
	 * - link is not up.
3141 */ 3142 if (hw->fc.disable_fc_autoneg) { 3143 /* TODO: This should be just an informative log */ 3144 ERROR_REPORT1(IXGBE_ERROR_CAUTION, 3145 "Flow control autoneg is disabled"); 3146 goto out; 3147 } 3148 3149 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE); 3150 if (!link_up) { 3151 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down"); 3152 goto out; 3153 } 3154 3155 switch (hw->phy.media_type) { 3156 /* Autoneg flow control on fiber adapters */ 3157 case ixgbe_media_type_fiber_fixed: 3158 case ixgbe_media_type_fiber_qsfp: 3159 case ixgbe_media_type_fiber: 3160 if (speed == IXGBE_LINK_SPEED_1GB_FULL) 3161 ret_val = ixgbe_fc_autoneg_fiber(hw); 3162 break; 3163 3164 /* Autoneg flow control on backplane adapters */ 3165 case ixgbe_media_type_backplane: 3166 ret_val = ixgbe_fc_autoneg_backplane(hw); 3167 break; 3168 3169 /* Autoneg flow control on copper adapters */ 3170 case ixgbe_media_type_copper: 3171 if (ixgbe_device_supports_autoneg_fc(hw)) 3172 ret_val = ixgbe_fc_autoneg_copper(hw); 3173 break; 3174 3175 default: 3176 break; 3177 } 3178 3179 out: 3180 if (ret_val == IXGBE_SUCCESS) { 3181 hw->fc.fc_was_autonegged = TRUE; 3182 } else { 3183 hw->fc.fc_was_autonegged = FALSE; 3184 hw->fc.current_mode = hw->fc.requested_mode; 3185 } 3186 } 3187 3188 /* 3189 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion 3190 * @hw: pointer to hardware structure 3191 * 3192 * System-wide timeout range is encoded in PCIe Device Control2 register. 3193 * 3194 * Add 10% to specified maximum and return the number of times to poll for 3195 * completion timeout, in units of 100 microsec. Never return less than 3196 * 800 = 80 millisec. 
3197 */ 3198 static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) 3199 { 3200 s16 devctl2; 3201 u32 pollcnt; 3202 3203 devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2); 3204 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK; 3205 3206 switch (devctl2) { 3207 case IXGBE_PCIDEVCTRL2_65_130ms: 3208 pollcnt = 1300; /* 130 millisec */ 3209 break; 3210 case IXGBE_PCIDEVCTRL2_260_520ms: 3211 pollcnt = 5200; /* 520 millisec */ 3212 break; 3213 case IXGBE_PCIDEVCTRL2_1_2s: 3214 pollcnt = 20000; /* 2 sec */ 3215 break; 3216 case IXGBE_PCIDEVCTRL2_4_8s: 3217 pollcnt = 80000; /* 8 sec */ 3218 break; 3219 case IXGBE_PCIDEVCTRL2_17_34s: 3220 pollcnt = 34000; /* 34 sec */ 3221 break; 3222 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */ 3223 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */ 3224 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */ 3225 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */ 3226 default: 3227 pollcnt = 800; /* 80 millisec minimum */ 3228 break; 3229 } 3230 3231 /* add 10% to spec maximum */ 3232 return (pollcnt * 11) / 10; 3233 } 3234 3235 /** 3236 * ixgbe_disable_pcie_master - Disable PCI-express master access 3237 * @hw: pointer to hardware structure 3238 * 3239 * Disables PCI-Express master access and verifies there are no pending 3240 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable 3241 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS 3242 * is returned signifying master requests disabled. 
3243 **/ 3244 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) 3245 { 3246 s32 status = IXGBE_SUCCESS; 3247 u32 i, poll; 3248 u16 value; 3249 3250 DEBUGFUNC("ixgbe_disable_pcie_master"); 3251 3252 /* Always set this bit to ensure any future transactions are blocked */ 3253 IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS); 3254 3255 /* Exit if master requests are blocked */ 3256 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) || 3257 IXGBE_REMOVED(hw->hw_addr)) 3258 goto out; 3259 3260 /* Poll for master request bit to clear */ 3261 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 3262 usec_delay(100); 3263 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) 3264 goto out; 3265 } 3266 3267 /* 3268 * Two consecutive resets are required via CTRL.RST per datasheet 3269 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine 3270 * of this need. The first reset prevents new master requests from 3271 * being issued by our device. We then must wait 1usec or more for any 3272 * remaining completions from the PCIe bus to trickle in, and then reset 3273 * again to clear out any effects they may have had on our device. 3274 */ 3275 DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n"); 3276 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; 3277 3278 if (hw->mac.type >= ixgbe_mac_X550) 3279 goto out; 3280 3281 /* 3282 * Before proceeding, make sure that the PCIe block does not have 3283 * transactions pending. 
3284 */ 3285 poll = ixgbe_pcie_timeout_poll(hw); 3286 for (i = 0; i < poll; i++) { 3287 usec_delay(100); 3288 value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS); 3289 if (IXGBE_REMOVED(hw->hw_addr)) 3290 goto out; 3291 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) 3292 goto out; 3293 } 3294 3295 ERROR_REPORT1(IXGBE_ERROR_POLLING, 3296 "PCIe transaction pending bit also did not clear.\n"); 3297 status = IXGBE_ERR_MASTER_REQUESTS_PENDING; 3298 3299 out: 3300 return status; 3301 } 3302 3303 /** 3304 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore 3305 * @hw: pointer to hardware structure 3306 * @mask: Mask to specify which semaphore to acquire 3307 * 3308 * Acquires the SWFW semaphore through the GSSR register for the specified 3309 * function (CSR, PHY0, PHY1, EEPROM, Flash) 3310 **/ 3311 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask) 3312 { 3313 u32 gssr = 0; 3314 u32 swmask = mask; 3315 u32 fwmask = mask << 5; 3316 u32 timeout = 200; 3317 u32 i; 3318 3319 DEBUGFUNC("ixgbe_acquire_swfw_sync"); 3320 3321 for (i = 0; i < timeout; i++) { 3322 /* 3323 * SW NVM semaphore bit is used for access to all 3324 * SW_FW_SYNC bits (not just NVM) 3325 */ 3326 if (ixgbe_get_eeprom_semaphore(hw)) 3327 return IXGBE_ERR_SWFW_SYNC; 3328 3329 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); 3330 if (!(gssr & (fwmask | swmask))) { 3331 gssr |= swmask; 3332 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); 3333 ixgbe_release_eeprom_semaphore(hw); 3334 return IXGBE_SUCCESS; 3335 } else { 3336 /* Resource is currently in use by FW or SW */ 3337 ixgbe_release_eeprom_semaphore(hw); 3338 msec_delay(5); 3339 } 3340 } 3341 3342 /* If time expired clear the bits holding the lock and retry */ 3343 if (gssr & (fwmask | swmask)) 3344 ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask)); 3345 3346 msec_delay(5); 3347 return IXGBE_ERR_SWFW_SYNC; 3348 } 3349 3350 /** 3351 * ixgbe_release_swfw_sync - Release SWFW semaphore 3352 * @hw: pointer to hardware structure 3353 * @mask: Mask to 
 * specify which semaphore to release
 *
 * Releases the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
	u32 gssr;
	u32 swmask = mask;

	DEBUGFUNC("ixgbe_release_swfw_sync");

	/* GSSR updates are themselves guarded by the EEPROM semaphore */
	ixgbe_get_eeprom_semaphore(hw);

	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
	gssr &= ~swmask;
	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);

	ixgbe_release_eeprom_semaphore(hw);
}

/**
 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
 * @hw: pointer to hardware structure
 *
 * Stops the receive data path and waits for the HW to internally empty
 * the Rx security block
 **/
s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
{
/* 4000 polls x 10us delay = up to ~40ms waiting for the block to drain */
#define IXGBE_MAX_SECRX_POLL 4000

	int i;
	int secrxreg;

	DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");


	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
	/* Wait for the security block to report ready (drained) */
	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
			break;
		else
			/* Use interrupt-safe sleep just in case */
			usec_delay(10);
	}

	/* For informational purposes only */
	if (i >= IXGBE_MAX_SECRX_POLL)
		DEBUGOUT("Rx unit being enabled before security "
			 "path fully disabled. Continuing with init.\n");

	return IXGBE_SUCCESS;
}

/**
 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
 * @hw: pointer to hardware structure
 * @locked: bool to indicate whether the SW/FW lock was taken
 * @reg_val: Value we read from AUTOC
 *
 * The default case requires no protection so just do the register read.
3418 */ 3419 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) 3420 { 3421 *locked = FALSE; 3422 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); 3423 return IXGBE_SUCCESS; 3424 } 3425 3426 /** 3427 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write 3428 * @hw: pointer to hardware structure 3429 * @reg_val: value to write to AUTOC 3430 * @locked: bool to indicate whether the SW/FW lock was already taken by 3431 * previous read. 3432 * 3433 * The default case requires no protection so just to the register write. 3434 */ 3435 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) 3436 { 3437 UNREFERENCED_1PARAMETER(locked); 3438 3439 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val); 3440 return IXGBE_SUCCESS; 3441 } 3442 3443 /** 3444 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path 3445 * @hw: pointer to hardware structure 3446 * 3447 * Enables the receive data path. 3448 **/ 3449 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw) 3450 { 3451 u32 secrxreg; 3452 3453 DEBUGFUNC("ixgbe_enable_sec_rx_path_generic"); 3454 3455 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 3456 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; 3457 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); 3458 IXGBE_WRITE_FLUSH(hw); 3459 3460 return IXGBE_SUCCESS; 3461 } 3462 3463 /** 3464 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit 3465 * @hw: pointer to hardware structure 3466 * @regval: register value to write to RXCTRL 3467 * 3468 * Enables the Rx DMA unit 3469 **/ 3470 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) 3471 { 3472 DEBUGFUNC("ixgbe_enable_rx_dma_generic"); 3473 3474 if (regval & IXGBE_RXCTRL_RXEN) 3475 ixgbe_enable_rx(hw); 3476 else 3477 ixgbe_disable_rx(hw); 3478 3479 return IXGBE_SUCCESS; 3480 } 3481 3482 /** 3483 * ixgbe_blink_led_start_generic - Blink LED based on index. 
 * @hw: pointer to hardware structure
 * @index: led number to blink
 **/
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
	ixgbe_link_speed speed = 0;
	bool link_up = 0;
	u32 autoc_reg = 0;
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	s32 ret_val = IXGBE_SUCCESS;
	bool locked = FALSE;

	DEBUGFUNC("ixgbe_blink_led_start_generic");

	/* Only LED indices 0-3 exist in LEDCTL */
	if (index > 3)
		return IXGBE_ERR_PARAM;

	/*
	 * Link must be up to auto-blink the LEDs;
	 * Force it if link is down.
	 */
	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);

	if (!link_up) {
		/* Read-modify-write AUTOC through the protected accessors */
		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
		if (ret_val != IXGBE_SUCCESS)
			goto out;

		/* Force link up and restart autonegotiation */
		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
		autoc_reg |= IXGBE_AUTOC_FLU;

		ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
		if (ret_val != IXGBE_SUCCESS)
			goto out;

		IXGBE_WRITE_FLUSH(hw);
		msec_delay(10);
	}

	/* Switch the selected LED into blink mode */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_BLINK(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

out:
	return ret_val;
}

/**
 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to stop blinking
 **/
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 autoc_reg = 0;
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	s32 ret_val = IXGBE_SUCCESS;
	bool locked = FALSE;

	DEBUGFUNC("ixgbe_blink_led_stop_generic");

	/* Only LED indices 0-3 exist in LEDCTL */
	if (index > 3)
		return IXGBE_ERR_PARAM;

	ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
	if (ret_val != IXGBE_SUCCESS)
		goto out;

	/* Undo the forced-link-up set by blink_led_start and restart AN */
	autoc_reg &= ~IXGBE_AUTOC_FLU;
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;

	ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
	if (ret_val != IXGBE_SUCCESS)
		goto out;

	/* Return the LED to link-activity mode */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg &= ~IXGBE_LED_BLINK(index);
	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

out:
	return ret_val;
}

/**
 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
 * @hw: pointer to hardware structure
 * @san_mac_offset: SAN MAC address offset
 *
 * This function will read the EEPROM location for the SAN MAC address
 * pointer, and returns the value at that location.  This is used in both
 * get and set mac_addr routines.
 **/
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
					 u16 *san_mac_offset)
{
	s32 ret_val;

	DEBUGFUNC("ixgbe_get_san_mac_addr_offset");

	/*
	 * First read the EEPROM pointer to see if the MAC addresses are
	 * available.
	 */
	ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
				      san_mac_offset);
	if (ret_val) {
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			      "eeprom at offset %d failed",
			      IXGBE_SAN_MAC_ADDR_PTR);
	}

	return ret_val;
}

/**
 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
 * @hw: pointer to hardware structure
 * @san_mac_addr: SAN MAC address
 *
 * Reads the SAN MAC address from the EEPROM, if it's available.  This is
 * per-port, so set_lan_id() must be called before reading the addresses.
 * set_lan_id() is called by identify_sfp(), but this cannot be relied
 * upon for non-SFP connections, so we must call it here.
 **/
s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
{
	u16 san_mac_data, san_mac_offset;
	u8 i;
	s32 ret_val;

	DEBUGFUNC("ixgbe_get_san_mac_addr_generic");

	/*
	 * First read the EEPROM pointer to see if the MAC addresses are
	 * available.  If they're not, no point in calling set_lan_id() here.
	 */
	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
		goto san_mac_addr_out;

	/* make sure we know which port we need to program */
	hw->mac.ops.set_lan_id(hw);
	/* apply the port offset to the address offset */
	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
	/* 3 EEPROM words = the 6 bytes of the MAC address */
	for (i = 0; i < 3; i++) {
		ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
					      &san_mac_data);
		if (ret_val) {
			ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
				      "eeprom read at offset %d failed",
				      san_mac_offset);
			goto san_mac_addr_out;
		}
		san_mac_addr[i * 2] = (u8)(san_mac_data);
		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
		san_mac_offset++;
	}
	return IXGBE_SUCCESS;

san_mac_addr_out:
	/*
	 * No addresses available in this EEPROM.  It's not an
	 * error though, so just wipe the local address and return.
	 */
	for (i = 0; i < 6; i++)
		san_mac_addr[i] = 0xFF;
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
 * @hw: pointer to hardware structure
 * @san_mac_addr: SAN MAC address
 *
 * Write a SAN MAC address to the EEPROM.
 **/
s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
{
	s32 ret_val;
	u16 san_mac_data, san_mac_offset;
	u8 i;

	DEBUGFUNC("ixgbe_set_san_mac_addr_generic");

	/* Look for SAN mac address pointer.  If not defined, return */
	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
		return IXGBE_ERR_NO_SAN_ADDR_PTR;

	/* Make sure we know which port we need to write */
	hw->mac.ops.set_lan_id(hw);
	/* Apply the port offset to the address offset */
	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);

	/* Pack the 6 address bytes into 3 little-endian EEPROM words.
	 * NOTE(review): hw->eeprom.ops.write's return value is ignored
	 * here — a failed write is silently dropped; confirm intentional. */
	for (i = 0; i < 3; i++) {
		san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
		san_mac_data |= (u16)(san_mac_addr[i * 2]);
		hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
		san_mac_offset++;
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
 * @hw: pointer to hardware structure
 *
 * Read PCIe configuration space, and get the MSI-X vector count from
 * the capabilities table.
 **/
u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
{
	u16 msix_count = 1;
	u16 max_msix_count;
	u16 pcie_offset;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
		break;
	default:
		/* Unknown MAC: report a single vector */
		return msix_count;
	}

	DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
	msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
	if (IXGBE_REMOVED(hw->hw_addr))
		msix_count = 0;
	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;

	/* MSI-X count is zero-based in HW */
	msix_count++;

	if (msix_count > max_msix_count)
		msix_count = max_msix_count;

	return msix_count;
}

/**
 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
 * @hw: pointer to hardware structure
 * @addr: Address to put into receive address register
 * @vmdq: VMDq pool to assign
 *
 * Puts an ethernet address into a receive address register, or
 * finds the rar that it is already in; adds to
the pool list 3746 **/ 3747 s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) 3748 { 3749 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF; 3750 u32 first_empty_rar = NO_EMPTY_RAR_FOUND; 3751 u32 rar; 3752 u32 rar_low, rar_high; 3753 u32 addr_low, addr_high; 3754 3755 DEBUGFUNC("ixgbe_insert_mac_addr_generic"); 3756 3757 /* swap bytes for HW little endian */ 3758 addr_low = addr[0] | (addr[1] << 8) 3759 | (addr[2] << 16) 3760 | (addr[3] << 24); 3761 addr_high = addr[4] | (addr[5] << 8); 3762 3763 /* 3764 * Either find the mac_id in rar or find the first empty space. 3765 * rar_highwater points to just after the highest currently used 3766 * rar in order to shorten the search. It grows when we add a new 3767 * rar to the top. 3768 */ 3769 for (rar = 0; rar < hw->mac.rar_highwater; rar++) { 3770 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); 3771 3772 if (((IXGBE_RAH_AV & rar_high) == 0) 3773 && first_empty_rar == NO_EMPTY_RAR_FOUND) { 3774 first_empty_rar = rar; 3775 } else if ((rar_high & 0xFFFF) == addr_high) { 3776 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar)); 3777 if (rar_low == addr_low) 3778 break; /* found it already in the rars */ 3779 } 3780 } 3781 3782 if (rar < hw->mac.rar_highwater) { 3783 /* already there so just add to the pool bits */ 3784 ixgbe_set_vmdq(hw, rar, vmdq); 3785 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) { 3786 /* stick it into first empty RAR slot we found */ 3787 rar = first_empty_rar; 3788 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); 3789 } else if (rar == hw->mac.rar_highwater) { 3790 /* add it to the top of the list and inc the highwater mark */ 3791 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); 3792 hw->mac.rar_highwater++; 3793 } else if (rar >= hw->mac.num_rar_entries) { 3794 return IXGBE_ERR_INVALID_MAC_ADDR; 3795 } 3796 3797 /* 3798 * If we found rar[0], make sure the default pool bit (we use pool 0) 3799 * remains cleared to be sure default pool packets will get delivered 3800 */ 3801 if 
(rar == 0) 3802 ixgbe_clear_vmdq(hw, rar, 0); 3803 3804 return rar; 3805 } 3806 3807 /** 3808 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address 3809 * @hw: pointer to hardware struct 3810 * @rar: receive address register index to disassociate 3811 * @vmdq: VMDq pool index to remove from the rar 3812 **/ 3813 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 3814 { 3815 u32 mpsar_lo, mpsar_hi; 3816 u32 rar_entries = hw->mac.num_rar_entries; 3817 3818 DEBUGFUNC("ixgbe_clear_vmdq_generic"); 3819 3820 /* Make sure we are using a valid rar index range */ 3821 if (rar >= rar_entries) { 3822 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, 3823 "RAR index %d is out of range.\n", rar); 3824 return IXGBE_ERR_INVALID_ARGUMENT; 3825 } 3826 3827 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 3828 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 3829 3830 if (IXGBE_REMOVED(hw->hw_addr)) 3831 goto done; 3832 3833 if (!mpsar_lo && !mpsar_hi) 3834 goto done; 3835 3836 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { 3837 if (mpsar_lo) { 3838 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); 3839 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 3840 } 3841 if (mpsar_hi) { 3842 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); 3843 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 3844 } 3845 } else if (vmdq < 32) { 3846 mpsar_lo &= ~(1 << vmdq); 3847 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); 3848 } else { 3849 mpsar_hi &= ~(1 << (vmdq - 32)); 3850 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); 3851 } 3852 3853 /* was that the last pool using this rar? 
*/ 3854 if (mpsar_lo == 0 && mpsar_hi == 0 && 3855 rar != 0 && rar != hw->mac.san_mac_rar_index) 3856 hw->mac.ops.clear_rar(hw, rar); 3857 done: 3858 return IXGBE_SUCCESS; 3859 } 3860 3861 /** 3862 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address 3863 * @hw: pointer to hardware struct 3864 * @rar: receive address register index to associate with a VMDq index 3865 * @vmdq: VMDq pool index 3866 **/ 3867 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 3868 { 3869 u32 mpsar; 3870 u32 rar_entries = hw->mac.num_rar_entries; 3871 3872 DEBUGFUNC("ixgbe_set_vmdq_generic"); 3873 3874 /* Make sure we are using a valid rar index range */ 3875 if (rar >= rar_entries) { 3876 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, 3877 "RAR index %d is out of range.\n", rar); 3878 return IXGBE_ERR_INVALID_ARGUMENT; 3879 } 3880 3881 if (vmdq < 32) { 3882 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 3883 mpsar |= 1 << vmdq; 3884 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); 3885 } else { 3886 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 3887 mpsar |= 1 << (vmdq - 32); 3888 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); 3889 } 3890 return IXGBE_SUCCESS; 3891 } 3892 3893 /** 3894 * This function should only be involved in the IOV mode. 3895 * In IOV mode, Default pool is next pool after the number of 3896 * VFs advertized and not 0. 
3897 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index] 3898 * 3899 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address 3900 * @hw: pointer to hardware struct 3901 * @vmdq: VMDq pool index 3902 **/ 3903 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) 3904 { 3905 u32 rar = hw->mac.san_mac_rar_index; 3906 3907 DEBUGFUNC("ixgbe_set_vmdq_san_mac"); 3908 3909 if (vmdq < 32) { 3910 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq); 3911 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); 3912 } else { 3913 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); 3914 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32)); 3915 } 3916 3917 return IXGBE_SUCCESS; 3918 } 3919 3920 /** 3921 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array 3922 * @hw: pointer to hardware structure 3923 **/ 3924 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) 3925 { 3926 int i; 3927 3928 DEBUGFUNC("ixgbe_init_uta_tables_generic"); 3929 DEBUGOUT(" Clearing UTA\n"); 3930 3931 for (i = 0; i < 128; i++) 3932 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); 3933 3934 return IXGBE_SUCCESS; 3935 } 3936 3937 /** 3938 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot 3939 * @hw: pointer to hardware structure 3940 * @vlan: VLAN id to write to VLAN filter 3941 * @vlvf_bypass: TRUE to find vlanid only, FALSE returns first empty slot if 3942 * vlanid not found 3943 * 3944 * 3945 * return the VLVF index where this VLAN id should be placed 3946 * 3947 **/ 3948 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) 3949 { 3950 s32 regindex, first_empty_slot; 3951 u32 bits; 3952 3953 /* short cut the special case */ 3954 if (vlan == 0) 3955 return 0; 3956 3957 /* if vlvf_bypass is set we don't want to use an empty slot, we 3958 * will simply bypass the VLVF if there are no entries present in the 3959 * VLVF that contain our VLAN 3960 */ 3961 first_empty_slot = vlvf_bypass ? 
IXGBE_ERR_NO_SPACE : 0; 3962 3963 /* add VLAN enable bit for comparison */ 3964 vlan |= IXGBE_VLVF_VIEN; 3965 3966 /* Search for the vlan id in the VLVF entries. Save off the first empty 3967 * slot found along the way. 3968 * 3969 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1 3970 */ 3971 for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) { 3972 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); 3973 if (bits == vlan) 3974 return regindex; 3975 if (!first_empty_slot && !bits) 3976 first_empty_slot = regindex; 3977 } 3978 3979 /* If we are here then we didn't find the VLAN. Return first empty 3980 * slot we found during our search, else error. 3981 */ 3982 if (!first_empty_slot) 3983 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n"); 3984 3985 return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE; 3986 } 3987 3988 /** 3989 * ixgbe_set_vfta_generic - Set VLAN filter table 3990 * @hw: pointer to hardware structure 3991 * @vlan: VLAN id to write to VLAN filter 3992 * @vind: VMDq output index that maps queue to VLAN id in VLVFB 3993 * @vlan_on: boolean flag to turn on/off VLAN 3994 * @vlvf_bypass: boolean flag indicating updating default pool is okay 3995 * 3996 * Turn on/off specified VLAN in the VLAN filter table. 3997 **/ 3998 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, 3999 bool vlan_on, bool vlvf_bypass) 4000 { 4001 u32 regidx, vfta_delta, vfta; 4002 s32 ret_val; 4003 4004 DEBUGFUNC("ixgbe_set_vfta_generic"); 4005 4006 if (vlan > 4095 || vind > 63) 4007 return IXGBE_ERR_PARAM; 4008 4009 /* 4010 * this is a 2 part operation - first the VFTA, then the 4011 * VLVF and VLVFB if VT Mode is set 4012 * We don't write the VFTA until we know the VLVF part succeeded. 
4013 */ 4014 4015 /* Part 1 4016 * The VFTA is a bitstring made up of 128 32-bit registers 4017 * that enable the particular VLAN id, much like the MTA: 4018 * bits[11-5]: which register 4019 * bits[4-0]: which bit in the register 4020 */ 4021 regidx = vlan / 32; 4022 vfta_delta = (u32)1 << (vlan % 32); 4023 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx)); 4024 4025 /* 4026 * vfta_delta represents the difference between the current value 4027 * of vfta and the value we want in the register. Since the diff 4028 * is an XOR mask we can just update the vfta using an XOR 4029 */ 4030 vfta_delta &= vlan_on ? ~vfta : vfta; 4031 vfta ^= vfta_delta; 4032 4033 /* Part 2 4034 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF 4035 */ 4036 ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta, 4037 vfta, vlvf_bypass); 4038 if (ret_val != IXGBE_SUCCESS) { 4039 if (vlvf_bypass) 4040 goto vfta_update; 4041 return ret_val; 4042 } 4043 4044 vfta_update: 4045 /* Update VFTA now that we are ready for traffic */ 4046 if (vfta_delta) 4047 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); 4048 4049 return IXGBE_SUCCESS; 4050 } 4051 4052 /** 4053 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter 4054 * @hw: pointer to hardware structure 4055 * @vlan: VLAN id to write to VLAN filter 4056 * @vind: VMDq output index that maps queue to VLAN id in VLVFB 4057 * @vlan_on: boolean flag to turn on/off VLAN in VLVF 4058 * @vfta_delta: pointer to the difference between the current value of VFTA 4059 * and the desired value 4060 * @vfta: the desired value of the VFTA 4061 * @vlvf_bypass: boolean flag indicating updating default pool is okay 4062 * 4063 * Turn on/off specified bit in VLVF table. 
4064 **/ 4065 s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, 4066 bool vlan_on, u32 *vfta_delta, u32 vfta, 4067 bool vlvf_bypass) 4068 { 4069 u32 bits; 4070 s32 vlvf_index; 4071 4072 DEBUGFUNC("ixgbe_set_vlvf_generic"); 4073 4074 if (vlan > 4095 || vind > 63) 4075 return IXGBE_ERR_PARAM; 4076 4077 /* If VT Mode is set 4078 * Either vlan_on 4079 * make sure the vlan is in VLVF 4080 * set the vind bit in the matching VLVFB 4081 * Or !vlan_on 4082 * clear the pool bit and possibly the vind 4083 */ 4084 if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE)) 4085 return IXGBE_SUCCESS; 4086 4087 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass); 4088 if (vlvf_index < 0) 4089 return vlvf_index; 4090 4091 bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32)); 4092 4093 /* set the pool bit */ 4094 bits |= 1 << (vind % 32); 4095 if (vlan_on) 4096 goto vlvf_update; 4097 4098 /* clear the pool bit */ 4099 bits ^= 1 << (vind % 32); 4100 4101 if (!bits && 4102 !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) { 4103 /* Clear VFTA first, then disable VLVF. Otherwise 4104 * we run the risk of stray packets leaking into 4105 * the PF via the default pool 4106 */ 4107 if (*vfta_delta) 4108 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta); 4109 4110 /* disable VLVF and clear remaining bit from pool */ 4111 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); 4112 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0); 4113 4114 return IXGBE_SUCCESS; 4115 } 4116 4117 /* If there are still bits set in the VLVFB registers 4118 * for the VLAN ID indicated we need to see if the 4119 * caller is requesting that we clear the VFTA entry bit. 4120 * If the caller has requested that we clear the VFTA 4121 * entry bit but there are still pools/VFs using this VLAN 4122 * ID entry then ignore the request. 
We're not worried 4123 * about the case where we're turning the VFTA VLAN ID 4124 * entry bit on, only when requested to turn it off as 4125 * there may be multiple pools and/or VFs using the 4126 * VLAN ID entry. In that case we cannot clear the 4127 * VFTA bit until all pools/VFs using that VLAN ID have also 4128 * been cleared. This will be indicated by "bits" being 4129 * zero. 4130 */ 4131 *vfta_delta = 0; 4132 4133 vlvf_update: 4134 /* record pool change and enable VLAN ID if not already enabled */ 4135 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits); 4136 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan); 4137 4138 return IXGBE_SUCCESS; 4139 } 4140 4141 /** 4142 * ixgbe_clear_vfta_generic - Clear VLAN filter table 4143 * @hw: pointer to hardware structure 4144 * 4145 * Clears the VLAN filter table, and the VMDq index associated with the filter 4146 **/ 4147 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) 4148 { 4149 u32 offset; 4150 4151 DEBUGFUNC("ixgbe_clear_vfta_generic"); 4152 4153 for (offset = 0; offset < hw->mac.vft_size; offset++) 4154 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); 4155 4156 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { 4157 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); 4158 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0); 4159 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0); 4160 } 4161 4162 return IXGBE_SUCCESS; 4163 } 4164 4165 /** 4166 * ixgbe_toggle_txdctl_generic - Toggle VF's queues 4167 * @hw: pointer to hardware structure 4168 * @vf_number: VF index 4169 * 4170 * Enable and disable each queue in VF. 
4171 */ 4172 s32 ixgbe_toggle_txdctl_generic(struct ixgbe_hw *hw, u32 vf_number) 4173 { 4174 u8 queue_count, i; 4175 u32 offset, reg; 4176 4177 if (vf_number > 63) 4178 return IXGBE_ERR_PARAM; 4179 4180 /* 4181 * Determine number of queues by checking 4182 * number of virtual functions 4183 */ 4184 reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); 4185 switch (reg & IXGBE_GCR_EXT_VT_MODE_MASK) { 4186 case IXGBE_GCR_EXT_VT_MODE_64: 4187 queue_count = 2; 4188 break; 4189 case IXGBE_GCR_EXT_VT_MODE_32: 4190 queue_count = 4; 4191 break; 4192 case IXGBE_GCR_EXT_VT_MODE_16: 4193 queue_count = 8; 4194 break; 4195 default: 4196 return IXGBE_ERR_CONFIG; 4197 } 4198 4199 /* Toggle queues */ 4200 for (i = 0; i < queue_count; ++i) { 4201 /* Calculate offset of current queue */ 4202 offset = queue_count * vf_number + i; 4203 4204 /* Enable queue */ 4205 reg = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(offset)); 4206 reg |= IXGBE_TXDCTL_ENABLE; 4207 IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(offset), reg); 4208 IXGBE_WRITE_FLUSH(hw); 4209 4210 /* Disable queue */ 4211 reg = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(offset)); 4212 reg &= ~IXGBE_TXDCTL_ENABLE; 4213 IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(offset), reg); 4214 IXGBE_WRITE_FLUSH(hw); 4215 } 4216 4217 return IXGBE_SUCCESS; 4218 } 4219 4220 /** 4221 * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix 4222 * @hw: pointer to hardware structure 4223 * 4224 * Contains the logic to identify if we need to verify link for the 4225 * crosstalk fix 4226 **/ 4227 static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw) 4228 { 4229 4230 /* Does FW say we need the fix */ 4231 if (!hw->need_crosstalk_fix) 4232 return FALSE; 4233 4234 /* Only consider SFP+ PHYs i.e. 
/**
 * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
 * @hw: pointer to hardware structure
 *
 * Contains the logic to identify if we need to verify link for the
 * crosstalk fix
 **/
static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
{

	/* Does FW say we need the fix */
	if (!hw->need_crosstalk_fix)
		return FALSE;

	/* Only consider SFP+ PHYs i.e. media type fiber */
	switch (hw->mac.ops.get_media_type(hw)) {
	case ixgbe_media_type_fiber:
	case ixgbe_media_type_fiber_qsfp:
		break;
	default:
		return FALSE;
	}

	return TRUE;
}

/**
 * ixgbe_check_mac_link_generic - Determine link and speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: TRUE when link is up
 * @link_up_wait_to_complete: bool used to wait for link up or not
 *
 * Reads the links register to determine if link is up and the current speed
 **/
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
				 bool *link_up, bool link_up_wait_to_complete)
{
	u32 links_reg, links_orig;
	u32 i;

	DEBUGFUNC("ixgbe_check_mac_link_generic");

	/* If Crosstalk fix enabled do the sanity check of making sure
	 * the SFP+ cage is full.
	 */
	if (ixgbe_need_crosstalk_fix(hw)) {
		/* Empty cage means no module: report link down */
		if ((hw->mac.type != ixgbe_mac_82598EB) &&
		    !ixgbe_sfp_cage_full(hw)) {
			*link_up = FALSE;
			*speed = IXGBE_LINK_SPEED_UNKNOWN;
			return IXGBE_SUCCESS;
		}
	}

	/* clear the old state */
	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);

	/* Read LINKS a second time; mismatches are logged below */
	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);

	if (links_orig != links_reg) {
		DEBUGOUT2("LINKS changed from %08X to %08X\n",
			  links_orig, links_reg);
	}

	if (link_up_wait_to_complete) {
		/* Poll in 100 ms steps, up to max_link_up_time iterations */
		for (i = 0; i < hw->mac.max_link_up_time; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}

	/* Decode the speed field; on X550 and later the NON_STD bit
	 * remaps 10G->2.5G and 100M->5G reporting */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		if (hw->mac.type >= ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (hw->mac.type >= ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_10_X550EM_A:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		/* Only the X550EM_A 1G copper parts report true 10 Mb/s */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
			*speed = IXGBE_LINK_SPEED_10_FULL;
		break;
	default:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
	}

	return IXGBE_SUCCESS;
}
(hw->mac.type >= ixgbe_mac_X550) { 4307 if (links_reg & IXGBE_LINKS_SPEED_NON_STD) 4308 *speed = IXGBE_LINK_SPEED_2_5GB_FULL; 4309 } 4310 break; 4311 case IXGBE_LINKS_SPEED_1G_82599: 4312 *speed = IXGBE_LINK_SPEED_1GB_FULL; 4313 break; 4314 case IXGBE_LINKS_SPEED_100_82599: 4315 *speed = IXGBE_LINK_SPEED_100_FULL; 4316 if (hw->mac.type >= ixgbe_mac_X550) { 4317 if (links_reg & IXGBE_LINKS_SPEED_NON_STD) 4318 *speed = IXGBE_LINK_SPEED_5GB_FULL; 4319 } 4320 break; 4321 case IXGBE_LINKS_SPEED_10_X550EM_A: 4322 *speed = IXGBE_LINK_SPEED_UNKNOWN; 4323 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || 4324 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) 4325 *speed = IXGBE_LINK_SPEED_10_FULL; 4326 break; 4327 default: 4328 *speed = IXGBE_LINK_SPEED_UNKNOWN; 4329 } 4330 4331 return IXGBE_SUCCESS; 4332 } 4333 4334 /** 4335 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from 4336 * the EEPROM 4337 * @hw: pointer to hardware structure 4338 * @wwnn_prefix: the alternative WWNN prefix 4339 * @wwpn_prefix: the alternative WWPN prefix 4340 * 4341 * This function will read the EEPROM from the alternative SAN MAC address 4342 * block to check the support for the alternative WWNN/WWPN prefix support. 
4343 **/ 4344 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, 4345 u16 *wwpn_prefix) 4346 { 4347 u16 offset, caps; 4348 u16 alt_san_mac_blk_offset; 4349 4350 DEBUGFUNC("ixgbe_get_wwn_prefix_generic"); 4351 4352 /* clear output first */ 4353 *wwnn_prefix = 0xFFFF; 4354 *wwpn_prefix = 0xFFFF; 4355 4356 /* check if alternative SAN MAC is supported */ 4357 offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR; 4358 if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset)) 4359 goto wwn_prefix_err; 4360 4361 if ((alt_san_mac_blk_offset == 0) || 4362 (alt_san_mac_blk_offset == 0xFFFF)) 4363 goto wwn_prefix_out; 4364 4365 /* check capability in alternative san mac address block */ 4366 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; 4367 if (hw->eeprom.ops.read(hw, offset, &caps)) 4368 goto wwn_prefix_err; 4369 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) 4370 goto wwn_prefix_out; 4371 4372 /* get the corresponding prefix for WWNN/WWPN */ 4373 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; 4374 if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) { 4375 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, 4376 "eeprom read at offset %d failed", offset); 4377 } 4378 4379 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; 4380 if (hw->eeprom.ops.read(hw, offset, wwpn_prefix)) 4381 goto wwn_prefix_err; 4382 4383 wwn_prefix_out: 4384 return IXGBE_SUCCESS; 4385 4386 wwn_prefix_err: 4387 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, 4388 "eeprom read at offset %d failed", offset); 4389 return IXGBE_SUCCESS; 4390 } 4391 4392 /** 4393 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM 4394 * @hw: pointer to hardware structure 4395 * @bs: the fcoe boot status 4396 * 4397 * This function will read the FCOE boot status from the iSCSI FCOE block 4398 **/ 4399 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs) 4400 { 4401 u16 offset, caps, flags; 4402 s32 status; 4403 4404 
DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic"); 4405 4406 /* clear output first */ 4407 *bs = ixgbe_fcoe_bootstatus_unavailable; 4408 4409 /* check if FCOE IBA block is present */ 4410 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR; 4411 status = hw->eeprom.ops.read(hw, offset, &caps); 4412 if (status != IXGBE_SUCCESS) 4413 goto out; 4414 4415 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE)) 4416 goto out; 4417 4418 /* check if iSCSI FCOE block is populated */ 4419 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset); 4420 if (status != IXGBE_SUCCESS) 4421 goto out; 4422 4423 if ((offset == 0) || (offset == 0xFFFF)) 4424 goto out; 4425 4426 /* read fcoe flags in iSCSI FCOE block */ 4427 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET; 4428 status = hw->eeprom.ops.read(hw, offset, &flags); 4429 if (status != IXGBE_SUCCESS) 4430 goto out; 4431 4432 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE) 4433 *bs = ixgbe_fcoe_bootstatus_enabled; 4434 else 4435 *bs = ixgbe_fcoe_bootstatus_disabled; 4436 4437 out: 4438 return status; 4439 } 4440 4441 /** 4442 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing 4443 * @hw: pointer to hardware structure 4444 * @enable: enable or disable switch for MAC anti-spoofing 4445 * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing 4446 * 4447 **/ 4448 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) 4449 { 4450 int vf_target_reg = vf >> 3; 4451 int vf_target_shift = vf % 8; 4452 u32 pfvfspoof; 4453 4454 if (hw->mac.type == ixgbe_mac_82598EB) 4455 return; 4456 4457 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); 4458 if (enable) 4459 pfvfspoof |= (1 << vf_target_shift); 4460 else 4461 pfvfspoof &= ~(1 << vf_target_shift); 4462 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); 4463 } 4464 4465 /** 4466 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing 4467 * @hw: pointer to hardware structure 4468 * @enable: enable or disable switch for VLAN 
anti-spoofing 4469 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing 4470 * 4471 **/ 4472 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) 4473 { 4474 int vf_target_reg = vf >> 3; 4475 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT; 4476 u32 pfvfspoof; 4477 4478 if (hw->mac.type == ixgbe_mac_82598EB) 4479 return; 4480 4481 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); 4482 if (enable) 4483 pfvfspoof |= (1 << vf_target_shift); 4484 else 4485 pfvfspoof &= ~(1 << vf_target_shift); 4486 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); 4487 } 4488 4489 /** 4490 * ixgbe_get_device_caps_generic - Get additional device capabilities 4491 * @hw: pointer to hardware structure 4492 * @device_caps: the EEPROM word with the extra device capabilities 4493 * 4494 * This function will read the EEPROM location for the device capabilities, 4495 * and return the word through device_caps. 4496 **/ 4497 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) 4498 { 4499 DEBUGFUNC("ixgbe_get_device_caps_generic"); 4500 4501 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); 4502 4503 return IXGBE_SUCCESS; 4504 } 4505 4506 /** 4507 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering 4508 * @hw: pointer to hardware structure 4509 * 4510 **/ 4511 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw) 4512 { 4513 u32 regval; 4514 u32 i; 4515 4516 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2"); 4517 4518 /* Enable relaxed ordering */ 4519 for (i = 0; i < hw->mac.max_tx_queues; i++) { 4520 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); 4521 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN; 4522 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); 4523 } 4524 4525 for (i = 0; i < hw->mac.max_rx_queues; i++) { 4526 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); 4527 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN | 4528 IXGBE_DCA_RXCTRL_HEAD_WRO_EN; 4529 
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); 4530 } 4531 4532 } 4533 4534 /** 4535 * ixgbe_calculate_checksum - Calculate checksum for buffer 4536 * @buffer: pointer to EEPROM 4537 * @length: size of EEPROM to calculate a checksum for 4538 * Calculates the checksum for some buffer on a specified length. The 4539 * checksum calculated is returned. 4540 **/ 4541 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) 4542 { 4543 u32 i; 4544 u8 sum = 0; 4545 4546 DEBUGFUNC("ixgbe_calculate_checksum"); 4547 4548 if (!buffer) 4549 return 0; 4550 4551 for (i = 0; i < length; i++) 4552 sum += buffer[i]; 4553 4554 return (u8) (0 - sum); 4555 } 4556 4557 /** 4558 * ixgbe_hic_unlocked - Issue command to manageability block unlocked 4559 * @hw: pointer to the HW structure 4560 * @buffer: command to write and where the return status will be placed 4561 * @length: length of buffer, must be multiple of 4 bytes 4562 * @timeout: time in ms to wait for command completion 4563 * 4564 * Communicates with the manageability block. On success return IXGBE_SUCCESS 4565 * else returns semaphore error when encountering an error acquiring 4566 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 4567 * 4568 * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held 4569 * by the caller. 4570 **/ 4571 s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, 4572 u32 timeout) 4573 { 4574 u32 hicr, i, fwsts; 4575 u16 dword_len; 4576 4577 DEBUGFUNC("ixgbe_hic_unlocked"); 4578 4579 if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { 4580 DEBUGOUT1("Buffer length failure buffersize=%d.\n", length); 4581 return IXGBE_ERR_HOST_INTERFACE_COMMAND; 4582 } 4583 4584 /* Set bit 9 of FWSTS clearing FW reset indication */ 4585 fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); 4586 IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI); 4587 4588 /* Check that the host interface is enabled. 
/**
 * ixgbe_hic_unlocked - Issue command to manageability block unlocked
 * @hw: pointer to the HW structure
 * @buffer: command to write and where the return status will be placed
 * @length: length of buffer, must be multiple of 4 bytes
 * @timeout: time in ms to wait for command completion
 *
 * Communicates with the manageability block. On success return IXGBE_SUCCESS
 * else returns semaphore error when encountering an error acquiring
 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
 *
 * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
 * by the caller.
 **/
s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
		       u32 timeout)
{
	u32 hicr, i, fwsts;
	u16 dword_len;

	DEBUGFUNC("ixgbe_hic_unlocked");

	/* Reject empty or oversized command buffers up front */
	if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Set bit 9 of FWSTS clearing FW reset indication */
	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);

	/* Check that the host interface is enabled. */
	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
	if (!(hicr & IXGBE_HICR_EN)) {
		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Calculate length in DWORDs. We must be DWORD aligned */
	if (length % sizeof(u32)) {
		DEBUGOUT("Buffer length failure, not aligned to dword");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	dword_len = length >> 2;

	/* The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
				      i, IXGBE_CPU_TO_LE32(buffer[i]));

	/* Setting this bit tells the ARC that a new command is pending. */
	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);

	/* Poll (1 ms steps) for the ARC to clear the command bit */
	for (i = 0; i < timeout; i++) {
		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
		if (!(hicr & IXGBE_HICR_C))
			break;
		msec_delay(1);
	}

	/* For each command except "Apply Update" perform
	 * status checks in the HICR registry.
	 */
	if ((buffer[0] & IXGBE_HOST_INTERFACE_MASK_CMD) ==
	    IXGBE_HOST_INTERFACE_APPLY_UPDATE_CMD)
		return IXGBE_SUCCESS;

	/* Check command completion: fail on timeout (only when a timeout
	 * was requested) or when the status-valid bit never appeared */
	if ((timeout && i == timeout) ||
	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
			      "Command has failed with no status valid.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	return IXGBE_SUCCESS;
}
/**
 * ixgbe_host_interface_command - Issue command to manageability block
 * @hw: pointer to the HW structure
 * @buffer: contains the command to write and where the return status will
 *  be placed
 * @length: length of buffer, must be multiple of 4 bytes
 * @timeout: time in ms to wait for command completion
 * @return_data: read and return data from the buffer (TRUE) or not (FALSE)
 *  Needed because FW structures are big endian and decoding of
 *  these fields can be 8 bit or 16 bit based on command. Decoding
 *  is not easily understood without making a table of commands.
 *  So we will leave this up to the caller to read back the data
 *  in these cases.
 *
 * Communicates with the manageability block. On success return IXGBE_SUCCESS
 * else returns semaphore error when encountering an error acquiring
 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
 **/
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
				 u32 length, u32 timeout, bool return_data)
{
	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
	struct ixgbe_hic_hdr *resp = (struct ixgbe_hic_hdr *)buffer;
	u16 buf_len;
	s32 status;
	u32 bi;
	u32 dword_len;

	DEBUGFUNC("ixgbe_host_interface_command");

	if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Take management host interface semaphore */
	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
	if (status)
		return status;

	/* Run the command itself under the semaphore */
	status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
	if (status)
		goto rel_out;

	if (!return_data)
		goto rel_out;

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS(&buffer[bi]);
	}

	/*
	 * If there is any thing in data position pull it in
	 * Read Flash command requires reading buffer length from
	 * two byes instead of one byte
	 */
	if (resp->cmd == IXGBE_HOST_INTERFACE_FLASH_READ_CMD ||
	    resp->cmd == IXGBE_HOST_INTERFACE_SHADOW_RAM_READ_CMD) {
		/* Pull in two extra dwords holding the extended header */
		for (; bi < dword_len + 2; bi++) {
			buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
							  bi);
			IXGBE_LE32_TO_CPUS(&buffer[bi]);
		}
		/* 12-bit length: bits 11:8 are carried in ret_status */
		buf_len = (((u16)(resp->cmd_or_resp.ret_status) << 3)
			   & 0xF00) | resp->buf_len;
		hdr_size += (2 << 2);
	} else {
		buf_len = resp->buf_len;
	}
	if (!buf_len)
		goto rel_out;

	/* Caller's buffer must fit header plus payload */
	if (length < buf_len + hdr_size) {
		DEBUGOUT("Buffer not large enough for reply message.\n");
		status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto rel_out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off) */
	for (; bi <= dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS(&buffer[bi]);
	}

rel_out:
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);

	return status;
}
4750 **/ 4751 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 minr, 4752 u8 build, u8 sub, u16 len, 4753 const char *driver_ver) 4754 { 4755 struct ixgbe_hic_drv_info fw_cmd; 4756 int i; 4757 s32 ret_val = IXGBE_SUCCESS; 4758 4759 DEBUGFUNC("ixgbe_set_fw_drv_ver_generic"); 4760 UNREFERENCED_2PARAMETER(len, driver_ver); 4761 4762 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; 4763 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; 4764 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; 4765 fw_cmd.port_num = (u8)hw->bus.func; 4766 fw_cmd.ver_maj = maj; 4767 fw_cmd.ver_min = minr; 4768 fw_cmd.ver_build = build; 4769 fw_cmd.ver_sub = sub; 4770 fw_cmd.hdr.checksum = 0; 4771 fw_cmd.pad = 0; 4772 fw_cmd.pad2 = 0; 4773 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, 4774 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); 4775 4776 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { 4777 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, 4778 sizeof(fw_cmd), 4779 IXGBE_HI_COMMAND_TIMEOUT, 4780 TRUE); 4781 if (ret_val != IXGBE_SUCCESS) 4782 continue; 4783 4784 if (fw_cmd.hdr.cmd_or_resp.ret_status == 4785 FW_CEM_RESP_STATUS_SUCCESS) 4786 ret_val = IXGBE_SUCCESS; 4787 else 4788 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; 4789 4790 break; 4791 } 4792 4793 return ret_val; 4794 } 4795 4796 /** 4797 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer 4798 * @hw: pointer to hardware structure 4799 * @num_pb: number of packet buffers to allocate 4800 * @headroom: reserve n KB of headroom 4801 * @strategy: packet buffer allocation strategy 4802 **/ 4803 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, 4804 int strategy) 4805 { 4806 u32 pbsize = hw->mac.rx_pb_size; 4807 int i = 0; 4808 u32 rxpktsize, txpktsize, txpbthresh; 4809 4810 /* Reserve headroom */ 4811 pbsize -= headroom; 4812 4813 if (!num_pb) 4814 num_pb = 1; 4815 4816 /* Divide remaining packet buffer space amongst the number of packet 4817 * buffers requested using supplied 
/**
 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
 * @hw: pointer to hardware structure
 * @num_pb: number of packet buffers to allocate
 * @headroom: reserve n KB of headroom
 * @strategy: packet buffer allocation strategy
 **/
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
			     int strategy)
{
	u32 pbsize = hw->mac.rx_pb_size;
	int i = 0;
	u32 rxpktsize, txpktsize, txpbthresh;

	/* Reserve headroom */
	pbsize -= headroom;

	/* Guard against division by zero below */
	if (!num_pb)
		num_pb = 1;

	/* Divide remaining packet buffer space amongst the number of packet
	 * buffers requested using supplied strategy.
	 */
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
		/* ixgbe_dcb_pba_80_48 strategy weight first half of packet
		 * buffer with 5/8 of the packet buffer space.
		 */
		rxpktsize = (pbsize * 5) / (num_pb * 4);
		pbsize -= rxpktsize * (num_pb / 2);
		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
		for (; i < (num_pb / 2); i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* fall through - configure remaining packet buffers */
		/* i now points at the first unconfigured buffer and
		 * pbsize holds the space still unassigned */
	case PBA_STRATEGY_EQUAL:
		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
		for (; i < num_pb; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	default:
		break;
	}

	/* Only support an equally distributed Tx packet buffer strategy. */
	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
	for (i = 0; i < num_pb; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
	}

	/* Clear unused TCs, if any, to zero buffer size*/
	for (; i < IXGBE_MAX_PB; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
	}
}
/**
 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
 * @hw: pointer to the hardware structure
 *
 * The 82599 and x540 MACs can experience issues if TX work is still pending
 * when a reset occurs.  This function prevents this by flushing the PCIe
 * buffers on the system.
 **/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
	u32 gcr_ext, hlreg0, i, poll;
	u16 value;

	/*
	 * If double reset is not requested then all transactions should
	 * already be clear and as such there is no work to do
	 */
	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
		return;

	/*
	 * Set loopback enable to prevent any transmits from being sent
	 * should the link come up. This assumes that the RXCTRL.RXEN bit
	 * has already been cleared.
	 */
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);

	/* Wait for a last completion before clearing buffers */
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(3);

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
		/* Surprise removal: config reads return garbage; bail */
		if (IXGBE_REMOVED(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

out:
	/* initiate cleaning flow for buffers in the PCIe transaction layer */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);

	/* Flush all writes and allow 20usec for all transactions to clear */
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(20);

	/* restore previous register values */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
}
/**
 * ixgbe_bypass_rw_generic - Bit bang data into by_pass FW
 * @hw: pointer to hardware structure
 * @cmd: Command we send to the FW
 * @status: The reply from the FW
 *
 * Bit-bangs the cmd to the by_pass FW status points to what is returned.
 * Uses three SDP pins as a software SPI-style interface: SCK (clock),
 * SDI (data out to FW) and SDO (data in from FW).
 **/
s32 ixgbe_bypass_rw_generic(struct ixgbe_hw *hw, u32 cmd, u32 *status)
{
	int i;
	u32 sck, sdi, sdo, dir_sck, dir_sdi, dir_sdo;
	u32 esdp;

	if (!status)
		return IXGBE_ERR_PARAM;

	*status = 0;

	/* SDP vary by MAC type */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		sck = IXGBE_ESDP_SDP7;
		sdi = IXGBE_ESDP_SDP0;
		sdo = IXGBE_ESDP_SDP6;
		dir_sck = IXGBE_ESDP_SDP7_DIR;
		dir_sdi = IXGBE_ESDP_SDP0_DIR;
		dir_sdo = IXGBE_ESDP_SDP6_DIR;
		break;
	case ixgbe_mac_X540:
		sck = IXGBE_ESDP_SDP2;
		sdi = IXGBE_ESDP_SDP0;
		sdo = IXGBE_ESDP_SDP1;
		dir_sck = IXGBE_ESDP_SDP2_DIR;
		dir_sdi = IXGBE_ESDP_SDP0_DIR;
		dir_sdo = IXGBE_ESDP_SDP1_DIR;
		break;
	default:
		return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
	}

	/* Set SDP pins direction */
	esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
	esdp |= dir_sck;	/* SCK as output */
	esdp |= dir_sdi;	/* SDI as output */
	esdp &= ~dir_sdo;	/* SDO as input */
	esdp |= sck;
	esdp |= sdi;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(IXGBE_BYPASS_BB_WAIT);

	/* Generate start condition (SDI low while SCK high, then SCK low) */
	esdp &= ~sdi;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(IXGBE_BYPASS_BB_WAIT);

	esdp &= ~sck;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(IXGBE_BYPASS_BB_WAIT);

	/* Clock out the new control word and clock in the status */
	for (i = 0; i < 32; i++) {
		/* Present the next command bit, MSB first */
		if ((cmd >> (31 - i)) & 0x01) {
			esdp |= sdi;
			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		} else {
			esdp &= ~sdi;
			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		}
		IXGBE_WRITE_FLUSH(hw);
		msec_delay(IXGBE_BYPASS_BB_WAIT);

		/* Pulse the clock high ... */
		esdp |= sck;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);
		msec_delay(IXGBE_BYPASS_BB_WAIT);

		/* ... and back low */
		esdp &= ~sck;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);
		msec_delay(IXGBE_BYPASS_BB_WAIT);

		/* Sample the FW's reply bit and shift it into *status */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & sdo)
			*status = (*status << 1) | 0x01;
		else
			*status = (*status << 1) | 0x00;
		msec_delay(IXGBE_BYPASS_BB_WAIT);
	}

	/* stop condition */
	esdp |= sck;
	esdp &= ~sdi;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(IXGBE_BYPASS_BB_WAIT);

	esdp |= sdi;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);

	/* set the page bits to match the cmd that the status it belongs to */
	*status = (*status & 0x3fffffff) | (cmd & 0xc0000000);

	return IXGBE_SUCCESS;
}
5038 **/ 5039 bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg) 5040 { 5041 u32 mask; 5042 5043 /* Page must match for all control pages */ 5044 if ((in_reg & BYPASS_PAGE_M) != (out_reg & BYPASS_PAGE_M)) 5045 return FALSE; 5046 5047 switch (in_reg & BYPASS_PAGE_M) { 5048 case BYPASS_PAGE_CTL0: 5049 /* All the following can't change since the last write 5050 * - All the event actions 5051 * - The timeout value 5052 */ 5053 mask = BYPASS_AUX_ON_M | BYPASS_MAIN_ON_M | 5054 BYPASS_MAIN_OFF_M | BYPASS_AUX_OFF_M | 5055 BYPASS_WDTIMEOUT_M | 5056 BYPASS_WDT_VALUE_M; 5057 if ((out_reg & mask) != (in_reg & mask)) 5058 return FALSE; 5059 5060 /* 0x0 is never a valid value for bypass status */ 5061 if (!(out_reg & BYPASS_STATUS_OFF_M)) 5062 return FALSE; 5063 break; 5064 case BYPASS_PAGE_CTL1: 5065 /* All the following can't change since the last write 5066 * - time valid bit 5067 * - time we last sent 5068 */ 5069 mask = BYPASS_CTL1_VALID_M | BYPASS_CTL1_TIME_M; 5070 if ((out_reg & mask) != (in_reg & mask)) 5071 return FALSE; 5072 break; 5073 case BYPASS_PAGE_CTL2: 5074 /* All we can check in this page is control number 5075 * which is already done above. 5076 */ 5077 break; 5078 } 5079 5080 /* We are as sure as we can be return TRUE */ 5081 return TRUE; 5082 } 5083 5084 /** 5085 * ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Register. 5086 * @hw: pointer to hardware structure 5087 * @ctrl: The control word we are setting. 5088 * @event: The event we are setting in the FW. This also happens to 5089 * be the mask for the event we are setting (handy) 5090 * @action: The action we set the event to in the FW. 
This is in a 5091 * bit field that happens to be what we want to put in 5092 * the event spot (also handy) 5093 **/ 5094 s32 ixgbe_bypass_set_generic(struct ixgbe_hw *hw, u32 ctrl, u32 event, 5095 u32 action) 5096 { 5097 u32 by_ctl = 0; 5098 u32 cmd, verify; 5099 u32 count = 0; 5100 5101 /* Get current values */ 5102 cmd = ctrl; /* just reading only need control number */ 5103 if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl)) 5104 return IXGBE_ERR_INVALID_ARGUMENT; 5105 5106 /* Set to new action */ 5107 cmd = (by_ctl & ~event) | BYPASS_WE | action; 5108 if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl)) 5109 return IXGBE_ERR_INVALID_ARGUMENT; 5110 5111 /* Page 0 force a FW eeprom write which is slow so verify */ 5112 if ((cmd & BYPASS_PAGE_M) == BYPASS_PAGE_CTL0) { 5113 verify = BYPASS_PAGE_CTL0; 5114 do { 5115 if (count++ > 5) 5116 return IXGBE_BYPASS_FW_WRITE_FAILURE; 5117 5118 if (ixgbe_bypass_rw_generic(hw, verify, &by_ctl)) 5119 return IXGBE_ERR_INVALID_ARGUMENT; 5120 } while (!ixgbe_bypass_valid_rd_generic(cmd, by_ctl)); 5121 } else { 5122 /* We have give the FW time for the write to stick */ 5123 msec_delay(100); 5124 } 5125 5126 return IXGBE_SUCCESS; 5127 } 5128 5129 /** 5130 * ixgbe_bypass_rd_eep_generic - Read the bypass FW eeprom address. 5131 * 5132 * @hw: pointer to hardware structure 5133 * @addr: The bypass eeprom address to read. 5134 * @value: The 8b of data at the address above. 
5135 **/ 5136 s32 ixgbe_bypass_rd_eep_generic(struct ixgbe_hw *hw, u32 addr, u8 *value) 5137 { 5138 u32 cmd; 5139 u32 status; 5140 5141 5142 /* send the request */ 5143 cmd = BYPASS_PAGE_CTL2 | BYPASS_WE; 5144 cmd |= (addr << BYPASS_CTL2_OFFSET_SHIFT) & BYPASS_CTL2_OFFSET_M; 5145 if (ixgbe_bypass_rw_generic(hw, cmd, &status)) 5146 return IXGBE_ERR_INVALID_ARGUMENT; 5147 5148 /* We have give the FW time for the write to stick */ 5149 msec_delay(100); 5150 5151 /* now read the results */ 5152 cmd &= ~BYPASS_WE; 5153 if (ixgbe_bypass_rw_generic(hw, cmd, &status)) 5154 return IXGBE_ERR_INVALID_ARGUMENT; 5155 5156 *value = status & BYPASS_CTL2_DATA_M; 5157 5158 return IXGBE_SUCCESS; 5159 } 5160 5161 /** 5162 * ixgbe_get_orom_version - Return option ROM from EEPROM 5163 * 5164 * @hw: pointer to hardware structure 5165 * @nvm_ver: pointer to output structure 5166 * 5167 * if valid option ROM version, nvm_ver->or_valid set to TRUE 5168 * else nvm_ver->or_valid is FALSE. 5169 **/ 5170 void ixgbe_get_orom_version(struct ixgbe_hw *hw, 5171 struct ixgbe_nvm_version *nvm_ver) 5172 { 5173 u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl; 5174 5175 nvm_ver->or_valid = FALSE; 5176 /* Option Rom may or may not be present. 
Start with pointer */ 5177 hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset); 5178 5179 /* make sure offset is valid */ 5180 if ((offset == 0x0) || (offset == NVM_INVALID_PTR)) 5181 return; 5182 5183 hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh); 5184 hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl); 5185 5186 /* option rom exists and is valid */ 5187 if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 || 5188 eeprom_cfg_blkl == NVM_VER_INVALID || 5189 eeprom_cfg_blkh == NVM_VER_INVALID) 5190 return; 5191 5192 nvm_ver->or_valid = TRUE; 5193 nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT; 5194 nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) | 5195 (eeprom_cfg_blkh >> NVM_OROM_SHIFT); 5196 nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK; 5197 } 5198 5199 /** 5200 * ixgbe_get_oem_prod_version - Return OEM Product version 5201 * 5202 * @hw: pointer to hardware structure 5203 * @nvm_ver: pointer to output structure 5204 * 5205 * if valid OEM product version, nvm_ver->oem_valid set to TRUE 5206 * else nvm_ver->oem_valid is FALSE. 
 **/
void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
				struct ixgbe_nvm_version *nvm_ver)
{
	u16 rel_num, prod_ver, mod_len, cap, offset;

	nvm_ver->oem_valid = FALSE;
	/* NOTE(review): read return values are ignored throughout; on an
	 * EEPROM read failure 'offset' (and later words) are used
	 * uninitialized.  Matches upstream Intel shared code — confirm
	 * there before changing locally.
	 */
	hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);

	/* Return if offset to OEM Product Version block is invalid */
	if (offset == 0x0 || offset == NVM_INVALID_PTR)
		return;

	/* Read product version block */
	hw->eeprom.ops.read(hw, offset, &mod_len);
	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);

	/* Return if OEM product version block is invalid
	 * (wrong module length, or capability bits set)
	 */
	if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
	    (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
		return;

	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);

	/* Return if version is invalid */
	if ((rel_num | prod_ver) == 0x0 ||
	    rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
		return;

	/* Split the product version word into major/minor fields */
	nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
	nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
	nvm_ver->oem_release = rel_num;
	nvm_ver->oem_valid = TRUE;
}

/**
 * ixgbe_get_etk_id - Return Etrack ID from EEPROM
 *
 * @hw: pointer to hardware structure
 * @nvm_ver: pointer to output structure
 *
 * word read errors will return 0xFFFF
 **/
void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver)
{
	u16 etk_id_l, etk_id_h;

	/* On a read error substitute the "invalid version" marker word */
	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
		etk_id_l = NVM_VER_INVALID;
	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
		etk_id_h = NVM_VER_INVALID;

	/* The word order for the version format is determined by high order
	 * word bit 15.
	 */
	if ((etk_id_h & NVM_ETK_VALID) == 0) {
		nvm_ver->etk_id = etk_id_h;
		nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
	} else {
		nvm_ver->etk_id = etk_id_l;
		nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
	}
}


/**
 * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
 * @hw: pointer to hardware structure
 * @map: pointer to u8 arr for returning map
 *
 * Read the rtrup2tc HW register and resolve its content into map
 **/
void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
{
	u32 reg, i;

	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
	/* Unpack one TC field per user priority from the packed register */
	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
		map[i] = IXGBE_RTRUP2TC_UP_MASK &
			(reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
	return;
}

/* Disable the Rx unit.  On parts other than 82598, the VT loopback enable
 * bit (PFDTXGSWC.LBEN) is cleared first and its previous state is recorded
 * in hw->mac.set_lben so ixgbe_enable_rx_generic() can restore it.
 */
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
{
	u32 pfdtxgswc;
	u32 rxctrl;

	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (rxctrl & IXGBE_RXCTRL_RXEN) {
		if (hw->mac.type != ixgbe_mac_82598EB) {
			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
			if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
				/* remember that we cleared LBEN */
				pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
				IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
				hw->mac.set_lben = TRUE;
			} else {
				hw->mac.set_lben = FALSE;
			}
		}
		rxctrl &= ~IXGBE_RXCTRL_RXEN;
		IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
	}
}

/* Re-enable the Rx unit, restoring the VT loopback enable bit if
 * ixgbe_disable_rx_generic() had cleared it (hw->mac.set_lben).
 */
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
{
	u32 pfdtxgswc;
	u32 rxctrl;

	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));

	if (hw->mac.type != ixgbe_mac_82598EB) {
		if (hw->mac.set_lben) {
			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
			pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
			hw->mac.set_lben = FALSE;
		}
	}
}

/**
 * ixgbe_mng_present - returns
TRUE when management capability is present 5333 * @hw: pointer to hardware structure 5334 */ 5335 bool ixgbe_mng_present(struct ixgbe_hw *hw) 5336 { 5337 u32 fwsm; 5338 5339 if (hw->mac.type < ixgbe_mac_82599EB) 5340 return FALSE; 5341 5342 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)); 5343 return !!(fwsm & IXGBE_FWSM_FW_MODE_PT); 5344 } 5345 5346 /** 5347 * ixgbe_mng_enabled - Is the manageability engine enabled? 5348 * @hw: pointer to hardware structure 5349 * 5350 * Returns TRUE if the manageability engine is enabled. 5351 **/ 5352 bool ixgbe_mng_enabled(struct ixgbe_hw *hw) 5353 { 5354 u32 fwsm, manc, factps; 5355 5356 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)); 5357 if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) 5358 return FALSE; 5359 5360 manc = IXGBE_READ_REG(hw, IXGBE_MANC); 5361 if (!(manc & IXGBE_MANC_RCV_TCO_EN)) 5362 return FALSE; 5363 5364 if (hw->mac.type <= ixgbe_mac_X540) { 5365 factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw)); 5366 if (factps & IXGBE_FACTPS_MNGCG) 5367 return FALSE; 5368 } 5369 5370 return TRUE; 5371 } 5372 5373 /** 5374 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed 5375 * @hw: pointer to hardware structure 5376 * @speed: new link speed 5377 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed 5378 * 5379 * Set the link speed in the MAC and/or PHY register and restarts link. 
 **/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					  ixgbe_link_speed speed,
					  bool autoneg_wait_to_complete)
{
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 status = IXGBE_SUCCESS;
	u32 speedcnt = 0;
	u32 i = 0;
	bool autoneg, link_up = FALSE;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
	if (status != IXGBE_SUCCESS)
		return status;

	speed &= link_speed;

	/* Try each speed one by one, highest priority first.  We do this in
	 * software because 10Gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber_fixed:
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_10GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects MAC link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_10GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 1000ms if KR is
		 * attempted.  82599 uses the same timing for 10g SFI.
		 * (10 polls x 100ms below = the full 1000ms budget.)
		 */
		for (i = 0; i < 10; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
						  &link_up, FALSE);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber_fixed:
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_1GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_1GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	if (speed == 0) {
		/* Disable the Tx laser for media none */
		ixgbe_disable_tx_laser(hw);

		goto out;
	}

	/* We didn't get link.  Configure back to the highest speed we tried,
	 * (if there was more than one).  We call ourselves back with just the
	 * single highest speed that the user requested.
	 * Note: recursion depth is bounded because the callback requests a
	 * single speed, so speedcnt <= 1 on the nested call.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
						       highest_link_speed,
						       autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}

/**
 * ixgbe_set_soft_rate_select_speed - Set module link speed
 * @hw: pointer to hardware structure
 * @speed: link speed to set
 *
 * Set module link speed via the soft rate select.
 */
void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed)
{
	s32 status;
	u8 rs, eeprom_data;

	switch (speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		/* one bit mask same as setting on */
		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
		break;
	default:
		DEBUGOUT("Invalid fixed module speed\n");
		return;
	}

	/* Set RS0: read-modify-write the optional status/control byte
	 * over I2C, touching only the rate-select bits.
	 */
	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
					   IXGBE_I2C_EEPROM_DEV_ADDR2,
					   &eeprom_data);
	if (status) {
		DEBUGOUT("Failed to read Rx Rate Select RS0\n");
		goto out;
	}

	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;

	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
					    IXGBE_I2C_EEPROM_DEV_ADDR2,
					    eeprom_data);
	if (status) {
		DEBUGOUT("Failed to write Rx Rate Select RS0\n");
		goto out;
	}

	/* Set RS1: same read-modify-write on the extended status/control
	 * byte.
	 */
	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
					   IXGBE_I2C_EEPROM_DEV_ADDR2,
					   &eeprom_data);
	if (status) {
		DEBUGOUT("Failed to read Rx Rate Select RS1\n");
		goto out;
	}

	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;

	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
					    IXGBE_I2C_EEPROM_DEV_ADDR2,
					    eeprom_data);
	if (status) {
		DEBUGOUT("Failed to write Rx Rate Select RS1\n");
		goto out;
	}
out:
	return;
}