/* $NetBSD: ixgbe_common.c,v 1.30 2020/08/31 14:12:50 msaitoh Exp $ */

/******************************************************************************
  SPDX-License-Identifier: BSD-3-Clause

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_common.c 331224 2018-03-19 20:55:05Z erj $*/

#include "ixgbe_common.h"
#include "ixgbe_phy.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_api.h"

/* Forward declarations for the file-local EEPROM bit-bang helpers. */
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count);
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_release_eeprom(struct ixgbe_hw *hw);

static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
					 u16 *san_mac_offset);
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data);
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data);
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset);

/**
 * ixgbe_init_ops_generic - Inits function ptrs
 * @hw: pointer to the hardware structure
 *
 * Initialize the function pointers.  Ops that differ per MAC generation are
 * left NULL here and filled in by the MAC-specific init (e.g. reset_hw,
 * get_media_type, VLAN/VMDq and link ops).
 **/
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	DEBUGFUNC("ixgbe_init_ops_generic");

	/* EEPROM */
	eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
	if (eec & IXGBE_EEC_PRES) {
		eeprom->ops.read = ixgbe_read_eerd_generic;
		eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
	} else {
		eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
		eeprom->ops.read_buffer =
				 ixgbe_read_eeprom_buffer_bit_bang_generic;
	}
	eeprom->ops.write = ixgbe_write_eeprom_generic;
	eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
	eeprom->ops.validate_checksum =
				      ixgbe_validate_eeprom_checksum_generic;
	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;

	/* MAC */
	mac->ops.init_hw = ixgbe_init_hw_generic;
	mac->ops.reset_hw = NULL;
	mac->ops.start_hw = ixgbe_start_hw_generic;
	mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
	mac->ops.get_media_type = NULL;
	mac->ops.get_supported_physical_layer = NULL;
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
	mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
	mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
	mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
	mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
	mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
	mac->ops.prot_autoc_read = prot_autoc_read_generic;
	mac->ops.prot_autoc_write = prot_autoc_write_generic;

	/* LEDs */
	mac->ops.led_on = ixgbe_led_on_generic;
	mac->ops.led_off = ixgbe_led_off_generic;
	mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
	mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
	mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic;

	/* RAR, Multicast, VLAN */
	mac->ops.set_rar = ixgbe_set_rar_generic;
	mac->ops.clear_rar = ixgbe_clear_rar_generic;
	mac->ops.insert_mac_addr = NULL;
	mac->ops.set_vmdq = NULL;
	mac->ops.clear_vmdq = NULL;
	mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
	mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
	mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
	mac->ops.enable_mc = ixgbe_enable_mc_generic;
	mac->ops.disable_mc = ixgbe_disable_mc_generic;
	mac->ops.clear_vfta = NULL;
	mac->ops.set_vfta = NULL;
	mac->ops.set_vlvf = NULL;
	mac->ops.init_uta_tables = NULL;
	mac->ops.enable_rx = ixgbe_enable_rx_generic;
	mac->ops.disable_rx = ixgbe_disable_rx_generic;
	mac->ops.toggle_txdctl = ixgbe_toggle_txdctl_generic;

	/* Flow Control */
	mac->ops.fc_enable = ixgbe_fc_enable_generic;
	mac->ops.setup_fc = ixgbe_setup_fc_generic;
	mac->ops.fc_autoneg = ixgbe_fc_autoneg;

	/* Link */
	mac->ops.get_link_capabilities = NULL;
	mac->ops.setup_link = NULL;
	mac->ops.check_link = NULL;
	mac->ops.dmac_config = NULL;
	mac->ops.dmac_update_tcs = NULL;
	mac->ops.dmac_config_tcs = NULL;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
 * of flow control
 * @hw: pointer to hardware structure
 *
 * This function returns TRUE if the device supports flow control
 * autonegotiation, and FALSE if it does not.
164 * 165 **/ 166 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) 167 { 168 bool supported = FALSE; 169 ixgbe_link_speed speed; 170 bool link_up; 171 172 DEBUGFUNC("ixgbe_device_supports_autoneg_fc"); 173 174 switch (hw->phy.media_type) { 175 case ixgbe_media_type_fiber_fixed: 176 case ixgbe_media_type_fiber_qsfp: 177 case ixgbe_media_type_fiber: 178 /* flow control autoneg black list */ 179 switch (hw->device_id) { 180 case IXGBE_DEV_ID_X550EM_A_SFP: 181 case IXGBE_DEV_ID_X550EM_A_SFP_N: 182 case IXGBE_DEV_ID_X550EM_A_QSFP: 183 case IXGBE_DEV_ID_X550EM_A_QSFP_N: 184 supported = FALSE; 185 break; 186 default: 187 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE); 188 /* if link is down, assume supported */ 189 if (link_up) 190 supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? 191 TRUE : FALSE; 192 else 193 supported = TRUE; 194 } 195 196 break; 197 case ixgbe_media_type_backplane: 198 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI) 199 supported = FALSE; 200 else 201 supported = TRUE; 202 break; 203 case ixgbe_media_type_copper: 204 /* only some copper devices support flow control autoneg */ 205 switch (hw->device_id) { 206 case IXGBE_DEV_ID_82599_T3_LOM: 207 case IXGBE_DEV_ID_X540T: 208 case IXGBE_DEV_ID_X540T1: 209 case IXGBE_DEV_ID_X540_BYPASS: 210 case IXGBE_DEV_ID_X550T: 211 case IXGBE_DEV_ID_X550T1: 212 case IXGBE_DEV_ID_X550EM_X_10G_T: 213 case IXGBE_DEV_ID_X550EM_A_10G_T: 214 case IXGBE_DEV_ID_X550EM_A_1G_T: 215 case IXGBE_DEV_ID_X550EM_A_1G_T_L: 216 supported = TRUE; 217 break; 218 default: 219 supported = FALSE; 220 } 221 default: 222 break; 223 } 224 225 if (!supported) 226 ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED, 227 "Device %x does not support flow control autoneg", 228 hw->device_id); 229 230 return supported; 231 } 232 233 /** 234 * ixgbe_setup_fc_generic - Set up flow control 235 * @hw: pointer to hardware structure 236 * 237 * Called at init time to set up flow control. 
**/
s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	/*
	 * reg, reg_bp and reg_cu hold the 1G PCS (PCS1GANA), backplane
	 * AUTOC and copper PHY autoneg-advertisement words respectively.
	 */
	u32 reg = 0, reg_bp = 0;
	u16 reg_cu = 0;
	bool locked = FALSE;

	DEBUGFUNC("ixgbe_setup_fc_generic");

	/* Validate the requested mode */
	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
			   "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/*
	 * 10gig parts do not have a word in the EEPROM to determine the
	 * default flow control setting, so we explicitly set it to full.
	 */
	if (hw->fc.requested_mode == ixgbe_fc_default)
		hw->fc.requested_mode = ixgbe_fc_full;

	/*
	 * Set up the 1G and 10G flow control advertisement registers so the
	 * HW will be able to do fc autoneg once the cable is plugged in.  If
	 * we link at 10G, the 1G advertisement is harmless and vice versa.
	 */
	switch (hw->phy.media_type) {
	case ixgbe_media_type_backplane:
		/* some MAC's need RMW protection on AUTOC */
		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
		if (ret_val != IXGBE_SUCCESS)
			goto out;

		/* fall through - only backplane uses autoc */
	case ixgbe_media_type_fiber_fixed:
	case ixgbe_media_type_fiber_qsfp:
	case ixgbe_media_type_fiber:
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);

		break;
	case ixgbe_media_type_copper:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
		break;
	default:
		break;
	}

	/*
	 * The possible values of fc.requested_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.requested_mode) {
	case ixgbe_fc_none:
		/* Flow control completely disabled by software override. */
		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
				    IXGBE_AUTOC_ASM_PAUSE);
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane) {
			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
			reg_cu |= IXGBE_TAF_ASM_PAUSE;
			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
		}
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE, as such we fall
		 * through to the fc_full statement. Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
				  IXGBE_AUTOC_ASM_PAUSE;
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
		break;
	default:
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
			     "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;	/* unreachable after goto; kept for switch style */
	}

	if (hw->mac.type < ixgbe_mac_X540) {
		/*
		 * Enable auto-negotiation between the MAC & PHY;
		 * the MAC will advertise clause 37 flow control.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);

		/* Disable AN timeout */
		if (hw->fc.strict_ieee)
			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;

		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
	}

	/*
	 * AUTOC restart handles negotiation of 1G and 10G on backplane
	 * and copper. There is no need to set the PCS1GCTL register.
	 */
	if (hw->phy.media_type == ixgbe_media_type_backplane) {
		reg_bp |= IXGBE_AUTOC_AN_RESTART;
		ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
		if (ret_val)
			goto out;
	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
		    (ixgbe_device_supports_autoneg_fc(hw))) {
		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
	}

	DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
out:
	return ret_val;
}

/**
 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware by filling the bus info structure and media type, clears
 * all on chip counters, initializes receive address registers, multicast
 * table, VLAN filter table, calls routine to set up link and flow control
 * settings, and leaves transmit and receive units disabled and uninitialized
 **/
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
	s32 ret_val;
	u32 ctrl_ext;
	u16 device_caps;

	DEBUGFUNC("ixgbe_start_hw_generic");

	/* Set the media type */
	hw->phy.media_type = hw->mac.ops.get_media_type(hw);

	/* PHY ops initialization must be done in reset_hw() */

	/* Clear the VLAN filter table */
	hw->mac.ops.clear_vfta(hw);

	/* Clear statistics registers */
	hw->mac.ops.clear_hw_cntrs(hw);

	/* Set No Snoop Disable */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	/* Setup flow control */
	ret_val = ixgbe_setup_fc(hw);
	if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) {
		DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
		return ret_val;
	}

	/* Cache bit indicating need for crosstalk fix */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case
ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		hw->mac.ops.get_device_caps(hw, &device_caps);
		/* Workaround is needed unless the NVM caps bit says otherwise */
		if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
			hw->need_crosstalk_fix = FALSE;
		else
			hw->need_crosstalk_fix = TRUE;
		break;
	default:
		hw->need_crosstalk_fix = FALSE;
		break;
	}

	/* Clear adapter stopped flag */
	hw->adapter_stopped = FALSE;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_start_hw_gen2 - Init sequence for common device family
 * @hw: pointer to hw structure
 *
 * Performs the init sequence common to the second generation
 * of 10 GbE devices.
 * Devices in the second generation:
 *     82599
 *     X540
 **/
s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
	u32 i;
	u32 regval;

	DEBUGFUNC("ixgbe_start_hw_gen2");

	/* Clear the rate limiters */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
	}
	IXGBE_WRITE_FLUSH(hw);

	/* Disable relaxed ordering */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
	}

	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_init_hw_generic - Generic hardware initialization
 * @hw: pointer to hardware structure
 *
 * Initialize the hardware by resetting the hardware, filling the bus info
 * structure and media type, clears all on chip counters, initializes receive
 * address registers, multicast table, VLAN filter table, calls routine to set
 * up link and flow control settings, and leaves
transmit and receive units
 * disabled and uninitialized
 **/
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
{
	s32 status;

	DEBUGFUNC("ixgbe_init_hw_generic");

	/* Reset the hardware */
	status = hw->mac.ops.reset_hw(hw);

	/* A missing SFP module is not fatal; still bring the MAC up. */
	if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) {
		/* Start the HW */
		status = hw->mac.ops.start_hw(hw);
	}

	/* Initialize the LED link active for LED blink support */
	if (hw->mac.ops.init_led_link_act)
		hw->mac.ops.init_led_link_act(hw);

	if (status != IXGBE_SUCCESS)
		DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);

	return status;
}

/**
 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 * @hw: pointer to hardware structure
 *
 * Clears all hardware statistics counters by reading them from the hardware
 * Statistics counters are clear on read.  Returned values are deliberately
 * discarded: the read itself resets each counter.
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");

	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	if (hw->mac.type >= ixgbe_mac_X550)
		IXGBE_READ_REG(hw, IXGBE_MBSDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	/* 82599 and newer moved the link XON/XOFF Rx counters. */
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/* PHY-resident error counters on X540/X550; also clear on read. */
	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			ixgbe_identify_phy(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.
 **/
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
				  u32 pba_num_size)
{
	s32 ret_val;
	u16 data;
	u16 pba_ptr;
	u16 offset;
	u16 length;

	DEBUGFUNC("ixgbe_read_pba_string_generic");

	if (pba_num == NULL) {
		DEBUGOUT("PBA string buffer was null\n");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	/*
	 * if data is not ptr guard the PBA must be in legacy format which
	 * means pba_ptr is actually our second data word for the PBA number
	 * and we can decode it into an ascii string
	 */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		DEBUGOUT("NVM PBA number is not stored as string\n");

		/* we will need 11 characters to store the PBA */
		if (pba_num_size < 11) {
			DEBUGOUT("PBA string buffer too small\n");
			return IXGBE_ERR_NO_SPACE;
		}

		/* extract hex string from data and pba_ptr */
		pba_num[0] = (data >> 12) & 0xF;
		pba_num[1] = (data >> 8) & 0xF;
		pba_num[2] = (data >> 4) & 0xF;
		pba_num[3] = data & 0xF;
		pba_num[4] = (pba_ptr >> 12) & 0xF;
		pba_num[5] = (pba_ptr >> 8) & 0xF;
		pba_num[6] = '-';
		pba_num[7] = 0;
		pba_num[8] = (pba_ptr >> 4) & 0xF;
		pba_num[9] = pba_ptr & 0xF;

		/* put a null character on the end of our string */
		pba_num[10] = '\0';

		/* switch all the data but the '-' to hex char */
		for (offset = 0; offset < 10; offset++) {
			if (pba_num[offset] < 0xA)
				pba_num[offset] += '0';
			else if (pba_num[offset] < 0x10)
				pba_num[offset] += 'A' - 0xA;
		}

		return IXGBE_SUCCESS;
	}

	/* Pointer format: first word of the PBA section is its length. */
	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0) {
		DEBUGOUT("NVM PBA number section invalid length\n");
		return IXGBE_ERR_PBA_SECTION;
	}

	/* check if pba_num buffer is big enough */
	if (pba_num_size < (((u32)length * 2) - 1)) {
		DEBUGOUT("PBA string buffer too small\n");
		return IXGBE_ERR_NO_SPACE;
	}

	/* trim pba length from start of string */
	pba_ptr++;
	length--;

	for (offset = 0; offset < length; offset++) {
		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
		if (ret_val) {
			DEBUGOUT("NVM Read Error\n");
			return ret_val;
		}
		pba_num[offset * 2] = (u8)(data >> 8);
		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
	}
	/* offset == length here, so this terminates the string */
	pba_num[offset * 2] = '\0';

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number from the EEPROM
 *
 * Reads the part number from the EEPROM.
760 **/ 761 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num) 762 { 763 s32 ret_val; 764 u16 data; 765 766 DEBUGFUNC("ixgbe_read_pba_num_generic"); 767 768 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); 769 if (ret_val) { 770 DEBUGOUT("NVM Read Error\n"); 771 return ret_val; 772 } else if (data == IXGBE_PBANUM_PTR_GUARD) { 773 DEBUGOUT("NVM Not supported\n"); 774 return IXGBE_NOT_IMPLEMENTED; 775 } 776 *pba_num = (u32)(data << 16); 777 778 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data); 779 if (ret_val) { 780 DEBUGOUT("NVM Read Error\n"); 781 return ret_val; 782 } 783 *pba_num |= data; 784 785 return IXGBE_SUCCESS; 786 } 787 788 /** 789 * ixgbe_read_pba_raw 790 * @hw: pointer to the HW structure 791 * @eeprom_buf: optional pointer to EEPROM image 792 * @eeprom_buf_size: size of EEPROM image in words 793 * @max_pba_block_size: PBA block size limit 794 * @pba: pointer to output PBA structure 795 * 796 * Reads PBA from EEPROM image when eeprom_buf is not NULL. 797 * Reads PBA from physical EEPROM device when eeprom_buf is NULL. 
798 * 799 **/ 800 s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf, 801 u32 eeprom_buf_size, u16 max_pba_block_size, 802 struct ixgbe_pba *pba) 803 { 804 s32 ret_val; 805 u16 pba_block_size; 806 807 if (pba == NULL) 808 return IXGBE_ERR_PARAM; 809 810 if (eeprom_buf == NULL) { 811 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2, 812 &pba->word[0]); 813 if (ret_val) 814 return ret_val; 815 } else { 816 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) { 817 pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR]; 818 pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR]; 819 } else { 820 return IXGBE_ERR_PARAM; 821 } 822 } 823 824 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) { 825 if (pba->pba_block == NULL) 826 return IXGBE_ERR_PARAM; 827 828 ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf, 829 eeprom_buf_size, 830 &pba_block_size); 831 if (ret_val) 832 return ret_val; 833 834 if (pba_block_size > max_pba_block_size) 835 return IXGBE_ERR_PARAM; 836 837 if (eeprom_buf == NULL) { 838 ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1], 839 pba_block_size, 840 pba->pba_block); 841 if (ret_val) 842 return ret_val; 843 } else { 844 if (eeprom_buf_size > (u32)(pba->word[1] + 845 pba_block_size)) { 846 memcpy(pba->pba_block, 847 &eeprom_buf[pba->word[1]], 848 pba_block_size * sizeof(u16)); 849 } else { 850 return IXGBE_ERR_PARAM; 851 } 852 } 853 } 854 855 return IXGBE_SUCCESS; 856 } 857 858 /** 859 * ixgbe_write_pba_raw 860 * @hw: pointer to the HW structure 861 * @eeprom_buf: optional pointer to EEPROM image 862 * @eeprom_buf_size: size of EEPROM image in words 863 * @pba: pointer to PBA structure 864 * 865 * Writes PBA to EEPROM image when eeprom_buf is not NULL. 866 * Writes PBA to physical EEPROM device when eeprom_buf is NULL. 
867 * 868 **/ 869 s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf, 870 u32 eeprom_buf_size, struct ixgbe_pba *pba) 871 { 872 s32 ret_val; 873 874 if (pba == NULL) 875 return IXGBE_ERR_PARAM; 876 877 if (eeprom_buf == NULL) { 878 ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2, 879 &pba->word[0]); 880 if (ret_val) 881 return ret_val; 882 } else { 883 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) { 884 eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0]; 885 eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1]; 886 } else { 887 return IXGBE_ERR_PARAM; 888 } 889 } 890 891 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) { 892 if (pba->pba_block == NULL) 893 return IXGBE_ERR_PARAM; 894 895 if (eeprom_buf == NULL) { 896 ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1], 897 pba->pba_block[0], 898 pba->pba_block); 899 if (ret_val) 900 return ret_val; 901 } else { 902 if (eeprom_buf_size > (u32)(pba->word[1] + 903 pba->pba_block[0])) { 904 memcpy(&eeprom_buf[pba->word[1]], 905 pba->pba_block, 906 pba->pba_block[0] * sizeof(u16)); 907 } else { 908 return IXGBE_ERR_PARAM; 909 } 910 } 911 } 912 913 return IXGBE_SUCCESS; 914 } 915 916 /** 917 * ixgbe_get_pba_block_size 918 * @hw: pointer to the HW structure 919 * @eeprom_buf: optional pointer to EEPROM image 920 * @eeprom_buf_size: size of EEPROM image in words 921 * @pba_data_size: pointer to output variable 922 * 923 * Returns the size of the PBA block in words. Function operates on EEPROM 924 * image if the eeprom_buf pointer is not NULL otherwise it accesses physical 925 * EEPROM device. 
*
 **/
s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
			     u32 eeprom_buf_size, u16 *pba_block_size)
{
	s32 ret_val;
	u16 pba_word[2];
	u16 length;

	DEBUGFUNC("ixgbe_get_pba_block_size");

	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						     &pba_word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
			pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
		/* Block length is stored in the first word of the block. */
		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
						      &length);
			if (ret_val)
				return ret_val;
		} else {
			if (eeprom_buf_size > pba_word[1])
				length = eeprom_buf[pba_word[1] + 0];
			else
				return IXGBE_ERR_PARAM;
		}

		if (length == 0xFFFF || length == 0)
			return IXGBE_ERR_PBA_SECTION;
	} else {
		/* PBA number in legacy format, there is no PBA Block.
*/
		length = 0;
	}

	if (pba_block_size != NULL)
		*pba_block_size = length;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_get_mac_addr_generic - Generic get MAC address
 * @hw: pointer to hardware structure
 * @mac_addr: Adapter MAC address
 *
 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
 * A reset of the adapter must be performed prior to calling this function
 * in order for the MAC address to have been loaded from the EEPROM into RAR0
 **/
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
{
	u32 rar_high;
	u32 rar_low;
	u16 i;

	DEBUGFUNC("ixgbe_get_mac_addr_generic");

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));

	/* RAL supplies the first 4 bytes of the address, RAH the last 2. */
	for (i = 0; i < 4; i++)
		mac_addr[i] = (u8)(rar_low >> (i*8));

	for (i = 0; i < 2; i++)
		mac_addr[i+4] = (u8)(rar_high >> (i*8));

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_set_pci_config_data_generic - Generic store PCI bus info
 * @hw: pointer to hardware structure
 * @link_status: the link status returned by the PCI config space
 *
 * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
 **/
void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	if (hw->bus.type == ixgbe_bus_type_unknown)
		hw->bus.type = ixgbe_bus_type_pci_express;

	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
	case IXGBE_PCI_LINK_WIDTH_1:
		hw->bus.width = ixgbe_bus_width_pcie_x1;
		break;
	case IXGBE_PCI_LINK_WIDTH_2:
		hw->bus.width = ixgbe_bus_width_pcie_x2;
		break;
	case IXGBE_PCI_LINK_WIDTH_4:
		hw->bus.width = ixgbe_bus_width_pcie_x4;
		break;
	case IXGBE_PCI_LINK_WIDTH_8:
		hw->bus.width = ixgbe_bus_width_pcie_x8;
		break;
	default:
		/* Unrecognized width encodings map to "unknown". */
		hw->bus.width =
ixgbe_bus_width_unknown; 1035 break; 1036 } 1037 1038 switch (link_status & IXGBE_PCI_LINK_SPEED) { 1039 case IXGBE_PCI_LINK_SPEED_2500: 1040 hw->bus.speed = ixgbe_bus_speed_2500; 1041 break; 1042 case IXGBE_PCI_LINK_SPEED_5000: 1043 hw->bus.speed = ixgbe_bus_speed_5000; 1044 break; 1045 case IXGBE_PCI_LINK_SPEED_8000: 1046 hw->bus.speed = ixgbe_bus_speed_8000; 1047 break; 1048 default: 1049 hw->bus.speed = ixgbe_bus_speed_unknown; 1050 break; 1051 } 1052 1053 mac->ops.set_lan_id(hw); 1054 } 1055 1056 /** 1057 * ixgbe_get_bus_info_generic - Generic set PCI bus info 1058 * @hw: pointer to hardware structure 1059 * 1060 * Gets the PCI bus info (speed, width, type) then calls helper function to 1061 * store this data within the ixgbe_hw structure. 1062 **/ 1063 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) 1064 { 1065 u16 link_status; 1066 1067 DEBUGFUNC("ixgbe_get_bus_info_generic"); 1068 1069 /* Get the negotiated link width and speed from PCI config space */ 1070 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS); 1071 1072 ixgbe_set_pci_config_data_generic(hw, link_status); 1073 1074 return IXGBE_SUCCESS; 1075 } 1076 1077 /** 1078 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices 1079 * @hw: pointer to the HW structure 1080 * 1081 * Determines the LAN function id by reading memory-mapped registers and swaps 1082 * the port value if requested, and set MAC instance for devices that share 1083 * CS4227. 
1084 **/ 1085 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) 1086 { 1087 struct ixgbe_bus_info *bus = &hw->bus; 1088 u32 reg; 1089 u16 ee_ctrl_4; 1090 1091 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie"); 1092 1093 reg = IXGBE_READ_REG(hw, IXGBE_STATUS); 1094 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT; 1095 bus->lan_id = (u8)bus->func; 1096 1097 /* check for a port swap */ 1098 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw)); 1099 if (reg & IXGBE_FACTPS_LFS) 1100 bus->func ^= 0x1; 1101 1102 /* Get MAC instance from EEPROM for configuring CS4227 */ 1103 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) { 1104 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4); 1105 bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >> 1106 IXGBE_EE_CTRL_4_INST_ID_SHIFT; 1107 } 1108 } 1109 1110 /** 1111 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units 1112 * @hw: pointer to hardware structure 1113 * 1114 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, 1115 * disables transmit and receive units. The adapter_stopped flag is used by 1116 * the shared code and drivers to determine if the adapter is in a stopped 1117 * state and should not touch the hardware. 1118 **/ 1119 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) 1120 { 1121 u32 reg_val; 1122 u16 i; 1123 1124 DEBUGFUNC("ixgbe_stop_adapter_generic"); 1125 1126 /* 1127 * Set the adapter_stopped flag so other driver functions stop touching 1128 * the hardware 1129 */ 1130 hw->adapter_stopped = TRUE; 1131 1132 /* Disable the receive unit */ 1133 ixgbe_disable_rx(hw); 1134 1135 /* Clear interrupt mask to stop interrupts from being generated */ 1136 /* 1137 * XXX 1138 * This function is called in the state of both interrupt disabled 1139 * and interrupt enabled, e.g. 
1140 * + interrupt disabled case: 1141 * - ixgbe_stop_locked() 1142 * - ixgbe_disable_intr() // interrupt disabled here 1143 * - ixgbe_stop_adapter() 1144 * - hw->mac.ops.stop_adapter() 1145 * == this function 1146 * + interrupt enabled case: 1147 * - ixgbe_local_timer1() 1148 * - ixgbe_init_locked() 1149 * - ixgbe_stop_adapter() 1150 * - hw->mac.ops.stop_adapter() 1151 * == this function 1152 * Therefore, it causes nest status breaking to nest the status 1153 * (that is, que->im_nest++) at all times. So, this function must 1154 * use ixgbe_ensure_disabled_intr() instead of ixgbe_disable_intr(). 1155 */ 1156 ixgbe_ensure_disabled_intr(hw->back); 1157 1158 /* Clear any pending interrupts, flush previous writes */ 1159 IXGBE_READ_REG(hw, IXGBE_EICR); 1160 1161 /* Disable the transmit unit. Each queue must be disabled. */ 1162 for (i = 0; i < hw->mac.max_tx_queues; i++) 1163 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH); 1164 1165 /* Disable the receive unit by stopping each queue */ 1166 for (i = 0; i < hw->mac.max_rx_queues; i++) { 1167 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); 1168 reg_val &= ~IXGBE_RXDCTL_ENABLE; 1169 reg_val |= IXGBE_RXDCTL_SWFLSH; 1170 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val); 1171 } 1172 1173 /* flush all queues disables */ 1174 IXGBE_WRITE_FLUSH(hw); 1175 msec_delay(2); 1176 1177 /* 1178 * Prevent the PCI-E bus from hanging by disabling PCI-E master 1179 * access and verify no pending requests 1180 */ 1181 return ixgbe_disable_pcie_master(hw); 1182 } 1183 1184 /** 1185 * ixgbe_init_led_link_act_generic - Store the LED index link/activity. 1186 * @hw: pointer to hardware structure 1187 * 1188 * Store the index for the link active LED. This will be used to support 1189 * blinking the LED. 
1190 **/ 1191 s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw) 1192 { 1193 struct ixgbe_mac_info *mac = &hw->mac; 1194 u32 led_reg, led_mode; 1195 u8 i; 1196 1197 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 1198 1199 /* Get LED link active from the LEDCTL register */ 1200 for (i = 0; i < 4; i++) { 1201 led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i); 1202 1203 if ((led_mode & IXGBE_LED_MODE_MASK_BASE) == 1204 IXGBE_LED_LINK_ACTIVE) { 1205 mac->led_link_act = i; 1206 return IXGBE_SUCCESS; 1207 } 1208 } 1209 1210 /* 1211 * If LEDCTL register does not have the LED link active set, then use 1212 * known MAC defaults. 1213 */ 1214 switch (hw->mac.type) { 1215 case ixgbe_mac_X550EM_a: 1216 case ixgbe_mac_X550EM_x: 1217 mac->led_link_act = 1; 1218 break; 1219 default: 1220 mac->led_link_act = 2; 1221 } 1222 return IXGBE_SUCCESS; 1223 } 1224 1225 /** 1226 * ixgbe_led_on_generic - Turns on the software controllable LEDs. 1227 * @hw: pointer to hardware structure 1228 * @index: led number to turn on 1229 **/ 1230 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) 1231 { 1232 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 1233 1234 DEBUGFUNC("ixgbe_led_on_generic"); 1235 1236 if (index > 3) 1237 return IXGBE_ERR_PARAM; 1238 1239 /* To turn on the LED, set mode to ON. */ 1240 led_reg &= ~IXGBE_LED_MODE_MASK(index); 1241 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); 1242 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 1243 IXGBE_WRITE_FLUSH(hw); 1244 1245 return IXGBE_SUCCESS; 1246 } 1247 1248 /** 1249 * ixgbe_led_off_generic - Turns off the software controllable LEDs. 1250 * @hw: pointer to hardware structure 1251 * @index: led number to turn off 1252 **/ 1253 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) 1254 { 1255 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 1256 1257 DEBUGFUNC("ixgbe_led_off_generic"); 1258 1259 if (index > 3) 1260 return IXGBE_ERR_PARAM; 1261 1262 /* To turn off the LED, set mode to OFF. 
*/ 1263 led_reg &= ~IXGBE_LED_MODE_MASK(index); 1264 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); 1265 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 1266 IXGBE_WRITE_FLUSH(hw); 1267 1268 return IXGBE_SUCCESS; 1269 } 1270 1271 /** 1272 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params 1273 * @hw: pointer to hardware structure 1274 * 1275 * Initializes the EEPROM parameters ixgbe_eeprom_info within the 1276 * ixgbe_hw struct in order to set up EEPROM access. 1277 **/ 1278 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) 1279 { 1280 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 1281 u32 eec; 1282 u16 eeprom_size; 1283 1284 DEBUGFUNC("ixgbe_init_eeprom_params_generic"); 1285 1286 if (eeprom->type == ixgbe_eeprom_uninitialized) { 1287 eeprom->type = ixgbe_eeprom_none; 1288 /* Set default semaphore delay to 10ms which is a well 1289 * tested value */ 1290 eeprom->semaphore_delay = 10; 1291 /* Clear EEPROM page size, it will be initialized as needed */ 1292 eeprom->word_page_size = 0; 1293 1294 /* 1295 * Check for EEPROM present first. 1296 * If not present leave as none 1297 */ 1298 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); 1299 if (eec & IXGBE_EEC_PRES) { 1300 eeprom->type = ixgbe_eeprom_spi; 1301 1302 /* 1303 * SPI EEPROM is assumed here. This code would need to 1304 * change if a future EEPROM is not SPI. 
1305 */ 1306 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> 1307 IXGBE_EEC_SIZE_SHIFT); 1308 eeprom->word_size = 1 << (eeprom_size + 1309 IXGBE_EEPROM_WORD_SIZE_SHIFT); 1310 } 1311 1312 if (eec & IXGBE_EEC_ADDR_SIZE) 1313 eeprom->address_bits = 16; 1314 else 1315 eeprom->address_bits = 8; 1316 DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: " 1317 "%d\n", eeprom->type, eeprom->word_size, 1318 eeprom->address_bits); 1319 } 1320 1321 return IXGBE_SUCCESS; 1322 } 1323 1324 /** 1325 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang 1326 * @hw: pointer to hardware structure 1327 * @offset: offset within the EEPROM to write 1328 * @words: number of word(s) 1329 * @data: 16 bit word(s) to write to EEPROM 1330 * 1331 * Reads 16 bit word(s) from EEPROM through bit-bang method 1332 **/ 1333 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 1334 u16 words, u16 *data) 1335 { 1336 s32 status = IXGBE_SUCCESS; 1337 u16 i, count; 1338 1339 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic"); 1340 1341 hw->eeprom.ops.init_params(hw); 1342 1343 if (words == 0) { 1344 status = IXGBE_ERR_INVALID_ARGUMENT; 1345 goto out; 1346 } 1347 1348 if (offset + words > hw->eeprom.word_size) { 1349 status = IXGBE_ERR_EEPROM; 1350 goto out; 1351 } 1352 1353 /* 1354 * The EEPROM page size cannot be queried from the chip. We do lazy 1355 * initialization. It is worth to do that when we write large buffer. 1356 */ 1357 if ((hw->eeprom.word_page_size == 0) && 1358 (words > IXGBE_EEPROM_PAGE_SIZE_MAX)) 1359 ixgbe_detect_eeprom_page_size_generic(hw, offset); 1360 1361 /* 1362 * We cannot hold synchronization semaphores for too long 1363 * to avoid other entity starvation. However it is more efficient 1364 * to read in bursts than synchronizing access for each word. 1365 */ 1366 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { 1367 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? 
1368 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); 1369 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i, 1370 count, &data[i]); 1371 1372 if (status != IXGBE_SUCCESS) 1373 break; 1374 } 1375 1376 out: 1377 return status; 1378 } 1379 1380 /** 1381 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM 1382 * @hw: pointer to hardware structure 1383 * @offset: offset within the EEPROM to be written to 1384 * @words: number of word(s) 1385 * @data: 16 bit word(s) to be written to the EEPROM 1386 * 1387 * If ixgbe_eeprom_update_checksum is not called after this function, the 1388 * EEPROM will most likely contain an invalid checksum. 1389 **/ 1390 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 1391 u16 words, u16 *data) 1392 { 1393 s32 status; 1394 u16 word; 1395 u16 page_size; 1396 u16 i; 1397 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; 1398 1399 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang"); 1400 1401 /* Prepare the EEPROM for writing */ 1402 status = ixgbe_acquire_eeprom(hw); 1403 1404 if (status == IXGBE_SUCCESS) { 1405 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) { 1406 ixgbe_release_eeprom(hw); 1407 status = IXGBE_ERR_EEPROM; 1408 } 1409 } 1410 1411 if (status == IXGBE_SUCCESS) { 1412 for (i = 0; i < words; i++) { 1413 ixgbe_standby_eeprom(hw); 1414 1415 /* Send the WRITE ENABLE command (8 bit opcode ) */ 1416 ixgbe_shift_out_eeprom_bits(hw, 1417 IXGBE_EEPROM_WREN_OPCODE_SPI, 1418 IXGBE_EEPROM_OPCODE_BITS); 1419 1420 ixgbe_standby_eeprom(hw); 1421 1422 /* 1423 * Some SPI eeproms use the 8th address bit embedded 1424 * in the opcode 1425 */ 1426 if ((hw->eeprom.address_bits == 8) && 1427 ((offset + i) >= 128)) 1428 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; 1429 1430 /* Send the Write command (8-bit opcode + addr) */ 1431 ixgbe_shift_out_eeprom_bits(hw, write_opcode, 1432 IXGBE_EEPROM_OPCODE_BITS); 1433 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), 1434 hw->eeprom.address_bits); 1435 
1436 page_size = hw->eeprom.word_page_size; 1437 1438 /* Send the data in burst via SPI*/ 1439 do { 1440 word = data[i]; 1441 word = (word >> 8) | (word << 8); 1442 ixgbe_shift_out_eeprom_bits(hw, word, 16); 1443 1444 if (page_size == 0) 1445 break; 1446 1447 /* do not wrap around page */ 1448 if (((offset + i) & (page_size - 1)) == 1449 (page_size - 1)) 1450 break; 1451 } while (++i < words); 1452 1453 ixgbe_standby_eeprom(hw); 1454 msec_delay(10); 1455 } 1456 /* Done with writing - release the EEPROM */ 1457 ixgbe_release_eeprom(hw); 1458 } 1459 1460 return status; 1461 } 1462 1463 /** 1464 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM 1465 * @hw: pointer to hardware structure 1466 * @offset: offset within the EEPROM to be written to 1467 * @data: 16 bit word to be written to the EEPROM 1468 * 1469 * If ixgbe_eeprom_update_checksum is not called after this function, the 1470 * EEPROM will most likely contain an invalid checksum. 1471 **/ 1472 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) 1473 { 1474 s32 status; 1475 1476 DEBUGFUNC("ixgbe_write_eeprom_generic"); 1477 1478 hw->eeprom.ops.init_params(hw); 1479 1480 if (offset >= hw->eeprom.word_size) { 1481 status = IXGBE_ERR_EEPROM; 1482 goto out; 1483 } 1484 1485 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data); 1486 1487 out: 1488 return status; 1489 } 1490 1491 /** 1492 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang 1493 * @hw: pointer to hardware structure 1494 * @offset: offset within the EEPROM to be read 1495 * @data: read 16 bit words(s) from EEPROM 1496 * @words: number of word(s) 1497 * 1498 * Reads 16 bit word(s) from EEPROM through bit-bang method 1499 **/ 1500 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 1501 u16 words, u16 *data) 1502 { 1503 s32 status = IXGBE_SUCCESS; 1504 u16 i, count; 1505 1506 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic"); 1507 1508 
hw->eeprom.ops.init_params(hw); 1509 1510 if (words == 0) { 1511 status = IXGBE_ERR_INVALID_ARGUMENT; 1512 goto out; 1513 } 1514 1515 if (offset + words > hw->eeprom.word_size) { 1516 status = IXGBE_ERR_EEPROM; 1517 goto out; 1518 } 1519 1520 /* 1521 * We cannot hold synchronization semaphores for too long 1522 * to avoid other entity starvation. However it is more efficient 1523 * to read in bursts than synchronizing access for each word. 1524 */ 1525 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { 1526 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? 1527 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); 1528 1529 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i, 1530 count, &data[i]); 1531 1532 if (status != IXGBE_SUCCESS) 1533 break; 1534 } 1535 1536 out: 1537 return status; 1538 } 1539 1540 /** 1541 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang 1542 * @hw: pointer to hardware structure 1543 * @offset: offset within the EEPROM to be read 1544 * @words: number of word(s) 1545 * @data: read 16 bit word(s) from EEPROM 1546 * 1547 * Reads 16 bit word(s) from EEPROM through bit-bang method 1548 **/ 1549 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 1550 u16 words, u16 *data) 1551 { 1552 s32 status; 1553 u16 word_in; 1554 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; 1555 u16 i; 1556 1557 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang"); 1558 1559 /* Prepare the EEPROM for reading */ 1560 status = ixgbe_acquire_eeprom(hw); 1561 1562 if (status == IXGBE_SUCCESS) { 1563 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) { 1564 ixgbe_release_eeprom(hw); 1565 status = IXGBE_ERR_EEPROM; 1566 } 1567 } 1568 1569 if (status == IXGBE_SUCCESS) { 1570 for (i = 0; i < words; i++) { 1571 ixgbe_standby_eeprom(hw); 1572 /* 1573 * Some SPI eeproms use the 8th address bit embedded 1574 * in the opcode 1575 */ 1576 if ((hw->eeprom.address_bits == 8) && 1577 ((offset + i) >= 128)) 1578 read_opcode |= 
IXGBE_EEPROM_A8_OPCODE_SPI; 1579 1580 /* Send the READ command (opcode + addr) */ 1581 ixgbe_shift_out_eeprom_bits(hw, read_opcode, 1582 IXGBE_EEPROM_OPCODE_BITS); 1583 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), 1584 hw->eeprom.address_bits); 1585 1586 /* Read the data. */ 1587 word_in = ixgbe_shift_in_eeprom_bits(hw, 16); 1588 data[i] = (word_in >> 8) | (word_in << 8); 1589 } 1590 1591 /* End this read operation */ 1592 ixgbe_release_eeprom(hw); 1593 } 1594 1595 return status; 1596 } 1597 1598 /** 1599 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang 1600 * @hw: pointer to hardware structure 1601 * @offset: offset within the EEPROM to be read 1602 * @data: read 16 bit value from EEPROM 1603 * 1604 * Reads 16 bit value from EEPROM through bit-bang method 1605 **/ 1606 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 1607 u16 *data) 1608 { 1609 s32 status; 1610 1611 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic"); 1612 1613 hw->eeprom.ops.init_params(hw); 1614 1615 if (offset >= hw->eeprom.word_size) { 1616 status = IXGBE_ERR_EEPROM; 1617 goto out; 1618 } 1619 1620 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); 1621 1622 out: 1623 return status; 1624 } 1625 1626 /** 1627 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD 1628 * @hw: pointer to hardware structure 1629 * @offset: offset of word in the EEPROM to read 1630 * @words: number of word(s) 1631 * @data: 16 bit word(s) from the EEPROM 1632 * 1633 * Reads a 16 bit word(s) from the EEPROM using the EERD register. 
1634 **/ 1635 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, 1636 u16 words, u16 *data) 1637 { 1638 u32 eerd; 1639 s32 status = IXGBE_SUCCESS; 1640 u32 i; 1641 1642 DEBUGFUNC("ixgbe_read_eerd_buffer_generic"); 1643 1644 hw->eeprom.ops.init_params(hw); 1645 1646 if (words == 0) { 1647 status = IXGBE_ERR_INVALID_ARGUMENT; 1648 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words"); 1649 goto out; 1650 } 1651 1652 if (offset >= hw->eeprom.word_size) { 1653 status = IXGBE_ERR_EEPROM; 1654 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset"); 1655 goto out; 1656 } 1657 1658 for (i = 0; i < words; i++) { 1659 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | 1660 IXGBE_EEPROM_RW_REG_START; 1661 1662 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); 1663 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ); 1664 1665 if (status == IXGBE_SUCCESS) { 1666 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >> 1667 IXGBE_EEPROM_RW_REG_DATA); 1668 } else { 1669 DEBUGOUT("Eeprom read timed out\n"); 1670 goto out; 1671 } 1672 } 1673 out: 1674 return status; 1675 } 1676 1677 /** 1678 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size 1679 * @hw: pointer to hardware structure 1680 * @offset: offset within the EEPROM to be used as a scratch pad 1681 * 1682 * Discover EEPROM page size by writing marching data at given offset. 1683 * This function is called only when we are writing a new large buffer 1684 * at given offset so the data would be overwritten anyway. 
1685 **/ 1686 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, 1687 u16 offset) 1688 { 1689 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX]; 1690 s32 status = IXGBE_SUCCESS; 1691 u16 i; 1692 1693 DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic"); 1694 1695 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++) 1696 data[i] = i; 1697 1698 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX; 1699 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1700 IXGBE_EEPROM_PAGE_SIZE_MAX, data); 1701 hw->eeprom.word_page_size = 0; 1702 if (status != IXGBE_SUCCESS) 1703 goto out; 1704 1705 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); 1706 if (status != IXGBE_SUCCESS) 1707 goto out; 1708 1709 /* 1710 * When writing in burst more than the actual page size 1711 * EEPROM address wraps around current page. 1712 */ 1713 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0]; 1714 1715 DEBUGOUT1("Detected EEPROM page size = %d words.", 1716 hw->eeprom.word_page_size); 1717 out: 1718 return status; 1719 } 1720 1721 /** 1722 * ixgbe_read_eerd_generic - Read EEPROM word using EERD 1723 * @hw: pointer to hardware structure 1724 * @offset: offset of word in the EEPROM to read 1725 * @data: word read from the EEPROM 1726 * 1727 * Reads a 16 bit word from the EEPROM using the EERD register. 1728 **/ 1729 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) 1730 { 1731 return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data); 1732 } 1733 1734 /** 1735 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR 1736 * @hw: pointer to hardware structure 1737 * @offset: offset of word in the EEPROM to write 1738 * @words: number of word(s) 1739 * @data: word(s) write to the EEPROM 1740 * 1741 * Write a 16 bit word(s) to the EEPROM using the EEWR register. 
1742 **/ 1743 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, 1744 u16 words, u16 *data) 1745 { 1746 u32 eewr; 1747 s32 status = IXGBE_SUCCESS; 1748 u16 i; 1749 1750 DEBUGFUNC("ixgbe_write_eewr_generic"); 1751 1752 hw->eeprom.ops.init_params(hw); 1753 1754 if (words == 0) { 1755 status = IXGBE_ERR_INVALID_ARGUMENT; 1756 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words"); 1757 goto out; 1758 } 1759 1760 if (offset >= hw->eeprom.word_size) { 1761 status = IXGBE_ERR_EEPROM; 1762 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset"); 1763 goto out; 1764 } 1765 1766 for (i = 0; i < words; i++) { 1767 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | 1768 (data[i] << IXGBE_EEPROM_RW_REG_DATA) | 1769 IXGBE_EEPROM_RW_REG_START; 1770 1771 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); 1772 if (status != IXGBE_SUCCESS) { 1773 DEBUGOUT("Eeprom write EEWR timed out\n"); 1774 goto out; 1775 } 1776 1777 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); 1778 1779 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); 1780 if (status != IXGBE_SUCCESS) { 1781 DEBUGOUT("Eeprom write EEWR timed out\n"); 1782 goto out; 1783 } 1784 } 1785 1786 out: 1787 return status; 1788 } 1789 1790 /** 1791 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR 1792 * @hw: pointer to hardware structure 1793 * @offset: offset of word in the EEPROM to write 1794 * @data: word write to the EEPROM 1795 * 1796 * Write a 16 bit word to the EEPROM using the EEWR register. 1797 **/ 1798 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) 1799 { 1800 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data); 1801 } 1802 1803 /** 1804 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status 1805 * @hw: pointer to hardware structure 1806 * @ee_reg: EEPROM flag for polling 1807 * 1808 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the 1809 * read or write is done respectively. 
1810 **/ 1811 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) 1812 { 1813 u32 i; 1814 u32 reg; 1815 s32 status = IXGBE_ERR_EEPROM; 1816 1817 DEBUGFUNC("ixgbe_poll_eerd_eewr_done"); 1818 1819 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) { 1820 if (ee_reg == IXGBE_NVM_POLL_READ) 1821 reg = IXGBE_READ_REG(hw, IXGBE_EERD); 1822 else 1823 reg = IXGBE_READ_REG(hw, IXGBE_EEWR); 1824 1825 if (reg & IXGBE_EEPROM_RW_REG_DONE) { 1826 status = IXGBE_SUCCESS; 1827 break; 1828 } 1829 usec_delay(5); 1830 } 1831 1832 if (i == IXGBE_EERD_EEWR_ATTEMPTS) 1833 ERROR_REPORT1(IXGBE_ERROR_POLLING, 1834 "EEPROM read/write done polling timed out"); 1835 1836 return status; 1837 } 1838 1839 /** 1840 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang 1841 * @hw: pointer to hardware structure 1842 * 1843 * Prepares EEPROM for access using bit-bang method. This function should 1844 * be called before issuing a command to the EEPROM. 1845 **/ 1846 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) 1847 { 1848 s32 status = IXGBE_SUCCESS; 1849 u32 eec; 1850 u32 i; 1851 1852 DEBUGFUNC("ixgbe_acquire_eeprom"); 1853 1854 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) 1855 != IXGBE_SUCCESS) 1856 status = IXGBE_ERR_SWFW_SYNC; 1857 1858 if (status == IXGBE_SUCCESS) { 1859 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); 1860 1861 /* Request EEPROM Access */ 1862 eec |= IXGBE_EEC_REQ; 1863 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); 1864 1865 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { 1866 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); 1867 if (eec & IXGBE_EEC_GNT) 1868 break; 1869 usec_delay(5); 1870 } 1871 1872 /* Release if grant not acquired */ 1873 if (!(eec & IXGBE_EEC_GNT)) { 1874 eec &= ~IXGBE_EEC_REQ; 1875 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); 1876 DEBUGOUT("Could not acquire EEPROM grant\n"); 1877 1878 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 1879 status = IXGBE_ERR_EEPROM; 1880 } 1881 1882 /* Setup EEPROM for Read/Write */ 
1883 if (status == IXGBE_SUCCESS) { 1884 /* Clear CS and SK */ 1885 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); 1886 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); 1887 IXGBE_WRITE_FLUSH(hw); 1888 usec_delay(1); 1889 } 1890 } 1891 return status; 1892 } 1893 1894 /** 1895 * ixgbe_get_eeprom_semaphore - Get hardware semaphore 1896 * @hw: pointer to hardware structure 1897 * 1898 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method 1899 **/ 1900 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) 1901 { 1902 s32 status = IXGBE_ERR_EEPROM; 1903 u32 timeout = 2000; 1904 u32 i; 1905 u32 swsm; 1906 1907 DEBUGFUNC("ixgbe_get_eeprom_semaphore"); 1908 1909 1910 /* Get SMBI software semaphore between device drivers first */ 1911 for (i = 0; i < timeout; i++) { 1912 /* 1913 * If the SMBI bit is 0 when we read it, then the bit will be 1914 * set and we have the semaphore 1915 */ 1916 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); 1917 if (!(swsm & IXGBE_SWSM_SMBI)) { 1918 status = IXGBE_SUCCESS; 1919 break; 1920 } 1921 usec_delay(50); 1922 } 1923 1924 if (i == timeout) { 1925 DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore " 1926 "not granted.\n"); 1927 /* 1928 * this release is particularly important because our attempts 1929 * above to get the semaphore may have succeeded, and if there 1930 * was a timeout, we should unconditionally clear the semaphore 1931 * bits to free the driver to make progress 1932 */ 1933 ixgbe_release_eeprom_semaphore(hw); 1934 1935 usec_delay(50); 1936 /* 1937 * one last try 1938 * If the SMBI bit is 0 when we read it, then the bit will be 1939 * set and we have the semaphore 1940 */ 1941 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); 1942 if (!(swsm & IXGBE_SWSM_SMBI)) 1943 status = IXGBE_SUCCESS; 1944 } 1945 1946 /* Now get the semaphore between SW/FW through the SWESMBI bit */ 1947 if (status == IXGBE_SUCCESS) { 1948 for (i = 0; i < timeout; i++) { 1949 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); 
1950 1951 /* Set the SW EEPROM semaphore bit to request access */ 1952 swsm |= IXGBE_SWSM_SWESMBI; 1953 IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm); 1954 1955 /* 1956 * If we set the bit successfully then we got the 1957 * semaphore. 1958 */ 1959 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); 1960 if (swsm & IXGBE_SWSM_SWESMBI) 1961 break; 1962 1963 usec_delay(50); 1964 } 1965 1966 /* 1967 * Release semaphores and return error if SW EEPROM semaphore 1968 * was not granted because we don't have access to the EEPROM 1969 */ 1970 if (i >= timeout) { 1971 ERROR_REPORT1(IXGBE_ERROR_POLLING, 1972 "SWESMBI Software EEPROM semaphore not granted.\n"); 1973 ixgbe_release_eeprom_semaphore(hw); 1974 status = IXGBE_ERR_EEPROM; 1975 } 1976 } else { 1977 ERROR_REPORT1(IXGBE_ERROR_POLLING, 1978 "Software semaphore SMBI between device drivers " 1979 "not granted.\n"); 1980 } 1981 1982 return status; 1983 } 1984 1985 /** 1986 * ixgbe_release_eeprom_semaphore - Release hardware semaphore 1987 * @hw: pointer to hardware structure 1988 * 1989 * This function clears hardware semaphore bits. 1990 **/ 1991 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) 1992 { 1993 u32 swsm; 1994 1995 DEBUGFUNC("ixgbe_release_eeprom_semaphore"); 1996 1997 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); 1998 1999 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */ 2000 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI); 2001 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); 2002 IXGBE_WRITE_FLUSH(hw); 2003 } 2004 2005 /** 2006 * ixgbe_ready_eeprom - Polls for EEPROM ready 2007 * @hw: pointer to hardware structure 2008 **/ 2009 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) 2010 { 2011 s32 status = IXGBE_SUCCESS; 2012 u16 i; 2013 u8 spi_stat_reg; 2014 2015 DEBUGFUNC("ixgbe_ready_eeprom"); 2016 2017 /* 2018 * Read "Status Register" repeatedly until the LSB is cleared. 
The 2019 * EEPROM will signal that the command has been completed by clearing 2020 * bit 0 of the internal status register. If it's not cleared within 2021 * 5 milliseconds, then error out. 2022 */ 2023 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) { 2024 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI, 2025 IXGBE_EEPROM_OPCODE_BITS); 2026 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8); 2027 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) 2028 break; 2029 2030 usec_delay(5); 2031 ixgbe_standby_eeprom(hw); 2032 } 2033 2034 /* 2035 * On some parts, SPI write time could vary from 0-20mSec on 3.3V 2036 * devices (and only 0-5mSec on 5V devices) 2037 */ 2038 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { 2039 DEBUGOUT("SPI EEPROM Status error\n"); 2040 status = IXGBE_ERR_EEPROM; 2041 } 2042 2043 return status; 2044 } 2045 2046 /** 2047 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state 2048 * @hw: pointer to hardware structure 2049 **/ 2050 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) 2051 { 2052 u32 eec; 2053 2054 DEBUGFUNC("ixgbe_standby_eeprom"); 2055 2056 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); 2057 2058 /* Toggle CS to flush commands */ 2059 eec |= IXGBE_EEC_CS; 2060 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); 2061 IXGBE_WRITE_FLUSH(hw); 2062 usec_delay(1); 2063 eec &= ~IXGBE_EEC_CS; 2064 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); 2065 IXGBE_WRITE_FLUSH(hw); 2066 usec_delay(1); 2067 } 2068 2069 /** 2070 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM. 
 * @hw: pointer to hardware structure
 * @data: data to send to the EEPROM
 * @count: number of bits to shift out (MSB first)
 **/
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count)
{
	u32 eec;
	u32 mask;
	u32 i;

	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	/*
	 * Mask is used to shift "count" bits of "data" out to the EEPROM
	 * one bit at a time.  Determine the starting bit based on count
	 */
	mask = 0x01 << (count - 1);

	for (i = 0; i < count; i++) {
		/*
		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
		 * "1", and then raising and then lowering the clock (the SK
		 * bit controls the clock input to the EEPROM).  A "0" is
		 * shifted out to the EEPROM by setting "DI" to "0" and then
		 * raising and then lowering the clock.
		 */
		if (data & mask)
			eec |= IXGBE_EEC_DI;
		else
			eec &= ~IXGBE_EEC_DI;

		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
		IXGBE_WRITE_FLUSH(hw);

		usec_delay(1);

		/* Data is latched by the EEPROM on the rising SK edge */
		ixgbe_raise_eeprom_clk(hw, &eec);
		ixgbe_lower_eeprom_clk(hw, &eec);

		/*
		 * Shift mask to signify next bit of data to shift in to the
		 * EEPROM
		 */
		mask = mask >> 1;
	}

	/* We leave the "DI" bit set to "0" when we leave this routine. */
	eec &= ~IXGBE_EEC_DI;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
 * @hw: pointer to hardware structure
 * @count: number of bits to shift in (MSB first, at most 16)
 **/
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
	u32 eec;
	u32 i;
	u16 data = 0;

	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");

	/*
	 * In order to read a register from the EEPROM, we need to shift
	 * 'count' bits in from the EEPROM.  Bits are "shifted in" by raising
	 * the clock input to the EEPROM (setting the SK bit), and then reading
	 * the value of the "DO" bit.  During this "shifting in" process the
	 * "DI" bit should always be clear.
	 */
	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);

	for (i = 0; i < count; i++) {
		data = data << 1;
		ixgbe_raise_eeprom_clk(hw, &eec);

		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

		/* Keep DI low while sampling DO */
		eec &= ~(IXGBE_EEC_DI);
		if (eec & IXGBE_EEC_DO)
			data |= 1;

		ixgbe_lower_eeprom_clk(hw, &eec);
	}

	return data;
}

/**
 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
 * @hw: pointer to hardware structure
 * @eec: EEC register's current value (updated with SK set)
 **/
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	DEBUGFUNC("ixgbe_raise_eeprom_clk");

	/*
	 * Raise the clock input to the EEPROM
	 * (setting the SK bit), then delay
	 */
	*eec = *eec | IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
}

/**
 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
 * @hw: pointer to hardware structure
 * @eec: EEC's current value (updated with SK cleared)
 **/
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	DEBUGFUNC("ixgbe_lower_eeprom_clk");

	/*
	 * Lower the clock input to the EEPROM (clearing the SK bit), then
	 * delay
	 */
	*eec = *eec & ~IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
}

/**
 * ixgbe_release_eeprom - Release EEPROM, release semaphores
 * @hw: pointer to hardware structure
 *
 * Deasserts the EEPROM request bit, drops the software/firmware semaphore
 * and waits semaphore_delay ms so firmware gets a chance at the EEPROM.
 **/
static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	DEBUGFUNC("ixgbe_release_eeprom");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	eec |= IXGBE_EEC_CS;  /* Pull CS high */
	eec &= ~IXGBE_EEC_SK; /* Lower SCK */

	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);

	usec_delay(1);

	/* Stop requesting EEPROM access */
	eec &= ~IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	/* Delay before attempt to obtain semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
}

/**
 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 *
 * Sums words 0x0-0x3E plus every section referenced by the pointer words
 * (except the FW pointer), then returns IXGBE_EEPROM_SUM minus that sum.
 *
 * Returns a negative error code on error, or the 16-bit checksum
 **/
s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;

	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}
		checksum += word;
	}

	/* Include all data from pointers except for the fw pointer */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		if (hw->eeprom.ops.read(hw, i, &pointer)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		/* If the pointer seems invalid */
		if (pointer == 0xFFFF || pointer == 0)
			continue;

		/* First word of the pointed-to section is its length */
		if (hw->eeprom.ops.read(hw, pointer, &length)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		if (length == 0xFFFF || length == 0)
			continue;

		for (j = pointer + 1; j <= pointer + length; j++) {
			if (hw->eeprom.ops.read(hw, j, &word)) {
				DEBUGOUT("EEPROM read failed\n");
				return IXGBE_ERR_EEPROM;
			}
			checksum += word;
		}
	}

	/* The stored checksum makes all words sum to IXGBE_EEPROM_SUM */
	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return (s32)checksum;
}

/**
 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum_val: calculated checksum
 *
 * Performs checksum calculation and validates the EEPROM checksum.  If the
 * caller does not need checksum_val, the value can be NULL.
 **/
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
					   u16 *checksum_val)
{
	s32 status;
	u16 checksum;
	u16 read_checksum = 0;

	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");

	/* Read the first word from the EEPROM.  If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status) {
		DEBUGOUT("EEPROM read failed\n");
		return status;
	}

	/* calc_checksum returns the checksum in the low 16 bits on success */
	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		return status;

	checksum = (u16)(status & 0xffff);

	status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
	if (status) {
		DEBUGOUT("EEPROM read failed\n");
		return status;
	}

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (read_checksum != checksum)
		status = IXGBE_ERR_EEPROM_CHECKSUM;

	/* If the user cares, return the calculated checksum */
	if (checksum_val)
		*checksum_val = checksum;

	return status;
}

/**
 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
 * @hw: pointer to hardware structure
 *
 * Recalculates the checksum over the EEPROM contents and writes it to the
 * checksum word.
 **/
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	s32 status;
	u16 checksum;

	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");

	/* Read the first word from the EEPROM.  If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status) {
		DEBUGOUT("EEPROM read failed\n");
		return status;
	}

	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		return status;

	checksum = (u16)(status & 0xffff);

	status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);

	return status;
}

/**
 * ixgbe_validate_mac_addr - Validate MAC address
 * @mac_addr: pointer to MAC address.
 *
 * Tests a MAC address to ensure it is a valid Individual Address.
2382 **/ 2383 s32 ixgbe_validate_mac_addr(u8 *mac_addr) 2384 { 2385 s32 status = IXGBE_SUCCESS; 2386 2387 DEBUGFUNC("ixgbe_validate_mac_addr"); 2388 2389 /* Make sure it is not a multicast address */ 2390 if (IXGBE_IS_MULTICAST(mac_addr)) { 2391 status = IXGBE_ERR_INVALID_MAC_ADDR; 2392 /* Not a broadcast address */ 2393 } else if (IXGBE_IS_BROADCAST(mac_addr)) { 2394 status = IXGBE_ERR_INVALID_MAC_ADDR; 2395 /* Reject the zero address */ 2396 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && 2397 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) { 2398 status = IXGBE_ERR_INVALID_MAC_ADDR; 2399 } 2400 return status; 2401 } 2402 2403 /** 2404 * ixgbe_set_rar_generic - Set Rx address register 2405 * @hw: pointer to hardware structure 2406 * @index: Receive address register to write 2407 * @addr: Address to put into receive address register 2408 * @vmdq: VMDq "set" or "pool" index 2409 * @enable_addr: set flag that address is active 2410 * 2411 * Puts an ethernet address into a receive address register. 
2412 **/ 2413 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, 2414 u32 enable_addr) 2415 { 2416 u32 rar_low, rar_high; 2417 u32 rar_entries = hw->mac.num_rar_entries; 2418 2419 DEBUGFUNC("ixgbe_set_rar_generic"); 2420 2421 /* Make sure we are using a valid rar index range */ 2422 if (index >= rar_entries) { 2423 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, 2424 "RAR index %d is out of range.\n", index); 2425 return IXGBE_ERR_INVALID_ARGUMENT; 2426 } 2427 2428 /* setup VMDq pool selection before this RAR gets enabled */ 2429 hw->mac.ops.set_vmdq(hw, index, vmdq); 2430 2431 /* 2432 * HW expects these in little endian so we reverse the byte 2433 * order from network order (big endian) to little endian 2434 */ 2435 rar_low = ((u32)addr[0] | 2436 ((u32)addr[1] << 8) | 2437 ((u32)addr[2] << 16) | 2438 ((u32)addr[3] << 24)); 2439 /* 2440 * Some parts put the VMDq setting in the extra RAH bits, 2441 * so save everything except the lower 16 bits that hold part 2442 * of the address and the address valid bit. 2443 */ 2444 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 2445 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); 2446 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8)); 2447 2448 if (enable_addr != 0) 2449 rar_high |= IXGBE_RAH_AV; 2450 2451 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); 2452 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 2453 2454 return IXGBE_SUCCESS; 2455 } 2456 2457 /** 2458 * ixgbe_clear_rar_generic - Remove Rx address register 2459 * @hw: pointer to hardware structure 2460 * @index: Receive address register to write 2461 * 2462 * Clears an ethernet address from a receive address register. 
 **/
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_clear_rar_generic");

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
			     "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);

	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	/* clear VMDq pool/queue selection for this RAR */
	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 * @hw: pointer to hardware structure
 *
 * Places the MAC address in receive address register 0 and clears the rest
 * of the receive address registers.  Clears the multicast table.  Assumes
 * the receiver is in reset when the routine is called.
 **/
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_init_rx_addrs_generic");

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
	    IXGBE_ERR_INVALID_MAC_ADDR) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);
	} else {
		/* Setup the receive address. */
		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
	}

	/* clear VMDq pool/queue selection for RAR 0 */
	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);

	hw->addr_ctrl.overflow_promisc = 0;

	/* RAR[0] now holds the station address */
	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA */
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	DEBUGOUT(" Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	ixgbe_init_uta_tables(hw);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_add_uc_addr - Adds a secondary unicast address.
 * @hw: pointer to hardware structure
 * @addr: new address
 * @vmdq: VMDq "set" or "pool" index
 *
 * Adds it to unused receive address register or goes into promiscuous mode.
2571 **/ 2572 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) 2573 { 2574 u32 rar_entries = hw->mac.num_rar_entries; 2575 u32 rar; 2576 2577 DEBUGFUNC("ixgbe_add_uc_addr"); 2578 2579 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n", 2580 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); 2581 2582 /* 2583 * Place this address in the RAR if there is room, 2584 * else put the controller into promiscuous mode 2585 */ 2586 if (hw->addr_ctrl.rar_used_count < rar_entries) { 2587 rar = hw->addr_ctrl.rar_used_count; 2588 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); 2589 DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar); 2590 hw->addr_ctrl.rar_used_count++; 2591 } else { 2592 hw->addr_ctrl.overflow_promisc++; 2593 } 2594 2595 DEBUGOUT("ixgbe_add_uc_addr Complete\n"); 2596 } 2597 2598 /** 2599 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses 2600 * @hw: pointer to hardware structure 2601 * @addr_list: the list of new addresses 2602 * @addr_count: number of addresses 2603 * @next: iterator function to walk the address list 2604 * 2605 * The given list replaces any existing list. Clears the secondary addrs from 2606 * receive address registers. Uses unused receive address registers for the 2607 * first secondary addresses, and falls back to promiscuous mode as needed. 2608 * 2609 * Drivers using secondary unicast addresses must set user_set_promisc when 2610 * manually putting the device into promiscuous mode. 
 **/
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
				      u32 addr_count, ixgbe_mc_addr_itr next)
{
	u8 *addr;
	u32 i;
	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
	u32 uc_addr_in_use;
	u32 fctrl;
	u32 vmdq;

	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");

	/*
	 * Clear accounting of old secondary address list,
	 * don't count RAR[0]
	 *
	 * NOTE(review): assumes rar_used_count >= 1 (established by
	 * ixgbe_init_rx_addrs_generic); a zero count would underflow here.
	 */
	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
	hw->addr_ctrl.overflow_promisc = 0;

	/* Zero out the other receive addresses */
	/* NOTE(review): the loop clears RAR[1..uc_addr_in_use]; the message's
	 * upper bound looks one too high -- confirm before changing. */
	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
	for (i = 0; i < uc_addr_in_use; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
	}

	/* Add the new addresses */
	for (i = 0; i < addr_count; i++) {
		DEBUGOUT(" Adding the secondary addresses:\n");
		addr = next(hw, &addr_list, &vmdq);
		ixgbe_add_uc_addr(hw, addr, vmdq);
	}

	if (hw->addr_ctrl.overflow_promisc) {
		/* enable promisc if not already in overflow or set by user */
		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Entering address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}
	} else {
		/* only disable if set by overflow, not by user */
		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Leaving address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl &= ~IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}
	}

	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
 * @hw: pointer to hardware
structure 2671 * @mc_addr: the multicast address 2672 * 2673 * Extracts the 12 bits, from a multicast address, to determine which 2674 * bit-vector to set in the multicast table. The hardware uses 12 bits, from 2675 * incoming rx multicast addresses, to determine the bit-vector to check in 2676 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set 2677 * by the MO field of the MCSTCTRL. The MO field is set during initialization 2678 * to mc_filter_type. 2679 **/ 2680 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) 2681 { 2682 u32 vector = 0; 2683 2684 DEBUGFUNC("ixgbe_mta_vector"); 2685 2686 switch (hw->mac.mc_filter_type) { 2687 case 0: /* use bits [47:36] of the address */ 2688 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); 2689 break; 2690 case 1: /* use bits [46:35] of the address */ 2691 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); 2692 break; 2693 case 2: /* use bits [45:34] of the address */ 2694 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); 2695 break; 2696 case 3: /* use bits [43:32] of the address */ 2697 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); 2698 break; 2699 default: /* Invalid mc_filter_type */ 2700 DEBUGOUT("MC filter type param set incorrectly\n"); 2701 ASSERT(0); 2702 break; 2703 } 2704 2705 /* vector can only be 12-bits or boundary will be exceeded */ 2706 vector &= 0xFFF; 2707 return vector; 2708 } 2709 2710 /** 2711 * ixgbe_set_mta - Set bit-vector in multicast table 2712 * @hw: pointer to hardware structure 2713 * @mc_addr: Multicast address 2714 * 2715 * Sets the bit-vector in the multicast table. 
 **/
void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector;
	u32 vector_bit;
	u32 vector_reg;

	DEBUGFUNC("ixgbe_set_mta");

	hw->addr_ctrl.mta_in_use++;

	vector = ixgbe_mta_vector(hw, mc_addr);
	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);

	/*
	 * The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits.  We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value.  The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register are determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	/* Only the shadow copy is updated; the caller flushes it to HW */
	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}

/**
 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
 * @hw: pointer to hardware structure
 * @mc_addr_list: the list of new multicast addresses
 * @mc_addr_count: number of addresses
 * @next: iterator function to walk the multicast address list
 * @clear: flag, when set clears the table beforehand
 *
 * When the clear flag is set, the given list replaces any existing list.
 * Hashes the given addresses into the multicast table.
 **/
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
				      u32 mc_addr_count, ixgbe_mc_addr_itr next,
				      bool clear)
{
	u32 i;
	u32 vmdq;

	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");

	/*
	 * Set the new number of MC addresses that we are being requested to
	 * use.
	 */
	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
	hw->addr_ctrl.mta_in_use = 0;

	/* Clear mta_shadow */
	if (clear) {
		DEBUGOUT(" Clearing MTA\n");
		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
	}

	/* Update mta_shadow */
	for (i = 0; i < mc_addr_count; i++) {
		DEBUGOUT(" Adding the multicast addresses:\n");
		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
	}

	/* Enable mta: flush the shadow table to the hardware MTA array */
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
				      hw->mac.mta_shadow[i]);

	if (hw->addr_ctrl.mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);

	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_enable_mc_generic - Enable multicast address in RAR
 * @hw: pointer to hardware structure
 *
 * Enables multicast address in RAR and the use of the multicast hash table.
 **/
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;

	DEBUGFUNC("ixgbe_enable_mc_generic");

	/* Only re-enable the filter if addresses are hashed into it */
	if (a->mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
				hw->mac.mc_filter_type);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_disable_mc_generic - Disable multicast address in RAR
 * @hw: pointer to hardware structure
 *
 * Disables multicast address in RAR and the use of the multicast hash table.
 **/
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;

	DEBUGFUNC("ixgbe_disable_mc_generic");

	/* Clear MFE; the MO (mc_filter_type) field is preserved */
	if (a->mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_fc_enable_generic - Enable flow control
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to the current settings.
 **/
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 mflcn_reg, fccfg_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	int i;

	DEBUGFUNC("ixgbe_fc_enable_generic");

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				DEBUGOUT("Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/* Negotiate the fc mode to use */
	hw->mac.ops.fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	default:
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
			     "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;	/* NOTE(review): unreachable after goto */
	}

	/* Set 802.3x based flow control settings. */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);


	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the Rx packet buffer size - 24KB.  This allows
			 * the Tx switch to function even under heavy Rx
			 * workloads.
			 */
			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	/* Configure pause time (2 TCs per register) */
	reg = (u32)hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}

/**
 * ixgbe_negotiate_fc - Negotiate flow control
 * @hw: pointer to hardware structure
 * @adv_reg: flow control advertised settings
 * @lp_reg: link partner's flow control settings
 * @adv_sym: symmetric pause bit in advertisement
 * @adv_asm: asymmetric pause bit in advertisement
 * @lp_sym: symmetric pause bit in link partner advertisement
 * @lp_asm: asymmetric pause bit in link partner advertisement
 *
 * Find the intersection between advertised settings and link partner's
 * advertised settings
 **/
s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
	if ((!(adv_reg)) || (!(lp_reg))) {
		ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
			     "Local or link partner's advertised flow control "
			     "settings are NULL. Local: %x, link partner: %x\n",
			     adv_reg, lp_reg);
		return IXGBE_ERR_FC_NOT_NEGOTIATED;
	}

	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
		/*
		 * Now we need to check if the user selected Rx ONLY
		 * of pause frames.  In this case, we had to advertise
		 * FULL flow control because we could not advertise RX
		 * ONLY. Hence, we must now check to see if we need to
		 * turn OFF the TRANSMISSION of PAUSE frames.
		 */
		if (hw->fc.requested_mode == ixgbe_fc_full) {
			hw->fc.current_mode = ixgbe_fc_full;
			DEBUGOUT("Flow Control = FULL.\n");
		} else {
			hw->fc.current_mode = ixgbe_fc_rx_pause;
			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
		}
	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		hw->fc.current_mode = ixgbe_fc_tx_pause;
		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		hw->fc.current_mode = ixgbe_fc_rx_pause;
		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
	} else {
		hw->fc.current_mode = ixgbe_fc_none;
		DEBUGOUT("Flow Control = NONE.\n");
	}
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
 * @hw: pointer to hardware structure
 *
 * Enable flow control according on 1 gig fiber.
3025 **/ 3026 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) 3027 { 3028 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; 3029 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; 3030 3031 /* 3032 * On multispeed fiber at 1g, bail out if 3033 * - link is up but AN did not complete, or if 3034 * - link is up and AN completed but timed out 3035 */ 3036 3037 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 3038 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || 3039 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { 3040 DEBUGOUT("Auto-Negotiation did not complete or timed out\n"); 3041 goto out; 3042 } 3043 3044 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 3045 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 3046 3047 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg, 3048 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, 3049 IXGBE_PCS1GANA_ASM_PAUSE, 3050 IXGBE_PCS1GANA_SYM_PAUSE, 3051 IXGBE_PCS1GANA_ASM_PAUSE); 3052 3053 out: 3054 return ret_val; 3055 } 3056 3057 /** 3058 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37 3059 * @hw: pointer to hardware structure 3060 * 3061 * Enable flow control according to IEEE clause 37. 
 **/
static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
{
	u32 links2, anlp1_reg, autoc_reg, links;
	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;

	/*
	 * On backplane, bail out if
	 * - backplane autoneg was not completed, or if
	 * - we are 82599 and link partner is not AN enabled
	 */
	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
		DEBUGOUT("Auto-Negotiation did not complete\n");
		goto out;
	}

	/* 82599 reports link partner AN capability in LINKS2 */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
			DEBUGOUT("Link partner is not AN enabled\n");
			goto out;
		}
	}
	/*
	 * Read the 10g AN autoc and LP ability registers and resolve
	 * local flow control settings accordingly
	 */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);

	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);

out:
	return ret_val;
}

/**
 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to IEEE clause 37.
 **/
static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
{
	u16 technology_ability_reg = 0;
	u16 lp_technology_ability_reg = 0;

	/* Read our advertisement and the link partner's ability via MDIO */
	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
			     &technology_ability_reg);
	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
			     &lp_technology_ability_reg);

	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
				  (u32)lp_technology_ability_reg,
				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
}

/**
 * ixgbe_fc_autoneg - Configure flow control
 * @hw: pointer to hardware structure
 *
 * Compares our advertised flow control capabilities to those advertised by
 * our link partner, and determines the proper flow control mode to use.
 **/
void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
	ixgbe_link_speed speed;
	bool link_up;

	DEBUGFUNC("ixgbe_fc_autoneg");

	/*
	 * AN should have completed when the cable was plugged in.
	 * Look for reasons to bail out.  Bail out if:
	 * - FC autoneg is disabled, or if
	 * - link is not up.
	 */
	if (hw->fc.disable_fc_autoneg) {
		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
			     "Flow control autoneg is disabled");
		goto out;
	}

	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
	if (!link_up) {
		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
		goto out;
	}

	switch (hw->phy.media_type) {
	/* Autoneg flow control on fiber adapters */
	case ixgbe_media_type_fiber_fixed:
	case ixgbe_media_type_fiber_qsfp:
	case ixgbe_media_type_fiber:
		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
			ret_val = ixgbe_fc_autoneg_fiber(hw);
		break;

	/* Autoneg flow control on backplane adapters */
	case ixgbe_media_type_backplane:
		ret_val = ixgbe_fc_autoneg_backplane(hw);
		break;

	/* Autoneg flow control on copper adapters */
	case ixgbe_media_type_copper:
		if (ixgbe_device_supports_autoneg_fc(hw))
			ret_val = ixgbe_fc_autoneg_copper(hw);
		break;

	default:
		break;
	}

out:
	if (ret_val == IXGBE_SUCCESS) {
		hw->fc.fc_was_autonegged = TRUE;
	} else {
		/* Negotiation failed or was skipped: fall back to the
		 * administratively requested mode. */
		hw->fc.fc_was_autonegged = FALSE;
		hw->fc.current_mode = hw->fc.requested_mode;
	}
}

/*
 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
 * @hw: pointer to hardware structure
 *
 * System-wide timeout range is encoded in PCIe Device Control2 register.
 *
 * Add 10% to specified maximum and return the number of times to poll for
 * completion timeout, in units of 100 microsec.  Never return less than
 * 800 = 80 millisec.
3200 */ 3201 static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) 3202 { 3203 s16 devctl2; 3204 u32 pollcnt; 3205 3206 devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2); 3207 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK; 3208 3209 switch (devctl2) { 3210 case IXGBE_PCIDEVCTRL2_65_130ms: 3211 pollcnt = 1300; /* 130 millisec */ 3212 break; 3213 case IXGBE_PCIDEVCTRL2_260_520ms: 3214 pollcnt = 5200; /* 520 millisec */ 3215 break; 3216 case IXGBE_PCIDEVCTRL2_1_2s: 3217 pollcnt = 20000; /* 2 sec */ 3218 break; 3219 case IXGBE_PCIDEVCTRL2_4_8s: 3220 pollcnt = 80000; /* 8 sec */ 3221 break; 3222 case IXGBE_PCIDEVCTRL2_17_34s: 3223 pollcnt = 34000; /* 34 sec */ 3224 break; 3225 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */ 3226 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */ 3227 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */ 3228 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */ 3229 default: 3230 pollcnt = 800; /* 80 millisec minimum */ 3231 break; 3232 } 3233 3234 /* add 10% to spec maximum */ 3235 return (pollcnt * 11) / 10; 3236 } 3237 3238 /** 3239 * ixgbe_disable_pcie_master - Disable PCI-express master access 3240 * @hw: pointer to hardware structure 3241 * 3242 * Disables PCI-Express master access and verifies there are no pending 3243 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable 3244 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS 3245 * is returned signifying master requests disabled. 
/**
 * ixgbe_disable_pcie_master - Disable PCI-express master access
 * @hw: pointer to hardware structure
 *
 * Disables PCI-Express master access and verifies there are no pending
 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
 * is returned signifying master requests disabled.
 **/
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 i, poll;
	u16 value;

	DEBUGFUNC("ixgbe_disable_pcie_master");

	/* Always set this bit to ensure any future transactions are blocked */
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);

	/* Exit if master requests are blocked */
	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
	    IXGBE_REMOVED(hw->hw_addr))
		goto out;

	/* Poll for master request bit to clear */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		usec_delay(100);
		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
			goto out;
	}

	/*
	 * Two consecutive resets are required via CTRL.RST per datasheet
	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
	 * of this need.  The first reset prevents new master requests from
	 * being issued by our device.  We then must wait 1usec or more for any
	 * remaining completions from the PCIe bus to trickle in, and then reset
	 * again to clear out any effects they may have had on our device.
	 */
	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

	/* X550 and newer skip the PCIe pending-transaction poll below */
	if (hw->mac.type >= ixgbe_mac_X550)
		goto out;

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
		if (IXGBE_REMOVED(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

	ERROR_REPORT1(IXGBE_ERROR_POLLING,
	    "PCIe transaction pending bit also did not clear.\n");
	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;

out:
	return status;
}

/**
 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
	u32 gssr = 0;
	u32 swmask = mask;
	u32 fwmask = mask << 5;		/* FW bits sit 5 positions above SW bits */
	u32 timeout = 200;		/* 200 * 5 ms delay between attempts */
	u32 i;

	DEBUGFUNC("ixgbe_acquire_swfw_sync");

	for (i = 0; i < timeout; i++) {
		/*
		 * SW NVM semaphore bit is used for access to all
		 * SW_FW_SYNC bits (not just NVM)
		 */
		if (ixgbe_get_eeprom_semaphore(hw))
			return IXGBE_ERR_SWFW_SYNC;

		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
		if (!(gssr & (fwmask | swmask))) {
			/* Free: claim the SW bits and drop the semaphore */
			gssr |= swmask;
			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
			ixgbe_release_eeprom_semaphore(hw);
			return IXGBE_SUCCESS;
		} else {
			/* Resource is currently in use by FW or SW */
			ixgbe_release_eeprom_semaphore(hw);
			msec_delay(5);
		}
	}

	/* If time expired clear the bits holding the lock and retry */
	if (gssr & (fwmask | swmask))
		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));

	msec_delay(5);
	return IXGBE_ERR_SWFW_SYNC;
}
/**
 * ixgbe_release_swfw_sync - Release SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to release
 *
 * Releases the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
	u32 gssr;
	u32 swmask = mask;

	DEBUGFUNC("ixgbe_release_swfw_sync");

	/* EEPROM semaphore guards the GSSR read-modify-write; the return
	 * value is deliberately not checked here. */
	ixgbe_get_eeprom_semaphore(hw);

	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
	gssr &= ~swmask;
	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);

	ixgbe_release_eeprom_semaphore(hw);
}

/**
 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
 * @hw: pointer to hardware structure
 *
 * Stops the receive data path and waits for the HW to internally empty
 * the Rx security block
 **/
s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
{
#define IXGBE_MAX_SECRX_POLL	4000

	int i;
	int secrxreg;

	DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");


	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
	/* Poll until the security block reports itself ready (drained) */
	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
			break;
		else
			/* Use interrupt-safe sleep just in case */
			usec_delay(10);
	}

	/* For informational purposes only */
	if (i >= IXGBE_MAX_SECRX_POLL)
		DEBUGOUT("Rx unit being enabled before security "
		    "path fully disabled. Continuing with init.\n");

	return IXGBE_SUCCESS;
}
3421 */ 3422 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) 3423 { 3424 *locked = FALSE; 3425 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); 3426 return IXGBE_SUCCESS; 3427 } 3428 3429 /** 3430 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write 3431 * @hw: pointer to hardware structure 3432 * @reg_val: value to write to AUTOC 3433 * @locked: bool to indicate whether the SW/FW lock was already taken by 3434 * previous read. 3435 * 3436 * The default case requires no protection so just to the register write. 3437 */ 3438 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) 3439 { 3440 UNREFERENCED_1PARAMETER(locked); 3441 3442 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val); 3443 return IXGBE_SUCCESS; 3444 } 3445 3446 /** 3447 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path 3448 * @hw: pointer to hardware structure 3449 * 3450 * Enables the receive data path. 3451 **/ 3452 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw) 3453 { 3454 u32 secrxreg; 3455 3456 DEBUGFUNC("ixgbe_enable_sec_rx_path_generic"); 3457 3458 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 3459 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; 3460 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); 3461 IXGBE_WRITE_FLUSH(hw); 3462 3463 return IXGBE_SUCCESS; 3464 } 3465 3466 /** 3467 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit 3468 * @hw: pointer to hardware structure 3469 * @regval: register value to write to RXCTRL 3470 * 3471 * Enables the Rx DMA unit 3472 **/ 3473 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) 3474 { 3475 DEBUGFUNC("ixgbe_enable_rx_dma_generic"); 3476 3477 if (regval & IXGBE_RXCTRL_RXEN) 3478 ixgbe_enable_rx(hw); 3479 else 3480 ixgbe_disable_rx(hw); 3481 3482 return IXGBE_SUCCESS; 3483 } 3484 3485 /** 3486 * ixgbe_blink_led_start_generic - Blink LED based on index. 
/**
 * ixgbe_blink_led_start_generic - Blink LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to blink
 *
 * Returns IXGBE_ERR_PARAM for an invalid LED index, otherwise the result
 * of the protected AUTOC access used to force link up when needed.
 **/
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
	ixgbe_link_speed speed = 0;
	bool link_up = 0;
	u32 autoc_reg = 0;
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	s32 ret_val = IXGBE_SUCCESS;
	bool locked = FALSE;

	DEBUGFUNC("ixgbe_blink_led_start_generic");

	/* Only LED indices 0..3 exist */
	if (index > 3)
		return IXGBE_ERR_PARAM;

	/*
	 * Link must be up to auto-blink the LEDs;
	 * Force it if link is down.
	 */
	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);

	if (!link_up) {
		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
		if (ret_val != IXGBE_SUCCESS)
			goto out;

		/* Restart AN with force-link-up (FLU) set */
		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
		autoc_reg |= IXGBE_AUTOC_FLU;

		ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
		if (ret_val != IXGBE_SUCCESS)
			goto out;

		IXGBE_WRITE_FLUSH(hw);
		msec_delay(10);
	}

	/* Put the selected LED into blink mode */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_BLINK(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

out:
	return ret_val;
}
3537 * @hw: pointer to hardware structure 3538 * @index: led number to stop blinking 3539 **/ 3540 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) 3541 { 3542 u32 autoc_reg = 0; 3543 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 3544 s32 ret_val = IXGBE_SUCCESS; 3545 bool locked = FALSE; 3546 3547 DEBUGFUNC("ixgbe_blink_led_stop_generic"); 3548 3549 if (index > 3) 3550 return IXGBE_ERR_PARAM; 3551 3552 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); 3553 if (ret_val != IXGBE_SUCCESS) 3554 goto out; 3555 3556 autoc_reg &= ~IXGBE_AUTOC_FLU; 3557 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 3558 3559 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); 3560 if (ret_val != IXGBE_SUCCESS) 3561 goto out; 3562 3563 led_reg &= ~IXGBE_LED_MODE_MASK(index); 3564 led_reg &= ~IXGBE_LED_BLINK(index); 3565 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); 3566 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 3567 IXGBE_WRITE_FLUSH(hw); 3568 3569 out: 3570 return ret_val; 3571 } 3572 3573 /** 3574 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM 3575 * @hw: pointer to hardware structure 3576 * @san_mac_offset: SAN MAC address offset 3577 * 3578 * This function will read the EEPROM location for the SAN MAC address 3579 * pointer, and returns the value at that location. This is used in both 3580 * get and set mac_addr routines. 3581 **/ 3582 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, 3583 u16 *san_mac_offset) 3584 { 3585 s32 ret_val; 3586 3587 DEBUGFUNC("ixgbe_get_san_mac_addr_offset"); 3588 3589 /* 3590 * First read the EEPROM pointer to see if the MAC addresses are 3591 * available. 
3592 */ 3593 ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, 3594 san_mac_offset); 3595 if (ret_val) { 3596 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, 3597 "eeprom at offset %d failed", 3598 IXGBE_SAN_MAC_ADDR_PTR); 3599 } 3600 3601 return ret_val; 3602 } 3603 3604 /** 3605 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM 3606 * @hw: pointer to hardware structure 3607 * @san_mac_addr: SAN MAC address 3608 * 3609 * Reads the SAN MAC address from the EEPROM, if it's available. This is 3610 * per-port, so set_lan_id() must be called before reading the addresses. 3611 * set_lan_id() is called by identify_sfp(), but this cannot be relied 3612 * upon for non-SFP connections, so we must call it here. 3613 **/ 3614 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) 3615 { 3616 u16 san_mac_data, san_mac_offset; 3617 u8 i; 3618 s32 ret_val; 3619 3620 DEBUGFUNC("ixgbe_get_san_mac_addr_generic"); 3621 3622 /* 3623 * First read the EEPROM pointer to see if the MAC addresses are 3624 * available. If they're not, no point in calling set_lan_id() here. 3625 */ 3626 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); 3627 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) 3628 goto san_mac_addr_out; 3629 3630 /* make sure we know which port we need to program */ 3631 hw->mac.ops.set_lan_id(hw); 3632 /* apply the port offset to the address offset */ 3633 (hw->bus.func) ? 
(san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : 3634 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); 3635 for (i = 0; i < 3; i++) { 3636 ret_val = hw->eeprom.ops.read(hw, san_mac_offset, 3637 &san_mac_data); 3638 if (ret_val) { 3639 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, 3640 "eeprom read at offset %d failed", 3641 san_mac_offset); 3642 goto san_mac_addr_out; 3643 } 3644 san_mac_addr[i * 2] = (u8)(san_mac_data); 3645 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); 3646 san_mac_offset++; 3647 } 3648 return IXGBE_SUCCESS; 3649 3650 san_mac_addr_out: 3651 /* 3652 * No addresses available in this EEPROM. It's not an 3653 * error though, so just wipe the local address and return. 3654 */ 3655 for (i = 0; i < 6; i++) 3656 san_mac_addr[i] = 0xFF; 3657 return IXGBE_SUCCESS; 3658 } 3659 3660 /** 3661 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM 3662 * @hw: pointer to hardware structure 3663 * @san_mac_addr: SAN MAC address 3664 * 3665 * Write a SAN MAC address to the EEPROM. 3666 **/ 3667 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) 3668 { 3669 s32 ret_val; 3670 u16 san_mac_data, san_mac_offset; 3671 u8 i; 3672 3673 DEBUGFUNC("ixgbe_set_san_mac_addr_generic"); 3674 3675 /* Look for SAN mac address pointer. If not defined, return */ 3676 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); 3677 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) 3678 return IXGBE_ERR_NO_SAN_ADDR_PTR; 3679 3680 /* Make sure we know which port we need to write */ 3681 hw->mac.ops.set_lan_id(hw); 3682 /* Apply the port offset to the address offset */ 3683 (hw->bus.func) ? 
(san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : 3684 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); 3685 3686 for (i = 0; i < 3; i++) { 3687 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8); 3688 san_mac_data |= (u16)(san_mac_addr[i * 2]); 3689 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data); 3690 san_mac_offset++; 3691 } 3692 3693 return IXGBE_SUCCESS; 3694 } 3695 3696 /** 3697 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count 3698 * @hw: pointer to hardware structure 3699 * 3700 * Read PCIe configuration space, and get the MSI-X vector count from 3701 * the capabilities table. 3702 **/ 3703 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) 3704 { 3705 u16 msix_count = 1; 3706 u16 max_msix_count; 3707 u16 pcie_offset; 3708 3709 switch (hw->mac.type) { 3710 case ixgbe_mac_82598EB: 3711 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS; 3712 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598; 3713 break; 3714 case ixgbe_mac_82599EB: 3715 case ixgbe_mac_X540: 3716 case ixgbe_mac_X550: 3717 case ixgbe_mac_X550EM_x: 3718 case ixgbe_mac_X550EM_a: 3719 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; 3720 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; 3721 break; 3722 default: 3723 return msix_count; 3724 } 3725 3726 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic"); 3727 msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset); 3728 if (IXGBE_REMOVED(hw->hw_addr)) 3729 msix_count = 0; 3730 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; 3731 3732 /* MSI-X count is zero-based in HW */ 3733 msix_count++; 3734 3735 if (msix_count > max_msix_count) 3736 msix_count = max_msix_count; 3737 3738 return msix_count; 3739 } 3740 3741 /** 3742 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address 3743 * @hw: pointer to hardware structure 3744 * @addr: Address to put into receive address register 3745 * @vmdq: VMDq pool to assign 3746 * 3747 * Puts an ethernet address into a receive address register, or 3748 * finds the rar that it is already in; adds to 
the pool list 3749 **/ 3750 s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) 3751 { 3752 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF; 3753 u32 first_empty_rar = NO_EMPTY_RAR_FOUND; 3754 u32 rar; 3755 u32 rar_low, rar_high; 3756 u32 addr_low, addr_high; 3757 3758 DEBUGFUNC("ixgbe_insert_mac_addr_generic"); 3759 3760 /* swap bytes for HW little endian */ 3761 addr_low = addr[0] | (addr[1] << 8) 3762 | (addr[2] << 16) 3763 | (addr[3] << 24); 3764 addr_high = addr[4] | (addr[5] << 8); 3765 3766 /* 3767 * Either find the mac_id in rar or find the first empty space. 3768 * rar_highwater points to just after the highest currently used 3769 * rar in order to shorten the search. It grows when we add a new 3770 * rar to the top. 3771 */ 3772 for (rar = 0; rar < hw->mac.rar_highwater; rar++) { 3773 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); 3774 3775 if (((IXGBE_RAH_AV & rar_high) == 0) 3776 && first_empty_rar == NO_EMPTY_RAR_FOUND) { 3777 first_empty_rar = rar; 3778 } else if ((rar_high & 0xFFFF) == addr_high) { 3779 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar)); 3780 if (rar_low == addr_low) 3781 break; /* found it already in the rars */ 3782 } 3783 } 3784 3785 if (rar < hw->mac.rar_highwater) { 3786 /* already there so just add to the pool bits */ 3787 ixgbe_set_vmdq(hw, rar, vmdq); 3788 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) { 3789 /* stick it into first empty RAR slot we found */ 3790 rar = first_empty_rar; 3791 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); 3792 } else if (rar == hw->mac.rar_highwater) { 3793 /* add it to the top of the list and inc the highwater mark */ 3794 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); 3795 hw->mac.rar_highwater++; 3796 } else if (rar >= hw->mac.num_rar_entries) { 3797 return IXGBE_ERR_INVALID_MAC_ADDR; 3798 } 3799 3800 /* 3801 * If we found rar[0], make sure the default pool bit (we use pool 0) 3802 * remains cleared to be sure default pool packets will get delivered 3803 */ 3804 if 
(rar == 0) 3805 ixgbe_clear_vmdq(hw, rar, 0); 3806 3807 return rar; 3808 } 3809 3810 /** 3811 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address 3812 * @hw: pointer to hardware struct 3813 * @rar: receive address register index to disassociate 3814 * @vmdq: VMDq pool index to remove from the rar 3815 **/ 3816 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 3817 { 3818 u32 mpsar_lo, mpsar_hi; 3819 u32 rar_entries = hw->mac.num_rar_entries; 3820 3821 DEBUGFUNC("ixgbe_clear_vmdq_generic"); 3822 3823 /* Make sure we are using a valid rar index range */ 3824 if (rar >= rar_entries) { 3825 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, 3826 "RAR index %d is out of range.\n", rar); 3827 return IXGBE_ERR_INVALID_ARGUMENT; 3828 } 3829 3830 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 3831 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 3832 3833 if (IXGBE_REMOVED(hw->hw_addr)) 3834 goto done; 3835 3836 if (!mpsar_lo && !mpsar_hi) 3837 goto done; 3838 3839 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { 3840 if (mpsar_lo) { 3841 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); 3842 mpsar_lo = 0; 3843 } 3844 if (mpsar_hi) { 3845 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); 3846 mpsar_hi = 0; 3847 } 3848 } else if (vmdq < 32) { 3849 mpsar_lo &= ~(1 << vmdq); 3850 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); 3851 } else { 3852 mpsar_hi &= ~(1 << (vmdq - 32)); 3853 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); 3854 } 3855 3856 /* was that the last pool using this rar? 
*/ 3857 if (mpsar_lo == 0 && mpsar_hi == 0 && 3858 rar != 0 && rar != hw->mac.san_mac_rar_index) 3859 hw->mac.ops.clear_rar(hw, rar); 3860 done: 3861 return IXGBE_SUCCESS; 3862 } 3863 3864 /** 3865 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address 3866 * @hw: pointer to hardware struct 3867 * @rar: receive address register index to associate with a VMDq index 3868 * @vmdq: VMDq pool index 3869 **/ 3870 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 3871 { 3872 u32 mpsar; 3873 u32 rar_entries = hw->mac.num_rar_entries; 3874 3875 DEBUGFUNC("ixgbe_set_vmdq_generic"); 3876 3877 /* Make sure we are using a valid rar index range */ 3878 if (rar >= rar_entries) { 3879 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, 3880 "RAR index %d is out of range.\n", rar); 3881 return IXGBE_ERR_INVALID_ARGUMENT; 3882 } 3883 3884 if (vmdq < 32) { 3885 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 3886 mpsar |= 1 << vmdq; 3887 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); 3888 } else { 3889 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 3890 mpsar |= 1 << (vmdq - 32); 3891 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); 3892 } 3893 return IXGBE_SUCCESS; 3894 } 3895 3896 /** 3897 * This function should only be involved in the IOV mode. 3898 * In IOV mode, Default pool is next pool after the number of 3899 * VFs advertized and not 0. 
3900 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index] 3901 * 3902 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address 3903 * @hw: pointer to hardware struct 3904 * @vmdq: VMDq pool index 3905 **/ 3906 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) 3907 { 3908 u32 rar = hw->mac.san_mac_rar_index; 3909 3910 DEBUGFUNC("ixgbe_set_vmdq_san_mac"); 3911 3912 if (vmdq < 32) { 3913 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq); 3914 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); 3915 } else { 3916 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); 3917 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32)); 3918 } 3919 3920 return IXGBE_SUCCESS; 3921 } 3922 3923 /** 3924 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array 3925 * @hw: pointer to hardware structure 3926 **/ 3927 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) 3928 { 3929 int i; 3930 3931 DEBUGFUNC("ixgbe_init_uta_tables_generic"); 3932 DEBUGOUT(" Clearing UTA\n"); 3933 3934 for (i = 0; i < 128; i++) 3935 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); 3936 3937 return IXGBE_SUCCESS; 3938 } 3939 3940 /** 3941 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot 3942 * @hw: pointer to hardware structure 3943 * @vlan: VLAN id to write to VLAN filter 3944 * @vlvf_bypass: TRUE to find vlanid only, FALSE returns first empty slot if 3945 * vlanid not found 3946 * 3947 * 3948 * return the VLVF index where this VLAN id should be placed 3949 * 3950 **/ 3951 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) 3952 { 3953 s32 regindex, first_empty_slot; 3954 u32 bits; 3955 3956 /* short cut the special case */ 3957 if (vlan == 0) 3958 return 0; 3959 3960 /* if vlvf_bypass is set we don't want to use an empty slot, we 3961 * will simply bypass the VLVF if there are no entries present in the 3962 * VLVF that contain our VLAN 3963 */ 3964 first_empty_slot = vlvf_bypass ? 
IXGBE_ERR_NO_SPACE : 0; 3965 3966 /* add VLAN enable bit for comparison */ 3967 vlan |= IXGBE_VLVF_VIEN; 3968 3969 /* Search for the vlan id in the VLVF entries. Save off the first empty 3970 * slot found along the way. 3971 * 3972 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1 3973 */ 3974 for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) { 3975 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); 3976 if (bits == vlan) 3977 return regindex; 3978 if (!first_empty_slot && !bits) 3979 first_empty_slot = regindex; 3980 } 3981 3982 /* If we are here then we didn't find the VLAN. Return first empty 3983 * slot we found during our search, else error. 3984 */ 3985 if (!first_empty_slot) 3986 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n"); 3987 3988 return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE; 3989 } 3990 3991 /** 3992 * ixgbe_set_vfta_generic - Set VLAN filter table 3993 * @hw: pointer to hardware structure 3994 * @vlan: VLAN id to write to VLAN filter 3995 * @vind: VMDq output index that maps queue to VLAN id in VLVFB 3996 * @vlan_on: boolean flag to turn on/off VLAN 3997 * @vlvf_bypass: boolean flag indicating updating default pool is okay 3998 * 3999 * Turn on/off specified VLAN in the VLAN filter table. 4000 **/ 4001 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, 4002 bool vlan_on, bool vlvf_bypass) 4003 { 4004 u32 regidx, vfta_delta, vfta; 4005 s32 ret_val; 4006 4007 DEBUGFUNC("ixgbe_set_vfta_generic"); 4008 4009 if (vlan > 4095 || vind > 63) 4010 return IXGBE_ERR_PARAM; 4011 4012 /* 4013 * this is a 2 part operation - first the VFTA, then the 4014 * VLVF and VLVFB if VT Mode is set 4015 * We don't write the VFTA until we know the VLVF part succeeded. 
4016 */ 4017 4018 /* Part 1 4019 * The VFTA is a bitstring made up of 128 32-bit registers 4020 * that enable the particular VLAN id, much like the MTA: 4021 * bits[11-5]: which register 4022 * bits[4-0]: which bit in the register 4023 */ 4024 regidx = vlan / 32; 4025 vfta_delta = (u32)1 << (vlan % 32); 4026 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx)); 4027 4028 /* 4029 * vfta_delta represents the difference between the current value 4030 * of vfta and the value we want in the register. Since the diff 4031 * is an XOR mask we can just update the vfta using an XOR 4032 */ 4033 vfta_delta &= vlan_on ? ~vfta : vfta; 4034 vfta ^= vfta_delta; 4035 4036 /* Part 2 4037 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF 4038 */ 4039 ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta, 4040 vfta, vlvf_bypass); 4041 if (ret_val != IXGBE_SUCCESS) { 4042 if (vlvf_bypass) 4043 goto vfta_update; 4044 return ret_val; 4045 } 4046 4047 vfta_update: 4048 /* Update VFTA now that we are ready for traffic */ 4049 if (vfta_delta) 4050 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); 4051 4052 return IXGBE_SUCCESS; 4053 } 4054 4055 /** 4056 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter 4057 * @hw: pointer to hardware structure 4058 * @vlan: VLAN id to write to VLAN filter 4059 * @vind: VMDq output index that maps queue to VLAN id in VLVFB 4060 * @vlan_on: boolean flag to turn on/off VLAN in VLVF 4061 * @vfta_delta: pointer to the difference between the current value of VFTA 4062 * and the desired value 4063 * @vfta: the desired value of the VFTA 4064 * @vlvf_bypass: boolean flag indicating updating default pool is okay 4065 * 4066 * Turn on/off specified bit in VLVF table. 
4067 **/ 4068 s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, 4069 bool vlan_on, u32 *vfta_delta, u32 vfta, 4070 bool vlvf_bypass) 4071 { 4072 u32 bits; 4073 s32 vlvf_index; 4074 4075 DEBUGFUNC("ixgbe_set_vlvf_generic"); 4076 4077 if (vlan > 4095 || vind > 63) 4078 return IXGBE_ERR_PARAM; 4079 4080 /* If VT Mode is set 4081 * Either vlan_on 4082 * make sure the vlan is in VLVF 4083 * set the vind bit in the matching VLVFB 4084 * Or !vlan_on 4085 * clear the pool bit and possibly the vind 4086 */ 4087 if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE)) 4088 return IXGBE_SUCCESS; 4089 4090 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass); 4091 if (vlvf_index < 0) 4092 return vlvf_index; 4093 4094 bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32)); 4095 4096 /* set the pool bit */ 4097 bits |= 1 << (vind % 32); 4098 if (vlan_on) 4099 goto vlvf_update; 4100 4101 /* clear the pool bit */ 4102 bits ^= 1 << (vind % 32); 4103 4104 if (!bits && 4105 !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) { 4106 /* Clear VFTA first, then disable VLVF. Otherwise 4107 * we run the risk of stray packets leaking into 4108 * the PF via the default pool 4109 */ 4110 if (*vfta_delta) 4111 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta); 4112 4113 /* disable VLVF and clear remaining bit from pool */ 4114 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); 4115 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0); 4116 4117 return IXGBE_SUCCESS; 4118 } 4119 4120 /* If there are still bits set in the VLVFB registers 4121 * for the VLAN ID indicated we need to see if the 4122 * caller is requesting that we clear the VFTA entry bit. 4123 * If the caller has requested that we clear the VFTA 4124 * entry bit but there are still pools/VFs using this VLAN 4125 * ID entry then ignore the request. 
We're not worried 4126 * about the case where we're turning the VFTA VLAN ID 4127 * entry bit on, only when requested to turn it off as 4128 * there may be multiple pools and/or VFs using the 4129 * VLAN ID entry. In that case we cannot clear the 4130 * VFTA bit until all pools/VFs using that VLAN ID have also 4131 * been cleared. This will be indicated by "bits" being 4132 * zero. 4133 */ 4134 *vfta_delta = 0; 4135 4136 vlvf_update: 4137 /* record pool change and enable VLAN ID if not already enabled */ 4138 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits); 4139 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan); 4140 4141 return IXGBE_SUCCESS; 4142 } 4143 4144 /** 4145 * ixgbe_clear_vfta_generic - Clear VLAN filter table 4146 * @hw: pointer to hardware structure 4147 * 4148 * Clears the VLAN filer table, and the VMDq index associated with the filter 4149 **/ 4150 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) 4151 { 4152 u32 offset; 4153 4154 DEBUGFUNC("ixgbe_clear_vfta_generic"); 4155 4156 for (offset = 0; offset < hw->mac.vft_size; offset++) 4157 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); 4158 4159 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { 4160 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); 4161 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0); 4162 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0); 4163 } 4164 4165 return IXGBE_SUCCESS; 4166 } 4167 4168 /** 4169 * ixgbe_toggle_txdctl_generic - Toggle VF's queues 4170 * @hw: pointer to hardware structure 4171 * @vf_number: VF index 4172 * 4173 * Enable and disable each queue in VF. 
 */
s32 ixgbe_toggle_txdctl_generic(struct ixgbe_hw *hw, u32 vf_number)
{
	u8 queue_count, i;
	u32 offset, reg;

	/* VF indices are limited to 0-63 */
	if (vf_number > 63)
		return IXGBE_ERR_PARAM;

	/*
	 * Determine number of queues by checking
	 * number of virtual functions
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	switch (reg & IXGBE_GCR_EXT_VT_MODE_MASK) {
	case IXGBE_GCR_EXT_VT_MODE_64:
		queue_count = 2;
		break;
	case IXGBE_GCR_EXT_VT_MODE_32:
		queue_count = 4;
		break;
	case IXGBE_GCR_EXT_VT_MODE_16:
		queue_count = 8;
		break;
	default:
		return IXGBE_ERR_CONFIG;
	}

	/* Toggle queues */
	for (i = 0; i < queue_count; ++i) {
		/* Calculate offset of current queue */
		offset = queue_count * vf_number + i;

		/* Enable queue */
		reg = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(offset));
		reg |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(offset), reg);
		IXGBE_WRITE_FLUSH(hw);

		/* Disable queue */
		reg = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(offset));
		reg &= ~IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(offset), reg);
		IXGBE_WRITE_FLUSH(hw);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
 * @hw: pointer to hardware structure
 *
 * Contains the logic to identify if we need to verify link for the
 * crosstalk fix
 **/
static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
{

	/* Does FW say we need the fix */
	if (!hw->need_crosstalk_fix)
		return FALSE;

	/* Only consider SFP+ PHYs i.e. media type fiber */
	switch (hw->mac.ops.get_media_type(hw)) {
	case ixgbe_media_type_fiber:
	case ixgbe_media_type_fiber_qsfp:
		break;
	default:
		return FALSE;
	}

	return TRUE;
}

/**
 * ixgbe_check_mac_link_generic - Determine link and speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: TRUE when link is up
 * @link_up_wait_to_complete: bool used to wait for link up or not
 *
 * Reads the links register to determine if link is up and the current speed
 **/
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
				 bool *link_up, bool link_up_wait_to_complete)
{
	u32 links_reg, links_orig;
	u32 i;

	DEBUGFUNC("ixgbe_check_mac_link_generic");

	/* If Crosstalk fix enabled do the sanity check of making sure
	 * the SFP+ cage is full.
	 */
	if (ixgbe_need_crosstalk_fix(hw)) {
		if ((hw->mac.type != ixgbe_mac_82598EB) &&
		    !ixgbe_sfp_cage_full(hw)) {
			*link_up = FALSE;
			*speed = IXGBE_LINK_SPEED_UNKNOWN;
			return IXGBE_SUCCESS;
		}
	}

	/* clear the old state */
	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);

	if (links_orig != links_reg) {
		DEBUGOUT2("LINKS changed from %08X to %08X\n",
			  links_orig, links_reg);
	}

	if (link_up_wait_to_complete) {
		/* poll in 100 ms steps up to mac.max_link_up_time */
		for (i = 0; i < hw->mac.max_link_up_time; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}

	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		/* X550+ reuses the 10G encoding with NON_STD for 2.5G */
		if (hw->mac.type >= ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		/* X550+ reuses the 100M encoding with NON_STD for 5G */
		if (hw->mac.type >= ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_10_X550EM_A:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
			*speed = IXGBE_LINK_SPEED_10_FULL;
		break;
	default:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
 * the EEPROM
 * @hw: pointer to hardware structure
 * @wwnn_prefix: the alternative WWNN prefix
 * @wwpn_prefix: the alternative WWPN prefix
 *
 * This function will read the EEPROM from the alternative SAN MAC address
 * block to check the support for the alternative WWNN/WWPN prefix support.
 **/
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
				 u16 *wwpn_prefix)
{
	u16 offset, caps;
	u16 alt_san_mac_blk_offset;

	DEBUGFUNC("ixgbe_get_wwn_prefix_generic");

	/* clear output first */
	*wwnn_prefix = 0xFFFF;
	*wwpn_prefix = 0xFFFF;

	/* check if alternative SAN MAC is supported */
	offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
	if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
		goto wwn_prefix_err;

	if ((alt_san_mac_blk_offset == 0) ||
	    (alt_san_mac_blk_offset == 0xFFFF))
		goto wwn_prefix_out;

	/* check capability in alternative san mac address block */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, &caps))
		goto wwn_prefix_err;
	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
		goto wwn_prefix_out;

	/* get the corresponding prefix for WWNN/WWPN */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
	/* a WWNN read failure is logged but does not abort the WWPN read */
	if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			      "eeprom read at offset %d failed", offset);
	}

	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
		goto wwn_prefix_err;

wwn_prefix_out:
	return IXGBE_SUCCESS;

wwn_prefix_err:
	/* NOTE(review): read failures still return IXGBE_SUCCESS; callers
	 * detect failure via the 0xFFFF sentinel prefixes set above.
	 */
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", offset);
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
 * @hw: pointer to hardware structure
 * @bs: the fcoe boot status
 *
 * This function will read the FCOE boot status from the iSCSI FCOE block
 **/
s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
{
	u16 offset, caps, flags;
	s32 status;

	DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");

	/* clear output first */
	*bs = ixgbe_fcoe_bootstatus_unavailable;

	/* check if FCOE IBA block is present */
	offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
	status = hw->eeprom.ops.read(hw, offset, &caps);
	if (status != IXGBE_SUCCESS)
		goto out;

	if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
		goto out;

	/* check if iSCSI FCOE block is populated */
	status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
	if (status != IXGBE_SUCCESS)
		goto out;

	/* 0 and 0xFFFF both mean "no block present" */
	if ((offset == 0) || (offset == 0xFFFF))
		goto out;

	/* read fcoe flags in iSCSI FCOE block */
	offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
	status = hw->eeprom.ops.read(hw, offset, &flags);
	if (status != IXGBE_SUCCESS)
		goto out;

	if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
		*bs = ixgbe_fcoe_bootstatus_enabled;
	else
		*bs = ixgbe_fcoe_bootstatus_disabled;

out:
	return status;
}

/**
 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for MAC anti-spoofing
 * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
 *
 **/
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
{
	/* 8 VFs per PFVFSPOOF register: vf >> 3 selects the register,
	 * vf % 8 the bit within it.
	 */
	int vf_target_reg = vf >> 3;
	int vf_target_shift = vf % 8;
	u32 pfvfspoof;

	/* 82598 has no anti-spoofing hardware */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
	if (enable)
		pfvfspoof |= (1 << vf_target_shift);
	else
		pfvfspoof &= ~(1 << vf_target_shift);
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}

/**
 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for VLAN
 * anti-spoofing
 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
 *
 **/
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
{
	/* 8 VFs per PFVFSPOOF register; VLAN bits live above the MAC bits */
	int vf_target_reg = vf >> 3;
	int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
	u32 pfvfspoof;

	/* 82598 has no anti-spoofing hardware */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
	if (enable)
		pfvfspoof |= (1 << vf_target_shift);
	else
		pfvfspoof &= ~(1 << vf_target_shift);
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}

/**
 * ixgbe_get_device_caps_generic - Get additional device capabilities
 * @hw: pointer to hardware structure
 * @device_caps: the EEPROM word with the extra device capabilities
 *
 * This function will read the EEPROM location for the device capabilities,
 * and return the word through device_caps.
 **/
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
{
	DEBUGFUNC("ixgbe_get_device_caps_generic");

	/* NOTE(review): the EEPROM read result is ignored here; on failure
	 * *device_caps may be left unset — confirm callers tolerate this.
	 */
	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
 * @hw: pointer to hardware structure
 *
 **/
void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
{
	u32 regval;
	u32 i;

	DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");

	/* Enable relaxed ordering */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
	}

	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

}

/**
 * ixgbe_calculate_checksum - Calculate checksum for buffer
 * @buffer: pointer to EEPROM
 * @length: size of EEPROM to calculate a checksum for
 *
 * Calculates the checksum for some buffer on a specified length.  The
 * checksum calculated is returned (two's complement of the byte sum,
 * so that the sum of all bytes plus the checksum is zero mod 256).
 **/
u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
{
	u32 i;
	u8 sum = 0;

	DEBUGFUNC("ixgbe_calculate_checksum");

	if (!buffer)
		return 0;

	for (i = 0; i < length; i++)
		sum += buffer[i];

	return (u8) (0 - sum);
}

/**
 * ixgbe_hic_unlocked - Issue command to manageability block unlocked
 * @hw: pointer to the HW structure
 * @buffer: command to write and where the return status will be placed
 * @length: length of buffer, must be multiple of 4 bytes
 * @timeout: time in ms to wait for command completion
 *
 * Communicates with the manageability block. On success return IXGBE_SUCCESS
 * else returns semaphore error when encountering an error acquiring
 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
 *
 * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
 * by the caller.
 **/
s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
		       u32 timeout)
{
	u32 hicr, i, fwsts;
	u16 dword_len;

	DEBUGFUNC("ixgbe_hic_unlocked");

	if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Set bit 9 of FWSTS clearing FW reset indication */
	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);

	/* Check that the host interface is enabled. */
	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
	if (!(hicr & IXGBE_HICR_EN)) {
		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Calculate length in DWORDs. We must be DWORD aligned */
	/* NOTE(review): this DEBUGOUT lacks a trailing \n unlike the others */
	if (length % sizeof(u32)) {
		DEBUGOUT("Buffer length failure, not aligned to dword");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	dword_len = length >> 2;

	/* The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
				      i, IXGBE_CPU_TO_LE32(buffer[i]));

	/* Setting this bit tells the ARC that a new command is pending. */
	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);

	/* Poll (1 ms steps) for the ARC to clear the command bit */
	for (i = 0; i < timeout; i++) {
		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
		if (!(hicr & IXGBE_HICR_C))
			break;
		msec_delay(1);
	}

	/* For each command except "Apply Update" perform
	 * status checks in the HICR registry.
	 */
	if ((buffer[0] & IXGBE_HOST_INTERFACE_MASK_CMD) ==
	    IXGBE_HOST_INTERFACE_APPLY_UPDATE_CMD)
		return IXGBE_SUCCESS;

	/* Check command completion */
	if ((timeout && i == timeout) ||
	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
			      "Command has failed with no status valid.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_host_interface_command - Issue command to manageability block
 * @hw: pointer to the HW structure
 * @buffer: contains the command to write and where the return status will
 *  be placed
 * @length: length of buffer, must be multiple of 4 bytes
 * @timeout: time in ms to wait for command completion
 * @return_data: read and return data from the buffer (TRUE) or not (FALSE)
 *  Needed because FW structures are big endian and decoding of
 *  these fields can be 8 bit or 16 bit based on command. Decoding
 *  is not easily understood without making a table of commands.
 *  So we will leave this up to the caller to read back the data
 *  in these cases.
 *
 * Communicates with the manageability block. On success return IXGBE_SUCCESS
 * else returns semaphore error when encountering an error acquiring
 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
 **/
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
				 u32 length, u32 timeout, bool return_data)
{
	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
	struct ixgbe_hic_hdr *resp = (struct ixgbe_hic_hdr *)buffer;
	u16 buf_len;
	s32 status;
	u32 bi;
	u32 dword_len;

	DEBUGFUNC("ixgbe_host_interface_command");

	if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Take management host interface semaphore */
	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
	if (status)
		return status;

	status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
	if (status)
		goto rel_out;

	if (!return_data)
		goto rel_out;

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS(&buffer[bi]);
	}

	/*
	 * If there is any thing in data position pull it in
	 * Read Flash command requires reading buffer length from
	 * two bytes instead of one byte
	 */
	if (resp->cmd == 0x30 || resp->cmd == 0x31) {
		/* pull two extra dwords holding the 12-bit buffer length */
		for (; bi < dword_len + 2; bi++) {
			buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
							  bi);
			IXGBE_LE32_TO_CPUS(&buffer[bi]);
		}
		buf_len = (((u16)(resp->cmd_or_resp.ret_status) << 3)
			   & 0xF00) | resp->buf_len;
		hdr_size += (2 << 2);
	} else {
		buf_len = resp->buf_len;
	}
	if (!buf_len)
		goto rel_out;

	if (length < buf_len + hdr_size) {
		DEBUGOUT("Buffer not large enough for reply message.\n");
		status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto rel_out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off) */
	for (; bi <= dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS(&buffer[bi]);
	}

rel_out:
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);

	return status;
}

/**
 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
 * @hw: pointer to the HW structure
 * @maj: driver version major number
 * @minr: driver version minor number
 * @build: driver version build number
 * @sub: driver version sub build number
 * @len: unused
 * @driver_ver: unused
 *
 * Sends driver version number to firmware through the manageability
 * block.  On success return IXGBE_SUCCESS
 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
 **/
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 minr,
				 u8 build, u8 sub, u16 len,
				 const char *driver_ver)
{
	struct ixgbe_hic_drv_info fw_cmd;
	int i;
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
	UNREFERENCED_2PARAMETER(len, driver_ver);

	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
	fw_cmd.port_num = (u8)hw->bus.func;
	fw_cmd.ver_maj = maj;
	fw_cmd.ver_min = minr;
	fw_cmd.ver_build = build;
	fw_cmd.ver_sub = sub;
	/* checksum is computed over the struct with the checksum field zero */
	fw_cmd.hdr.checksum = 0;
	fw_cmd.pad = 0;
	fw_cmd.pad2 = 0;
	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));

	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
		ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
						       sizeof(fw_cmd),
						       IXGBE_HI_COMMAND_TIMEOUT,
						       TRUE);
		if (ret_val != IXGBE_SUCCESS)
			continue;

		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
		    FW_CEM_RESP_STATUS_SUCCESS)
			ret_val = IXGBE_SUCCESS;
		else
			ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;

		break;
	}

	return ret_val;
}

/**
 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
 * @hw: pointer to hardware structure
 * @num_pb: number of packet buffers to allocate
 * @headroom: reserve n KB of headroom
 * @strategy: packet buffer allocation strategy
 **/
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
			     int strategy)
{
	u32 pbsize = hw->mac.rx_pb_size;
	int i = 0;
	u32 rxpktsize, txpktsize, txpbthresh;

	/* Reserve headroom */
	pbsize -= headroom;

	if (!num_pb)
		num_pb = 1;

	/* Divide remaining packet buffer space amongst the number of packet
	 * buffers requested using supplied strategy.
	 */
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
		/* ixgbe_dcb_pba_80_48 strategy weight first half of packet
		 * buffer with 5/8 of the packet buffer space.
		 */
		rxpktsize = (pbsize * 5) / (num_pb * 4);
		pbsize -= rxpktsize * (num_pb / 2);
		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
		for (; i < (num_pb / 2); i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* fall through - configure remaining packet buffers */
	case PBA_STRATEGY_EQUAL:
		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
		for (; i < num_pb; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	default:
		break;
	}

	/* Only support an equally distributed Tx packet buffer strategy. */
	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
	for (i = 0; i < num_pb; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
	}

	/* Clear unused TCs, if any, to zero buffer size*/
	for (; i < IXGBE_MAX_PB; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
	}
}

/**
 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
 * @hw: pointer to the hardware structure
 *
 * The 82599 and x540 MACs can experience issues if TX work is still pending
 * when a reset occurs.  This function prevents this by flushing the PCIe
 * buffers on the system.
 **/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
	u32 gcr_ext, hlreg0, i, poll;
	u16 value;

	/*
	 * If double reset is not requested then all transactions should
	 * already be clear and as such there is no work to do
	 */
	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
		return;

	/*
	 * Set loopback enable to prevent any transmits from being sent
	 * should the link come up.  This assumes that the RXCTRL.RXEN bit
	 * has already been cleared.
	 */
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);

	/* Wait for a last completion before clearing buffers */
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(3);

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
		/* bail out if the device has been surprise-removed */
		if (IXGBE_REMOVED(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

out:
	/* initiate cleaning flow for buffers in the PCIe transaction layer */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);

	/* Flush all writes and allow 20usec for all transactions to clear */
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(20);

	/* restore previous register values */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
}

/**
 * ixgbe_bypass_rw_generic - Bit bang data into by_pass FW
 *
 * @hw: pointer to hardware structure
 * @cmd: Command we send to the FW
 * @status: The reply from the FW
 *
 * Bit-bangs the cmd to the by_pass FW status points to what is returned.
 **/
#define IXGBE_BYPASS_BB_WAIT 1
s32 ixgbe_bypass_rw_generic(struct ixgbe_hw *hw, u32 cmd, u32 *status)
{
	int i;
	u32 sck, sdi, sdo, dir_sck, dir_sdi, dir_sdo;
	u32 esdp;

	if (!status)
		return IXGBE_ERR_PARAM;

	*status = 0;

	/* SDP vary by MAC type */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		sck = IXGBE_ESDP_SDP7;
		sdi = IXGBE_ESDP_SDP0;
		sdo = IXGBE_ESDP_SDP6;
		dir_sck = IXGBE_ESDP_SDP7_DIR;
		dir_sdi = IXGBE_ESDP_SDP0_DIR;
		dir_sdo = IXGBE_ESDP_SDP6_DIR;
		break;
	case ixgbe_mac_X540:
		sck = IXGBE_ESDP_SDP2;
		sdi = IXGBE_ESDP_SDP0;
		sdo = IXGBE_ESDP_SDP1;
		dir_sck = IXGBE_ESDP_SDP2_DIR;
		dir_sdi = IXGBE_ESDP_SDP0_DIR;
		dir_sdo = IXGBE_ESDP_SDP1_DIR;
		break;
	default:
		return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
	}

	/* Set SDP pins direction */
	esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
	esdp |= dir_sck;	/* SCK as output */
	esdp |= dir_sdi;	/* SDI as output */
	esdp &= ~dir_sdo;	/* SDO as input */
	esdp |= sck;
	esdp |= sdi;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(IXGBE_BYPASS_BB_WAIT);

	/* Generate start condition */
	esdp &= ~sdi;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(IXGBE_BYPASS_BB_WAIT);

	esdp &= ~sck;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(IXGBE_BYPASS_BB_WAIT);

	/* Clock out the new control word and clock in the status */
	for (i = 0; i < 32; i++) {
		/* present the next cmd bit (MSB first) on SDI */
		if ((cmd >> (31 - i)) & 0x01) {
			esdp |= sdi;
			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		} else {
			esdp &= ~sdi;
			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		}
		IXGBE_WRITE_FLUSH(hw);
		msec_delay(IXGBE_BYPASS_BB_WAIT);

		/* pulse the clock high then low */
		esdp |= sck;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);
		msec_delay(IXGBE_BYPASS_BB_WAIT);

		esdp &= ~sck;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);
		msec_delay(IXGBE_BYPASS_BB_WAIT);

		/* sample the reply bit on SDO */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & sdo)
			*status = (*status << 1) | 0x01;
		else
			*status = (*status << 1) | 0x00;
		msec_delay(IXGBE_BYPASS_BB_WAIT);
	}

	/* stop condition */
	esdp |= sck;
	esdp &= ~sdi;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(IXGBE_BYPASS_BB_WAIT);

	esdp |= sdi;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);

	/* set the page bits to match the cmd that the status it belongs to */
	*status = (*status & 0x3fffffff) | (cmd & 0xc0000000);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_bypass_valid_rd_generic - Verify valid return from bit-bang.
 *
 * If we send a write we can't be sure it took until we can read back
 * that same register.  It can be a problem as some of the fields may
 * for valid reasons change between the time we wrote the register and
 * we read it again to verify.  So this function checks everything we
 * can check and then assumes it worked.
 *
 * @u32 in_reg - The register cmd for the bit-bang read.
 * @u32 out_reg - The register returned from a bit-bang read.
5041 **/ 5042 bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg) 5043 { 5044 u32 mask; 5045 5046 /* Page must match for all control pages */ 5047 if ((in_reg & BYPASS_PAGE_M) != (out_reg & BYPASS_PAGE_M)) 5048 return FALSE; 5049 5050 switch (in_reg & BYPASS_PAGE_M) { 5051 case BYPASS_PAGE_CTL0: 5052 /* All the following can't change since the last write 5053 * - All the event actions 5054 * - The timeout value 5055 */ 5056 mask = BYPASS_AUX_ON_M | BYPASS_MAIN_ON_M | 5057 BYPASS_MAIN_OFF_M | BYPASS_AUX_OFF_M | 5058 BYPASS_WDTIMEOUT_M | 5059 BYPASS_WDT_VALUE_M; 5060 if ((out_reg & mask) != (in_reg & mask)) 5061 return FALSE; 5062 5063 /* 0x0 is never a valid value for bypass status */ 5064 if (!(out_reg & BYPASS_STATUS_OFF_M)) 5065 return FALSE; 5066 break; 5067 case BYPASS_PAGE_CTL1: 5068 /* All the following can't change since the last write 5069 * - time valid bit 5070 * - time we last sent 5071 */ 5072 mask = BYPASS_CTL1_VALID_M | BYPASS_CTL1_TIME_M; 5073 if ((out_reg & mask) != (in_reg & mask)) 5074 return FALSE; 5075 break; 5076 case BYPASS_PAGE_CTL2: 5077 /* All we can check in this page is control number 5078 * which is already done above. 5079 */ 5080 break; 5081 } 5082 5083 /* We are as sure as we can be return TRUE */ 5084 return TRUE; 5085 } 5086 5087 /** 5088 * ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Regiter. 5089 * 5090 * @hw: pointer to hardware structure 5091 * @cmd: The control word we are setting. 5092 * @event: The event we are setting in the FW. This also happens to 5093 * be the mask for the event we are setting (handy) 5094 * @action: The action we set the event to in the FW. 
 * This is in a bit field that happens to be what we want to put in
 * the event spot (also handy)
 **/
s32 ixgbe_bypass_set_generic(struct ixgbe_hw *hw, u32 ctrl, u32 event,
			     u32 action)
{
	u32 by_ctl = 0;
	u32 cmd, verify;
	u32 count = 0;

	/* Get current values */
	cmd = ctrl;	/* just reading only need control number */
	if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
		return IXGBE_ERR_INVALID_ARGUMENT;

	/* Set to new action */
	cmd = (by_ctl & ~event) | BYPASS_WE | action;
	if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
		return IXGBE_ERR_INVALID_ARGUMENT;

	/* Page 0 force a FW eeprom write which is slow so verify */
	if ((cmd & BYPASS_PAGE_M) == BYPASS_PAGE_CTL0) {
		verify = BYPASS_PAGE_CTL0;
		/* re-read until the write is visible, giving up after 5 tries */
		do {
			if (count++ > 5)
				return IXGBE_BYPASS_FW_WRITE_FAILURE;

			if (ixgbe_bypass_rw_generic(hw, verify, &by_ctl))
				return IXGBE_ERR_INVALID_ARGUMENT;
		} while (!ixgbe_bypass_valid_rd_generic(cmd, by_ctl));
	} else {
		/* We have give the FW time for the write to stick */
		msec_delay(100);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_bypass_rd_eep_generic - Read the bypass FW eeprom address.
 *
 * @hw: pointer to hardware structure
 * @addr: The bypass eeprom address to read.
 * @value: The 8b of data at the address above.
 **/
s32 ixgbe_bypass_rd_eep_generic(struct ixgbe_hw *hw, u32 addr, u8 *value)
{
	u32 cmd;
	u32 status;


	/* send the request */
	cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
	cmd |= (addr << BYPASS_CTL2_OFFSET_SHIFT) & BYPASS_CTL2_OFFSET_M;
	if (ixgbe_bypass_rw_generic(hw, cmd, &status))
		return IXGBE_ERR_INVALID_ARGUMENT;

	/* We have give the FW time for the write to stick */
	msec_delay(100);

	/* now read the results */
	cmd &= ~BYPASS_WE;
	if (ixgbe_bypass_rw_generic(hw, cmd, &status))
		return IXGBE_ERR_INVALID_ARGUMENT;

	*value = status & BYPASS_CTL2_DATA_M;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_get_orom_version - Return option ROM from EEPROM
 *
 * @hw: pointer to hardware structure
 * @nvm_ver: pointer to output structure
 *
 * if valid option ROM version, nvm_ver->or_valid set to TRUE
 * else nvm_ver->or_valid is FALSE.
 **/
void ixgbe_get_orom_version(struct ixgbe_hw *hw,
			    struct ixgbe_nvm_version *nvm_ver)
{
	u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;

	nvm_ver->or_valid = FALSE;
	/* Option Rom may or may not be present.  Start with pointer */
	hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);

	/* make sure offset is valid */
	if ((offset == 0x0) || (offset == NVM_INVALID_PTR))
		return;

	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);

	/* option rom exists and is valid */
	if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
	    eeprom_cfg_blkl == NVM_VER_INVALID ||
	    eeprom_cfg_blkh == NVM_VER_INVALID)
		return;

	nvm_ver->or_valid = TRUE;
	nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
	nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
			    (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
	nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
}

/**
 * ixgbe_get_oem_prod_version - Return OEM Product version
 *
 * @hw: pointer to hardware structure
 * @nvm_ver: pointer to output structure
 *
 * if valid OEM product version, nvm_ver->oem_valid set to TRUE
 * else nvm_ver->oem_valid is FALSE.
 **/
void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
				struct ixgbe_nvm_version *nvm_ver)
{
	u16 rel_num, prod_ver, mod_len, cap, offset;

	nvm_ver->oem_valid = FALSE;
	/* NOTE(review): EEPROM read errors are not checked in this
	 * function; invalid/all-ones results are rejected by the
	 * validity tests below.
	 */
	hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);

	/* Return if offset to OEM Product Version block is invalid */
	if (offset == 0x0 || offset == NVM_INVALID_PTR)
		return;

	/* Read product version block header (length and capabilities) */
	hw->eeprom.ops.read(hw, offset, &mod_len);
	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);

	/* Return if OEM product version block is invalid */
	if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
	    (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
		return;

	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);

	/* Return if version is invalid */
	if ((rel_num | prod_ver) == 0x0 ||
	    rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
		return;

	nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
	nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
	nvm_ver->oem_release = rel_num;
	nvm_ver->oem_valid = TRUE;
}

/**
 * ixgbe_get_etk_id - Return Etrack ID from EEPROM
 *
 * @hw: pointer to hardware structure
 * @nvm_ver: pointer to output structure
 *
 * Reads the two Etrack ID words and combines them into nvm_ver->etk_id.
 * Word read errors substitute NVM_VER_INVALID (0xFFFF) for that word.
 **/
void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver)
{
	u16 etk_id_l, etk_id_h;

	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
		etk_id_l = NVM_VER_INVALID;
	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
		etk_id_h = NVM_VER_INVALID;

	/* The word order for the version format is determined by high order
	 * word bit 15 (NVM_ETK_VALID).
	 */
	if ((etk_id_h & NVM_ETK_VALID) == 0) {
		nvm_ver->etk_id = etk_id_h;
		nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
	} else {
		nvm_ver->etk_id = etk_id_l;
		nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
	}
}


/**
 * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
 * @hw: pointer to hardware structure
 * @map: pointer to u8 arr for returning map
 *
 * Read the RTRUP2TC HW register and resolve its content into map:
 * one traffic-class entry per user priority.
 **/
void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
{
	u32 reg, i;

	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
		map[i] = IXGBE_RTRUP2TC_UP_MASK &
			(reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
	return;
}

/**
 * ixgbe_disable_rx_generic - Disable the RX data path
 * @hw: pointer to hardware structure
 *
 * Clears RXCTRL.RXEN if it is set.  On MACs other than 82598, the VT
 * loopback enable bit (PFDTXGSWC.VT_LBEN) is cleared first and its
 * previous state is remembered in hw->mac.set_lben so that
 * ixgbe_enable_rx_generic() can restore it.
 **/
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
{
	u32 pfdtxgswc;
	u32 rxctrl;

	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (rxctrl & IXGBE_RXCTRL_RXEN) {
		if (hw->mac.type != ixgbe_mac_82598EB) {
			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
			if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
				/* Disable loopback before stopping RX and
				 * remember that we turned it off.
				 */
				pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
				IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
				hw->mac.set_lben = TRUE;
			} else {
				hw->mac.set_lben = FALSE;
			}
		}
		rxctrl &= ~IXGBE_RXCTRL_RXEN;
		IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
	}
}

/**
 * ixgbe_enable_rx_generic - Enable the RX data path
 * @hw: pointer to hardware structure
 *
 * Sets RXCTRL.RXEN.  On MACs other than 82598, restores the VT loopback
 * enable bit if ixgbe_disable_rx_generic() had cleared it.
 **/
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
{
	u32 pfdtxgswc;
	u32 rxctrl;

	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));

	if (hw->mac.type != ixgbe_mac_82598EB) {
		if (hw->mac.set_lben) {
			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
			pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
			hw->mac.set_lben = FALSE;
		}
	}
}

/**
 * ixgbe_mng_present - returns
TRUE when management capability is present 5337 * @hw: pointer to hardware structure 5338 */ 5339 bool ixgbe_mng_present(struct ixgbe_hw *hw) 5340 { 5341 u32 fwsm; 5342 5343 if (hw->mac.type < ixgbe_mac_82599EB) 5344 return FALSE; 5345 5346 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)); 5347 return !!(fwsm & IXGBE_FWSM_FW_MODE_PT); 5348 } 5349 5350 /** 5351 * ixgbe_mng_enabled - Is the manageability engine enabled? 5352 * @hw: pointer to hardware structure 5353 * 5354 * Returns TRUE if the manageability engine is enabled. 5355 **/ 5356 bool ixgbe_mng_enabled(struct ixgbe_hw *hw) 5357 { 5358 u32 fwsm, manc, factps; 5359 5360 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)); 5361 if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) 5362 return FALSE; 5363 5364 manc = IXGBE_READ_REG(hw, IXGBE_MANC); 5365 if (!(manc & IXGBE_MANC_RCV_TCO_EN)) 5366 return FALSE; 5367 5368 if (hw->mac.type <= ixgbe_mac_X540) { 5369 factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw)); 5370 if (factps & IXGBE_FACTPS_MNGCG) 5371 return FALSE; 5372 } 5373 5374 return TRUE; 5375 } 5376 5377 /** 5378 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed 5379 * @hw: pointer to hardware structure 5380 * @speed: new link speed 5381 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed 5382 * 5383 * Set the link speed in the MAC and/or PHY register and restarts link. 
 **/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					  ixgbe_link_speed speed,
					  bool autoneg_wait_to_complete)
{
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 status = IXGBE_SUCCESS;
	u32 speedcnt = 0;
	u32 i = 0;
	bool autoneg, link_up = FALSE;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
	if (status != IXGBE_SUCCESS)
		return status;

	speed &= link_speed;

	/* Try each speed one by one, highest priority first.  We do this in
	 * software because 10Gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber_fixed:
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_10GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects MAC link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_10GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted.  82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
						  &link_up, FALSE);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		/* 1G is only "highest" if 10G was not attempted above */
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber_fixed:
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_1GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_1GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	if (speed == 0) {
		/* Disable the Tx laser for media none */
		ixgbe_disable_tx_laser(hw);

		goto out;
	}

	/* We didn't get link.  Configure back to the highest speed we tried
	 * (if there was more than one) by calling ourselves recursively with
	 * just the single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
						       highest_link_speed,
						       autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}

/**
 * ixgbe_set_soft_rate_select_speed - Set module link speed
 * @hw: pointer to hardware structure
 * @speed: link speed to set
 *
 * Set module link speed via the soft rate select bytes (both RS0 and
 * RS1) in the SFF-8472 module EEPROM.  Errors are logged and abort the
 * sequence; no status is returned to the caller.
 */
void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed)
{
	s32 status;
	u8 rs, eeprom_data;

	switch (speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		/* one bit mask same as setting on */
		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
		break;
	default:
		DEBUGOUT("Invalid fixed module speed\n");
		return;
	}

	/* Set RS0: read-modify-write of the optional status/control byte */
	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
					   IXGBE_I2C_EEPROM_DEV_ADDR2,
					   &eeprom_data);
	if (status) {
		DEBUGOUT("Failed to read Rx Rate Select RS0\n");
		goto out;
	}

	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;

	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
					    IXGBE_I2C_EEPROM_DEV_ADDR2,
					    eeprom_data);
	if (status) {
		DEBUGOUT("Failed to write Rx Rate Select RS0\n");
		goto out;
	}

	/* Set RS1: same rate bits in the extended status/control byte */
	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
					   IXGBE_I2C_EEPROM_DEV_ADDR2,
					   &eeprom_data);
	if (status) {
		DEBUGOUT("Failed to read Rx Rate Select RS1\n");
		goto out;
	}

	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;

	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
					    IXGBE_I2C_EEPROM_DEV_ADDR2,
					    eeprom_data);
	if (status) {
		DEBUGOUT("Failed to write Rx Rate Select RS1\n");
		goto out;
	}
out:
	return;
}