1 /****************************************************************************** 2 3 Copyright (c) 2001-2015, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 
31 32 ******************************************************************************/ 33 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_common.c 299200 2016-05-06 22:54:56Z pfg $*/ 34 /*$NetBSD: ixgbe_common.c,v 1.13 2017/05/26 08:36:42 msaitoh Exp $*/ 35 36 #include "ixgbe_common.h" 37 #include "ixgbe_phy.h" 38 #include "ixgbe_dcb.h" 39 #include "ixgbe_dcb_82599.h" 40 #include "ixgbe_api.h" 41 42 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); 43 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); 44 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); 45 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); 46 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); 47 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, 48 u16 count); 49 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); 50 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 51 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 52 static void ixgbe_release_eeprom(struct ixgbe_hw *hw); 53 54 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); 55 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, 56 u16 *san_mac_offset); 57 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 58 u16 words, u16 *data); 59 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 60 u16 words, u16 *data); 61 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, 62 u16 offset); 63 64 /** 65 * ixgbe_init_ops_generic - Inits function ptrs 66 * @hw: pointer to the hardware structure 67 * 68 * Initialize the function pointers. 
 *
 * Entries left NULL have no generic implementation; NOTE(review): these
 * appear to be filled in by the MAC-family-specific init paths -- confirm.
 **/
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	DEBUGFUNC("ixgbe_init_ops_generic");

	/* EEPROM */
	eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
	if (eec & IXGBE_EEC_PRES) {
		eeprom->ops.read = ixgbe_read_eerd_generic;
		eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
	} else {
		eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
		eeprom->ops.read_buffer =
				 ixgbe_read_eeprom_buffer_bit_bang_generic;
	}
	eeprom->ops.write = ixgbe_write_eeprom_generic;
	eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
	eeprom->ops.validate_checksum =
				      ixgbe_validate_eeprom_checksum_generic;
	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;

	/* MAC */
	mac->ops.init_hw = ixgbe_init_hw_generic;
	mac->ops.reset_hw = NULL;
	mac->ops.start_hw = ixgbe_start_hw_generic;
	mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
	mac->ops.get_media_type = NULL;
	mac->ops.get_supported_physical_layer = NULL;
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
	mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
	mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
	mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
	mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
	mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
	mac->ops.prot_autoc_read = prot_autoc_read_generic;
	mac->ops.prot_autoc_write = prot_autoc_write_generic;

	/* LEDs */
	mac->ops.led_on = ixgbe_led_on_generic;
	mac->ops.led_off = ixgbe_led_off_generic;
	mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
	mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;

	/* RAR, Multicast, VLAN */
	mac->ops.set_rar = ixgbe_set_rar_generic;
	mac->ops.clear_rar = ixgbe_clear_rar_generic;
	mac->ops.insert_mac_addr = NULL;
	mac->ops.set_vmdq = NULL;
	mac->ops.clear_vmdq = NULL;
	mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
	mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
	mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
	mac->ops.enable_mc = ixgbe_enable_mc_generic;
	mac->ops.disable_mc = ixgbe_disable_mc_generic;
	mac->ops.clear_vfta = NULL;
	mac->ops.set_vfta = NULL;
	mac->ops.set_vlvf = NULL;
	mac->ops.init_uta_tables = NULL;
	mac->ops.enable_rx = ixgbe_enable_rx_generic;
	mac->ops.disable_rx = ixgbe_disable_rx_generic;

	/* Flow Control */
	mac->ops.fc_enable = ixgbe_fc_enable_generic;
	mac->ops.setup_fc = ixgbe_setup_fc_generic;

	/* Link */
	mac->ops.get_link_capabilities = NULL;
	mac->ops.setup_link = NULL;
	mac->ops.check_link = NULL;
	mac->ops.dmac_config = NULL;
	mac->ops.dmac_update_tcs = NULL;
	mac->ops.dmac_config_tcs = NULL;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
 * of flow control
 * @hw: pointer to hardware structure
 *
 * This function returns TRUE if the device supports flow control
 * autonegotiation, and FALSE if it does not.
 *
 **/
bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
{
	bool supported = FALSE;
	ixgbe_link_speed speed;
	bool link_up;

	DEBUGFUNC("ixgbe_device_supports_autoneg_fc");

	switch (hw->phy.media_type) {
	case ixgbe_media_type_fiber_fixed:
	case ixgbe_media_type_fiber_qsfp:
	case ixgbe_media_type_fiber:
		hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
		/* if link is down, assume supported */
		if (link_up)
			/* fiber supports fc autoneg only at 1G */
			supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
				TRUE : FALSE;
		else
			supported = TRUE;
		break;
	case ixgbe_media_type_backplane:
		supported = TRUE;
		break;
	case ixgbe_media_type_copper:
		/* only some copper devices support flow control autoneg */
		switch (hw->device_id) {
		case IXGBE_DEV_ID_82599_T3_LOM:
		case IXGBE_DEV_ID_X540T:
		case IXGBE_DEV_ID_X540T1:
		case IXGBE_DEV_ID_X540_BYPASS:
		case IXGBE_DEV_ID_X550T:
		case IXGBE_DEV_ID_X550T1:
		case IXGBE_DEV_ID_X550EM_X_10G_T:
			supported = TRUE;
			break;
		default:
			supported = FALSE;
		}
		/* FALLTHROUGH -- intentionally falls into the no-op default */
	default:
		break;
	}

	if (!supported) {
		ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
			"Device %x does not support flow control autoneg",
			hw->device_id);
	}

	return supported;
}

/**
 * ixgbe_setup_fc_generic - Set up flow control
 * @hw: pointer to hardware structure
 *
 * Called at init time to set up flow control.
 **/
s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	/* reg: PCS1GANA (1G advertisement); reg_bp: AUTOC (backplane);
	 * reg_cu: PHY MII auto-neg advertisement word (copper) */
	u32 reg = 0, reg_bp = 0;
	u16 reg_cu = 0;
	bool locked = FALSE;

	DEBUGFUNC("ixgbe_setup_fc_generic");

	/* Validate the requested mode */
	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
			   "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/*
	 * 10gig parts do not have a word in the EEPROM to determine the
	 * default flow control setting, so we explicitly set it to full.
	 */
	if (hw->fc.requested_mode == ixgbe_fc_default)
		hw->fc.requested_mode = ixgbe_fc_full;

	/*
	 * Set up the 1G and 10G flow control advertisement registers so the
	 * HW will be able to do fc autoneg once the cable is plugged in.  If
	 * we link at 10G, the 1G advertisement is harmless and vice versa.
	 */
	switch (hw->phy.media_type) {
	case ixgbe_media_type_backplane:
		/* some MAC's need RMW protection on AUTOC */
		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
		if (ret_val != IXGBE_SUCCESS)
			goto out;

		/* only backplane uses autoc so fall through */
	case ixgbe_media_type_fiber_fixed:
	case ixgbe_media_type_fiber_qsfp:
	case ixgbe_media_type_fiber:
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);

		break;
	case ixgbe_media_type_copper:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
		break;
	default:
		break;
	}

	/*
	 * The possible values of fc.requested_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.requested_mode) {
	case ixgbe_fc_none:
		/* Flow control completely disabled by software override. */
		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
				    IXGBE_AUTOC_ASM_PAUSE);
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane) {
			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
			reg_cu |= IXGBE_TAF_ASM_PAUSE;
			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
		}
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE, as such we fall
		 * through to the fc_full statement.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
				  IXGBE_AUTOC_ASM_PAUSE;
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
		break;
	default:
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
			     "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	if (hw->mac.type < ixgbe_mac_X540) {
		/*
		 * Enable auto-negotiation between the MAC & PHY;
		 * the MAC will advertise clause 37 flow control.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);

		/* Disable AN timeout */
		if (hw->fc.strict_ieee)
			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;

		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
	}

	/*
	 * AUTOC restart handles negotiation of 1G and 10G on backplane
	 * and copper. There is no need to set the PCS1GCTL register.
	 *
	 */
	if (hw->phy.media_type == ixgbe_media_type_backplane) {
		reg_bp |= IXGBE_AUTOC_AN_RESTART;
		ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
		if (ret_val)
			goto out;
	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
		    (ixgbe_device_supports_autoneg_fc(hw))) {
		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
	}

	DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
out:
	return ret_val;
}

/**
 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware by filling the bus info structure and media type, clears
 * all on chip counters, initializes receive address registers, multicast
 * table, VLAN filter table, calls routine to set up link and flow control
 * settings, and leaves transmit and receive units disabled and uninitialized
 **/
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
	s32 ret_val;
	u32 ctrl_ext;

	DEBUGFUNC("ixgbe_start_hw_generic");

	/* Set the media type */
	hw->phy.media_type = hw->mac.ops.get_media_type(hw);

	/* PHY ops initialization must be done in reset_hw() */

	/* Clear the VLAN filter table */
	hw->mac.ops.clear_vfta(hw);

	/* Clear statistics registers */
	hw->mac.ops.clear_hw_cntrs(hw);

	/* Set No Snoop Disable */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	/* Setup flow control */
	ret_val = ixgbe_setup_fc(hw);
	if (ret_val != IXGBE_SUCCESS)
		goto out;

	/* Clear adapter stopped flag */
	hw->adapter_stopped = FALSE;

out:
	return ret_val;
}

/**
 * ixgbe_start_hw_gen2 - Init sequence for common device family
 * @hw: pointer to hw structure
 *
 * Performs the init sequence common to
the second generation
 * of 10 GbE devices.
 * Devices in the second generation:
 *     82599
 *     X540
 **/
s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
	u32 i;
	u32 regval;

	/* Clear the rate limiters */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
	}
	IXGBE_WRITE_FLUSH(hw);

	/* Disable relaxed ordering */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
	}

	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_init_hw_generic - Generic hardware initialization
 * @hw: pointer to hardware structure
 *
 * Initialize the hardware by resetting the hardware, filling the bus info
 * structure and media type, clears all on chip counters, initializes receive
 * address registers, multicast table, VLAN filter table, calls routine to set
 * up link and flow control settings, and leaves transmit and receive units
 * disabled and uninitialized
 **/
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
{
	s32 status;

	DEBUGFUNC("ixgbe_init_hw_generic");

	/* Reset the hardware */
	status = hw->mac.ops.reset_hw(hw);

	if (status == IXGBE_SUCCESS) {
		/* Start the HW */
		status = hw->mac.ops.start_hw(hw);
	}

	return status;
}

/**
 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 * @hw: pointer to hardware structure
 *
 * Clears all hardware statistics counters by reading them from the hardware
 * Statistics counters are
clear on read.
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");

	/* Return values are intentionally discarded: reading clears each
	 * counter. */
	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	if (hw->mac.type >= ixgbe_mac_X550)
		IXGBE_READ_REG(hw, IXGBE_MBSDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			ixgbe_identify_phy(hw);
		/* loop index 'i' is reused here as a scratch u16 to receive
		 * (and discard) the PHY register values */
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.
 **/
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
				  u32 pba_num_size)
{
	s32 ret_val;
	u16 data;
	u16 pba_ptr;
	u16 offset;
	u16 length;

	DEBUGFUNC("ixgbe_read_pba_string_generic");

	if (pba_num == NULL) {
		DEBUGOUT("PBA string buffer was null\n");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	/*
	 * if data is not ptr guard the PBA must be in legacy format which
	 * means pba_ptr is actually our second data word for the PBA number
	 * and we can decode it into an ascii string
	 */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		DEBUGOUT("NVM PBA number is not stored as string\n");

		/* we will need 11 characters to store the PBA */
		if (pba_num_size < 11) {
			DEBUGOUT("PBA string buffer too small\n");
			return IXGBE_ERR_NO_SPACE;
		}

		/* extract hex string from data and pba_ptr */
		pba_num[0] = (data >> 12) & 0xF;
		pba_num[1] = (data >> 8) & 0xF;
		pba_num[2] = (data >> 4) & 0xF;
		pba_num[3] = data & 0xF;
		pba_num[4] = (pba_ptr >> 12) & 0xF;
		pba_num[5] = (pba_ptr >> 8) & 0xF;
		pba_num[6] = '-';
		/* pba_num[7] is a literal 0 nibble; the loop below turns it
		 * into the character '0' */
		pba_num[7] = 0;
		pba_num[8] = (pba_ptr >> 4) & 0xF;
		pba_num[9] = pba_ptr & 0xF;

		/* put a null character on the end of our string */
		pba_num[10] = '\0';

		/* switch all the data but the '-' to hex char */
		for (offset = 0; offset < 10; offset++) {
			if (pba_num[offset] < 0xA)
				pba_num[offset] += '0';
			else if (pba_num[offset] < 0x10)
				pba_num[offset] += 'A' - 0xA;
		}

		return IXGBE_SUCCESS;
	}

	/* pointer format: pba_ptr points at a length-prefixed word block */
	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0) {
		DEBUGOUT("NVM PBA number section invalid length\n");
		return IXGBE_ERR_PBA_SECTION;
	}

	/* check if pba_num buffer is big enough */
	if (pba_num_size < (((u32)length * 2) - 1)) {
		DEBUGOUT("PBA string buffer too small\n");
		return IXGBE_ERR_NO_SPACE;
	}

	/* trim pba length from start of string */
	pba_ptr++;
	length--;

	for (offset = 0; offset < length; offset++) {
		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
		if (ret_val) {
			DEBUGOUT("NVM Read Error\n");
			return ret_val;
		}
		/* each EEPROM word holds two ASCII characters, high byte
		 * first */
		pba_num[offset * 2] = (u8)(data >> 8);
		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
	}
	pba_num[offset * 2] = '\0';

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number from the EEPROM
 *
 * Reads the part number from the EEPROM.
 **/
s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
{
	s32 ret_val;
	u16 data;

	DEBUGFUNC("ixgbe_read_pba_num_generic");

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	} else if (data == IXGBE_PBANUM_PTR_GUARD) {
		/* string/pointer format cannot be returned as a single u32 */
		DEBUGOUT("NVM Not supported\n");
		return IXGBE_NOT_IMPLEMENTED;
	}
	/* first word forms the high 16 bits of the part number */
	*pba_num = (u32)(data << 16);

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}
	/* second word forms the low 16 bits */
	*pba_num |= data;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_read_pba_raw
 * @hw: pointer to the HW structure
 * @eeprom_buf: optional pointer to EEPROM image
 * @eeprom_buf_size: size of EEPROM image in words
 * @max_pba_block_size: PBA block size limit
 * @pba: pointer to output PBA structure
 *
 * Reads PBA from EEPROM image when eeprom_buf is not NULL.
 * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
 *
 **/
s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
		       u32 eeprom_buf_size, u16 max_pba_block_size,
		       struct ixgbe_pba *pba)
{
	s32 ret_val;
	u16 pba_block_size;

	if (pba == NULL)
		return IXGBE_ERR_PARAM;

	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						     &pba->word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
			pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	/* guard word present => word[1] is a pointer to the PBA block */
	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
		if (pba->pba_block == NULL)
			return IXGBE_ERR_PARAM;

		ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
						   eeprom_buf_size,
						   &pba_block_size);
		if (ret_val)
			return ret_val;

		if (pba_block_size > max_pba_block_size)
			return IXGBE_ERR_PARAM;

		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
							     pba_block_size,
							     pba->pba_block);
			if (ret_val)
				return ret_val;
		} else {
			if (eeprom_buf_size > (u32)(pba->word[1] +
					      pba_block_size)) {
				memcpy(pba->pba_block,
				       &eeprom_buf[pba->word[1]],
				       pba_block_size * sizeof(u16));
			} else {
				return IXGBE_ERR_PARAM;
			}
		}
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_write_pba_raw
 * @hw: pointer to the HW structure
 * @eeprom_buf: optional pointer to EEPROM image
 * @eeprom_buf_size: size of EEPROM image in words
 * @pba: pointer to PBA structure
 *
 * Writes PBA to EEPROM image when eeprom_buf is not NULL.
 * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
 *
 **/
s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
			u32 eeprom_buf_size, struct ixgbe_pba *pba)
{
	s32 ret_val;

	if (pba == NULL)
		return IXGBE_ERR_PARAM;

	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						      &pba->word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
			eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	/* guard word present => word[1] points at the PBA block, whose
	 * first word (pba_block[0]) is the block length in words */
	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
		if (pba->pba_block == NULL)
			return IXGBE_ERR_PARAM;

		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
							      pba->pba_block[0],
							      pba->pba_block);
			if (ret_val)
				return ret_val;
		} else {
			if (eeprom_buf_size > (u32)(pba->word[1] +
					      pba->pba_block[0])) {
				memcpy(&eeprom_buf[pba->word[1]],
				       pba->pba_block,
				       pba->pba_block[0] * sizeof(u16));
			} else {
				return IXGBE_ERR_PARAM;
			}
		}
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_get_pba_block_size
 * @hw: pointer to the HW structure
 * @eeprom_buf: optional pointer to EEPROM image
 * @eeprom_buf_size: size of EEPROM image in words
 * @pba_data_size: pointer to output variable
 *
 * Returns the size of the PBA block in words.  Function operates on EEPROM
 * image if the eeprom_buf pointer is not NULL otherwise it accesses physical
 * EEPROM device.
 *
 **/
s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
			     u32 eeprom_buf_size, u16 *pba_block_size)
{
	s32 ret_val;
	u16 pba_word[2];
	u16 length;

	DEBUGFUNC("ixgbe_get_pba_block_size");

	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						     &pba_word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
			pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
		/* first word of the pointed-to block is its length in words */
		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
						      &length);
			if (ret_val)
				return ret_val;
		} else {
			if (eeprom_buf_size > pba_word[1])
				length = eeprom_buf[pba_word[1] + 0];
			else
				return IXGBE_ERR_PARAM;
		}

		if (length == 0xFFFF || length == 0)
			return IXGBE_ERR_PBA_SECTION;
	} else {
		/* PBA number in legacy format, there is no PBA Block. */
		length = 0;
	}

	if (pba_block_size != NULL)
		*pba_block_size = length;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_get_mac_addr_generic - Generic get MAC address
 * @hw: pointer to hardware structure
 * @mac_addr: Adapter MAC address
 *
 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
 * A reset of the adapter must be performed prior to calling this function
 * in order for the MAC address to have been loaded from the EEPROM into RAR0
 **/
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
{
	u32 rar_high;
	u32 rar_low;
	u16 i;

	DEBUGFUNC("ixgbe_get_mac_addr_generic");

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));

	/* bytes 0-3 come from RAL, least-significant byte first */
	for (i = 0; i < 4; i++)
		mac_addr[i] = (u8)(rar_low >> (i*8));

	/* bytes 4-5 come from the low 16 bits of RAH */
	for (i = 0; i < 2; i++)
		mac_addr[i+4] = (u8)(rar_high >> (i*8));

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_set_pci_config_data_generic - Generic store PCI bus info
 * @hw: pointer to hardware structure
 * @link_status: the link status returned by the PCI config space
 *
 * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
 **/
void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	if (hw->bus.type == ixgbe_bus_type_unknown)
		hw->bus.type = ixgbe_bus_type_pci_express;

	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
	case IXGBE_PCI_LINK_WIDTH_1:
		hw->bus.width = ixgbe_bus_width_pcie_x1;
		break;
	case IXGBE_PCI_LINK_WIDTH_2:
		hw->bus.width = ixgbe_bus_width_pcie_x2;
		break;
	case IXGBE_PCI_LINK_WIDTH_4:
		hw->bus.width = ixgbe_bus_width_pcie_x4;
		break;
	case IXGBE_PCI_LINK_WIDTH_8:
		hw->bus.width = ixgbe_bus_width_pcie_x8;
		break;
	default:
		hw->bus.width = ixgbe_bus_width_unknown;
		break;
	}

	switch (link_status & IXGBE_PCI_LINK_SPEED) {
	case IXGBE_PCI_LINK_SPEED_2500:
		hw->bus.speed = ixgbe_bus_speed_2500;
		break;
	case IXGBE_PCI_LINK_SPEED_5000:
		hw->bus.speed = ixgbe_bus_speed_5000;
		break;
	case IXGBE_PCI_LINK_SPEED_8000:
		hw->bus.speed = ixgbe_bus_speed_8000;
		break;
	default:
		hw->bus.speed = ixgbe_bus_speed_unknown;
		break;
	}

	mac->ops.set_lan_id(hw);
}

/**
 * ixgbe_get_bus_info_generic - Generic set PCI bus info
 * @hw: pointer to hardware structure
 *
 * Gets the PCI bus info (speed, width, type) then calls helper function to
 * store this data within the ixgbe_hw structure.
 **/
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
{
	u16 link_status;

	DEBUGFUNC("ixgbe_get_bus_info_generic");

	/* Get the negotiated link width and speed from PCI config space */
	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);

	ixgbe_set_pci_config_data_generic(hw, link_status);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
 * @hw: pointer to the HW structure
 *
 * Determines the LAN function id by reading memory-mapped registers
 * and swaps the port value if requested.
 **/
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u32 reg;

	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");

	/* The LAN function id is reported in the STATUS register */
	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
	bus->lan_id = bus->func;

	/* check for a port swap */
	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
	if (reg & IXGBE_FACTPS_LFS)
		bus->func ^= 0x1;
}

/**
 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
 * @hw: pointer to hardware structure
 *
 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 * disables transmit and receive units. The adapter_stopped flag is used by
 * the shared code and drivers to determine if the adapter is in a stopped
 * state and should not touch the hardware.
 **/
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	u32 reg_val;
	u16 i;

	DEBUGFUNC("ixgbe_stop_adapter_generic");

	/*
	 * Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = TRUE;

	/* Disable the receive unit */
	ixgbe_disable_rx(hw);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts, flush previous writes */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	for (i = 0; i < hw->mac.max_tx_queues; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		reg_val &= ~IXGBE_RXDCTL_ENABLE;
		reg_val |= IXGBE_RXDCTL_SWFLSH;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
	}

	/* flush all queues disables */
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(2);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests
	 */
	return ixgbe_disable_pcie_master(hw);
}

/**
 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
 * @hw: pointer to hardware structure
 * @index: led number to turn on
 **/
s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	DEBUGFUNC("ixgbe_led_on_generic");

	/* To turn on the LED, set mode to ON. */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
 * @hw: pointer to hardware structure
 * @index: led number to turn off
 **/
s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	DEBUGFUNC("ixgbe_led_off_generic");

	/* To turn off the LED, set mode to OFF. */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
 * @hw: pointer to hardware structure
 *
 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
 * ixgbe_hw struct in order to set up EEPROM access.
 **/
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 eec;
	u16 eeprom_size;

	DEBUGFUNC("ixgbe_init_eeprom_params_generic");

	/* Only initialize once; later calls are no-ops */
	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->type = ixgbe_eeprom_none;
		/* Set default semaphore delay to 10ms which is a well
		 * tested value */
		eeprom->semaphore_delay = 10;
		/* Clear EEPROM page size, it will be initialized as needed */
		eeprom->word_page_size = 0;

		/*
		 * Check for EEPROM present first.
		 * If not present leave as none
		 */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
		if (eec & IXGBE_EEC_PRES) {
			eeprom->type = ixgbe_eeprom_spi;

			/*
			 * SPI EEPROM is assumed here. This code would need to
			 * change if a future EEPROM is not SPI.
			 */
			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
					    IXGBE_EEC_SIZE_SHIFT);
			eeprom->word_size = 1 << (eeprom_size +
						  IXGBE_EEPROM_WORD_SIZE_SHIFT);
		}

		/* 8- vs 16-bit addressing is reported by the EEC register */
		if (eec & IXGBE_EEC_ADDR_SIZE)
			eeprom->address_bits = 16;
		else
			eeprom->address_bits = 8;
		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
			  "%d\n", eeprom->type, eeprom->word_size,
			  eeprom->address_bits);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to write
 * @words: number of word(s)
 * @data: 16 bit word(s) to write to EEPROM
 *
 * Writes 16 bit word(s) to the EEPROM through bit-bang method, in bursts
 * of at most IXGBE_EEPROM_RD_BUFFER_MAX_COUNT words per semaphore hold
 **/
s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
					       u16 words, u16 *data)
{
	s32 status = IXGBE_SUCCESS;
	u16 i, count;

	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	if (offset + words > hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/*
	 * The EEPROM page size cannot be queried from the chip. We do lazy
	 * initialization. It is worth to do that when we write large buffer.
	 */
	if ((hw->eeprom.word_page_size == 0) &&
	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
		ixgbe_detect_eeprom_page_size_generic(hw, offset);

	/*
	 * We cannot hold synchronization semaphores for too long
	 * to avoid other entity starvation. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
							    count, &data[i]);

		if (status != IXGBE_SUCCESS)
			break;
	}

out:
	return status;
}

/**
 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be written to
 * @words: number of word(s)
 * @data: 16 bit word(s) to be written to the EEPROM
 *
 * If ixgbe_eeprom_update_checksum is not called after this function, the
 * EEPROM will most likely contain an invalid checksum.
 **/
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status;
	u16 word;
	u16 page_size;
	u16 i;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for writing */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);

			/* Send the WRITE ENABLE command (8 bit opcode ) */
			ixgbe_shift_out_eeprom_bits(hw,
						  IXGBE_EEPROM_WREN_OPCODE_SPI,
						  IXGBE_EEPROM_OPCODE_BITS);

			ixgbe_standby_eeprom(hw);

			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the Write command (8-bit opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);
			page_size = hw->eeprom.word_page_size;

			/* Send the data in burst via SPI */
			do {
				word = data[i];
				/* byte-swap the word before shifting it out */
				word = (word >> 8) | (word << 8);
				ixgbe_shift_out_eeprom_bits(hw, word, 16);

				/* page size 0 means "unknown": one word only */
				if (page_size == 0)
					break;

				/* do not wrap around page */
				if (((offset + i) & (page_size - 1)) ==
				    (page_size - 1))
					break;
			} while (++i < words);

			ixgbe_standby_eeprom(hw);
			msec_delay(10);
		}
		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

	return status;
}

/**
 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be written to
 * @data: 16 bit word to be written to the EEPROM
 *
 * If ixgbe_eeprom_update_checksum is not called after this function, the
 * EEPROM will most likely contain an invalid checksum.
 **/
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	s32 status;

	DEBUGFUNC("ixgbe_write_eeprom_generic");

	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);

out:
	return status;
}

/**
 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @data: read 16 bit words(s) from EEPROM
 * @words: number of word(s)
 *
 * Reads 16 bit word(s) from EEPROM through bit-bang method
 **/
s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status = IXGBE_SUCCESS;
	u16 i, count;

	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	if (offset + words > hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/*
	 * We cannot hold synchronization semaphores for too long
	 * to avoid other entity starvation. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);

		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
							   count, &data[i]);

		if (status != IXGBE_SUCCESS)
			break;
	}

out:
	return status;
}

/**
 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @words: number of word(s)
 * @data: read 16 bit word(s) from EEPROM
 *
 * Reads 16 bit word(s) from EEPROM through bit-bang method
 **/
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data)
{
	s32 status;
	u16 word_in;
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
	u16 i;

	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for reading */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);
			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the READ command (opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			/* Read the data. */
			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
			data[i] = (word_in >> 8) | (word_in << 8);
		}

		/* End this read operation */
		ixgbe_release_eeprom(hw);
	}

	return status;
}

/**
 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @data: read 16 bit value from EEPROM
 *
 * Reads 16 bit value from EEPROM through bit-bang method
 **/
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
				       u16 *data)
{
	s32 status;

	DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");

	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);

out:
	return status;
}

/**
 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @words: number of word(s)
 * @data: 16 bit word(s) from the EEPROM
 *
 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
 **/
s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				   u16 words, u16 *data)
{
	u32 eerd;
	s32 status = IXGBE_SUCCESS;
	u32 i;

	DEBUGFUNC("ixgbe_read_eerd_buffer_generic");

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
		goto out;
	}

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
		goto out;
	}

	for (i = 0; i < words; i++) {
		/* address in upper bits, START kicks off the read */
		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
		       IXGBE_EEPROM_RW_REG_START;

		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);

		if (status == IXGBE_SUCCESS) {
			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
				   IXGBE_EEPROM_RW_REG_DATA);
		} else {
			DEBUGOUT("Eeprom read timed out\n");
			goto out;
		}
	}
out:
	return status;
}

/**
 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be used as a scratch pad
 *
 * Discover EEPROM page size by writing marching data at given offset.
 * This function is called only when we are writing a new large buffer
 * at given offset so the data would be overwritten anyway.
 **/
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset)
{
	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
	s32 status = IXGBE_SUCCESS;
	u16 i;

	DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");

	/* marching pattern 0, 1, 2, ... so wrap-around is detectable */
	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
		data[i] = i;

	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
						    IXGBE_EEPROM_PAGE_SIZE_MAX,
						    data);
	hw->eeprom.word_page_size = 0;
	if (status != IXGBE_SUCCESS)
		goto out;

	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * When writing in burst more than the actual page size
	 * EEPROM address wraps around current page.
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];

	DEBUGOUT1("Detected EEPROM page size = %d words.",
		  hw->eeprom.word_page_size);
out:
	return status;
}

/**
 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 *
 * Reads a 16 bit word from the EEPROM using the EERD register.
 **/
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
}

/**
 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to write
 * @words: number of word(s)
 * @data: word(s) write to the EEPROM
 *
 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
 **/
s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				    u16 words, u16 *data)
{
	u32 eewr;
	s32 status = IXGBE_SUCCESS;
	u16 i;

	DEBUGFUNC("ixgbe_write_eewr_generic");

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
		goto out;
	}

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
		goto out;
	}

	for (i = 0; i < words; i++) {
		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
		       (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
		       IXGBE_EEPROM_RW_REG_START;

		/* make sure the interface is idle before issuing the write */
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
		if (status != IXGBE_SUCCESS) {
			DEBUGOUT("Eeprom write EEWR timed out\n");
			goto out;
		}

		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);

		/* and wait for this write to complete */
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
		if (status != IXGBE_SUCCESS) {
			DEBUGOUT("Eeprom write EEWR timed out\n");
			goto out;
		}
	}

out:
	return status;
}

/**
 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to write
 * @data: word write to the EEPROM
 *
 * Write a 16 bit word to the EEPROM using the EEWR register.
 **/
s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
}

/**
 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
 * @hw: pointer to hardware structure
 * @ee_reg: EEPROM flag for polling
 *
 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
 * read or write is done respectively.
 **/
s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
{
	u32 i;
	u32 reg;
	s32 status = IXGBE_ERR_EEPROM;

	DEBUGFUNC("ixgbe_poll_eerd_eewr_done");

	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
		if (ee_reg == IXGBE_NVM_POLL_READ)
			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
		else
			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);

		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(5);
	}

	if (i == IXGBE_EERD_EEWR_ATTEMPTS)
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			      "EEPROM read/write done polling timed out");

	return status;
}

/**
 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
 * @hw: pointer to hardware structure
 *
 * Prepares EEPROM for access using bit-bang method. This function should
 * be called before issuing a command to the EEPROM.
 **/
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 eec;
	u32 i;

	DEBUGFUNC("ixgbe_acquire_eeprom");

	/* First get the SW/FW synchronization semaphore for the EEPROM */
	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
	    != IXGBE_SUCCESS)
		status = IXGBE_ERR_SWFW_SYNC;

	if (status == IXGBE_SUCCESS) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

		/* Request EEPROM Access */
		eec |= IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);

		/* ...then wait for the hardware grant */
		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
			eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
			if (eec & IXGBE_EEC_GNT)
				break;
			usec_delay(5);
		}

		/* Release if grant not acquired */
		if (!(eec & IXGBE_EEC_GNT)) {
			eec &= ~IXGBE_EEC_REQ;
			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
			DEBUGOUT("Could not acquire EEPROM grant\n");

			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
			status = IXGBE_ERR_EEPROM;
		}

		/* Setup EEPROM for Read/Write */
		if (status == IXGBE_SUCCESS) {
			/* Clear CS and SK */
			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
			IXGBE_WRITE_FLUSH(hw);
			usec_delay(1);
		}
	}
	return status;
}

/**
 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
 * @hw: pointer to hardware structure
 *
 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
 **/
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	DEBUGFUNC("ixgbe_get_eeprom_semaphore");


	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(50);
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
			 "not granted.\n");
		/*
		 * this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		usec_delay(50);
		/*
		 * one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI))
			status = IXGBE_SUCCESS;
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.
			 */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			usec_delay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			ERROR_REPORT1(IXGBE_ERROR_POLLING,
			    "SWESMBI Software EEPROM semaphore not granted.\n");
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			     "Software semaphore SMBI between device drivers "
			     "not granted.\n");
	}

	return status;
}

/**
 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
 * @hw: pointer to hardware structure
 *
 * This function clears hardware semaphore bits.
 **/
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
{
	u32 swsm;

	DEBUGFUNC("ixgbe_release_eeprom_semaphore");

	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ready_eeprom - Polls for EEPROM ready
 * @hw: pointer to hardware structure
 **/
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u16 i;
	u8 spi_stat_reg;

	DEBUGFUNC("ixgbe_ready_eeprom");

	/*
	 * Read "Status Register" repeatedly until the LSB is cleared.  The
	 * EEPROM will signal that the command has been completed by clearing
	 * bit 0 of the internal status register.  If it's not cleared within
	 * 5 milliseconds, then error out.
	 */
	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
					    IXGBE_EEPROM_OPCODE_BITS);
		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
			break;

		usec_delay(5);
		ixgbe_standby_eeprom(hw);
	}

	/*
	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
	 * devices (and only 0-5mSec on 5V devices)
	 */
	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
		DEBUGOUT("SPI EEPROM Status error\n");
		status = IXGBE_ERR_EEPROM;
	}

	return status;
}

/**
 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
 * @hw: pointer to hardware structure
 **/
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	DEBUGFUNC("ixgbe_standby_eeprom");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	/* Toggle CS to flush commands */
	eec |= IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
	eec &= ~IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
}

/**
 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
 * @hw: pointer to hardware structure
 * @data: data to send to the EEPROM
 * @count: number of bits to shift out
 **/
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count)
{
	u32 eec;
	u32 mask;
	u32 i;

	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	/*
	 * Mask is used to shift "count" bits of "data" out to the EEPROM
	 * one bit at a time. Determine the starting bit based on count
	 */
	mask = 0x01 << (count - 1);

	for (i = 0; i < count; i++) {
		/*
		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
		 * "1", and then raising and then lowering the clock (the SK
		 * bit controls the clock input to the EEPROM).  A "0" is
		 * shifted out to the EEPROM by setting "DI" to "0" and then
		 * raising and then lowering the clock.
		 */
		if (data & mask)
			eec |= IXGBE_EEC_DI;
		else
			eec &= ~IXGBE_EEC_DI;

		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
		IXGBE_WRITE_FLUSH(hw);

		usec_delay(1);

		ixgbe_raise_eeprom_clk(hw, &eec);
		ixgbe_lower_eeprom_clk(hw, &eec);

		/*
		 * Shift mask to signify next bit of data to shift in to the
		 * EEPROM
		 */
		mask = mask >> 1;
	}

	/* We leave the "DI" bit set to "0" when we leave this routine. */
	eec &= ~IXGBE_EEC_DI;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
 * @hw: pointer to hardware structure
 * @count: number of bits to shift in
 **/
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
	u32 eec;
	u32 i;
	u16 data = 0;

	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");

	/*
	 * In order to read a register from the EEPROM, we need to shift
	 * 'count' bits in from the EEPROM.  Bits are "shifted in" by raising
	 * the clock input to the EEPROM (setting the SK bit), and then reading
	 * the value of the "DO" bit.  During this "shifting in" process the
	 * "DI" bit should always be clear.
	 */
	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);

	for (i = 0; i < count; i++) {
		data = data << 1;
		ixgbe_raise_eeprom_clk(hw, &eec);

		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

		eec &= ~(IXGBE_EEC_DI);
		if (eec & IXGBE_EEC_DO)
			data |= 1;

		ixgbe_lower_eeprom_clk(hw, &eec);
	}

	return data;
}

/**
 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
 * @hw: pointer to hardware structure
 * @eec: pointer to the EEC register's current value
 **/
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	DEBUGFUNC("ixgbe_raise_eeprom_clk");

	/*
	 * Raise the clock input to the EEPROM
	 * (setting the SK bit), then delay
	 */
	*eec = *eec | IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
}

/**
 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
 * @hw: pointer to hardware structure
 * @eec: EEC register's current value
 **/
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	DEBUGFUNC("ixgbe_lower_eeprom_clk");

	/*
	 * Lower the clock input to the EEPROM (clearing the SK bit), then
	 * delay
	 */
	*eec = *eec & ~IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
}

/**
 * ixgbe_release_eeprom - Release EEPROM, release semaphores
 * @hw: pointer to hardware structure
 *
 * Deselects the EEPROM (CS high, SK low), drops the hardware access
 * request bit and releases the SWFW semaphore taken when the EEPROM
 * was acquired.
 **/
static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	DEBUGFUNC("ixgbe_release_eeprom");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	eec |= IXGBE_EEC_CS;  /* Pull CS high */
	eec &= ~IXGBE_EEC_SK; /* Lower SCK */

	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);

	usec_delay(1);

	/* Stop requesting EEPROM access */
	eec &= ~IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	/* Delay before attempt to obtain semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
}

/**
 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 *
 * Sums the words that participate in the EEPROM checksum (the fixed
 * region below the checksum word plus every section reachable through
 * the pointer words, except the FW section) and returns the value that
 * makes the overall sum equal IXGBE_EEPROM_SUM.
 *
 * Returns a negative error code on error, or the 16-bit checksum
 **/
s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;

	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");

	/*
	 * Include words 0x0 up to (but not including) the checksum word
	 * at offset IXGBE_EEPROM_CHECKSUM.
	 */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}
		checksum += word;
	}

	/* Include all data from pointers except for the fw pointer */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		if (hw->eeprom.ops.read(hw, i, &pointer)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		/* If the pointer seems invalid */
		if (pointer == 0xFFFF || pointer == 0)
			continue;

		/* First word of a pointed-to section is its length */
		if (hw->eeprom.ops.read(hw, pointer, &length)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		if (length == 0xFFFF || length == 0)
			continue;

		for (j = pointer + 1; j <= pointer + length; j++) {
			if (hw->eeprom.ops.read(hw, j, &word)) {
				DEBUGOUT("EEPROM read failed\n");
				return IXGBE_ERR_EEPROM;
			}
			checksum += word;
		}
	}

	/* Stored checksum is the value that makes the total IXGBE_EEPROM_SUM */
	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return (s32)checksum;
}

/**
 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum_val: calculated checksum
 *
 * Performs checksum calculation and validates the EEPROM checksum. If the
 * caller does not need checksum_val, the value can be NULL.
 **/
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
					   u16 *checksum_val)
{
	s32 status;
	u16 checksum;
	u16 read_checksum = 0;

	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status) {
		DEBUGOUT("EEPROM read failed\n");
		return status;
	}

	/* calc_checksum returns the checksum in the low 16 bits on success */
	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		return status;

	checksum = (u16)(status & 0xffff);

	status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
	if (status) {
		DEBUGOUT("EEPROM read failed\n");
		return status;
	}

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (read_checksum != checksum)
		status = IXGBE_ERR_EEPROM_CHECKSUM;

	/* If the user cares, return the calculated checksum */
	if (checksum_val)
		*checksum_val = checksum;

	return status;
}

/**
 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
 * @hw: pointer to hardware structure
 *
 * Recomputes the checksum over the current EEPROM contents and writes
 * it to the checksum word.  Returns IXGBE_SUCCESS or a negative error.
 **/
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	s32 status;
	u16 checksum;

	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status) {
		DEBUGOUT("EEPROM read failed\n");
		return status;
	}

	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		return status;

	checksum = (u16)(status & 0xffff);

	status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);

	return status;
}

/**
 * ixgbe_validate_mac_addr - Validate MAC address
 * @mac_addr: pointer to MAC address.
2255 * 2256 * Tests a MAC address to ensure it is a valid Individual Address 2257 **/ 2258 s32 ixgbe_validate_mac_addr(u8 *mac_addr) 2259 { 2260 s32 status = IXGBE_SUCCESS; 2261 2262 DEBUGFUNC("ixgbe_validate_mac_addr"); 2263 2264 /* Make sure it is not a multicast address */ 2265 if (IXGBE_IS_MULTICAST(mac_addr)) { 2266 DEBUGOUT("MAC address is multicast\n"); 2267 status = IXGBE_ERR_INVALID_MAC_ADDR; 2268 /* Not a broadcast address */ 2269 } else if (IXGBE_IS_BROADCAST(mac_addr)) { 2270 DEBUGOUT("MAC address is broadcast\n"); 2271 status = IXGBE_ERR_INVALID_MAC_ADDR; 2272 /* Reject the zero address */ 2273 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && 2274 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) { 2275 DEBUGOUT("MAC address is all zeros\n"); 2276 status = IXGBE_ERR_INVALID_MAC_ADDR; 2277 } 2278 return status; 2279 } 2280 2281 /** 2282 * ixgbe_set_rar_generic - Set Rx address register 2283 * @hw: pointer to hardware structure 2284 * @index: Receive address register to write 2285 * @addr: Address to put into receive address register 2286 * @vmdq: VMDq "set" or "pool" index 2287 * @enable_addr: set flag that address is active 2288 * 2289 * Puts an ethernet address into a receive address register. 
 **/
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
			  u32 enable_addr)
{
	u32 rar_low, rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_set_rar_generic");

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
			      "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* setup VMDq pool selection before this RAR gets enabled */
	hw->mac.ops.set_vmdq(hw, index, vmdq);

	/*
	 * HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] |
		   ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) |
		   ((u32)addr[3] << 24));
	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));

	/* Mark the entry valid only when the caller asked for it */
	if (enable_addr != 0)
		rar_high |= IXGBE_RAH_AV;

	/* RAL first; writing RAH (with AV) activates the entry */
	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_clear_rar_generic - Remove Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 *
 * Clears an ethernet address from a receive address register.
 **/
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_clear_rar_generic");

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
			      "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);

	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	/* clear VMDq pool/queue selection for this RAR */
	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 * @hw: pointer to hardware structure
 *
 * Places the MAC address in receive address register 0 and clears the rest
 * of the receive address registers. Clears the multicast table. Assumes
 * the receiver is in reset when the routine is called.
 **/
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_init_rx_addrs_generic");

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
	    IXGBE_ERR_INVALID_MAC_ADDR) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);
	} else {
		/* Setup the receive address. */
		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for RAR 0 */
		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
	}
	hw->addr_ctrl.overflow_promisc = 0;

	/* RAR[0] now holds the station address */
	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA */
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	DEBUGOUT(" Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	ixgbe_init_uta_tables(hw);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_add_uc_addr - Adds a secondary unicast address.
 * @hw: pointer to hardware structure
 * @addr: new address
 * @vmdq: VMDq pool/set index for this address
 *
 * Adds it to unused receive address register or goes into promiscuous mode.
2447 **/ 2448 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) 2449 { 2450 u32 rar_entries = hw->mac.num_rar_entries; 2451 u32 rar; 2452 2453 DEBUGFUNC("ixgbe_add_uc_addr"); 2454 2455 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n", 2456 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); 2457 2458 /* 2459 * Place this address in the RAR if there is room, 2460 * else put the controller into promiscuous mode 2461 */ 2462 if (hw->addr_ctrl.rar_used_count < rar_entries) { 2463 rar = hw->addr_ctrl.rar_used_count; 2464 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); 2465 DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar); 2466 hw->addr_ctrl.rar_used_count++; 2467 } else { 2468 hw->addr_ctrl.overflow_promisc++; 2469 } 2470 2471 DEBUGOUT("ixgbe_add_uc_addr Complete\n"); 2472 } 2473 2474 /** 2475 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses 2476 * @hw: pointer to hardware structure 2477 * @addr_list: the list of new addresses 2478 * @addr_count: number of addresses 2479 * @next: iterator function to walk the address list 2480 * 2481 * The given list replaces any existing list. Clears the secondary addrs from 2482 * receive address registers. Uses unused receive address registers for the 2483 * first secondary addresses, and falls back to promiscuous mode as needed. 2484 * 2485 * Drivers using secondary unicast addresses must set user_set_promisc when 2486 * manually putting the device into promiscuous mode. 
2487 **/ 2488 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, 2489 u32 addr_count, ixgbe_mc_addr_itr next) 2490 { 2491 u8 *addr; 2492 u32 i; 2493 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; 2494 u32 uc_addr_in_use; 2495 u32 fctrl; 2496 u32 vmdq; 2497 2498 DEBUGFUNC("ixgbe_update_uc_addr_list_generic"); 2499 2500 /* 2501 * Clear accounting of old secondary address list, 2502 * don't count RAR[0] 2503 */ 2504 uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1; 2505 hw->addr_ctrl.rar_used_count -= uc_addr_in_use; 2506 hw->addr_ctrl.overflow_promisc = 0; 2507 2508 /* Zero out the other receive addresses */ 2509 DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1); 2510 for (i = 0; i < uc_addr_in_use; i++) { 2511 IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0); 2512 IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0); 2513 } 2514 2515 /* Add the new addresses */ 2516 for (i = 0; i < addr_count; i++) { 2517 DEBUGOUT(" Adding the secondary addresses:\n"); 2518 addr = next(hw, &addr_list, &vmdq); 2519 ixgbe_add_uc_addr(hw, addr, vmdq); 2520 } 2521 2522 if (hw->addr_ctrl.overflow_promisc) { 2523 /* enable promisc if not already in overflow or set by user */ 2524 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { 2525 DEBUGOUT(" Entering address overflow promisc mode\n"); 2526 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 2527 fctrl |= IXGBE_FCTRL_UPE; 2528 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 2529 } 2530 } else { 2531 /* only disable if set by overflow, not by user */ 2532 if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { 2533 DEBUGOUT(" Leaving address overflow promisc mode\n"); 2534 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 2535 fctrl &= ~IXGBE_FCTRL_UPE; 2536 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 2537 } 2538 } 2539 2540 DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n"); 2541 return IXGBE_SUCCESS; 2542 } 2543 2544 /** 2545 * ixgbe_mta_vector - Determines bit-vector in multicast table to set 2546 * @hw: pointer to hardware 
structure 2547 * @mc_addr: the multicast address 2548 * 2549 * Extracts the 12 bits, from a multicast address, to determine which 2550 * bit-vector to set in the multicast table. The hardware uses 12 bits, from 2551 * incoming rx multicast addresses, to determine the bit-vector to check in 2552 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set 2553 * by the MO field of the MCSTCTRL. The MO field is set during initialization 2554 * to mc_filter_type. 2555 **/ 2556 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) 2557 { 2558 u32 vector = 0; 2559 2560 DEBUGFUNC("ixgbe_mta_vector"); 2561 2562 switch (hw->mac.mc_filter_type) { 2563 case 0: /* use bits [47:36] of the address */ 2564 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); 2565 break; 2566 case 1: /* use bits [46:35] of the address */ 2567 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); 2568 break; 2569 case 2: /* use bits [45:34] of the address */ 2570 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); 2571 break; 2572 case 3: /* use bits [43:32] of the address */ 2573 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); 2574 break; 2575 default: /* Invalid mc_filter_type */ 2576 DEBUGOUT("MC filter type param set incorrectly\n"); 2577 ASSERT(0); 2578 break; 2579 } 2580 2581 /* vector can only be 12-bits or boundary will be exceeded */ 2582 vector &= 0xFFF; 2583 return vector; 2584 } 2585 2586 /** 2587 * ixgbe_set_mta - Set bit-vector in multicast table 2588 * @hw: pointer to hardware structure 2589 * @hash_value: Multicast address hash value 2590 * 2591 * Sets the bit-vector in the multicast table. 
 **/
void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector;
	u32 vector_bit;
	u32 vector_reg;

	DEBUGFUNC("ixgbe_set_mta");

	/* Track table occupancy so MFE can be enabled only when needed */
	hw->addr_ctrl.mta_in_use++;

	vector = ixgbe_mta_vector(hw, mc_addr);
	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);

	/*
	 * The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits. We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value. The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register are determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	/* Only the shadow copy is updated here; the caller flushes it to HW */
	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}

/**
 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
 * @hw: pointer to hardware structure
 * @mc_addr_list: the list of new multicast addresses
 * @mc_addr_count: number of addresses
 * @next: iterator function to walk the multicast address list
 * @clear: flag, when set clears the table beforehand
 *
 * When the clear flag is set, the given list replaces any existing list.
 * Hashes the given addresses into the multicast table.
 **/
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
				      u32 mc_addr_count, ixgbe_mc_addr_itr next,
				      bool clear)
{
	u32 i;
	u32 vmdq;

	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");

	/*
	 * Set the new number of MC addresses that we are being requested to
	 * use.
	 */
	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
	hw->addr_ctrl.mta_in_use = 0;

	/* Clear mta_shadow */
	if (clear) {
		DEBUGOUT(" Clearing MTA\n");
		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
	}

	/* Update mta_shadow */
	for (i = 0; i < mc_addr_count; i++) {
		DEBUGOUT(" Adding the multicast addresses:\n");
		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
	}

	/* Flush the shadow table to the hardware MTA register array */
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
				      hw->mac.mta_shadow[i]);

	/* Turn the multicast filter on only if at least one bit is set */
	if (hw->addr_ctrl.mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);

	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_enable_mc_generic - Enable multicast address in RAR
 * @hw: pointer to hardware structure
 *
 * Enables multicast address in RAR and the use of the multicast hash table.
 **/
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;

	DEBUGFUNC("ixgbe_enable_mc_generic");

	/* Only enable the filter when the shadow table is non-empty */
	if (a->mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
				hw->mac.mc_filter_type);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_disable_mc_generic - Disable multicast address in RAR
 * @hw: pointer to hardware structure
 *
 * Disables multicast address in RAR and the use of the multicast hash table.
 **/
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;

	DEBUGFUNC("ixgbe_disable_mc_generic");

	/* Writing MCSTCTRL without MFE turns the multicast filter off */
	if (a->mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_fc_enable_generic - Enable flow control
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to the current settings.
 **/
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 mflcn_reg, fccfg_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	int i;

	DEBUGFUNC("ixgbe_fc_enable_generic");

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				DEBUGOUT("Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/* Negotiate the fc mode to use */
	ixgbe_fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE. Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	default:
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
			      "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* Set 802.3x based flow control settings. */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);


	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the Rx packet buffer size - 24KB. This allows
			 * the Tx switch to function even under heavy Rx
			 * workloads.
			 */
			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}

/**
 * ixgbe_negotiate_fc - Negotiate flow control
 * @hw: pointer to hardware structure
 * @adv_reg: flow control advertised settings
 * @lp_reg: link partner's flow control settings
 * @adv_sym: symmetric pause bit in advertisement
 * @adv_asm: asymmetric pause bit in advertisement
 * @lp_sym: symmetric pause bit in link partner advertisement
 * @lp_asm: asymmetric pause bit in link partner advertisement
 *
 * Find the intersection between advertised settings and link partner's
 * advertised settings
 **/
static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
			      u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
	if ((!(adv_reg)) || (!(lp_reg))) {
		ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
			      "Local or link partner's advertised flow control "
			      "settings are NULL. Local: %x, link partner: %x\n",
			      adv_reg, lp_reg);
		return IXGBE_ERR_FC_NOT_NEGOTIATED;
	}

	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
		/*
		 * Now we need to check if the user selected Rx ONLY
		 * of pause frames. In this case, we had to advertise
		 * FULL flow control because we could not advertise RX
		 * ONLY. Hence, we must now check to see if we need to
		 * turn OFF the TRANSMISSION of PAUSE frames.
		 */
		if (hw->fc.requested_mode == ixgbe_fc_full) {
			hw->fc.current_mode = ixgbe_fc_full;
			DEBUGOUT("Flow Control = FULL.\n");
		} else {
			hw->fc.current_mode = ixgbe_fc_rx_pause;
			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
		}
	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		/* We asked only for asymmetric; partner can honor it -> Tx */
		hw->fc.current_mode = ixgbe_fc_tx_pause;
		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		/* Partner only asked for asymmetric -> we receive pause */
		hw->fc.current_mode = ixgbe_fc_rx_pause;
		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
	} else {
		hw->fc.current_mode = ixgbe_fc_none;
		DEBUGOUT("Flow Control = NONE.\n");
	}
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
 * @hw: pointer to hardware structure
 *
 * Enable flow control according on 1 gig fiber.
2901 **/ 2902 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) 2903 { 2904 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; 2905 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; 2906 2907 /* 2908 * On multispeed fiber at 1g, bail out if 2909 * - link is up but AN did not complete, or if 2910 * - link is up and AN completed but timed out 2911 */ 2912 2913 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 2914 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || 2915 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { 2916 DEBUGOUT("Auto-Negotiation did not complete or timed out\n"); 2917 goto out; 2918 } 2919 2920 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 2921 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 2922 2923 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg, 2924 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, 2925 IXGBE_PCS1GANA_ASM_PAUSE, 2926 IXGBE_PCS1GANA_SYM_PAUSE, 2927 IXGBE_PCS1GANA_ASM_PAUSE); 2928 2929 out: 2930 return ret_val; 2931 } 2932 2933 /** 2934 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37 2935 * @hw: pointer to hardware structure 2936 * 2937 * Enable flow control according to IEEE clause 37. 
2938 **/ 2939 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) 2940 { 2941 u32 links2, anlp1_reg, autoc_reg, links; 2942 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; 2943 2944 /* 2945 * On backplane, bail out if 2946 * - backplane autoneg was not completed, or if 2947 * - we are 82599 and link partner is not AN enabled 2948 */ 2949 links = IXGBE_READ_REG(hw, IXGBE_LINKS); 2950 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) { 2951 DEBUGOUT("Auto-Negotiation did not complete\n"); 2952 goto out; 2953 } 2954 2955 if (hw->mac.type == ixgbe_mac_82599EB) { 2956 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); 2957 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { 2958 DEBUGOUT("Link partner is not AN enabled\n"); 2959 goto out; 2960 } 2961 } 2962 /* 2963 * Read the 10g AN autoc and LP ability registers and resolve 2964 * local flow control settings accordingly 2965 */ 2966 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2967 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); 2968 2969 ret_val = ixgbe_negotiate_fc(hw, autoc_reg, 2970 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE, 2971 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE); 2972 2973 out: 2974 return ret_val; 2975 } 2976 2977 /** 2978 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37 2979 * @hw: pointer to hardware structure 2980 * 2981 * Enable flow control according to IEEE clause 37. 
 **/
static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
{
	u16 technology_ability_reg = 0;
	u16 lp_technology_ability_reg = 0;

	/* Read local and link partner PHY auto-negotiation ability via MDIO */
	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
			     &technology_ability_reg);
	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
			     &lp_technology_ability_reg);

	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
				  (u32)lp_technology_ability_reg,
				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
}

/**
 * ixgbe_fc_autoneg - Configure flow control
 * @hw: pointer to hardware structure
 *
 * Compares our advertised flow control capabilities to those advertised by
 * our link partner, and determines the proper flow control mode to use.
 **/
void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
	ixgbe_link_speed speed;
	bool link_up;

	DEBUGFUNC("ixgbe_fc_autoneg");

	/*
	 * AN should have completed when the cable was plugged in.
	 * Look for reasons to bail out. Bail out if:
	 * - FC autoneg is disabled, or if
	 * - link is not up.
	 */
	if (hw->fc.disable_fc_autoneg) {
		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
			      "Flow control autoneg is disabled");
		goto out;
	}

	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
	if (!link_up) {
		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
		goto out;
	}

	/* Dispatch to the media-specific negotiation routine */
	switch (hw->phy.media_type) {
	/* Autoneg flow control on fiber adapters */
	case ixgbe_media_type_fiber_fixed:
	case ixgbe_media_type_fiber_qsfp:
	case ixgbe_media_type_fiber:
		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
			ret_val = ixgbe_fc_autoneg_fiber(hw);
		break;

	/* Autoneg flow control on backplane adapters */
	case ixgbe_media_type_backplane:
		ret_val = ixgbe_fc_autoneg_backplane(hw);
		break;

	/* Autoneg flow control on copper adapters */
	case ixgbe_media_type_copper:
		if (ixgbe_device_supports_autoneg_fc(hw))
			ret_val = ixgbe_fc_autoneg_copper(hw);
		break;

	default:
		break;
	}

out:
	/* On any failure fall back to the mode the caller requested */
	if (ret_val == IXGBE_SUCCESS) {
		hw->fc.fc_was_autonegged = TRUE;
	} else {
		hw->fc.fc_was_autonegged = FALSE;
		hw->fc.current_mode = hw->fc.requested_mode;
	}
}

/*
 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
 * @hw: pointer to hardware structure
 *
 * System-wide timeout range is encoded in PCIe Device Control2 register.
 *
 * Add 10% to specified maximum and return the number of times to poll for
 * completion timeout, in units of 100 microsec. Never return less than
 * 800 = 80 millisec.
3076 */ 3077 static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) 3078 { 3079 s16 devctl2; 3080 u32 pollcnt; 3081 3082 devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2); 3083 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK; 3084 3085 switch (devctl2) { 3086 case IXGBE_PCIDEVCTRL2_65_130ms: 3087 pollcnt = 1300; /* 130 millisec */ 3088 break; 3089 case IXGBE_PCIDEVCTRL2_260_520ms: 3090 pollcnt = 5200; /* 520 millisec */ 3091 break; 3092 case IXGBE_PCIDEVCTRL2_1_2s: 3093 pollcnt = 20000; /* 2 sec */ 3094 break; 3095 case IXGBE_PCIDEVCTRL2_4_8s: 3096 pollcnt = 80000; /* 8 sec */ 3097 break; 3098 case IXGBE_PCIDEVCTRL2_17_34s: 3099 pollcnt = 34000; /* 34 sec */ 3100 break; 3101 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */ 3102 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */ 3103 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */ 3104 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */ 3105 default: 3106 pollcnt = 800; /* 80 millisec minimum */ 3107 break; 3108 } 3109 3110 /* add 10% to spec maximum */ 3111 return (pollcnt * 11) / 10; 3112 } 3113 3114 /** 3115 * ixgbe_disable_pcie_master - Disable PCI-express master access 3116 * @hw: pointer to hardware structure 3117 * 3118 * Disables PCI-Express master access and verifies there are no pending 3119 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable 3120 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS 3121 * is returned signifying master requests disabled. 
 **/
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 i, poll;
	u16 value;

	DEBUGFUNC("ixgbe_disable_pcie_master");

	/* Always set this bit to ensure any future transactions are blocked */
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);

	/* Exit if master requests are blocked */
	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
	    IXGBE_REMOVED(hw->hw_addr))
		goto out;

	/* Poll for master request bit to clear; 100us per iteration */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		usec_delay(100);
		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
			goto out;
	}

	/*
	 * Two consecutive resets are required via CTRL.RST per datasheet
	 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
	 * of this need. The first reset prevents new master requests from
	 * being issued by our device. We then must wait 1usec or more for any
	 * remaining completions from the PCIe bus to trickle in, and then reset
	 * again to clear out any effects they may have had on our device.
	 */
	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

	/* On X550 and newer the double reset suffices; skip the PCIe
	 * pending-transaction poll below. */
	if (hw->mac.type >= ixgbe_mac_X550)
		goto out;

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending. Poll count comes from the Device Control2
	 * completion timeout (see ixgbe_pcie_timeout_poll), 100us per step.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
		if (IXGBE_REMOVED(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

	ERROR_REPORT1(IXGBE_ERROR_POLLING,
		      "PCIe transaction pending bit also did not clear.\n");
	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;

out:
	return status;
}

/**
 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
	u32 gssr = 0;
	u32 swmask = mask;
	/* FW-owned bits sit 5 positions above the corresponding SW bits */
	u32 fwmask = mask << 5;
	u32 timeout = 200;	/* 200 * 5ms = up to ~1 sec of retries */
	u32 i;

	DEBUGFUNC("ixgbe_acquire_swfw_sync");

	for (i = 0; i < timeout; i++) {
		/*
		 * SW NVM semaphore bit is used for access to all
		 * SW_FW_SYNC bits (not just NVM)
		 */
		if (ixgbe_get_eeprom_semaphore(hw))
			return IXGBE_ERR_SWFW_SYNC;

		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
		if (!(gssr & (fwmask | swmask))) {
			/* Neither FW nor SW holds it: claim the SW bit */
			gssr |= swmask;
			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
			ixgbe_release_eeprom_semaphore(hw);
			return IXGBE_SUCCESS;
		} else {
			/* Resource is currently in use by FW or SW */
			ixgbe_release_eeprom_semaphore(hw);
			msec_delay(5);
		}
	}

	/* If time expired clear the bits holding the lock and retry */
	if (gssr & (fwmask | swmask))
		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));

	msec_delay(5);
	return IXGBE_ERR_SWFW_SYNC;
}

/**
 * ixgbe_release_swfw_sync - Release SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to
specify which semaphore to release 3233 * 3234 * Releases the SWFW semaphore through the GSSR register for the specified 3235 * function (CSR, PHY0, PHY1, EEPROM, Flash) 3236 **/ 3237 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask) 3238 { 3239 u32 gssr; 3240 u32 swmask = mask; 3241 3242 DEBUGFUNC("ixgbe_release_swfw_sync"); 3243 3244 ixgbe_get_eeprom_semaphore(hw); 3245 3246 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); 3247 gssr &= ~swmask; 3248 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); 3249 3250 ixgbe_release_eeprom_semaphore(hw); 3251 } 3252 3253 /** 3254 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path 3255 * @hw: pointer to hardware structure 3256 * 3257 * Stops the receive data path and waits for the HW to internally empty 3258 * the Rx security block 3259 **/ 3260 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw) 3261 { 3262 #define IXGBE_MAX_SECRX_POLL 40 3263 3264 int i; 3265 int secrxreg; 3266 3267 DEBUGFUNC("ixgbe_disable_sec_rx_path_generic"); 3268 3269 3270 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 3271 secrxreg |= IXGBE_SECRXCTRL_RX_DIS; 3272 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); 3273 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) { 3274 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); 3275 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) 3276 break; 3277 else 3278 /* Use interrupt-safe sleep just in case */ 3279 usec_delay(1000); 3280 } 3281 3282 /* For informational purposes only */ 3283 if (i >= IXGBE_MAX_SECRX_POLL) 3284 DEBUGOUT("Rx unit being enabled before security " 3285 "path fully disabled. Continuing with init.\n"); 3286 3287 return IXGBE_SUCCESS; 3288 } 3289 3290 /** 3291 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read 3292 * @hw: pointer to hardware structure 3293 * @reg_val: Value we read from AUTOC 3294 * 3295 * The default case requires no protection so just to the register read. 
3296 */ 3297 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) 3298 { 3299 *locked = FALSE; 3300 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); 3301 return IXGBE_SUCCESS; 3302 } 3303 3304 /** 3305 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write 3306 * @hw: pointer to hardware structure 3307 * @reg_val: value to write to AUTOC 3308 * @locked: bool to indicate whether the SW/FW lock was already taken by 3309 * previous read. 3310 * 3311 * The default case requires no protection so just to the register write. 3312 */ 3313 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) 3314 { 3315 UNREFERENCED_1PARAMETER(locked); 3316 3317 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val); 3318 return IXGBE_SUCCESS; 3319 } 3320 3321 /** 3322 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path 3323 * @hw: pointer to hardware structure 3324 * 3325 * Enables the receive data path. 3326 **/ 3327 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw) 3328 { 3329 int secrxreg; 3330 3331 DEBUGFUNC("ixgbe_enable_sec_rx_path_generic"); 3332 3333 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 3334 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; 3335 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); 3336 IXGBE_WRITE_FLUSH(hw); 3337 3338 return IXGBE_SUCCESS; 3339 } 3340 3341 /** 3342 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit 3343 * @hw: pointer to hardware structure 3344 * @regval: register value to write to RXCTRL 3345 * 3346 * Enables the Rx DMA unit 3347 **/ 3348 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) 3349 { 3350 DEBUGFUNC("ixgbe_enable_rx_dma_generic"); 3351 3352 if (regval & IXGBE_RXCTRL_RXEN) 3353 ixgbe_enable_rx(hw); 3354 else 3355 ixgbe_disable_rx(hw); 3356 3357 return IXGBE_SUCCESS; 3358 } 3359 3360 /** 3361 * ixgbe_blink_led_start_generic - Blink LED based on index. 
3362 * @hw: pointer to hardware structure 3363 * @index: led number to blink 3364 **/ 3365 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) 3366 { 3367 ixgbe_link_speed speed = 0; 3368 bool link_up = 0; 3369 u32 autoc_reg = 0; 3370 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 3371 s32 ret_val = IXGBE_SUCCESS; 3372 bool locked = FALSE; 3373 3374 DEBUGFUNC("ixgbe_blink_led_start_generic"); 3375 3376 /* 3377 * Link must be up to auto-blink the LEDs; 3378 * Force it if link is down. 3379 */ 3380 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE); 3381 3382 if (!link_up) { 3383 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); 3384 if (ret_val != IXGBE_SUCCESS) 3385 goto out; 3386 3387 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 3388 autoc_reg |= IXGBE_AUTOC_FLU; 3389 3390 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); 3391 if (ret_val != IXGBE_SUCCESS) 3392 goto out; 3393 3394 IXGBE_WRITE_FLUSH(hw); 3395 msec_delay(10); 3396 } 3397 3398 led_reg &= ~IXGBE_LED_MODE_MASK(index); 3399 led_reg |= IXGBE_LED_BLINK(index); 3400 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 3401 IXGBE_WRITE_FLUSH(hw); 3402 3403 out: 3404 return ret_val; 3405 } 3406 3407 /** 3408 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. 
3409 * @hw: pointer to hardware structure 3410 * @index: led number to stop blinking 3411 **/ 3412 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) 3413 { 3414 u32 autoc_reg = 0; 3415 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 3416 s32 ret_val = IXGBE_SUCCESS; 3417 bool locked = FALSE; 3418 3419 DEBUGFUNC("ixgbe_blink_led_stop_generic"); 3420 3421 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); 3422 if (ret_val != IXGBE_SUCCESS) 3423 goto out; 3424 3425 autoc_reg &= ~IXGBE_AUTOC_FLU; 3426 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 3427 3428 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); 3429 if (ret_val != IXGBE_SUCCESS) 3430 goto out; 3431 3432 led_reg &= ~IXGBE_LED_MODE_MASK(index); 3433 led_reg &= ~IXGBE_LED_BLINK(index); 3434 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); 3435 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 3436 IXGBE_WRITE_FLUSH(hw); 3437 3438 out: 3439 return ret_val; 3440 } 3441 3442 /** 3443 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM 3444 * @hw: pointer to hardware structure 3445 * @san_mac_offset: SAN MAC address offset 3446 * 3447 * This function will read the EEPROM location for the SAN MAC address 3448 * pointer, and returns the value at that location. This is used in both 3449 * get and set mac_addr routines. 3450 **/ 3451 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, 3452 u16 *san_mac_offset) 3453 { 3454 s32 ret_val; 3455 3456 DEBUGFUNC("ixgbe_get_san_mac_addr_offset"); 3457 3458 /* 3459 * First read the EEPROM pointer to see if the MAC addresses are 3460 * available. 
3461 */ 3462 ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, 3463 san_mac_offset); 3464 if (ret_val) { 3465 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, 3466 "eeprom at offset %d failed", 3467 IXGBE_SAN_MAC_ADDR_PTR); 3468 } 3469 3470 return ret_val; 3471 } 3472 3473 /** 3474 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM 3475 * @hw: pointer to hardware structure 3476 * @san_mac_addr: SAN MAC address 3477 * 3478 * Reads the SAN MAC address from the EEPROM, if it's available. This is 3479 * per-port, so set_lan_id() must be called before reading the addresses. 3480 * set_lan_id() is called by identify_sfp(), but this cannot be relied 3481 * upon for non-SFP connections, so we must call it here. 3482 **/ 3483 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) 3484 { 3485 u16 san_mac_data, san_mac_offset; 3486 u8 i; 3487 s32 ret_val; 3488 3489 DEBUGFUNC("ixgbe_get_san_mac_addr_generic"); 3490 3491 /* 3492 * First read the EEPROM pointer to see if the MAC addresses are 3493 * available. If they're not, no point in calling set_lan_id() here. 3494 */ 3495 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); 3496 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) 3497 goto san_mac_addr_out; 3498 3499 /* make sure we know which port we need to program */ 3500 hw->mac.ops.set_lan_id(hw); 3501 /* apply the port offset to the address offset */ 3502 (hw->bus.func) ? 
(san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : 3503 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); 3504 for (i = 0; i < 3; i++) { 3505 ret_val = hw->eeprom.ops.read(hw, san_mac_offset, 3506 &san_mac_data); 3507 if (ret_val) { 3508 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, 3509 "eeprom read at offset %d failed", 3510 san_mac_offset); 3511 goto san_mac_addr_out; 3512 } 3513 san_mac_addr[i * 2] = (u8)(san_mac_data); 3514 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); 3515 san_mac_offset++; 3516 } 3517 return IXGBE_SUCCESS; 3518 3519 san_mac_addr_out: 3520 /* 3521 * No addresses available in this EEPROM. It's not an 3522 * error though, so just wipe the local address and return. 3523 */ 3524 for (i = 0; i < 6; i++) 3525 san_mac_addr[i] = 0xFF; 3526 return IXGBE_SUCCESS; 3527 } 3528 3529 /** 3530 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM 3531 * @hw: pointer to hardware structure 3532 * @san_mac_addr: SAN MAC address 3533 * 3534 * Write a SAN MAC address to the EEPROM. 3535 **/ 3536 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) 3537 { 3538 s32 ret_val; 3539 u16 san_mac_data, san_mac_offset; 3540 u8 i; 3541 3542 DEBUGFUNC("ixgbe_set_san_mac_addr_generic"); 3543 3544 /* Look for SAN mac address pointer. If not defined, return */ 3545 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); 3546 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) 3547 return IXGBE_ERR_NO_SAN_ADDR_PTR; 3548 3549 /* Make sure we know which port we need to write */ 3550 hw->mac.ops.set_lan_id(hw); 3551 /* Apply the port offset to the address offset */ 3552 (hw->bus.func) ? 
(san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : 3553 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); 3554 3555 for (i = 0; i < 3; i++) { 3556 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8); 3557 san_mac_data |= (u16)(san_mac_addr[i * 2]); 3558 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data); 3559 san_mac_offset++; 3560 } 3561 3562 return IXGBE_SUCCESS; 3563 } 3564 3565 /** 3566 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count 3567 * @hw: pointer to hardware structure 3568 * 3569 * Read PCIe configuration space, and get the MSI-X vector count from 3570 * the capabilities table. 3571 **/ 3572 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) 3573 { 3574 u16 msix_count = 1; 3575 u16 max_msix_count; 3576 u16 pcie_offset; 3577 3578 switch (hw->mac.type) { 3579 case ixgbe_mac_82598EB: 3580 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS; 3581 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598; 3582 break; 3583 case ixgbe_mac_82599EB: 3584 case ixgbe_mac_X540: 3585 case ixgbe_mac_X550: 3586 case ixgbe_mac_X550EM_x: 3587 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; 3588 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; 3589 break; 3590 default: 3591 return msix_count; 3592 } 3593 3594 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic"); 3595 msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset); 3596 if (IXGBE_REMOVED(hw->hw_addr)) 3597 msix_count = 0; 3598 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; 3599 3600 /* MSI-X count is zero-based in HW */ 3601 msix_count++; 3602 3603 if (msix_count > max_msix_count) 3604 msix_count = max_msix_count; 3605 3606 return msix_count; 3607 } 3608 3609 /** 3610 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address 3611 * @hw: pointer to hardware structure 3612 * @addr: Address to put into receive address register 3613 * @vmdq: VMDq pool to assign 3614 * 3615 * Puts an ethernet address into a receive address register, or 3616 * finds the rar that it is already in; adds to the pool list 3617 **/ 3618 s32 
ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) 3619 { 3620 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF; 3621 u32 first_empty_rar = NO_EMPTY_RAR_FOUND; 3622 u32 rar; 3623 u32 rar_low, rar_high; 3624 u32 addr_low, addr_high; 3625 3626 DEBUGFUNC("ixgbe_insert_mac_addr_generic"); 3627 3628 /* swap bytes for HW little endian */ 3629 addr_low = addr[0] | (addr[1] << 8) 3630 | (addr[2] << 16) 3631 | (addr[3] << 24); 3632 addr_high = addr[4] | (addr[5] << 8); 3633 3634 /* 3635 * Either find the mac_id in rar or find the first empty space. 3636 * rar_highwater points to just after the highest currently used 3637 * rar in order to shorten the search. It grows when we add a new 3638 * rar to the top. 3639 */ 3640 for (rar = 0; rar < hw->mac.rar_highwater; rar++) { 3641 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); 3642 3643 if (((IXGBE_RAH_AV & rar_high) == 0) 3644 && first_empty_rar == NO_EMPTY_RAR_FOUND) { 3645 first_empty_rar = rar; 3646 } else if ((rar_high & 0xFFFF) == addr_high) { 3647 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar)); 3648 if (rar_low == addr_low) 3649 break; /* found it already in the rars */ 3650 } 3651 } 3652 3653 if (rar < hw->mac.rar_highwater) { 3654 /* already there so just add to the pool bits */ 3655 ixgbe_set_vmdq(hw, rar, vmdq); 3656 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) { 3657 /* stick it into first empty RAR slot we found */ 3658 rar = first_empty_rar; 3659 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); 3660 } else if (rar == hw->mac.rar_highwater) { 3661 /* add it to the top of the list and inc the highwater mark */ 3662 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); 3663 hw->mac.rar_highwater++; 3664 } else if (rar >= hw->mac.num_rar_entries) { 3665 return IXGBE_ERR_INVALID_MAC_ADDR; 3666 } 3667 3668 /* 3669 * If we found rar[0], make sure the default pool bit (we use pool 0) 3670 * remains cleared to be sure default pool packets will get delivered 3671 */ 3672 if (rar == 0) 3673 
ixgbe_clear_vmdq(hw, rar, 0); 3674 3675 return rar; 3676 } 3677 3678 /** 3679 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address 3680 * @hw: pointer to hardware struct 3681 * @rar: receive address register index to disassociate 3682 * @vmdq: VMDq pool index to remove from the rar 3683 **/ 3684 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 3685 { 3686 u32 mpsar_lo, mpsar_hi; 3687 u32 rar_entries = hw->mac.num_rar_entries; 3688 3689 DEBUGFUNC("ixgbe_clear_vmdq_generic"); 3690 3691 /* Make sure we are using a valid rar index range */ 3692 if (rar >= rar_entries) { 3693 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, 3694 "RAR index %d is out of range.\n", rar); 3695 return IXGBE_ERR_INVALID_ARGUMENT; 3696 } 3697 3698 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 3699 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 3700 3701 if (IXGBE_REMOVED(hw->hw_addr)) 3702 goto done; 3703 3704 if (!mpsar_lo && !mpsar_hi) 3705 goto done; 3706 3707 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { 3708 if (mpsar_lo) { 3709 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); 3710 mpsar_lo = 0; 3711 } 3712 if (mpsar_hi) { 3713 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); 3714 mpsar_hi = 0; 3715 } 3716 } else if (vmdq < 32) { 3717 mpsar_lo &= ~(1 << vmdq); 3718 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); 3719 } else { 3720 mpsar_hi &= ~(1 << (vmdq - 32)); 3721 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); 3722 } 3723 3724 /* was that the last pool using this rar? 
*/ 3725 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) 3726 hw->mac.ops.clear_rar(hw, rar); 3727 done: 3728 return IXGBE_SUCCESS; 3729 } 3730 3731 /** 3732 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address 3733 * @hw: pointer to hardware struct 3734 * @rar: receive address register index to associate with a VMDq index 3735 * @vmdq: VMDq pool index 3736 **/ 3737 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 3738 { 3739 u32 mpsar; 3740 u32 rar_entries = hw->mac.num_rar_entries; 3741 3742 DEBUGFUNC("ixgbe_set_vmdq_generic"); 3743 3744 /* Make sure we are using a valid rar index range */ 3745 if (rar >= rar_entries) { 3746 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, 3747 "RAR index %d is out of range.\n", rar); 3748 return IXGBE_ERR_INVALID_ARGUMENT; 3749 } 3750 3751 if (vmdq < 32) { 3752 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 3753 mpsar |= 1 << vmdq; 3754 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); 3755 } else { 3756 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 3757 mpsar |= 1 << (vmdq - 32); 3758 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); 3759 } 3760 return IXGBE_SUCCESS; 3761 } 3762 3763 /** 3764 * This function should only be involved in the IOV mode. 3765 * In IOV mode, Default pool is next pool after the number of 3766 * VFs advertized and not 0. 
3767 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index] 3768 * 3769 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address 3770 * @hw: pointer to hardware struct 3771 * @vmdq: VMDq pool index 3772 **/ 3773 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) 3774 { 3775 u32 rar = hw->mac.san_mac_rar_index; 3776 3777 DEBUGFUNC("ixgbe_set_vmdq_san_mac"); 3778 3779 if (vmdq < 32) { 3780 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq); 3781 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); 3782 } else { 3783 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); 3784 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32)); 3785 } 3786 3787 return IXGBE_SUCCESS; 3788 } 3789 3790 /** 3791 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array 3792 * @hw: pointer to hardware structure 3793 **/ 3794 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) 3795 { 3796 int i; 3797 3798 DEBUGFUNC("ixgbe_init_uta_tables_generic"); 3799 DEBUGOUT(" Clearing UTA\n"); 3800 3801 for (i = 0; i < 128; i++) 3802 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); 3803 3804 return IXGBE_SUCCESS; 3805 } 3806 3807 /** 3808 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot 3809 * @hw: pointer to hardware structure 3810 * @vlan: VLAN id to write to VLAN filter 3811 * 3812 * return the VLVF index where this VLAN id should be placed 3813 * 3814 **/ 3815 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan) 3816 { 3817 u32 bits = 0; 3818 u32 first_empty_slot = 0; 3819 s32 regindex; 3820 3821 /* short cut the special case */ 3822 if (vlan == 0) 3823 return 0; 3824 3825 /* 3826 * Search for the vlan id in the VLVF entries. 
Save off the first empty 3827 * slot found along the way 3828 */ 3829 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) { 3830 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); 3831 if (!bits && !(first_empty_slot)) 3832 first_empty_slot = regindex; 3833 else if ((bits & 0x0FFF) == vlan) 3834 break; 3835 } 3836 3837 /* 3838 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan 3839 * in the VLVF. Else use the first empty VLVF register for this 3840 * vlan id. 3841 */ 3842 if (regindex >= IXGBE_VLVF_ENTRIES) { 3843 if (first_empty_slot) 3844 regindex = first_empty_slot; 3845 else { 3846 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, 3847 "No space in VLVF.\n"); 3848 regindex = IXGBE_ERR_NO_SPACE; 3849 } 3850 } 3851 3852 return regindex; 3853 } 3854 3855 /** 3856 * ixgbe_set_vfta_generic - Set VLAN filter table 3857 * @hw: pointer to hardware structure 3858 * @vlan: VLAN id to write to VLAN filter 3859 * @vind: VMDq output index that maps queue to VLAN id in VFVFB 3860 * @vlan_on: boolean flag to turn on/off VLAN in VFVF 3861 * 3862 * Turn on/off specified VLAN in the VLAN filter table. 3863 **/ 3864 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, 3865 bool vlan_on) 3866 { 3867 s32 regindex; 3868 u32 bitindex; 3869 u32 vfta; 3870 u32 targetbit; 3871 s32 ret_val = IXGBE_SUCCESS; 3872 bool vfta_changed = FALSE; 3873 3874 DEBUGFUNC("ixgbe_set_vfta_generic"); 3875 3876 if (vlan > 4095) 3877 return IXGBE_ERR_PARAM; 3878 3879 /* 3880 * this is a 2 part operation - first the VFTA, then the 3881 * VLVF and VLVFB if VT Mode is set 3882 * We don't write the VFTA until we know the VLVF part succeeded. 
3883 */ 3884 3885 /* Part 1 3886 * The VFTA is a bitstring made up of 128 32-bit registers 3887 * that enable the particular VLAN id, much like the MTA: 3888 * bits[11-5]: which register 3889 * bits[4-0]: which bit in the register 3890 */ 3891 regindex = (vlan >> 5) & 0x7F; 3892 bitindex = vlan & 0x1F; 3893 targetbit = (1 << bitindex); 3894 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); 3895 3896 if (vlan_on) { 3897 if (!(vfta & targetbit)) { 3898 vfta |= targetbit; 3899 vfta_changed = TRUE; 3900 } 3901 } else { 3902 if ((vfta & targetbit)) { 3903 vfta &= ~targetbit; 3904 vfta_changed = TRUE; 3905 } 3906 } 3907 3908 /* Part 2 3909 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF 3910 */ 3911 ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, 3912 &vfta_changed); 3913 if (ret_val != IXGBE_SUCCESS) 3914 return ret_val; 3915 3916 if (vfta_changed) 3917 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta); 3918 3919 return IXGBE_SUCCESS; 3920 } 3921 3922 /** 3923 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter 3924 * @hw: pointer to hardware structure 3925 * @vlan: VLAN id to write to VLAN filter 3926 * @vind: VMDq output index that maps queue to VLAN id in VFVFB 3927 * @vlan_on: boolean flag to turn on/off VLAN in VFVF 3928 * @vfta_changed: pointer to boolean flag which indicates whether VFTA 3929 * should be changed 3930 * 3931 * Turn on/off specified bit in VLVF table. 
3932 **/ 3933 s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, 3934 bool vlan_on, bool *vfta_changed) 3935 { 3936 u32 vt; 3937 3938 DEBUGFUNC("ixgbe_set_vlvf_generic"); 3939 3940 if (vlan > 4095) 3941 return IXGBE_ERR_PARAM; 3942 3943 /* If VT Mode is set 3944 * Either vlan_on 3945 * make sure the vlan is in VLVF 3946 * set the vind bit in the matching VLVFB 3947 * Or !vlan_on 3948 * clear the pool bit and possibly the vind 3949 */ 3950 vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 3951 if (vt & IXGBE_VT_CTL_VT_ENABLE) { 3952 s32 vlvf_index; 3953 u32 bits; 3954 3955 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan); 3956 if (vlvf_index < 0) 3957 return vlvf_index; 3958 3959 if (vlan_on) { 3960 /* set the pool bit */ 3961 if (vind < 32) { 3962 bits = IXGBE_READ_REG(hw, 3963 IXGBE_VLVFB(vlvf_index * 2)); 3964 bits |= (1 << vind); 3965 IXGBE_WRITE_REG(hw, 3966 IXGBE_VLVFB(vlvf_index * 2), 3967 bits); 3968 } else { 3969 bits = IXGBE_READ_REG(hw, 3970 IXGBE_VLVFB((vlvf_index * 2) + 1)); 3971 bits |= (1 << (vind - 32)); 3972 IXGBE_WRITE_REG(hw, 3973 IXGBE_VLVFB((vlvf_index * 2) + 1), 3974 bits); 3975 } 3976 } else { 3977 /* clear the pool bit */ 3978 if (vind < 32) { 3979 bits = IXGBE_READ_REG(hw, 3980 IXGBE_VLVFB(vlvf_index * 2)); 3981 bits &= ~(1 << vind); 3982 IXGBE_WRITE_REG(hw, 3983 IXGBE_VLVFB(vlvf_index * 2), 3984 bits); 3985 bits |= IXGBE_READ_REG(hw, 3986 IXGBE_VLVFB((vlvf_index * 2) + 1)); 3987 } else { 3988 bits = IXGBE_READ_REG(hw, 3989 IXGBE_VLVFB((vlvf_index * 2) + 1)); 3990 bits &= ~(1 << (vind - 32)); 3991 IXGBE_WRITE_REG(hw, 3992 IXGBE_VLVFB((vlvf_index * 2) + 1), 3993 bits); 3994 bits |= IXGBE_READ_REG(hw, 3995 IXGBE_VLVFB(vlvf_index * 2)); 3996 } 3997 } 3998 3999 /* 4000 * If there are still bits set in the VLVFB registers 4001 * for the VLAN ID indicated we need to see if the 4002 * caller is requesting that we clear the VFTA entry bit. 
4003 * If the caller has requested that we clear the VFTA 4004 * entry bit but there are still pools/VFs using this VLAN 4005 * ID entry then ignore the request. We're not worried 4006 * about the case where we're turning the VFTA VLAN ID 4007 * entry bit on, only when requested to turn it off as 4008 * there may be multiple pools and/or VFs using the 4009 * VLAN ID entry. In that case we cannot clear the 4010 * VFTA bit until all pools/VFs using that VLAN ID have also 4011 * been cleared. This will be indicated by "bits" being 4012 * zero. 4013 */ 4014 if (bits) { 4015 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 4016 (IXGBE_VLVF_VIEN | vlan)); 4017 if ((!vlan_on) && (vfta_changed != NULL)) { 4018 /* someone wants to clear the vfta entry 4019 * but some pools/VFs are still using it. 4020 * Ignore it. */ 4021 *vfta_changed = FALSE; 4022 } 4023 } else 4024 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); 4025 } 4026 4027 return IXGBE_SUCCESS; 4028 } 4029 4030 /** 4031 * ixgbe_clear_vfta_generic - Clear VLAN filter table 4032 * @hw: pointer to hardware structure 4033 * 4034 * Clears the VLAN filer table, and the VMDq index associated with the filter 4035 **/ 4036 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) 4037 { 4038 u32 offset; 4039 4040 DEBUGFUNC("ixgbe_clear_vfta_generic"); 4041 4042 for (offset = 0; offset < hw->mac.vft_size; offset++) 4043 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); 4044 4045 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { 4046 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); 4047 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0); 4048 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0); 4049 } 4050 4051 return IXGBE_SUCCESS; 4052 } 4053 4054 /** 4055 * ixgbe_check_mac_link_generic - Determine link and speed status 4056 * @hw: pointer to hardware structure 4057 * @speed: pointer to link speed 4058 * @link_up: TRUE when link is up 4059 * @link_up_wait_to_complete: bool used to wait for link up or not 4060 * 4061 * Reads the links 
register to determine if link is up and the current speed 4062 **/ 4063 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 4064 bool *link_up, bool link_up_wait_to_complete) 4065 { 4066 u32 links_reg, links_orig; 4067 u32 i; 4068 4069 DEBUGFUNC("ixgbe_check_mac_link_generic"); 4070 4071 /* clear the old state */ 4072 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); 4073 4074 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 4075 4076 if (links_orig != links_reg) { 4077 DEBUGOUT2("LINKS changed from %08X to %08X\n", 4078 links_orig, links_reg); 4079 } 4080 4081 if (link_up_wait_to_complete) { 4082 for (i = 0; i < hw->mac.max_link_up_time; i++) { 4083 if (links_reg & IXGBE_LINKS_UP) { 4084 *link_up = TRUE; 4085 break; 4086 } else { 4087 *link_up = FALSE; 4088 } 4089 msec_delay(100); 4090 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 4091 } 4092 } else { 4093 if (links_reg & IXGBE_LINKS_UP) 4094 *link_up = TRUE; 4095 else 4096 *link_up = FALSE; 4097 } 4098 4099 switch (links_reg & IXGBE_LINKS_SPEED_82599) { 4100 case IXGBE_LINKS_SPEED_10G_82599: 4101 *speed = IXGBE_LINK_SPEED_10GB_FULL; 4102 if (hw->mac.type >= ixgbe_mac_X550) { 4103 if (links_reg & IXGBE_LINKS_SPEED_NON_STD) 4104 *speed = IXGBE_LINK_SPEED_2_5GB_FULL; 4105 } 4106 break; 4107 case IXGBE_LINKS_SPEED_1G_82599: 4108 *speed = IXGBE_LINK_SPEED_1GB_FULL; 4109 break; 4110 case IXGBE_LINKS_SPEED_100_82599: 4111 *speed = IXGBE_LINK_SPEED_100_FULL; 4112 if (hw->mac.type >= ixgbe_mac_X550) { 4113 if (links_reg & IXGBE_LINKS_SPEED_NON_STD) 4114 *speed = IXGBE_LINK_SPEED_5GB_FULL; 4115 } 4116 break; 4117 default: 4118 *speed = IXGBE_LINK_SPEED_UNKNOWN; 4119 } 4120 4121 return IXGBE_SUCCESS; 4122 } 4123 4124 /** 4125 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from 4126 * the EEPROM 4127 * @hw: pointer to hardware structure 4128 * @wwnn_prefix: the alternative WWNN prefix 4129 * @wwpn_prefix: the alternative WWPN prefix 4130 * 4131 * This function will read the EEPROM from 
the alternative SAN MAC address 4132 * block to check the support for the alternative WWNN/WWPN prefix support. 4133 **/ 4134 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, 4135 u16 *wwpn_prefix) 4136 { 4137 u16 offset, caps; 4138 u16 alt_san_mac_blk_offset; 4139 4140 DEBUGFUNC("ixgbe_get_wwn_prefix_generic"); 4141 4142 /* clear output first */ 4143 *wwnn_prefix = 0xFFFF; 4144 *wwpn_prefix = 0xFFFF; 4145 4146 /* check if alternative SAN MAC is supported */ 4147 offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR; 4148 if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset)) 4149 goto wwn_prefix_err; 4150 4151 if ((alt_san_mac_blk_offset == 0) || 4152 (alt_san_mac_blk_offset == 0xFFFF)) 4153 goto wwn_prefix_out; 4154 4155 /* check capability in alternative san mac address block */ 4156 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; 4157 if (hw->eeprom.ops.read(hw, offset, &caps)) 4158 goto wwn_prefix_err; 4159 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) 4160 goto wwn_prefix_out; 4161 4162 /* get the corresponding prefix for WWNN/WWPN */ 4163 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; 4164 if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) { 4165 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, 4166 "eeprom read at offset %d failed", offset); 4167 } 4168 4169 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; 4170 if (hw->eeprom.ops.read(hw, offset, wwpn_prefix)) 4171 goto wwn_prefix_err; 4172 4173 wwn_prefix_out: 4174 return IXGBE_SUCCESS; 4175 4176 wwn_prefix_err: 4177 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, 4178 "eeprom read at offset %d failed", offset); 4179 return IXGBE_SUCCESS; 4180 } 4181 4182 /** 4183 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM 4184 * @hw: pointer to hardware structure 4185 * @bs: the fcoe boot status 4186 * 4187 * This function will read the FCOE boot status from the iSCSI FCOE block 4188 **/ 4189 s32 
ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
{
	u16 offset, caps, flags;
	s32 status;

	DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");

	/* clear output first */
	*bs = ixgbe_fcoe_bootstatus_unavailable;

	/* check if FCOE IBA block is present */
	offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
	status = hw->eeprom.ops.read(hw, offset, &caps);
	if (status != IXGBE_SUCCESS)
		goto out;

	if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
		goto out;

	/* check if iSCSI FCOE block is populated */
	status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
	if (status != IXGBE_SUCCESS)
		goto out;

	/* 0 or 0xFFFF means the block pointer is unprogrammed */
	if ((offset == 0) || (offset == 0xFFFF))
		goto out;

	/* read fcoe flags in iSCSI FCOE block */
	offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
	status = hw->eeprom.ops.read(hw, offset, &flags);
	if (status != IXGBE_SUCCESS)
		goto out;

	if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
		*bs = ixgbe_fcoe_bootstatus_enabled;
	else
		*bs = ixgbe_fcoe_bootstatus_disabled;

out:
	return status;
}

/**
 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for anti-spoofing
 * @pf: Physical Function pool - do not enable anti-spoofing for the PF
 *
 **/
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
{
	int j;
	/* Each PFVFSPOOF register carries the MAC anti-spoof enables for
	 * 8 pools, so pool index splits into register and bit position. */
	int pf_target_reg = pf >> 3;
	int pf_target_shift = pf % 8;
	u32 pfvfspoof = 0;

	/* 82598 has no anti-spoofing support */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	if (enable)
		pfvfspoof = IXGBE_SPOOF_MACAS_MASK;

	/*
	 * PFVFSPOOF register array is size 8 with 8 bits assigned to
	 * MAC anti-spoof enables in each register array element.
	 */
	/* Registers fully below the PF's register get all 8 bits */
	for (j = 0; j < pf_target_reg; j++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);

	/*
	 * The PF should be allowed to spoof so that it can support
	 * emulation mode NICs. Do not set the bits assigned to the PF
	 */
	pfvfspoof &= (1 << pf_target_shift) - 1;
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);

	/*
	 * Remaining pools belong to the PF so they do not need to have
	 * anti-spoofing enabled.
	 */
	for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
}

/**
 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for VLAN anti-spoofing
 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
 *
 **/
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
{
	/* 8 pools per PFVFSPOOF register; the VLAN anti-spoof enables sit
	 * above the MAC enables at IXGBE_SPOOF_VLANAS_SHIFT */
	int vf_target_reg = vf >> 3;
	int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
	u32 pfvfspoof;

	/* 82598 has no anti-spoofing support */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	/* read-modify-write only this VF's bit */
	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
	if (enable)
		pfvfspoof |= (1 << vf_target_shift);
	else
		pfvfspoof &= ~(1 << vf_target_shift);
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}

/**
 * ixgbe_get_device_caps_generic - Get additional device capabilities
 * @hw: pointer to hardware structure
 * @device_caps: the EEPROM word with the extra device capabilities
 *
 * This function will read the EEPROM location for the device capabilities,
 * and return the word through device_caps.
4304 **/ 4305 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) 4306 { 4307 DEBUGFUNC("ixgbe_get_device_caps_generic"); 4308 4309 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); 4310 4311 return IXGBE_SUCCESS; 4312 } 4313 4314 /** 4315 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering 4316 * @hw: pointer to hardware structure 4317 * 4318 **/ 4319 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw) 4320 { 4321 u32 regval; 4322 u32 i; 4323 4324 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2"); 4325 4326 /* Enable relaxed ordering */ 4327 for (i = 0; i < hw->mac.max_tx_queues; i++) { 4328 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); 4329 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN; 4330 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); 4331 } 4332 4333 for (i = 0; i < hw->mac.max_rx_queues; i++) { 4334 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); 4335 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN | 4336 IXGBE_DCA_RXCTRL_HEAD_WRO_EN; 4337 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); 4338 } 4339 4340 } 4341 4342 /** 4343 * ixgbe_calculate_checksum - Calculate checksum for buffer 4344 * @buffer: pointer to EEPROM 4345 * @length: size of EEPROM to calculate a checksum for 4346 * Calculates the checksum for some buffer on a specified length. The 4347 * checksum calculated is returned. 
 **/
u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
{
	u32 i;
	u8 sum = 0;

	DEBUGFUNC("ixgbe_calculate_checksum");

	if (!buffer)
		return 0;

	/* Two's-complement checksum: returns the byte that makes the
	 * buffer (with this value stored in it) sum to zero mod 256. */
	for (i = 0; i < length; i++)
		sum += buffer[i];

	return (u8) (0 - sum);
}

/**
 * ixgbe_host_interface_command - Issue command to manageability block
 * @hw: pointer to the HW structure
 * @buffer: contains the command to write and where the return status will
 *  be placed
 * @length: length of buffer, must be multiple of 4 bytes
 * @timeout: time in ms to wait for command completion
 * @return_data: read and return data from the buffer (TRUE) or not (FALSE)
 *  Needed because FW structures are big endian and decoding of
 *  these fields can be 8 bit or 16 bit based on command. Decoding
 *  is not easily understood without making a table of commands.
 *  So we will leave this up to the caller to read back the data
 *  in these cases.
 *
 * Communicates with the manageability block. On success return IXGBE_SUCCESS
 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
 **/
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
				 u32 length, u32 timeout, bool return_data)
{
	u32 hicr, i, bi, fwsts;
	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
	u16 buf_len;
	u16 dword_len;

	DEBUGFUNC("ixgbe_host_interface_command");

	/* reject zero-length and oversized command buffers */
	if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}
	/* Set bit 9 of FWSTS clearing FW reset indication */
	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);

	/* Check that the host interface is enabled.
	 */
	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
	if ((hicr & IXGBE_HICR_EN) == 0) {
		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Calculate length in DWORDs. We must be DWORD aligned */
	if ((length % (sizeof(u32))) != 0) {
		DEBUGOUT("Buffer length failure, not aligned to dword");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	dword_len = length >> 2;

	/* The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
				      i, IXGBE_CPU_TO_LE32(buffer[i]));

	/* Setting this bit tells the ARC that a new command is pending. */
	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);

	/* Poll at 1 ms granularity until firmware clears the command bit */
	for (i = 0; i < timeout; i++) {
		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
		if (!(hicr & IXGBE_HICR_C))
			break;
		msec_delay(1);
	}

	/* Check command completion: fail if we timed out or the
	 * status-valid bit never came up */
	if ((timeout != 0 && i == timeout) ||
	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
			      "Command has failed with no status valid.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	if (!return_data)
		return 0;

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS(&buffer[bi]);
	}

	/* If there is any thing in data position pull it in */
	buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
	if (buf_len == 0)
		return 0;

	/* the caller's buffer must hold header plus reply payload */
	if (length < buf_len + hdr_size) {
		DEBUGOUT("Buffer not large enough for reply message.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len =
(buf_len + 3) >> 2; 4464 4465 /* Pull in the rest of the buffer (bi is where we left off) */ 4466 for (; bi <= dword_len; bi++) { 4467 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); 4468 IXGBE_LE32_TO_CPUS(&buffer[bi]); 4469 } 4470 4471 return 0; 4472 } 4473 4474 /** 4475 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware 4476 * @hw: pointer to the HW structure 4477 * @maj: driver version major number 4478 * @minr: driver version minor number 4479 * @build: driver version build number 4480 * @sub: driver version sub build number 4481 * 4482 * Sends driver version number to firmware through the manageability 4483 * block. On success return IXGBE_SUCCESS 4484 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring 4485 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 4486 **/ 4487 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 minr, 4488 u8 build, u8 sub) 4489 { 4490 struct ixgbe_hic_drv_info fw_cmd; 4491 int i; 4492 s32 ret_val = IXGBE_SUCCESS; 4493 4494 DEBUGFUNC("ixgbe_set_fw_drv_ver_generic"); 4495 4496 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM) 4497 != IXGBE_SUCCESS) { 4498 ret_val = IXGBE_ERR_SWFW_SYNC; 4499 goto out; 4500 } 4501 4502 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; 4503 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; 4504 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; 4505 fw_cmd.port_num = (u8)hw->bus.func; 4506 fw_cmd.ver_maj = maj; 4507 fw_cmd.ver_min = minr; 4508 fw_cmd.ver_build = build; 4509 fw_cmd.ver_sub = sub; 4510 fw_cmd.hdr.checksum = 0; 4511 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, 4512 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); 4513 fw_cmd.pad = 0; 4514 fw_cmd.pad2 = 0; 4515 4516 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { 4517 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, 4518 sizeof(fw_cmd), 4519 IXGBE_HI_COMMAND_TIMEOUT, 4520 TRUE); 4521 if (ret_val != IXGBE_SUCCESS) 4522 continue; 4523 4524 if 
 (fw_cmd.hdr.cmd_or_resp.ret_status ==
		    FW_CEM_RESP_STATUS_SUCCESS)
			ret_val = IXGBE_SUCCESS;
		else
			ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;

		/* command reached firmware; stop retrying */
		break;
	}

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
out:
	return ret_val;
}

/**
 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
 * @hw: pointer to hardware structure
 * @num_pb: number of packet buffers to allocate
 * @headroom: reserve n KB of headroom
 * @strategy: packet buffer allocation strategy
 **/
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
			     int strategy)
{
	u32 pbsize = hw->mac.rx_pb_size;
	int i = 0;
	u32 rxpktsize, txpktsize, txpbthresh;

	/* Reserve headroom */
	pbsize -= headroom;

	/* guard the divisions below against num_pb == 0 */
	if (!num_pb)
		num_pb = 1;

	/* Divide remaining packet buffer space amongst the number of packet
	 * buffers requested using supplied strategy.
	 */
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
		/* ixgbe_dcb_pba_80_48 strategy weight first half of packet
		 * buffer with 5/8 of the packet buffer space.
		 */
		rxpktsize = (pbsize * 5) / (num_pb * 4);
		pbsize -= rxpktsize * (num_pb / 2);
		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
		for (; i < (num_pb / 2); i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* Fall through to configure remaining packet buffers */
	case PBA_STRATEGY_EQUAL:
		/* split what is left evenly across the remaining buffers */
		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
		for (; i < num_pb; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	default:
		break;
	}

	/* Only support an equally distributed Tx packet buffer strategy.
	 */
	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
	for (i = 0; i < num_pb; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
	}

	/* Clear unused TCs, if any, to zero buffer size*/
	for (; i < IXGBE_MAX_PB; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
	}
}

/**
 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
 * @hw: pointer to the hardware structure
 *
 * The 82599 and x540 MACs can experience issues if TX work is still pending
 * when a reset occurs. This function prevents this by flushing the PCIe
 * buffers on the system.
 **/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
	u32 gcr_ext, hlreg0, i, poll;
	u16 value;

	/*
	 * If double reset is not requested then all transactions should
	 * already be clear and as such there is no work to do
	 */
	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
		return;

	/*
	 * Set loopback enable to prevent any transmits from being sent
	 * should the link come up. This assumes that the RXCTRL.RXEN bit
	 * has already been cleared.
	 */
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);

	/* Wait for a last completion before clearing buffers */
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(3);

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
4632 */ 4633 poll = ixgbe_pcie_timeout_poll(hw); 4634 for (i = 0; i < poll; i++) { 4635 usec_delay(100); 4636 value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS); 4637 if (IXGBE_REMOVED(hw->hw_addr)) 4638 goto out; 4639 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) 4640 goto out; 4641 } 4642 4643 out: 4644 /* initiate cleaning flow for buffers in the PCIe transaction layer */ 4645 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); 4646 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 4647 gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR); 4648 4649 /* Flush all writes and allow 20usec for all transactions to clear */ 4650 IXGBE_WRITE_FLUSH(hw); 4651 usec_delay(20); 4652 4653 /* restore previous register values */ 4654 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); 4655 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 4656 } 4657 4658 4659 /** 4660 * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg 4661 * @hw: pointer to hardware structure 4662 * @map: pointer to u8 arr for returning map 4663 * 4664 * Read the rtrup2tc HW register and resolve its content into map 4665 **/ 4666 void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map) 4667 { 4668 u32 reg, i; 4669 4670 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); 4671 for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) 4672 map[i] = IXGBE_RTRUP2TC_UP_MASK & 4673 (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT)); 4674 return; 4675 } 4676 4677 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw) 4678 { 4679 u32 pfdtxgswc; 4680 u32 rxctrl; 4681 4682 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 4683 if (rxctrl & IXGBE_RXCTRL_RXEN) { 4684 if (hw->mac.type != ixgbe_mac_82598EB) { 4685 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); 4686 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { 4687 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; 4688 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); 4689 hw->mac.set_lben = TRUE; 4690 } else { 4691 hw->mac.set_lben = FALSE; 4692 } 4693 } 4694 rxctrl &= ~IXGBE_RXCTRL_RXEN; 4695 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); 4696 } 
4697 } 4698 4699 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw) 4700 { 4701 u32 pfdtxgswc; 4702 u32 rxctrl; 4703 4704 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 4705 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN)); 4706 4707 if (hw->mac.type != ixgbe_mac_82598EB) { 4708 if (hw->mac.set_lben) { 4709 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); 4710 pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN; 4711 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); 4712 hw->mac.set_lben = FALSE; 4713 } 4714 } 4715 } 4716 4717 /** 4718 * ixgbe_mng_present - returns TRUE when management capability is present 4719 * @hw: pointer to hardware structure 4720 */ 4721 bool ixgbe_mng_present(struct ixgbe_hw *hw) 4722 { 4723 u32 fwsm; 4724 4725 if (hw->mac.type < ixgbe_mac_82599EB) 4726 return FALSE; 4727 4728 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)); 4729 fwsm &= IXGBE_FWSM_MODE_MASK; 4730 return fwsm == IXGBE_FWSM_FW_MODE_PT; 4731 } 4732 4733 /** 4734 * ixgbe_mng_enabled - Is the manageability engine enabled? 4735 * @hw: pointer to hardware structure 4736 * 4737 * Returns TRUE if the manageability engine is enabled. 4738 **/ 4739 bool ixgbe_mng_enabled(struct ixgbe_hw *hw) 4740 { 4741 u32 fwsm, manc, factps; 4742 4743 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)); 4744 if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) 4745 return FALSE; 4746 4747 manc = IXGBE_READ_REG(hw, IXGBE_MANC); 4748 if (!(manc & IXGBE_MANC_RCV_TCO_EN)) 4749 return FALSE; 4750 4751 if (hw->mac.type <= ixgbe_mac_X540) { 4752 factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw)); 4753 if (factps & IXGBE_FACTPS_MNGCG) 4754 return FALSE; 4755 } 4756 4757 return TRUE; 4758 } 4759 4760 /** 4761 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed 4762 * @hw: pointer to hardware structure 4763 * @speed: new link speed 4764 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed 4765 * 4766 * Set the link speed in the MAC and/or PHY register and restarts link. 
 **/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					  ixgbe_link_speed speed,
					  bool autoneg_wait_to_complete)
{
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 status = IXGBE_SUCCESS;
	u32 speedcnt = 0;
	u32 i = 0;
	bool autoneg, link_up = FALSE;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
	if (status != IXGBE_SUCCESS)
		return status;

	speed &= link_speed;

	/* Try each speed one by one, highest priority first. We do this in
	 * software because 10Gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
			goto out;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber_fixed:
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_10GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects MAC link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_10GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the controller to acquire link. Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted. 82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
						  &link_up, FALSE);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	/* 10G failed (or was not requested); fall back to trying 1G */
	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
			goto out;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber_fixed:
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_1GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_1GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	/* We didn't get link. Configure back to the highest speed we tried,
	 * (if there was more than one). We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
						       highest_link_speed,
						       autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}

/**
 * ixgbe_set_soft_rate_select_speed - Set module link speed
 * @hw: pointer to hardware structure
 * @speed: link speed to set
 *
 * Set module link speed via the soft rate select.
 */
void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed)
{
	s32 status;
	u8 rs, eeprom_data;

	/* pick the rate-select bit pattern for the requested speed */
	switch (speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		/* one bit mask same as setting on */
		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
		break;
	default:
		DEBUGOUT("Invalid fixed module speed\n");
		return;
	}

	/* Set RS0: read-modify-write the rate select bits over I2C */
	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
					   IXGBE_I2C_EEPROM_DEV_ADDR2,
					   &eeprom_data);
	if (status) {
		DEBUGOUT("Failed to read Rx Rate Select RS0\n");
		goto out;
	}

	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;

	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
					    IXGBE_I2C_EEPROM_DEV_ADDR2,
					    eeprom_data);
	if (status) {
		DEBUGOUT("Failed to write Rx Rate Select RS0\n");
		goto out;
	}

	/* Set RS1: same read-modify-write on the extended control byte */
	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
					   IXGBE_I2C_EEPROM_DEV_ADDR2,
					   &eeprom_data);
	if (status) {
		DEBUGOUT("Failed to read Rx Rate Select RS1\n");
		goto out;
	}

	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;

	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
					    IXGBE_I2C_EEPROM_DEV_ADDR2,
					    eeprom_data);
	if (status) {
		DEBUGOUT("Failed to write Rx Rate Select RS1\n");
		goto out;
	}
out:
	return;
}