/* $NetBSD: ixgbe_x540.c,v 1.23 2021/12/24 05:11:04 msaitoh Exp $ */

/******************************************************************************
  SPDX-License-Identifier: BSD-3-Clause

  Copyright (c) 2001-2020, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_x540.c 331224 2018-03-19 20:55:05Z erj $*/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ixgbe_x540.c,v 1.23 2021/12/24 05:11:04 msaitoh Exp $");

#include "ixgbe_x540.h"
#include "ixgbe_type.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"

#define IXGBE_X540_MAX_TX_QUEUES	128
#define IXGBE_X540_MAX_RX_QUEUES	128
#define IXGBE_X540_RAR_ENTRIES		128
#define IXGBE_X540_MC_TBL_SIZE		128
#define IXGBE_X540_VFT_TBL_SIZE		128
#define IXGBE_X540_RX_PB_SIZE		384

static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);

/**
 * ixgbe_init_ops_X540 - Inits func ptrs and MAC type
 * @hw: pointer to hardware structure
 *
 * Initialize the function pointers and assign the MAC type for X540.
 * Does not touch the hardware.
 **/
s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val;
	u16 i;

	DEBUGFUNC("ixgbe_init_ops_X540");

	ret_val = ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);


	/* EEPROM */
	eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
	eeprom->ops.read = ixgbe_read_eerd_X540;
	eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_X540;
	eeprom->ops.write = ixgbe_write_eewr_X540;
	eeprom->ops.write_buffer = ixgbe_write_eewr_buffer_X540;
	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X540;
	eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X540;
	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X540;

	/* PHY */
	phy->ops.init = ixgbe_init_phy_ops_generic;
	phy->ops.reset = NULL;
	phy->ops.set_phy_power = ixgbe_set_copper_phy_power;

	/* MAC */
	mac->ops.reset_hw = ixgbe_reset_hw_X540;
	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
	mac->ops.get_media_type = ixgbe_get_media_type_X540;
	mac->ops.get_supported_physical_layer =
				    ixgbe_get_supported_physical_layer_X540;
	mac->ops.read_analog_reg8 = NULL;
	mac->ops.write_analog_reg8 = NULL;
	mac->ops.start_hw = ixgbe_start_hw_X540;
	mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
	mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
	mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
	mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
	mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540;
	mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X540;
	mac->ops.init_swfw_sync = ixgbe_init_swfw_sync_X540;
	mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
	mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
	mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
	mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = ixgbe_set_vfta_generic;
	mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
	mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
	mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
	mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;

	/* Link */
	mac->ops.get_link_capabilities =
				ixgbe_get_copper_link_capabilities_generic;
	mac->ops.setup_link = ixgbe_setup_mac_link_X540;
	mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
	mac->ops.check_link = ixgbe_check_mac_link_generic;
	mac->ops.bypass_rw = ixgbe_bypass_rw_generic;
	mac->ops.bypass_valid_rd = ixgbe_bypass_valid_rd_generic;
	mac->ops.bypass_set = ixgbe_bypass_set_generic;
	mac->ops.bypass_rd_eep = ixgbe_bypass_rd_eep_generic;


	mac->mcft_size		= IXGBE_X540_MC_TBL_SIZE;
	mac->vft_size		= IXGBE_X540_VFT_TBL_SIZE;
	mac->num_rar_entries	= IXGBE_X540_RAR_ENTRIES;
	mac->rx_pb_size		= IXGBE_X540_RX_PB_SIZE;
	mac->max_rx_queues	= IXGBE_X540_MAX_RX_QUEUES;
	mac->max_tx_queues	= IXGBE_X540_MAX_TX_QUEUES;
	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);

	/*
	 * FWSM register
	 * ARC supported; valid only if manageability features are
	 * enabled.
	 */
	mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
				     & IXGBE_FWSM_MODE_MASK);

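	/* PF-side mailbox: point each per-VF mailbox slot at the PF init routine */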
	for (i = 0; i < 64; i++)
		hw->mbx.ops[i].init_params = ixgbe_init_mbx_params_pf;

	/* LEDs */
	mac->ops.blink_led_start = ixgbe_blink_led_start_X540;
	mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;

	mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;

	return ret_val;
}

/**
 * ixgbe_get_link_capabilities_X540 - Determines link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @autoneg: TRUE when autoneg or autotry is enabled
 *
 * Determines the link capabilities from the copper PHY's supported speeds.
 **/
s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
				     ixgbe_link_speed *speed,
				     bool *autoneg)
{
	ixgbe_get_copper_link_capabilities_generic(hw, speed, autoneg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_get_media_type_X540 - Get media type
 * @hw: pointer to hardware structure
 *
 * Returns the media type (fiber, copper, backplane)
 **/
enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
{
	UNREFERENCED_1PARAMETER(hw);
	return ixgbe_media_type_copper;
}

/**
 * ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 **/
s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
			      ixgbe_link_speed speed,
			      bool autoneg_wait_to_complete)
{
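	/* X540 is a copper (10GBASE-T) MAC, so link setup is delegated to the PHY. */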
	DEBUGFUNC("ixgbe_setup_mac_link_X540");
	return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
}

/**
 * ixgbe_reset_hw_X540 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, and performs a reset.
 **/
s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
	s32 status;
	u32 ctrl, i;
	u32 swfw_mask = hw->phy.phy_semaphore_mask;

	DEBUGFUNC("ixgbe_reset_hw_X540");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

mac_reset_top:
	status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
	if (status != IXGBE_SUCCESS) {
		ERROR_REPORT2(IXGBE_ERROR_CAUTION,
			"semaphore failed with %d", status);
		return IXGBE_ERR_SWFW_SYNC;
	}
	ctrl = IXGBE_CTRL_RST;
	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	if (ctrl & IXGBE_CTRL_RST_MASK) {
		status = IXGBE_ERR_RESET_FAILED;
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			     "Reset polling failed to complete.\n");
	}
	msec_delay(100);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/* Set the Rx packet buffer size. */
	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		/* Save the SAN MAC RAR index */
		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;

		hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for this RAR */
		hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
				       IXGBE_CLEAR_VMDQ_ALL);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

reset_hw_out:
	return status;
}

/**
 * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware using the generic start_hw function followed by
 * the gen2 start_hw function, then performs revision-specific operations,
 * if any.
 **/
s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_hw_X540");

	ret_val = ixgbe_start_hw_generic(hw);
	if (ret_val != IXGBE_SUCCESS)
		goto out;

	ixgbe_start_hw_gen2(hw);

out:
	return ret_val;
}

/**
 * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
 * @hw: pointer to hardware structure
 *
 * Determines physical layer capabilities of the current configuration.
 **/
u64 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
{
	u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u16 ext_ability = 0;

	DEBUGFUNC("ixgbe_get_supported_physical_layer_X540");

	hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
	IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
	if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
		physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
	if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
		physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
	if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
		physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;

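	/* X550 copper devices also use this routine and additionally
	 * support 2.5GBASE-T and 5GBASE-T.
	 */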
	if (hw->mac.type == ixgbe_mac_X550) {
		physical_layer |= IXGBE_PHYSICAL_LAYER_2500BASE_T
		    | IXGBE_PHYSICAL_LAYER_5GBASE_T;
	}

	return physical_layer;
}

/**
 * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
 * @hw: pointer to hardware structure
 *
 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
 * ixgbe_hw struct in order to set up EEPROM access.
 **/
s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 eec;
	u16 eeprom_size;

	DEBUGFUNC("ixgbe_init_eeprom_params_X540");

	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->semaphore_delay = 10;
		eeprom->type = ixgbe_flash;

		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
		eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
				    IXGBE_EEC_SIZE_SHIFT);
		eeprom->word_size = 1 << (eeprom_size +
					  IXGBE_EEPROM_WORD_SIZE_SHIFT);

		DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
			  eeprom->type, eeprom->word_size);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_read_eerd_X540 - Read EEPROM word using EERD
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 *
 * Reads a 16 bit word from the EEPROM using the EERD register.
 **/
s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	s32 status = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_read_eerd_X540");
	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
	    IXGBE_SUCCESS) {
		status = ixgbe_read_eerd_generic(hw, offset, data);
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
	} else {
		status = IXGBE_ERR_SWFW_SYNC;
	}

	return status;
}

/**
 * ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @words: number of words
 * @data: word(s) read from the EEPROM
 *
 * Reads 16 bit word(s) from the EEPROM using the EERD register.
 **/
s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
				u16 offset, u16 words, u16 *data)
{
	s32 status = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_read_eerd_buffer_X540");
	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
	    IXGBE_SUCCESS) {
		status = ixgbe_read_eerd_buffer_generic(hw, offset,
							words, data);
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
	} else {
		status = IXGBE_ERR_SWFW_SYNC;
	}

	return status;
}

/**
 * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to write
 * @data: word to write to the EEPROM
 *
 * Writes a 16 bit word to the EEPROM using the EEWR register.
 **/
s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	s32 status = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_write_eewr_X540");
	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
	    IXGBE_SUCCESS) {
		status = ixgbe_write_eewr_generic(hw, offset, data);
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
	} else {
		status = IXGBE_ERR_SWFW_SYNC;
	}

	return status;
}

/**
 * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to write
 * @words: number of words
 * @data: word(s) to write to the EEPROM
 *
 * Writes 16 bit word(s) to the EEPROM using the EEWR register.
 **/
s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
				 u16 offset, u16 words, u16 *data)
{
	s32 status = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_write_eewr_buffer_X540");
	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
	    IXGBE_SUCCESS) {
		status = ixgbe_write_eewr_buffer_generic(hw, offset,
							 words, data);
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
	} else {
		status = IXGBE_ERR_SWFW_SYNC;
	}

	return status;
}

/**
 * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 *
 * This function does not use synchronization for EERD and EEWR. It can
 * be used internally by functions which utilize ixgbe_acquire_swfw_sync_X540.
 *
 * Returns a negative error code on error, or the 16-bit checksum
 **/
s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
	u16 i, j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;
	u16 ptr_start = IXGBE_PCIE_ANALOG_PTR;

	/* Do not use hw->eeprom.ops.read because we do not want to take
	 * the synchronization semaphores here. Instead use
	 * ixgbe_read_eerd_generic
	 */

	DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540");

	/* Include 0x0 up to IXGBE_EEPROM_CHECKSUM; do not include the
	 * checksum itself
	 */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (ixgbe_read_eerd_generic(hw, i, &word)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}
		checksum += word;
	}

	/* Include all data from pointers 0x3, 0x6-0xE.  This excludes the
	 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
	 */
	for (i = ptr_start; i < IXGBE_FW_PTR; i++) {
		if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
			continue;

		if (ixgbe_read_eerd_generic(hw, i, &pointer)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		/* Skip pointer section if the pointer is invalid. */
		if (pointer == 0xFFFF || pointer == 0 ||
		    pointer >= hw->eeprom.word_size)
			continue;

		if (ixgbe_read_eerd_generic(hw, pointer, &length)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		/* Skip pointer section if length is invalid. */
		if (length == 0xFFFF || length == 0 ||
		    (pointer + length) >= hw->eeprom.word_size)
			continue;

		for (j = pointer + 1; j <= pointer + length; j++) {
			if (ixgbe_read_eerd_generic(hw, j, &word)) {
				DEBUGOUT("EEPROM read failed\n");
				return IXGBE_ERR_EEPROM;
			}
			checksum += word;
		}
	}

	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return (s32)checksum;
}

/**
 * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum_val: calculated checksum
 *
 * Performs checksum calculation and validates the EEPROM checksum.  If the
 * caller does not need checksum_val, the value can be NULL.
 **/
s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
					u16 *checksum_val)
{
	s32 status;
	u16 checksum;
	u16 read_checksum = 0;

	DEBUGFUNC("ixgbe_validate_eeprom_checksum_X540");

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status) {
		DEBUGOUT("EEPROM read failed\n");
		return status;
	}

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
		return IXGBE_ERR_SWFW_SYNC;

	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		goto out;

	checksum = (u16)(status & 0xffff);

	/* Do not use hw->eeprom.ops.read because we do not want to take
	 * the synchronization semaphores twice here.
	 */
	status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
					 &read_checksum);
	if (status)
		goto out;

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (read_checksum != checksum) {
		ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
			     "Invalid EEPROM checksum");
		status = IXGBE_ERR_EEPROM_CHECKSUM;
	}

	/* If the user cares, return the calculated checksum */
	if (checksum_val)
		*checksum_val = checksum;

out:
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	return status;
}

/**
 * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
 * @hw: pointer to hardware structure
 *
 * After writing the EEPROM to shadow RAM using the EEWR register, software
 * calculates the checksum, writes it to the EEPROM, and instructs the
 * hardware to update the flash.
 **/
s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
	s32 status;
	u16 checksum;

	DEBUGFUNC("ixgbe_update_eeprom_checksum_X540");

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status) {
		DEBUGOUT("EEPROM read failed\n");
		return status;
	}

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
		return IXGBE_ERR_SWFW_SYNC;

	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		goto out;

	checksum = (u16)(status & 0xffff);

	/* Do not use hw->eeprom.ops.write because we do not want to
	 * take the synchronization semaphores twice here.
	 */
	status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum);
	if (status)
		goto out;

	status = ixgbe_update_flash_X540(hw);

out:
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	return status;
}

/**
 * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
 * @hw: pointer to hardware structure
 *
 * Sets FLUP (bit 23) of the EEC register to instruct the hardware to copy
 * the EEPROM image from shadow RAM to the flash device.
 **/
s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
{
	u32 flup;
	s32 status;

	DEBUGFUNC("ixgbe_update_flash_X540");

	status = ixgbe_poll_flash_update_done_X540(hw);
	if (status == IXGBE_ERR_EEPROM) {
		DEBUGOUT("Flash update time out\n");
		goto out;
	}

	flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)) | IXGBE_EEC_FLUP;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup);

	status = ixgbe_poll_flash_update_done_X540(hw);
	if (status == IXGBE_SUCCESS)
		DEBUGOUT("Flash update complete\n");
	else
		DEBUGOUT("Flash update time out\n");

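	/* On X540 rev 0 silicon, trigger a second flash update when the SEC1
	 * sector is marked valid.
	 */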
	if (hw->mac.type == ixgbe_mac_X540 && hw->revision_id == 0) {
		flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

		if (flup & IXGBE_EEC_SEC1VAL) {
			flup |= IXGBE_EEC_FLUP;
			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup);
		}

		status = ixgbe_poll_flash_update_done_X540(hw);
		if (status == IXGBE_SUCCESS)
			DEBUGOUT("Flash update complete\n");
		else
			DEBUGOUT("Flash update time out\n");
	}
out:
	return status;
}

/**
 * ixgbe_poll_flash_update_done_X540 - Poll flash update status
 * @hw: pointer to hardware structure
 *
 * Polls the FLUDONE (bit 26) of the EEC Register to determine when the
 * flash update is done.
 **/
static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
{
	u32 i;
	u32 reg;
	s32 status = IXGBE_ERR_EEPROM;

	DEBUGFUNC("ixgbe_poll_flash_update_done_X540");

	for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
		reg = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
		if (reg & IXGBE_EEC_FLUDONE) {
			status = IXGBE_SUCCESS;
			break;
		}
		msec_delay(5);
	}

	if (i == IXGBE_FLUDONE_ATTEMPTS)
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			     "Flash update status polling timed out");

	return status;
}

/**
 * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the SW_FW_SYNC register for
 * the specified function (CSR, PHY0, PHY1, NVM, Flash)
 **/
s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
{
	u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK;
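	/* For the NVM/PHY semaphores, the FW ownership bits in SW_FW_SYNC sit
	 * five bit positions above the corresponding SW bits.
	 */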
	u32 fwmask = swmask << 5;
	u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK;
	u32 timeout = 200;
	u32 hwmask = 0;
	u32 swfw_sync;
	u32 i;

	DEBUGFUNC("ixgbe_acquire_swfw_sync_X540");

	if (swmask & IXGBE_GSSR_EEP_SM)
		hwmask |= IXGBE_GSSR_FLASH_SM;

	/* SW only mask doesn't have FW bit pair */
	if (mask & IXGBE_GSSR_SW_MNG_SM)
		swmask |= IXGBE_GSSR_SW_MNG_SM;

	swmask |= swi2c_mask;
	fwmask |= swi2c_mask << 2;
	if (hw->mac.type >= ixgbe_mac_X550)
		timeout = 1000;

	for (i = 0; i < timeout; i++) {
		/* SW NVM semaphore bit is used for access to all
		 * SW_FW_SYNC bits (not just NVM)
		 */
		if (ixgbe_get_swfw_sync_semaphore(hw)) {
			DEBUGOUT("Failed to get NVM access and register semaphore, returning IXGBE_ERR_SWFW_SYNC\n");
			return IXGBE_ERR_SWFW_SYNC;
		}

		swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
		if (!(swfw_sync & (fwmask | swmask | hwmask))) {
			swfw_sync |= swmask;
			IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw),
					swfw_sync);
			ixgbe_release_swfw_sync_semaphore(hw);
			return IXGBE_SUCCESS;
		}
		/* Firmware currently using resource (fwmask), hardware
		 * currently using resource (hwmask), or other software
		 * thread currently using resource (swmask)
		 */
		ixgbe_release_swfw_sync_semaphore(hw);
		msec_delay(5);
	}

	/* If the resource is not released by the FW/HW the SW can assume that
	 * the FW/HW malfunctions. In that case the SW should set the SW bit(s)
	 * of the requested resource(s) while ignoring the corresponding FW/HW
	 * bits in the SW_FW_SYNC register.
	 */
	if (ixgbe_get_swfw_sync_semaphore(hw)) {
		DEBUGOUT("Failed to get NVM semaphore and register semaphore while forcefully ignoring FW semaphore bit(s) and setting SW semaphore bit(s), returning IXGBE_ERR_SWFW_SYNC\n");
		return IXGBE_ERR_SWFW_SYNC;
	}
	swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
	if (swfw_sync & (fwmask | hwmask)) {
		swfw_sync |= swmask;
		IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync);
		ixgbe_release_swfw_sync_semaphore(hw);
		msec_delay(5);
		return IXGBE_SUCCESS;
	}
	/* If the resource is not released by other SW the SW can assume that
	 * the other SW malfunctions. In that case the SW should clear all SW
	 * flags that it does not own and then repeat the whole process once
	 * again.
	 */
	if (swfw_sync & swmask) {
		u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
			    IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM |
			    IXGBE_GSSR_SW_MNG_SM;

		if (swi2c_mask)
			rmask |= IXGBE_GSSR_I2C_MASK;
		ixgbe_release_swfw_sync_X540(hw, rmask);
		ixgbe_release_swfw_sync_semaphore(hw);
		DEBUGOUT("Resource not released by other SW, returning IXGBE_ERR_SWFW_SYNC\n");
		return IXGBE_ERR_SWFW_SYNC;
	}
	ixgbe_release_swfw_sync_semaphore(hw);
	DEBUGOUT("Returning error IXGBE_ERR_SWFW_SYNC\n");

	return IXGBE_ERR_SWFW_SYNC;
}

/**
 * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to release
 *
 * Releases the SWFW semaphore through the SW_FW_SYNC register
 * for the specified function (CSR, PHY0, PHY1, NVM, Flash)
 **/
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
{
	u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM);
	u32 swfw_sync;

	DEBUGFUNC("ixgbe_release_swfw_sync_X540");

	if (mask & IXGBE_GSSR_I2C_MASK)
		swmask |= mask & IXGBE_GSSR_I2C_MASK;
	ixgbe_get_swfw_sync_semaphore(hw);

	swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
	swfw_sync &= ~swmask;
	IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync);

	ixgbe_release_swfw_sync_semaphore(hw);
	msec_delay(2);
}

/**
 * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
 * @hw: pointer to hardware structure
 *
 * Sets the hardware semaphores so SW/FW can gain control of shared resources
 **/
static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	DEBUGFUNC("ixgbe_get_swfw_sync_semaphore");

	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(50);
	}

	/* Now get the semaphore between SW/FW through the REGSMP bit */
	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
			if (!(swsm & IXGBE_SWFW_REGSMP))
				break;

			usec_delay(50);
		}

		/*
		 * Release semaphores and return error if SW NVM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			ERROR_REPORT1(IXGBE_ERROR_POLLING,
				"REGSMP Software NVM semaphore not granted.\n");
			ixgbe_release_swfw_sync_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			     "Software semaphore SMBI between device drivers "
			     "not granted.\n");
	}

	return status;
}

/**
 * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
 * @hw: pointer to hardware structure
 *
 * This function clears hardware semaphore bits.
 **/
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
{
	u32 swsm;

	DEBUGFUNC("ixgbe_release_swfw_sync_semaphore");

	/* Release both semaphores by writing 0 to the bits REGSMP and SMBI */

	swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
	swsm &= ~IXGBE_SWFW_REGSMP;
	IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swsm);

	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
	swsm &= ~IXGBE_SWSM_SMBI;
	IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);

	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_init_swfw_sync_X540 - Reset SWFW semaphore state
 * @hw: pointer to hardware structure
 *
 * This function resets hardware semaphore bits for a semaphore that may
 * have been left locked due to a catastrophic failure.
 **/
void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
{
	u32 rmask;

	/* First try to grab the semaphore; it does not matter whether we
	 * actually got it, since we do the same thing either way:
	 * if we got the lock, we release it; if we timed out, we force its
	 * release.
	 */
	ixgbe_get_swfw_sync_semaphore(hw);
	ixgbe_release_swfw_sync_semaphore(hw);

	/* Acquire and release all software resources. */
	rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
		IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM |
		IXGBE_GSSR_SW_MNG_SM;

	rmask |= IXGBE_GSSR_I2C_MASK;
	ixgbe_acquire_swfw_sync_X540(hw, rmask);
	ixgbe_release_swfw_sync_X540(hw, rmask);
}

/**
 * ixgbe_blink_led_start_X540 - Blink LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to blink
 *
 * Devices that implement the version 2 interface:
 *  X540
 **/
s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
{
	u32 macc_reg;
	u32 ledctl_reg;
	ixgbe_link_speed speed;
	bool link_up;

	DEBUGFUNC("ixgbe_blink_led_start_X540");

	if (index > 3)
		return IXGBE_ERR_PARAM;

	/*
	 * Link should be up in order for the blink bit in the LED control
	 * register to work. Force link and speed in the MAC if link is down.
	 * This will be reversed when we stop the blinking.
	 */
	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
	if (link_up == FALSE) {
		macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
	}
	/* Set the LED to LINK_UP + BLINK. */
	ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
	ledctl_reg |= IXGBE_LED_BLINK(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
	IXGBE_WRITE_FLUSH(hw);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to stop blinking
 *
 * Devices that implement the version 2 interface:
 *  X540
 **/
s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
{
	u32 macc_reg;
	u32 ledctl_reg;

	if (index > 3)
		return IXGBE_ERR_PARAM;

	DEBUGFUNC("ixgbe_blink_led_stop_X540");

	/* Restore the LED to its default value. */
	ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
	ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
	ledctl_reg &= ~IXGBE_LED_BLINK(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);

	/* Unforce link and speed in the MAC. */
	macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
	macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS);
	IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
	IXGBE_WRITE_FLUSH(hw);

	return IXGBE_SUCCESS;
}