xref: /netbsd-src/sys/dev/pci/ixgbe/ixgbe_82599.c (revision fdd524d4ccd2bb0c6f67401e938dabf773eb0372)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2013, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82599.c 251964 2013-06-18 21:28:19Z jfv $*/
34 /*$NetBSD: ixgbe_82599.c,v 1.11 2015/08/13 04:56:43 msaitoh Exp $*/
35 
36 #include "ixgbe_type.h"
37 #include "ixgbe_82599.h"
38 #include "ixgbe_api.h"
39 #include "ixgbe_common.h"
40 #include "ixgbe_phy.h"
41 
42 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
43 					 ixgbe_link_speed speed,
44 					 bool autoneg_wait_to_complete);
45 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
46 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
47 				   u16 offset, u16 *data);
48 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
49 					  u16 words, u16 *data);
50 
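/**
 *  ixgbe_mng_enabled - Is the manageability engine enabled?
 *  @hw: pointer to hardware structure
 *
 *  Returns TRUE when the manageability (MNG) firmware is running in
 *  pass-through mode, management packet reception (RCV_TCO) is enabled
 *  and manageability clock gating is not requested; FALSE otherwise.
 **/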
51 static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
52 {
53 	u32 fwsm, manc, factps;
54 
55 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
56 	if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
57 		return FALSE;
58 
59 	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
60 	if (!(manc & IXGBE_MANC_RCV_TCO_EN))
61 		return FALSE;
62 
63 	factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
64 	if (factps & IXGBE_FACTPS_MNGCG)
65 		return FALSE;
66 
67 	return TRUE;
68 }
69 
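/**
 *  ixgbe_init_mac_link_ops_82599 - Initialize MAC link function pointers
 *  @hw: pointer to hardware structure
 *
 *  Sets the Tx laser control handlers for SFP+ fiber (only when MNG FW is
 *  not enabled) and selects the setup_link routine: multispeed fiber,
 *  SmartSpeed for backplane parts with SmartSpeed enabled and LESM FW off,
 *  or the plain 82599 MAC link setup otherwise.
 **/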
70 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
71 {
72 	struct ixgbe_mac_info *mac = &hw->mac;
73 
74 	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
75 
76 	/*
77 	 * Enable the laser control functions for SFP+ fiber only when
78 	 * manageability (MNG) firmware is not enabled.
79 	 */
80 	if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
81 	    !hw->mng_fw_enabled) {
82 		mac->ops.disable_tx_laser =
83 				       &ixgbe_disable_tx_laser_multispeed_fiber;
84 		mac->ops.enable_tx_laser =
85 					&ixgbe_enable_tx_laser_multispeed_fiber;
86 		mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
87 
88 	} else {
89 		mac->ops.disable_tx_laser = NULL;
90 		mac->ops.enable_tx_laser = NULL;
91 		mac->ops.flap_tx_laser = NULL;
92 	}
93 
94 	if (hw->phy.multispeed_fiber) {
95 		/* Set up dual speed SFP+ support */
96 		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
97 	} else {
98 		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
99 		     (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
100 		      hw->phy.smart_speed == ixgbe_smart_speed_on) &&
101 		      !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
102 			mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
103 		} else {
104 			mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
105 		}
106 	}
107 }
108 
109 /**
110  *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
111  *  @hw: pointer to hardware structure
112  *
113  *  Initialize any function pointers that were not able to be
114  *  set during init_shared_code because the PHY/SFP type was
115  *  not known.  Perform the SFP init if necessary.
116  *
117  **/
118 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
119 {
120 	struct ixgbe_mac_info *mac = &hw->mac;
121 	struct ixgbe_phy_info *phy = &hw->phy;
122 	s32 ret_val = IXGBE_SUCCESS;
123 
124 	DEBUGFUNC("ixgbe_init_phy_ops_82599");
125 
126 	/* Identify the PHY or SFP module */
127 	ret_val = phy->ops.identify(hw);
128 	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
129 		goto init_phy_ops_out;
130 
131 	/* Setup function pointers based on detected SFP module and speeds */
132 	ixgbe_init_mac_link_ops_82599(hw);
133 	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
134 		hw->phy.ops.reset = NULL;
135 
136 	/* If copper media, overwrite with copper function pointers */
137 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
138 		mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
139 		mac->ops.get_link_capabilities =
140 				  &ixgbe_get_copper_link_capabilities_generic;
141 	}
142 
143 	/* Set necessary function pointers based on phy type */
144 	switch (hw->phy.type) {
145 	case ixgbe_phy_tn:
146 		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
147 		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
148 		phy->ops.get_firmware_version =
149 			     &ixgbe_get_phy_firmware_version_tnx;
150 		break;
151 	default:
152 		break;
153 	}
154 init_phy_ops_out:
155 	return ret_val;
156 }
157 
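/**
 *  ixgbe_setup_sfp_modules_82599 - Setup SFP module
 *  @hw: pointer to hardware structure
 *
 *  If an SFP module has been identified, plays the module-specific init
 *  sequence from the EEPROM into the CORECTL register under the MAC/CSR
 *  SW/FW semaphore, then restarts the DSP in 10G SFI mode and resets the
 *  pipeline.  Returns IXGBE_ERR_SFP_SETUP_NOT_COMPLETE if the DSP restart
 *  fails.
 **/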
158 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
159 {
160 	s32 ret_val = IXGBE_SUCCESS;
161 	u16 list_offset, data_offset, data_value;
162 	bool got_lock = FALSE;
163 
164 	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
165 
166 	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
167 		ixgbe_init_mac_link_ops_82599(hw);
168 
169 		hw->phy.ops.reset = NULL;
170 
171 		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
172 							      &data_offset);
173 		if (ret_val != IXGBE_SUCCESS)
174 			goto setup_sfp_out;
175 
176 		/* PHY config will finish before releasing the semaphore */
177 		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
178 							IXGBE_GSSR_MAC_CSR_SM);
179 		if (ret_val != IXGBE_SUCCESS) {
180 			ret_val = IXGBE_ERR_SWFW_SYNC;
181 			goto setup_sfp_out;
182 		}
183 
184 		if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
185 			goto setup_sfp_err;
186 		while (data_value != 0xffff) {
187 			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
188 			IXGBE_WRITE_FLUSH(hw);
189 			if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
190 				goto setup_sfp_err;
191 		}
192 
193 		/* Release the semaphore */
194 		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
195 		/* Delay obtaining semaphore again to allow FW access */
196 		msec_delay(hw->eeprom.semaphore_delay);
197 
198 		/* Need SW/FW semaphore around AUTOC writes if LESM is on;
199 		 * likewise reset_pipeline requires the lock as it also
200 		 * writes AUTOC.
201 		 */
202 		if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
203 			ret_val = hw->mac.ops.acquire_swfw_sync(hw,
204 							IXGBE_GSSR_MAC_CSR_SM);
205 			if (ret_val != IXGBE_SUCCESS) {
206 				ret_val = IXGBE_ERR_SWFW_SYNC;
207 				goto setup_sfp_out;
208 			}
209 
210 			got_lock = TRUE;
211 		}
212 
213 		/* Restart DSP and set SFI mode */
214 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((hw->mac.orig_autoc) |
215 				IXGBE_AUTOC_LMS_10G_SERIAL));
216 		hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
217 		ret_val = ixgbe_reset_pipeline_82599(hw);
218 
219 		if (got_lock) {
220 			hw->mac.ops.release_swfw_sync(hw,
221 						      IXGBE_GSSR_MAC_CSR_SM);
222 			got_lock = FALSE;
223 		}
224 
225 		if (ret_val) {
226 			DEBUGOUT("sfp module setup not complete\n");
227 			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
228 			goto setup_sfp_out;
229 		}
230 
231 	}
232 
233 setup_sfp_out:
234 	return ret_val;
235 
236 setup_sfp_err:
237 	/* Release the semaphore */
238 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
239 	/* Delay obtaining semaphore again to allow FW access */
240 	msec_delay(hw->eeprom.semaphore_delay);
241 	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
242 		      "eeprom read at offset %d failed", data_offset);
243 	return IXGBE_ERR_PHY;
244 }
245 
246 /**
247  *  ixgbe_init_ops_82599 - Inits func ptrs and MAC type
248  *  @hw: pointer to hardware structure
249  *
250  *  Initialize the function pointers and assign the MAC type for 82599.
251  *  Does not touch the hardware.
252  **/
253 
254 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
255 {
256 	struct ixgbe_mac_info *mac = &hw->mac;
257 	struct ixgbe_phy_info *phy = &hw->phy;
258 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
259 	s32 ret_val;
260 
261 	DEBUGFUNC("ixgbe_init_ops_82599");
262 
263 	ixgbe_init_phy_ops_generic(hw);
264 	ret_val = ixgbe_init_ops_generic(hw);
265 
266 	/* PHY */
267 	phy->ops.identify = &ixgbe_identify_phy_82599;
268 	phy->ops.init = &ixgbe_init_phy_ops_82599;
269 
270 	/* MAC */
271 	mac->ops.reset_hw = &ixgbe_reset_hw_82599;
272 	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
273 	mac->ops.get_media_type = &ixgbe_get_media_type_82599;
274 	mac->ops.get_supported_physical_layer =
275 				    &ixgbe_get_supported_physical_layer_82599;
276 	mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
277 	mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
278 	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
279 	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
280 	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
281 	mac->ops.start_hw = &ixgbe_start_hw_82599;
282 	mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
283 	mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
284 	mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
285 	mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
286 	mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
287 
288 	/* RAR, Multicast, VLAN */
289 	mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
290 	mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
291 	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
292 	mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
293 	mac->rar_highwater = 1;
294 	mac->ops.set_vfta = &ixgbe_set_vfta_generic;
295 	mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
296 	mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
297 	mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
298 	mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
299 	mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
300 	mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
301 
302 	/* Link */
303 	mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
304 	mac->ops.check_link = &ixgbe_check_mac_link_generic;
305 	mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
306 	ixgbe_init_mac_link_ops_82599(hw);
307 
308 	mac->mcft_size		= 128;
309 	mac->vft_size		= 128;
310 	mac->num_rar_entries	= 128;
311 	mac->rx_pb_size		= 512;
312 	mac->max_tx_queues	= 128;
313 	mac->max_rx_queues	= 128;
314 	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);
315 
316 	mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
317 				   IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;
318 
319 	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
320 
321 	/* EEPROM */
322 	eeprom->ops.read = &ixgbe_read_eeprom_82599;
323 	eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;
324 
325 	/* Manageability interface */
326 	mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
327 
328 
329 	mac->ops.get_rtrup2tc = &ixgbe_dcb_get_rtrup2tc_generic;
330 
331 	/* Cache if MNG FW is up */
332 	hw->mng_fw_enabled = ixgbe_mng_enabled(hw);
333 
334 	return ret_val;
335 }
336 
337 /**
338  *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
339  *  @hw: pointer to hardware structure
340  *  @speed: pointer to link speed
341  *  @autoneg: TRUE when autoneg or autotry is enabled
342  *
343  *  Determines the link capabilities by reading the AUTOC register.
344  **/
345 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
346 				      ixgbe_link_speed *speed,
347 				      bool *autoneg)
348 {
349 	s32 status = IXGBE_SUCCESS;
350 	u32 autoc = 0;
351 
352 	DEBUGFUNC("ixgbe_get_link_capabilities_82599");
353 
354 
355 	/* Check if 1G SFP module. */
356 	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
357 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
358 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
359 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
360 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
361 		*autoneg = TRUE;
362 		goto out;
363 	}
364 
365 	/*
366 	 * Determine link capabilities based on the stored value of AUTOC,
367 	 * which represents EEPROM defaults.  If AUTOC value has not
368 	 * been stored, use the current register values.
369 	 */
370 	if (hw->mac.orig_link_settings_stored)
371 		autoc = hw->mac.orig_autoc;
372 	else
373 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
374 
375 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
376 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
377 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
378 		*autoneg = FALSE;
379 		break;
380 
381 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
382 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
383 		*autoneg = FALSE;
384 		break;
385 
386 	case IXGBE_AUTOC_LMS_1G_AN:
387 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
388 		*autoneg = TRUE;
389 		break;
390 
391 	case IXGBE_AUTOC_LMS_10G_SERIAL:
392 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
393 		*autoneg = FALSE;
394 		break;
395 
396 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
397 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
398 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
399 		if (autoc & IXGBE_AUTOC_KR_SUPP)
400 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
401 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
402 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
403 		if (autoc & IXGBE_AUTOC_KX_SUPP)
404 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
405 		*autoneg = TRUE;
406 		break;
407 
408 	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
409 		*speed = IXGBE_LINK_SPEED_100_FULL;
410 		if (autoc & IXGBE_AUTOC_KR_SUPP)
411 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
412 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
413 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
414 		if (autoc & IXGBE_AUTOC_KX_SUPP)
415 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
416 		*autoneg = TRUE;
417 		break;
418 
419 	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
420 		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
421 		*autoneg = FALSE;
422 		break;
423 
424 	default:
425 		status = IXGBE_ERR_LINK_SETUP;
426 		goto out;
428 	}
429 
430 	if (hw->phy.multispeed_fiber) {
431 		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
432 			  IXGBE_LINK_SPEED_1GB_FULL;
433 		*autoneg = TRUE;
434 	}
435 
436 out:
437 	return status;
438 }
439 
440 /**
441  *  ixgbe_get_media_type_82599 - Get media type
442  *  @hw: pointer to hardware structure
443  *
444  *  Returns the media type (fiber, copper, backplane)
445  **/
446 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
447 {
448 	enum ixgbe_media_type media_type;
449 
450 	DEBUGFUNC("ixgbe_get_media_type_82599");
451 
452 	/* Detect if there is a copper PHY attached. */
453 	switch (hw->phy.type) {
454 	case ixgbe_phy_cu_unknown:
455 	case ixgbe_phy_tn:
456 		media_type = ixgbe_media_type_copper;
457 		goto out;
458 	default:
459 		break;
460 	}
461 
462 	switch (hw->device_id) {
463 	case IXGBE_DEV_ID_82599_KX4:
464 	case IXGBE_DEV_ID_82599_KX4_MEZZ:
465 	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
466 	case IXGBE_DEV_ID_82599_KR:
467 	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
468 	case IXGBE_DEV_ID_82599_XAUI_LOM:
469 		/* Default device ID is mezzanine card KX/KX4 */
470 		media_type = ixgbe_media_type_backplane;
471 		break;
472 	case IXGBE_DEV_ID_82599_SFP:
473 	case IXGBE_DEV_ID_82599_SFP_FCOE:
474 	case IXGBE_DEV_ID_82599_SFP_EM:
475 	case IXGBE_DEV_ID_82599_SFP_SF2:
476 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
477 	case IXGBE_DEV_ID_82599EN_SFP:
478 		media_type = ixgbe_media_type_fiber;
479 		break;
480 	case IXGBE_DEV_ID_82599_CX4:
481 		media_type = ixgbe_media_type_cx4;
482 		break;
483 	case IXGBE_DEV_ID_82599_T3_LOM:
484 		media_type = ixgbe_media_type_copper;
485 		break;
486 	case IXGBE_DEV_ID_82599_BYPASS:
487 		media_type = ixgbe_media_type_fiber_fixed;
488 		hw->phy.multispeed_fiber = TRUE;
489 		break;
490 	default:
491 		media_type = ixgbe_media_type_unknown;
492 		break;
493 	}
494 out:
495 	return media_type;
496 }
497 
498 /**
499  *  ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
500  *  @hw: pointer to hardware structure
501  *
502  *  Disables link during D3 power down sequence.
503  *
504  **/
505 void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
506 {
507 	u32 autoc2_reg;
508 	u16 ee_ctrl_2 = 0;
509 
510 	DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
511 	ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
512 
513 	if (!hw->mng_fw_enabled && !hw->wol_enabled &&
514 		ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
515 		autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
516 		autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
517 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
518 	}
519 }
520 
521 /**
522  *  ixgbe_start_mac_link_82599 - Setup MAC link settings
523  *  @hw: pointer to hardware structure
524  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
525  *
526  *  Configures link settings based on values in the ixgbe_hw struct.
527  *  Restarts the link.  Performs autonegotiation if needed.
528  **/
529 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
530 			       bool autoneg_wait_to_complete)
531 {
532 	u32 autoc_reg;
533 	u32 links_reg;
534 	u32 i;
535 	s32 status = IXGBE_SUCCESS;
536 	bool got_lock = FALSE;
537 
538 	DEBUGFUNC("ixgbe_start_mac_link_82599");
539 
540 
541 	/*  reset_pipeline requires us to hold this lock as it writes to
542 	 *  AUTOC.
543 	 */
544 	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
545 		status = hw->mac.ops.acquire_swfw_sync(hw,
546 						       IXGBE_GSSR_MAC_CSR_SM);
547 		if (status != IXGBE_SUCCESS)
548 			goto out;
549 
550 		got_lock = TRUE;
551 	}
552 
553 	/* Restart link */
554 	ixgbe_reset_pipeline_82599(hw);
555 
556 	if (got_lock)
557 		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
558 
559 	/* Only poll for autoneg to complete if specified to do so */
560 	if (autoneg_wait_to_complete) {
561 		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
562 		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
563 		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
564 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
565 		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
566 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
567 		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
568 			links_reg = 0; /* Just in case Autoneg time = 0 */
569 			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
570 				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
571 				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
572 					break;
573 				msec_delay(100);
574 			}
575 			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
576 				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
577 				DEBUGOUT("Autoneg did not complete.\n");
578 			}
579 		}
580 	}
581 
582 	/* Add delay to filter out noise during initial link setup */
583 	msec_delay(50);
584 
585 out:
586 	return status;
587 }
588 
589 /**
590  *  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
591  *  @hw: pointer to hardware structure
592  *
593  *  The base drivers may require better control over SFP+ module
594  *  PHY states.  This includes selectively shutting down the Tx
595  *  laser on the PHY, effectively halting physical link.
596  **/
597 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
598 {
599 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
600 
601 	/* Disable tx laser; allow 100us to go dark per spec */
602 	esdp_reg |= IXGBE_ESDP_SDP3;
603 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
604 	IXGBE_WRITE_FLUSH(hw);
605 	usec_delay(100);
606 }
607 
608 /**
609  *  ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
610  *  @hw: pointer to hardware structure
611  *
612  *  The base drivers may require better control over SFP+ module
613  *  PHY states.  This includes selectively turning on the Tx
614  *  laser on the PHY, effectively starting physical link.
615  **/
616 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
617 {
618 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
619 
620 	/* Enable tx laser; allow 100ms to light up */
621 	esdp_reg &= ~IXGBE_ESDP_SDP3;
622 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
623 	IXGBE_WRITE_FLUSH(hw);
624 	msec_delay(100);
625 }
626 
627 /**
628  *  ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
629  *  @hw: pointer to hardware structure
630  *
631  *  When the driver changes the link speeds that it can support,
632  *  it sets autotry_restart to TRUE to indicate that we need to
633  *  initiate a new autotry session with the link partner.  To do
634  *  so, we set the speed then disable and re-enable the tx laser, to
635  *  alert the link partner that it also needs to restart autotry on its
636  *  end.  This is consistent with true clause 37 autoneg, which also
637  *  involves a loss of signal.
638  **/
639 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
640 {
641 	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
642 
643 	if (hw->mac.autotry_restart) {
644 		ixgbe_disable_tx_laser_multispeed_fiber(hw);
645 		ixgbe_enable_tx_laser_multispeed_fiber(hw);
646 		hw->mac.autotry_restart = FALSE;
647 	}
648 }
649 
650 /**
651  *  ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
652  *  @hw: pointer to hardware structure
653  *  @speed: link speed to set
654  *
655  *  We set the module speed differently for fixed fiber.  As with other
656  *  multi-speed paths there is no error value to return, so if we
657  *  detect an error we just log it and exit.
658  */
659 static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
660 					ixgbe_link_speed speed)
661 {
662 	s32 status;
663 	u8 rs, eeprom_data;
664 
665 	switch (speed) {
666 	case IXGBE_LINK_SPEED_10GB_FULL:
667 		/* one bit mask same as setting on */
668 		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
669 		break;
670 	case IXGBE_LINK_SPEED_1GB_FULL:
671 		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
672 		break;
673 	default:
674 		DEBUGOUT("Invalid fixed module speed\n");
675 		return;
676 	}
677 
678 	/* Set RS0 */
679 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
680 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
681 					   &eeprom_data);
682 	if (status) {
683 		DEBUGOUT("Failed to read Rx Rate Select RS0\n");
684 		goto out;
685 	}
686 
687 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
688 
689 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
690 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
691 					    eeprom_data);
692 	if (status) {
693 		DEBUGOUT("Failed to write Rx Rate Select RS0\n");
694 		goto out;
695 	}
696 
697 	/* Set RS1 */
698 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
699 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
700 					   &eeprom_data);
701 	if (status) {
702 		DEBUGOUT("Failed to read Rx Rate Select RS1\n");
703 		goto out;
704 	}
705 
706 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
707 
708 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
709 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
710 					    eeprom_data);
711 	if (status) {
712 		DEBUGOUT("Failed to write Rx Rate Select RS1\n");
713 		goto out;
714 	}
715 out:
716 	return;
717 }
718 
719 /**
720  *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
721  *  @hw: pointer to hardware structure
722  *  @speed: new link speed
723  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
724  *
725  *  Set the link speed in the AUTOC register and restarts link.
726  **/
727 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
728 				     ixgbe_link_speed speed,
729 				     bool autoneg_wait_to_complete)
730 {
731 	s32 status = IXGBE_SUCCESS;
732 	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
733 	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
734 	u32 speedcnt = 0;
735 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
736 	u32 i = 0;
737 	bool autoneg, link_up = FALSE;
738 
739 	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
740 
741 	/* Mask off requested but non-supported speeds */
742 	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
743 	if (status != IXGBE_SUCCESS)
744 		return status;
745 
746 	speed &= link_speed;
747 
748 	/*
749 	 * Try each speed one by one, highest priority first.  We do this in
750 	 * software because 10gb fiber doesn't support speed autonegotiation.
751 	 */
752 	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
753 		speedcnt++;
754 		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
755 
756 		/* If we already have link at this speed, just jump out */
757 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
758 		if (status != IXGBE_SUCCESS)
759 			return status;
760 
761 		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
762 			goto out;
763 
764 		/* Set the module link speed */
765 		if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) {
766 			ixgbe_set_fiber_fixed_speed(hw,
767 						    IXGBE_LINK_SPEED_10GB_FULL);
768 		} else {
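			/* On multi-speed SFP+ boards SDP5 is used as the
			 * rate select signal; driving it high selects the
			 * 10G rate (SDP5_DIR makes the pin an output). */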
769 			esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
770 			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
771 			IXGBE_WRITE_FLUSH(hw);
772 		}
773 
774 		/* Allow module to change analog characteristics (1G->10G) */
775 		msec_delay(40);
776 
777 		status = ixgbe_setup_mac_link_82599(hw,
778 						    IXGBE_LINK_SPEED_10GB_FULL,
779 						    autoneg_wait_to_complete);
780 		if (status != IXGBE_SUCCESS)
781 			return status;
782 
783 		/* Flap the tx laser if it has not already been done */
784 		ixgbe_flap_tx_laser(hw);
785 
786 		/*
787 		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
788 		 * Section 73.10.2, we may have to wait up to 500ms if KR is
789 		 * attempted.  82599 uses the same timing for 10g SFI.
790 		 */
791 		for (i = 0; i < 5; i++) {
792 			/* Wait for the link partner to also set speed */
793 			msec_delay(100);
794 
795 			/* If we have link, just jump out */
796 			status = ixgbe_check_link(hw, &link_speed, &link_up,
797 			    FALSE);
798 			if (status != IXGBE_SUCCESS)
799 				return status;
800 
801 			if (link_up)
802 				goto out;
803 		}
804 	}
805 
806 	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
807 		speedcnt++;
808 		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
809 			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
810 
811 		/* If we already have link at this speed, just jump out */
812 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
813 		if (status != IXGBE_SUCCESS)
814 			return status;
815 
816 		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
817 			goto out;
818 
819 		/* Set the module link speed */
820 		if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) {
821 			ixgbe_set_fiber_fixed_speed(hw,
822 						    IXGBE_LINK_SPEED_1GB_FULL);
823 		} else {
824 			esdp_reg &= ~IXGBE_ESDP_SDP5;
825 			esdp_reg |= IXGBE_ESDP_SDP5_DIR;
826 			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
827 			IXGBE_WRITE_FLUSH(hw);
828 		}
829 
830 		/* Allow module to change analog characteristics (10G->1G) */
831 		msec_delay(40);
832 
833 		status = ixgbe_setup_mac_link_82599(hw,
834 						    IXGBE_LINK_SPEED_1GB_FULL,
835 						    autoneg_wait_to_complete);
836 		if (status != IXGBE_SUCCESS)
837 			return status;
838 
839 		/* Flap the tx laser if it has not already been done */
840 		ixgbe_flap_tx_laser(hw);
841 
842 		/* Wait for the link partner to also set speed */
843 		msec_delay(100);
844 
845 		/* If we have link, just jump out */
846 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
847 		if (status != IXGBE_SUCCESS)
848 			return status;
849 
850 		if (link_up)
851 			goto out;
852 	}
853 
854 	/*
855 	 * We didn't get link.  Configure back to the highest speed we tried
856 	 * (if there was more than one) by calling ourselves back with just
857 	 * the single highest speed that the user requested.
858 	 */
859 	if (speedcnt > 1)
860 		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
861 			highest_link_speed, autoneg_wait_to_complete);
862 
863 out:
864 	/* Set autoneg_advertised value based on input link speed */
865 	hw->phy.autoneg_advertised = 0;
866 
867 	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
868 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
869 
870 	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
871 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
872 
873 	return status;
874 }
875 
876 /**
877  *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
878  *  @hw: pointer to hardware structure
879  *  @speed: new link speed
880  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
881  *
882  *  Implements the Intel SmartSpeed algorithm.
883  **/
884 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
885 				    ixgbe_link_speed speed,
886 				    bool autoneg_wait_to_complete)
887 {
888 	s32 status = IXGBE_SUCCESS;
889 	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
890 	s32 i, j;
891 	bool link_up = FALSE;
892 	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
893 
894 	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");
895 
896 	 /* Set autoneg_advertised value based on input link speed */
897 	hw->phy.autoneg_advertised = 0;
898 
899 	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
900 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
901 
902 	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
903 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
904 
905 	if (speed & IXGBE_LINK_SPEED_100_FULL)
906 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
907 
908 	/*
909 	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
910 	 * autoneg advertisement if link is unable to be established at the
911 	 * highest negotiated rate.  This can sometimes happen due to integrity
912 	 * issues with the physical media connection.
913 	 */
914 
915 	/* First, try to get link with full advertisement */
916 	hw->phy.smart_speed_active = FALSE;
917 	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
918 		status = ixgbe_setup_mac_link_82599(hw, speed,
919 						    autoneg_wait_to_complete);
920 		if (status != IXGBE_SUCCESS)
921 			goto out;
922 
923 		/*
924 		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
925 		 * Section 73.10.2, we may have to wait up to 500ms if KR is
926 		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
927 		 * Table 9 in the AN MAS.
928 		 */
929 		for (i = 0; i < 5; i++) {
930 			msec_delay(100);
931 
932 			/* If we have link, just jump out */
933 			status = ixgbe_check_link(hw, &link_speed, &link_up,
934 						  FALSE);
935 			if (status != IXGBE_SUCCESS)
936 				goto out;
937 
938 			if (link_up)
939 				goto out;
940 		}
941 	}
942 
943 	/*
944 	 * We didn't get link.  If we advertised KR plus one of KX4/KX
945 	 * (or BX4/BX), then disable KR and try again.
946 	 */
947 	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
948 	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
949 		goto out;
950 
951 	/* Turn SmartSpeed on to disable KR support */
952 	hw->phy.smart_speed_active = TRUE;
953 	status = ixgbe_setup_mac_link_82599(hw, speed,
954 					    autoneg_wait_to_complete);
955 	if (status != IXGBE_SUCCESS)
956 		goto out;
957 
958 	/*
959 	 * Wait for the controller to acquire link.  600ms will allow for
960 	 * the AN link_fail_inhibit_timer as well as multiple cycles of
961 	 * parallel detect, both 10g and 1g. This allows for the maximum
962 	 * connect attempts as defined in the AN MAS table 73-7.
963 	 */
964 	for (i = 0; i < 6; i++) {
965 		msec_delay(100);
966 
967 		/* If we have link, just jump out */
968 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
969 		if (status != IXGBE_SUCCESS)
970 			goto out;
971 
972 		if (link_up)
973 			goto out;
974 	}
975 
976 	/* We didn't get link.  Turn SmartSpeed back off. */
977 	hw->phy.smart_speed_active = FALSE;
978 	status = ixgbe_setup_mac_link_82599(hw, speed,
979 					    autoneg_wait_to_complete);
980 
981 out:
982 	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
983 		DEBUGOUT("Smartspeed has downgraded the link speed "
984 		"from the maximum advertised\n");
985 	return status;
986 }
987 
988 /**
989  *  ixgbe_setup_mac_link_82599 - Set MAC link speed
990  *  @hw: pointer to hardware structure
991  *  @speed: new link speed
992  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
993  *
994  *  Set the link speed in the AUTOC register and restarts link.
995  **/
996 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
997 			       ixgbe_link_speed speed,
998 			       bool autoneg_wait_to_complete)
999 {
1000 	bool autoneg = FALSE;
1001 	s32 status = IXGBE_SUCCESS;
1002 	u32 autoc, pma_pmd_1g, link_mode, start_autoc;
1003 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1004 	u32 orig_autoc = 0;
1005 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
1006 	u32 links_reg;
1007 	u32 i;
1008 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
1009 	bool got_lock = FALSE;
1010 
1011 	DEBUGFUNC("ixgbe_setup_mac_link_82599");
1012 
1013 	/* Check to see if speed passed in is supported. */
1014 	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
1015 	if (status)
1016 		goto out;
1017 
1018 	speed &= link_capabilities;
1019 
1020 	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
1021 		status = IXGBE_ERR_LINK_SETUP;
1022 		goto out;
1023 	}
1024 
1025 	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support */
1026 	if (hw->mac.orig_link_settings_stored)
1027 		autoc = hw->mac.orig_autoc;
1028 	else
1029 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1030 
1031 	orig_autoc = autoc;
1032 	start_autoc = hw->mac.cached_autoc;
1033 	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
1034 	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1035 
1036 	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
1037 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
1038 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
1039 		/* Set KX4/KX/KR support according to speed requested */
1040 		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
1041 		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
1042 			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
1043 				autoc |= IXGBE_AUTOC_KX4_SUPP;
1044 			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
1045 			    (hw->phy.smart_speed_active == FALSE))
1046 				autoc |= IXGBE_AUTOC_KR_SUPP;
1047 		}
1048 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
1049 			autoc |= IXGBE_AUTOC_KX_SUPP;
1050 	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
1051 		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
1052 		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
1053 		/* Switch from 1G SFI to 10G SFI if requested */
1054 		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
1055 		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
1056 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
1057 			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
1058 		}
1059 	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
1060 		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
1061 		/* Switch from 10G SFI to 1G SFI if requested */
1062 		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
1063 		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
1064 			autoc &= ~IXGBE_AUTOC_LMS_MASK;
1065 			if (autoneg)
1066 				autoc |= IXGBE_AUTOC_LMS_1G_AN;
1067 			else
1068 				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
1069 		}
1070 	}
1071 
1072 	if (autoc != start_autoc) {
1073 		/* Need SW/FW semaphore around AUTOC writes if LESM is on,
1074 		 * likewise reset_pipeline requires us to hold this lock as
1075 		 * it also writes to AUTOC.
1076 		 */
1077 		if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
1078 			status = hw->mac.ops.acquire_swfw_sync(hw,
1079 							IXGBE_GSSR_MAC_CSR_SM);
1080 			if (status != IXGBE_SUCCESS) {
1081 				status = IXGBE_ERR_SWFW_SYNC;
1082 				goto out;
1083 			}
1084 
1085 			got_lock = TRUE;
1086 		}
1087 
1088 		/* Restart link */
1089 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
1090 		hw->mac.cached_autoc = autoc;
1091 		ixgbe_reset_pipeline_82599(hw);
1092 
1093 		if (got_lock) {
1094 			hw->mac.ops.release_swfw_sync(hw,
1095 						      IXGBE_GSSR_MAC_CSR_SM);
1096 			got_lock = FALSE;
1097 		}
1098 
1099 		/* Only poll for autoneg to complete if specified to do so */
1100 		if (autoneg_wait_to_complete) {
1101 			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
1102 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
1103 			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
1104 				links_reg = 0; /* Just in case Autoneg time=0 */
1105 				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
1106 					links_reg =
1107 					       IXGBE_READ_REG(hw, IXGBE_LINKS);
1108 					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
1109 						break;
1110 					msec_delay(100);
1111 				}
1112 				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
1113 					status =
1114 						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
1115 					DEBUGOUT("Autoneg did not complete.\n");
1116 				}
1117 			}
1118 		}
1119 
1120 		/* Add delay to filter out noise during initial link setup */
1121 		msec_delay(50);
1122 	}
1123 
1124 out:
1125 	return status;
1126 }
1127 
1128 /**
1129  *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
1130  *  @hw: pointer to hardware structure
1131  *  @speed: new link speed
1132  *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
1133  *
1134  *  Restarts link on PHY and MAC based on settings passed in.
1135  **/
1136 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
1137 					 ixgbe_link_speed speed,
1138 					 bool autoneg_wait_to_complete)
1139 {
1140 	s32 status;
1141 
1142 	DEBUGFUNC("ixgbe_setup_copper_link_82599");
1143 
1144 	/* Setup the PHY according to input speed */
1145 	status = hw->phy.ops.setup_link_speed(hw, speed,
1146 					      autoneg_wait_to_complete);
1147 	/* Set up MAC */
1148 	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
1149 
1150 	return status;
1151 }
1152 
1153 /**
1154  *  ixgbe_reset_hw_82599 - Perform hardware reset
1155  *  @hw: pointer to hardware structure
1156  *
1157  *  Resets the hardware by resetting the transmit and receive units, masks
1158  *  and clears all interrupts, performs a PHY reset, and performs a link
1159  *  (MAC) reset.
1160  **/
1161 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
1162 {
1163 	ixgbe_link_speed link_speed;
1164 	s32 status;
1165 	u32 ctrl, i, autoc2;
1166 	u32 curr_lms;
1167 	bool link_up = FALSE;
1168 
1169 	DEBUGFUNC("ixgbe_reset_hw_82599");
1170 
1171 	/* Call adapter stop to disable tx/rx and clear interrupts */
1172 	status = hw->mac.ops.stop_adapter(hw);
1173 	if (status != IXGBE_SUCCESS)
1174 		goto reset_hw_out;
1175 
1176 	/* flush pending Tx transactions */
1177 	ixgbe_clear_tx_pending(hw);
1178 
1179 	/* PHY ops must be identified and initialized prior to reset */
1180 
1181 	/* Identify PHY and related function pointers */
1182 	status = hw->phy.ops.init(hw);
1183 
1184 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
1185 		goto reset_hw_out;
1186 
1187 	/* Setup SFP module if there is one present. */
1188 	if (hw->phy.sfp_setup_needed) {
1189 		status = hw->mac.ops.setup_sfp(hw);
1190 		hw->phy.sfp_setup_needed = FALSE;
1191 	}
1192 
1193 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
1194 		goto reset_hw_out;
1195 
1196 	/* Reset PHY */
1197 	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
1198 		hw->phy.ops.reset(hw);
1199 
1200 	/* remember AUTOC from before we reset */
1201 	if (hw->mac.cached_autoc)
1202 		curr_lms = hw->mac.cached_autoc & IXGBE_AUTOC_LMS_MASK;
1203 	else
1204 		curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) &
1205 					  IXGBE_AUTOC_LMS_MASK;
1206 
1207 mac_reset_top:
1208 	/*
1209 	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
1210 	 * If link reset is used when link is up, it might reset the PHY when
1211 	 * mng is using it.  If link is down or the flag to force full link
1212 	 * reset is set, then perform link reset.
1213 	 */
1214 	ctrl = IXGBE_CTRL_LNK_RST;
1215 	if (!hw->force_full_reset) {
1216 		hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
1217 		if (link_up)
1218 			ctrl = IXGBE_CTRL_RST;
1219 	}
1220 
1221 	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
1222 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
1223 	IXGBE_WRITE_FLUSH(hw);
1224 
1225 	/* Poll for reset bit to self-clear indicating reset is complete */
1226 	for (i = 0; i < 10; i++) {
1227 		usec_delay(1);
1228 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
1229 		if (!(ctrl & IXGBE_CTRL_RST_MASK))
1230 			break;
1231 	}
1232 
1233 	if (ctrl & IXGBE_CTRL_RST_MASK) {
1234 		status = IXGBE_ERR_RESET_FAILED;
1235 		DEBUGOUT("Reset polling failed to complete.\n");
1236 	}
1237 
1238 	msec_delay(50);
1239 
1240 	/*
1241 	 * Double resets are required for recovery from certain error
1242 	 * conditions.  Between resets, it is necessary to stall to allow time
1243 	 * for any pending HW events to complete.
1244 	 */
1245 	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
1246 		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
1247 		goto mac_reset_top;
1248 	}
1249 
1250 	/*
1251 	 * Store the original AUTOC/AUTOC2 values if they have not been
1252 	 * stored off yet.  Otherwise restore the stored original
1253 	 * values since the reset operation sets back to defaults.
1254 	 */
1255 	hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1256 	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1257 
1258 	/* Enable link if disabled in NVM */
1259 	if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
1260 		autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
1261 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1262 		IXGBE_WRITE_FLUSH(hw);
1263 	}
1264 
1265 	if (hw->mac.orig_link_settings_stored == FALSE) {
1266 		hw->mac.orig_autoc = hw->mac.cached_autoc;
1267 		hw->mac.orig_autoc2 = autoc2;
1268 		hw->mac.orig_link_settings_stored = TRUE;
1269 	} else {
1270 
1271 		/* If MNG FW is running on a multi-speed device that
1272 		 * doesn't autoneg without driver support, we need to
1273 		 * leave LMS in the state it was in before the MAC reset.
1274 		 * Likewise, if we support WoL we don't want to change
1275 		 * the LMS state.
1276 		 */
1277 		if ((hw->phy.multispeed_fiber && hw->mng_fw_enabled) ||
1278 		    hw->wol_enabled)
1279 			hw->mac.orig_autoc =
1280 				(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
1281 				curr_lms;
1282 
1283 		if (hw->mac.cached_autoc != hw->mac.orig_autoc) {
1284 			/* Need SW/FW semaphore around AUTOC writes if LESM is
1285 			 * on, likewise reset_pipeline requires us to hold
1286 			 * this lock as it also writes to AUTOC.
1287 			 */
1288 			bool got_lock = FALSE;
1289 			if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
1290 				status = hw->mac.ops.acquire_swfw_sync(hw,
1291 							IXGBE_GSSR_MAC_CSR_SM);
1292 				if (status != IXGBE_SUCCESS) {
1293 					status = IXGBE_ERR_SWFW_SYNC;
1294 					goto reset_hw_out;
1295 				}
1296 
1297 				got_lock = TRUE;
1298 			}
1299 
1300 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
1301 			hw->mac.cached_autoc = hw->mac.orig_autoc;
1302 			ixgbe_reset_pipeline_82599(hw);
1303 
1304 			if (got_lock)
1305 				hw->mac.ops.release_swfw_sync(hw,
1306 						      IXGBE_GSSR_MAC_CSR_SM);
1307 		}
1308 
1309 		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
1310 		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
1311 			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
1312 			autoc2 |= (hw->mac.orig_autoc2 &
1313 				   IXGBE_AUTOC2_UPPER_MASK);
1314 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1315 		}
1316 	}
1317 
1318 	/* Store the permanent mac address */
1319 	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
1320 
1321 	/*
1322 	 * Store MAC address from RAR0, clear receive address registers, and
1323 	 * clear the multicast table.  Also reset num_rar_entries to 128,
1324 	 * since we modify this value when programming the SAN MAC address.
1325 	 */
1326 	hw->mac.num_rar_entries = 128;
1327 	hw->mac.ops.init_rx_addrs(hw);
1328 
1329 	/* Store the permanent SAN mac address */
1330 	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
1331 
1332 	/* Add the SAN MAC address to the RAR only if it's a valid address */
1333 	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
1334 		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
1335 				    hw->mac.san_addr, 0, IXGBE_RAH_AV);
1336 
1337 		/* Save the SAN MAC RAR index */
1338 		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
1339 
1340 		/* Reserve the last RAR for the SAN MAC address */
1341 		hw->mac.num_rar_entries--;
1342 	}
1343 
1344 	/* Store the alternative WWNN/WWPN prefix */
1345 	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
1346 				   &hw->mac.wwpn_prefix);
1347 
1348 reset_hw_out:
1349 	return status;
1350 }
1351 
1352 /**
1353  *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1354  *  @hw: pointer to hardware structure
1355  **/
1356 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1357 {
1358 	int i;
1359 	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1360 	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
1361 
1362 	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
1363 
1364 	/*
1365 	 * Before starting reinitialization process,
1366 	 * FDIRCMD.CMD must be zero.
1367 	 */
1368 	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
1369 		if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1370 		      IXGBE_FDIRCMD_CMD_MASK))
1371 			break;
1372 		usec_delay(10);
1373 	}
1374 	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
1375 		DEBUGOUT("Flow Director previous command isn't complete, "
1376 			 "aborting table re-initialization.\n");
1377 		return IXGBE_ERR_FDIR_REINIT_FAILED;
1378 	}
1379 
1380 	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
1381 	IXGBE_WRITE_FLUSH(hw);
1382 	/*
1383 	 * The 82599 adapter's Flow Director init flow cannot be restarted, so
1384 	 * work around the 82599 silicon errata by performing the following
1385 	 * steps before re-writing FDIRCTRL with the same value:
1386 	 * - write 1 to bit 8 of the FDIRCMD register, then
1387 	 * - write 0 to bit 8 of the FDIRCMD register
1388 	 */
1389 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1390 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
1391 			 IXGBE_FDIRCMD_CLEARHT));
1392 	IXGBE_WRITE_FLUSH(hw);
1393 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1394 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1395 			 ~IXGBE_FDIRCMD_CLEARHT));
1396 	IXGBE_WRITE_FLUSH(hw);
1397 	/*
1398 	 * Clear FDIR Hash register to clear any leftover hashes
1399 	 * waiting to be programmed.
1400 	 */
1401 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
1402 	IXGBE_WRITE_FLUSH(hw);
1403 
1404 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1405 	IXGBE_WRITE_FLUSH(hw);
1406 
1407 	/* Poll init-done after we write FDIRCTRL register */
1408 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1409 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1410 				   IXGBE_FDIRCTRL_INIT_DONE)
1411 			break;
1412 		msec_delay(1);
1413 	}
1414 	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
1415 		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1416 		return IXGBE_ERR_FDIR_REINIT_FAILED;
1417 	}
1418 
1419 	/* Clear FDIR statistics registers (read to clear) */
1420 	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1421 	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
1422 	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1423 	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1424 	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1425 
1426 	return IXGBE_SUCCESS;
1427 }
1428 
1429 /**
1430  *  ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
1431  *  @hw: pointer to hardware structure
1432  *  @fdirctrl: value to write to flow director control register
1433  **/
1434 static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1435 {
1436 	int i;
1437 
1438 	DEBUGFUNC("ixgbe_fdir_enable_82599");
1439 
1440 	/* Prime the keys for hashing */
1441 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1442 	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1443 
1444 	/*
1445 	 * Poll init-done after we write the register.  Estimated times:
1446 	 *      10G: PBALLOC = 11b, timing is 60us
1447 	 *       1G: PBALLOC = 11b, timing is 600us
1448 	 *     100M: PBALLOC = 11b, timing is 6ms
1449 	 *
1450 	 *     Multiply these timings by 4 if under full Rx load
1451 	 *
1452 	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1453 	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
1454 	 * this might not finish in our poll time, but we can live with that
1455 	 * for now.
1456 	 */
1457 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1458 	IXGBE_WRITE_FLUSH(hw);
1459 	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1460 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1461 				   IXGBE_FDIRCTRL_INIT_DONE)
1462 			break;
1463 		msec_delay(1);
1464 	}
1465 
1466 	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1467 		DEBUGOUT("Flow Director poll time exceeded!\n");
1468 }
1469 
1470 /**
1471  *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1472  *  @hw: pointer to hardware structure
1473  *  @fdirctrl: value to write to flow director control register, initially
1474  *	     contains just the value of the Rx packet buffer allocation
1475  **/
1476 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1477 {
1478 	DEBUGFUNC("ixgbe_init_fdir_signature_82599");
1479 
1480 	/*
1481 	 * Continue setup of fdirctrl register bits:
1482 	 *  Move the flexible bytes to use the ethertype - shift 6 words
1483 	 *  Set the maximum length per hash bucket to 0xA filters
1484 	 *  Send interrupt when 64 filters are left
1485 	 */
1486 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1487 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1488 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1489 
1490 	/* write hashes and fdirctrl register, poll for completion */
1491 	ixgbe_fdir_enable_82599(hw, fdirctrl);
1492 
1493 	return IXGBE_SUCCESS;
1494 }
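
/*
 * Illustrative example (not part of the driver): a caller would typically
 * pass in only the packet-buffer allocation, e.g.
 *
 *	ixgbe_init_fdir_signature_82599(hw, IXGBE_FDIRCTRL_PBALLOC_64K);
 *
 * and this routine ORs in the flex-byte offset, bucket length and full
 * threshold before writing FDIRCTRL.
 */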
1495 
1496 /**
1497  *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1498  *  @hw: pointer to hardware structure
1499  *  @fdirctrl: value to write to flow director control register, initially
1500  *	     contains just the value of the Rx packet buffer allocation
1501  **/
1502 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1503 {
1504 	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
1505 
1506 	/*
1507 	 * Continue setup of fdirctrl register bits:
1508 	 *  Turn perfect match filtering on
1509 	 *  Report hash in RSS field of Rx wb descriptor
1510 	 *  Initialize the drop queue
1511 	 *  Move the flexible bytes to use the ethertype - shift 6 words
1512 	 *  Set the maximum length per hash bucket to 0xA filters
1513 	 *  Send interrupt when 64 (0x4 * 16) filters are left
1514 	 */
1515 	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
1516 		    IXGBE_FDIRCTRL_REPORT_STATUS |
1517 		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
1518 		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1519 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1520 		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1521 
1522 	/* write hashes and fdirctrl register, poll for completion */
1523 	ixgbe_fdir_enable_82599(hw, fdirctrl);
1524 
1525 	return IXGBE_SUCCESS;
1526 }
1527 
1528 /*
1529  * These defines allow us to quickly generate all of the necessary instructions
1530  * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
1531  * for values 0 through 15
1532  */
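/*
 * Each iteration folds one bit position of the 32-bit hash keys into the
 * running hashes: when bit n (or n + 16) of a key is set, the low (or high)
 * hash dword shifted right by n is XORed into that key's hash.  The
 * signature hash shifts left by (16 - n) instead because it ends up in the
 * upper 16 bits of the value written to FDIRHASH.  Bit positions common to
 * the bucket and signature keys are folded once into common_hash and
 * combined into both results at the end.
 */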
1533 #define IXGBE_ATR_COMMON_HASH_KEY \
1534 		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
1535 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
1536 do { \
1537 	u32 n = (_n); \
1538 	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
1539 		common_hash ^= lo_hash_dword >> n; \
1540 	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1541 		bucket_hash ^= lo_hash_dword >> n; \
1542 	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
1543 		sig_hash ^= lo_hash_dword << (16 - n); \
1544 	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
1545 		common_hash ^= hi_hash_dword >> n; \
1546 	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1547 		bucket_hash ^= hi_hash_dword >> n; \
1548 	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
1549 		sig_hash ^= hi_hash_dword << (16 - n); \
1550 } while (0);
1551 
1552 /**
1553  *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
1554  *  @input: unique input dword
1555  *  @common: compressed common input dword
1556  *
1557  *  This function contains several optimizations such as unwinding all of
1558  *  the loops, letting the compiler work out all of the conditional ifs
1559  *  since the keys are static defines, and computing two keys at once since
1560  *  the hashed dword stream will be the same for both keys.
1561  **/
1562 u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
1563 				     union ixgbe_atr_hash_dword common)
1564 {
1565 	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1566 	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
1567 
1568 	/* record the flow_vm_vlan bits as they are a key part to the hash */
1569 	flow_vm_vlan = IXGBE_NTOHL(input.dword);
1570 
1571 	/* generate common hash dword */
1572 	hi_hash_dword = IXGBE_NTOHL(common.dword);
1573 
1574 	/* low dword is word swapped version of common */
1575 	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1576 
1577 	/* apply flow ID/VM pool/VLAN ID bits to hash words */
1578 	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1579 
1580 	/* Process bits 0 and 16 */
1581 	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
1582 
1583 	/*
1584 	 * Apply flow ID/VM pool/VLAN ID bits to the lo hash dword.  We had
1585 	 * to delay this because bit 0 of the stream should not be processed
1586 	 * with the VLAN, so we do not add the VLAN until after bit 0 is done.
1587 	 */
1588 	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1589 
1590 	/* Process the remaining 30 bits of the key */
1591 	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
1592 	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
1593 	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
1594 	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
1595 	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
1596 	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
1597 	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
1598 	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
1599 	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
1600 	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
1601 	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
1602 	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
1603 	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
1604 	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
1605 	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
1606 
1607 	/* combine common_hash result with signature and bucket hashes */
1608 	bucket_hash ^= common_hash;
1609 	bucket_hash &= IXGBE_ATR_HASH_MASK;
1610 
1611 	sig_hash ^= common_hash << 16;
1612 	sig_hash &= IXGBE_ATR_HASH_MASK << 16;
1613 
1614 	/* return completed signature hash */
1615 	return sig_hash ^ bucket_hash;
1616 }
1617 
1618 /**
1619  *  ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
1620  *  @hw: pointer to hardware structure
1621  *  @input: unique input dword
1622  *  @common: compressed common input dword
1623  *  @queue: queue index to direct traffic to
1624  **/
1625 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1626 					  union ixgbe_atr_hash_dword input,
1627 					  union ixgbe_atr_hash_dword common,
1628 					  u8 queue)
1629 {
1630 	u64  fdirhashcmd;
1631 	u32  fdircmd;
1632 
1633 	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
1634 
1635 	/*
1636 	 * Get the flow_type in order to program FDIRCMD properly
1637 	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
1638 	 */
1639 	switch (input.formatted.flow_type) {
1640 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
1641 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
1642 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1643 	case IXGBE_ATR_FLOW_TYPE_TCPV6:
1644 	case IXGBE_ATR_FLOW_TYPE_UDPV6:
1645 	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
1646 		break;
1647 	default:
1648 		DEBUGOUT(" Error on flow type input\n");
1649 		return IXGBE_ERR_CONFIG;
1650 	}
1651 
1652 	/* configure FDIRCMD register */
1653 	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1654 		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1655 	fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1656 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1657 
1658 	/*
1659 	 * The lower 32 bits of fdirhashcmd are for FDIRHASH, the upper 32 bits
1660 	 * are for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
1661 	 */
1662 	fdirhashcmd = (u64)fdircmd << 32;
1663 	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
1664 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1665 
1666 	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1667 
1668 	return IXGBE_SUCCESS;
1669 }
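/*
 * Illustrative sketch (kept under #if 0, not compiled): how a transmit-path
 * ATR sampler might feed ixgbe_fdir_add_signature_filter_82599() for an
 * IPv4/TCP flow.  The ixgbe_atr_hash_dword field names are assumed to match
 * ixgbe_type.h, struct ip/struct tcphdr come from the network stack headers,
 * and the XOR folding of addresses into the common dword mirrors what OS
 * drivers typically do; treat all of that as an assumption, not as the only
 * valid encoding.
 */
#if 0
static void
example_atr_sample(struct ixgbe_hw *hw, const struct ip *ip,
		   const struct tcphdr *th, u8 queue)
{
	union ixgbe_atr_hash_dword input = { .dword = 0 };
	union ixgbe_atr_hash_dword common = { .dword = 0 };

	/* vm_pool and vlan_id stay 0 for a non-virtualized, untagged flow */
	input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;

	/* fold ports and addresses into the single common hash dword */
	common.port.src = th->th_sport;
	common.port.dst = th->th_dport;
	common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr;

	(void)ixgbe_fdir_add_signature_filter_82599(hw, input, common, queue);
}
#endif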
1670 
1671 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
1672 do { \
1673 	u32 n = (_n); \
1674 	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1675 		bucket_hash ^= lo_hash_dword >> n; \
1676 	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1677 		bucket_hash ^= hi_hash_dword >> n; \
1678 } while (0)
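/*
 * Reference form of the unrolled macro above, shown as a plain loop.  This
 * is a sketch for readability only: it is equivalent to issuing
 * IXGBE_COMPUTE_BKT_HASH_ITERATION(1) through (15) on an already-folded
 * lo/hi dword pair (bit 0 is still handled separately, before the VLAN bits
 * are mixed into lo_hash_dword, exactly as the function below does).  The
 * shipped code keeps the unrolled invocations so the compiler can fold the
 * constant key tests away.
 */
#if 0
static u32
example_bkt_hash_iterations(u32 bucket_hash, u32 lo_hash_dword,
			    u32 hi_hash_dword)
{
	u32 n;

	for (n = 1; n < 16; n++) {
		if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n))
			bucket_hash ^= lo_hash_dword >> n;
		if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16)))
			bucket_hash ^= hi_hash_dword >> n;
	}

	return bucket_hash;
}
#endif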
1679 
1680 /**
1681  *  ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
1682  *  @atr_input: input bitstream to compute the hash on
1683  *  @input_mask: mask for the input bitstream
1684  *
1685  *  This function serves two main purposes.  First it applies the input_mask
1686  *  to the atr_input, resulting in a cleaned-up atr_input data stream.
1687  *  Secondly it computes the hash and stores it in the bkt_hash field at
1688  *  the end of the input byte stream.  This way it will be available for
1689  *  future use without needing to recompute the hash.
1690  **/
1691 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1692 					  union ixgbe_atr_input *input_mask)
1693 {
1694 
1695 	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1696 	u32 bucket_hash = 0;
1697 
1698 	/* Apply masks to input data */
1699 	input->dword_stream[0]  &= input_mask->dword_stream[0];
1700 	input->dword_stream[1]  &= input_mask->dword_stream[1];
1701 	input->dword_stream[2]  &= input_mask->dword_stream[2];
1702 	input->dword_stream[3]  &= input_mask->dword_stream[3];
1703 	input->dword_stream[4]  &= input_mask->dword_stream[4];
1704 	input->dword_stream[5]  &= input_mask->dword_stream[5];
1705 	input->dword_stream[6]  &= input_mask->dword_stream[6];
1706 	input->dword_stream[7]  &= input_mask->dword_stream[7];
1707 	input->dword_stream[8]  &= input_mask->dword_stream[8];
1708 	input->dword_stream[9]  &= input_mask->dword_stream[9];
1709 	input->dword_stream[10] &= input_mask->dword_stream[10];
1710 
1711 	/* record the flow_vm_vlan bits as they are a key part of the hash */
1712 	flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
1713 
1714 	/* generate common hash dword */
1715 	hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^
1716 				    input->dword_stream[2] ^
1717 				    input->dword_stream[3] ^
1718 				    input->dword_stream[4] ^
1719 				    input->dword_stream[5] ^
1720 				    input->dword_stream[6] ^
1721 				    input->dword_stream[7] ^
1722 				    input->dword_stream[8] ^
1723 				    input->dword_stream[9] ^
1724 				    input->dword_stream[10]);
1725 
1726 	/* low dword is word swapped version of common */
1727 	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1728 
1729 	/* apply flow ID/VM pool/VLAN ID bits to hash words */
1730 	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1731 
1732 	/* Process bits 0 and 16 */
1733 	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
1734 
1735 	/*
1736 	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword; we had to
1737 	 * delay this because bit 0 of the stream should not be processed,
1738 	 * so we do not add the vlan until after bit 0 has been processed
1739 	 */
1740 	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1741 
1742 	/* Process remaining 30 bits of the key */
1743 	IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
1744 	IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
1745 	IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
1746 	IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
1747 	IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
1748 	IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
1749 	IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
1750 	IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
1751 	IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
1752 	IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
1753 	IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
1754 	IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
1755 	IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
1756 	IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
1757 	IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
1758 
1759 	/*
1760 	 * Limit hash to 13 bits since max bucket count is 8K.
1761 	 * Store result at the end of the input stream.
1762 	 */
1763 	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
1764 }
1765 
1766 /**
1767  *  ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
1768  *  @input_mask: mask to be bit swapped
1769  *
1770  *  The source and destination port masks for flow director are bit swapped
1771  *  in that bit 15 affects bit 0, bit 14 affects bit 1, bit 13 affects
1772  *  bit 2, and so on.  To generate a correctly swapped value we need to
1773  *  bit swap the mask, which is what this function accomplishes.
1774  **/
1775 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1776 {
1777 	u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
1778 	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1779 	mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
1780 	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1781 	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1782 	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1783 	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
1784 }
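/*
 * Worked example (sketch, not compiled): with a destination-port mask of
 * 0xFFF0 and a source-port mask of 0xFFFF the packed value is 0xFFF0FFFF,
 * and the four swap stages above bit-reverse each 16-bit half in place, so
 * the function returns 0x0FFFFFFF.  ixgbe_fdir_set_input_mask_82599() below
 * then writes the complement of that value to FDIRTCPM/FDIRUDPM.  The use
 * of htons()/memset() here is illustrative only.
 */
#if 0
static void
example_fdirtcpm(void)
{
	union ixgbe_atr_input mask;

	memset(&mask, 0, sizeof(mask));
	mask.formatted.dst_port = htons(0xFFF0);
	mask.formatted.src_port = htons(0xFFFF);

	/* ixgbe_get_fdirtcpm_82599(&mask) == 0x0FFFFFFF */
}
#endif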
1785 
1786 /*
1787  * These macros address the fact that we have registers that are either
1788  * all or in part big-endian.  As a result, on big-endian systems we end
1789  * up byte swapping the value to little-endian before it is byte swapped
1790  * again and written to the hardware in the original big-endian
1791  * format.
1792  */
1793 #define IXGBE_STORE_AS_BE32(_value) \
1794 	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
1795 	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
1796 
1797 #define IXGBE_WRITE_REG_BE32(a, reg, value) \
1798 	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
1799 
1800 #define IXGBE_STORE_AS_BE16(_value) \
1801 	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
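/*
 * Sketch of what the helpers above evaluate to: IXGBE_STORE_AS_BE32() is a
 * plain 32-bit byte swap, and IXGBE_WRITE_REG_BE32() first runs the
 * (big-endian) value through IXGBE_NTOHL(), so on little-endian hosts the
 * two swaps cancel and the value reaches the register unchanged, while on
 * big-endian hosts only the explicit swap remains.
 */
#if 0
static void
example_store_as_be32(void)
{
	u32 swapped = IXGBE_STORE_AS_BE32(0x11223344);	/* == 0x44332211 */

	(void)swapped;
}
#endif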
1802 
1803 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
1804 				    union ixgbe_atr_input *input_mask)
1805 {
1806 	/* mask IPv6 since it is currently not supported */
1807 	u32 fdirm = IXGBE_FDIRM_DIPv6;
1808 	u32 fdirtcpm;
1809 
1810 	DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");
1811 
1812 	/*
1813 	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
1814 	 * are zero, then assume a full mask for that field.  Also assume that
1815 	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
1816 	 * cannot be masked out in this implementation.
1817 	 *
1818 	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
1819 	 * point in time.
1820 	 */
1821 
1822 	/* verify bucket hash is cleared on hash generation */
1823 	if (input_mask->formatted.bkt_hash)
1824 		DEBUGOUT(" bucket hash should always be 0 in mask\n");
1825 
1826 	/* Program FDIRM and verify partial masks */
1827 	switch (input_mask->formatted.vm_pool & 0x7F) {
1828 	case 0x0:
1829 		fdirm |= IXGBE_FDIRM_POOL;
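		/* FALLTHROUGH */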
1830 	case 0x7F:
1831 		break;
1832 	default:
1833 		DEBUGOUT(" Error on vm pool mask\n");
1834 		return IXGBE_ERR_CONFIG;
1835 	}
1836 
1837 	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
1838 	case 0x0:
1839 		fdirm |= IXGBE_FDIRM_L4P;
1840 		if (input_mask->formatted.dst_port ||
1841 		    input_mask->formatted.src_port) {
1842 			DEBUGOUT(" Error on src/dst port mask\n");
1843 			return IXGBE_ERR_CONFIG;
1844 		}
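		/* FALLTHROUGH */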
1845 	case IXGBE_ATR_L4TYPE_MASK:
1846 		break;
1847 	default:
1848 		DEBUGOUT(" Error on flow type mask\n");
1849 		return IXGBE_ERR_CONFIG;
1850 	}
1851 
1852 	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
1853 	case 0x0000:
1854 		/* mask VLAN ID, fall through to mask VLAN priority */
1855 		fdirm |= IXGBE_FDIRM_VLANID;
1856 	case 0x0FFF:
1857 		/* mask VLAN priority */
1858 		fdirm |= IXGBE_FDIRM_VLANP;
1859 		break;
1860 	case 0xE000:
1861 		/* mask VLAN ID only, fall through */
1862 		fdirm |= IXGBE_FDIRM_VLANID;
1863 	case 0xEFFF:
1864 		/* no VLAN fields masked */
1865 		break;
1866 	default:
1867 		DEBUGOUT(" Error on VLAN mask\n");
1868 		return IXGBE_ERR_CONFIG;
1869 	}
1870 
1871 	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
1872 	case 0x0000:
1873 		/* Mask Flex Bytes, fall through */
1874 		fdirm |= IXGBE_FDIRM_FLEX;
1875 	case 0xFFFF:
1876 		break;
1877 	default:
1878 		DEBUGOUT(" Error on flexible byte mask\n");
1879 		return IXGBE_ERR_CONFIG;
1880 	}
1881 
1882 	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
1883 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1884 
1885 	/* store the TCP/UDP port masks, bit reversed from port layout */
1886 	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
1887 
1888 	/* write both the same so that UDP and TCP use the same mask */
1889 	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
1890 	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
1891 
1892 	/* store source and destination IP masks (big-endian) */
1893 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1894 			     ~input_mask->formatted.src_ip[0]);
1895 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
1896 			     ~input_mask->formatted.dst_ip[0]);
1897 
1898 	return IXGBE_SUCCESS;
1899 }
1900 
1901 s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
1902 					  union ixgbe_atr_input *input,
1903 					  u16 soft_id, u8 queue)
1904 {
1905 	u32 fdirport, fdirvlan, fdirhash, fdircmd;
1906 
1907 	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
1908 
1909 	/* currently IPv6 is not supported, must be programmed with 0 */
1910 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
1911 			     input->formatted.src_ip[0]);
1912 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
1913 			     input->formatted.src_ip[1]);
1914 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
1915 			     input->formatted.src_ip[2]);
1916 
1917 	/* record the source address (big-endian) */
1918 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
1919 
1920 	/* record the first 32 bits of the destination address (big-endian) */
1921 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
1922 
1923 	/* record source and destination port (little-endian) */
1924 	fdirport = IXGBE_NTOHS(input->formatted.dst_port);
1925 	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
1926 	fdirport |= IXGBE_NTOHS(input->formatted.src_port);
1927 	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
1928 
1929 	/* record vlan (little-endian) and flex_bytes (big-endian) */
1930 	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
1931 	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
1932 	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
1933 	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
1934 
1935 	/* configure FDIRHASH register */
1936 	fdirhash = input->formatted.bkt_hash;
1937 	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1938 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1939 
1940 	/*
1941 	 * flush all previous writes to make certain registers are
1942 	 * programmed prior to issuing the command
1943 	 */
1944 	IXGBE_WRITE_FLUSH(hw);
1945 
1946 	/* configure FDIRCMD register */
1947 	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1948 		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1949 	if (queue == IXGBE_FDIR_DROP_QUEUE)
1950 		fdircmd |= IXGBE_FDIRCMD_DROP;
1951 	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1952 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1953 	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
1954 
1955 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1956 
1957 	return IXGBE_SUCCESS;
1958 }
1959 
1960 s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
1961 					  union ixgbe_atr_input *input,
1962 					  u16 soft_id)
1963 {
1964 	u32 fdirhash;
1965 	u32 fdircmd = 0;
1966 	u32 retry_count;
1967 	s32 err = IXGBE_SUCCESS;
1968 
1969 	/* configure FDIRHASH register */
1970 	fdirhash = input->formatted.bkt_hash;
1971 	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1972 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1973 
1974 	/* flush hash to HW */
1975 	IXGBE_WRITE_FLUSH(hw);
1976 
1977 	/* Query if filter is present */
1978 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
1979 
1980 	for (retry_count = 10; retry_count; retry_count--) {
1981 		/* allow 10us for query to process */
1982 		usec_delay(10);
1983 		/* verify query completed successfully */
1984 		fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
1985 		if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
1986 			break;
1987 	}
1988 
1989 	if (!retry_count)
1990 		err = IXGBE_ERR_FDIR_REINIT_FAILED;
1991 
1992 	/* if filter exists in hardware then remove it */
1993 	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
1994 		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1995 		IXGBE_WRITE_FLUSH(hw);
1996 		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1997 				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
1998 	}
1999 
2000 	return err;
2001 }
2002 
2003 /**
2004  *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
2005  *  @hw: pointer to hardware structure
2006  *  @input: input bitstream
2007  *  @input_mask: mask for the input bitstream
2008  *  @soft_id: software index for the filters
2009  *  @queue: queue index to direct traffic to
2010  *
2011  *  Note that the caller of this function must hold a lock, since the
2012  *  hardware writes must be protected from one another.
2013  **/
2014 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
2015 					union ixgbe_atr_input *input,
2016 					union ixgbe_atr_input *input_mask,
2017 					u16 soft_id, u8 queue)
2018 {
2019 	s32 err = IXGBE_ERR_CONFIG;
2020 
2021 	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
2022 
2023 	/*
2024 	 * Check flow_type formatting, and bail out before we touch the hardware
2025 	 * if there's a configuration issue
2026 	 */
2027 	switch (input->formatted.flow_type) {
2028 	case IXGBE_ATR_FLOW_TYPE_IPV4:
2029 		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
2030 		if (input->formatted.dst_port || input->formatted.src_port) {
2031 			DEBUGOUT(" Error on src/dst port\n");
2032 			return IXGBE_ERR_CONFIG;
2033 		}
2034 		break;
2035 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
2036 		if (input->formatted.dst_port || input->formatted.src_port) {
2037 			DEBUGOUT(" Error on src/dst port\n");
2038 			return IXGBE_ERR_CONFIG;
2039 		}
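		/* FALLTHROUGH */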
2040 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
2041 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
2042 		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2043 						  IXGBE_ATR_L4TYPE_MASK;
2044 		break;
2045 	default:
2046 		DEBUGOUT(" Error on flow type input\n");
2047 		return err;
2048 	}
2049 
2050 	/* program input mask into the HW */
2051 	err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
2052 	if (err)
2053 		return err;
2054 
2055 	/* apply mask and compute/store hash */
2056 	ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
2057 
2058 	/* program filters to filter memory */
2059 	return ixgbe_fdir_write_perfect_filter_82599(hw, input,
2060 						     soft_id, queue);
2061 }
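/*
 * Illustrative sketch (not compiled): steering TCP traffic from 192.0.2.1
 * (any source port) to 192.0.2.2:8080 into a given queue with a perfect
 * filter.  Field names are assumed to follow the ixgbe_atr_input layout in
 * ixgbe_type.h, htonl()/htons()/memset() availability is assumed, and hw,
 * soft_id and queue are placeholders supplied by the caller, which must
 * also hold the lock mentioned above.  Fields left zero in the mask are
 * treated as wildcards by ixgbe_fdir_set_input_mask_82599().
 */
#if 0
static s32
example_add_perfect_filter(struct ixgbe_hw *hw, u16 soft_id, u8 queue)
{
	union ixgbe_atr_input filter, filter_mask;

	memset(&filter, 0, sizeof(filter));
	memset(&filter_mask, 0, sizeof(filter_mask));

	filter.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
	filter.formatted.src_ip[0] = htonl(0xC0000201);	/* 192.0.2.1 */
	filter.formatted.dst_ip[0] = htonl(0xC0000202);	/* 192.0.2.2 */
	filter.formatted.dst_port  = htons(8080);

	filter_mask.formatted.src_ip[0] = 0xFFFFFFFF;
	filter_mask.formatted.dst_ip[0] = 0xFFFFFFFF;
	filter_mask.formatted.dst_port  = 0xFFFF;

	return ixgbe_fdir_add_perfect_filter_82599(hw, &filter, &filter_mask,
						   soft_id, queue);
}
#endif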
2062 
2063 /**
2064  *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
2065  *  @hw: pointer to hardware structure
2066  *  @reg: analog register to read
2067  *  @val: read value
2068  *
2069  *  Performs a read operation on the specified Omer analog register.
2070  **/
2071 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
2072 {
2073 	u32  core_ctl;
2074 
2075 	DEBUGFUNC("ixgbe_read_analog_reg8_82599");
2076 
2077 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
2078 			(reg << 8));
2079 	IXGBE_WRITE_FLUSH(hw);
2080 	usec_delay(10);
2081 	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
2082 	*val = (u8)core_ctl;
2083 
2084 	return IXGBE_SUCCESS;
2085 }
2086 
2087 /**
2088  *  ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
2089  *  @hw: pointer to hardware structure
2090  *  @reg: analog register to write
2091  *  @val: value to write
2092  *
2093  *  Performs a write operation on the specified Omer analog register.
2094  **/
2095 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
2096 {
2097 	u32  core_ctl;
2098 
2099 	DEBUGFUNC("ixgbe_write_analog_reg8_82599");
2100 
2101 	core_ctl = (reg << 8) | val;
2102 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
2103 	IXGBE_WRITE_FLUSH(hw);
2104 	usec_delay(10);
2105 
2106 	return IXGBE_SUCCESS;
2107 }
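/*
 * Sketch of the intended read-modify-write pattern for the two helpers
 * above.  EXAMPLE_ANALOG_REG and EXAMPLE_ANALOG_BIT are hypothetical
 * placeholders, not defines from ixgbe_type.h.
 */
#if 0
static void
example_analog_rmw(struct ixgbe_hw *hw)
{
	u8 analog_val;

	(void)ixgbe_read_analog_reg8_82599(hw, EXAMPLE_ANALOG_REG,
					   &analog_val);
	analog_val |= EXAMPLE_ANALOG_BIT;
	(void)ixgbe_write_analog_reg8_82599(hw, EXAMPLE_ANALOG_REG,
					    analog_val);
}
#endif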
2108 
2109 /**
2110  *  ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
2111  *  @hw: pointer to hardware structure
2112  *
2113  *  Starts the hardware using the generic start_hw function
2114  *  and the gen2 start_hw function.
2115  *  Then performs revision-specific operations, if any.
2116  **/
2117 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
2118 {
2119 	s32 ret_val = IXGBE_SUCCESS;
2120 
2121 	DEBUGFUNC("ixgbe_start_hw_82599");
2122 
2123 	ret_val = ixgbe_start_hw_generic(hw);
2124 	if (ret_val != IXGBE_SUCCESS)
2125 		goto out;
2126 
2127 	ret_val = ixgbe_start_hw_gen2(hw);
2128 	if (ret_val != IXGBE_SUCCESS)
2129 		goto out;
2130 
2131 	/* We need to run link autotry after the driver loads */
2132 	hw->mac.autotry_restart = TRUE;
2133 
2134 	if (ret_val == IXGBE_SUCCESS)
2135 		ret_val = ixgbe_verify_fw_version_82599(hw);
2136 out:
2137 	return ret_val;
2138 }
2139 
2140 /**
2141  *  ixgbe_identify_phy_82599 - Get physical layer module
2142  *  @hw: pointer to hardware structure
2143  *
2144  *  Determines the physical layer module found on the current adapter.
2145  *  If PHY already detected, maintains current PHY type in hw struct,
2146  *  otherwise executes the PHY detection routine.
2147  **/
2148 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
2149 {
2150 	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
2151 
2152 	DEBUGFUNC("ixgbe_identify_phy_82599");
2153 
2154 	/* Detect PHY if not unknown - returns success if already detected. */
2155 	status = ixgbe_identify_phy_generic(hw);
2156 	if (status != IXGBE_SUCCESS) {
2157 		/* 82599 10GBASE-T requires an external PHY */
2158 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
2159 			goto out;
2160 		else
2161 			status = ixgbe_identify_module_generic(hw);
2162 	}
2163 
2164 	/* Set PHY type none if no PHY detected */
2165 	if (hw->phy.type == ixgbe_phy_unknown) {
2166 		hw->phy.type = ixgbe_phy_none;
2167 		status = IXGBE_SUCCESS;
2168 	}
2169 
2170 	/* Return error if SFP module has been detected but is not supported */
2171 	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
2172 		status = IXGBE_ERR_SFP_NOT_SUPPORTED;
2173 
2174 out:
2175 	return status;
2176 }
2177 
2178 /**
2179  *  ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
2180  *  @hw: pointer to hardware structure
2181  *
2182  *  Determines physical layer capabilities of the current configuration.
2183  **/
2184 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
2185 {
2186 	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
2187 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2188 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2189 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
2190 	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
2191 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
2192 	u16 ext_ability = 0;
2193 	u8 comp_codes_10g = 0;
2194 	u8 comp_codes_1g = 0;
2195 
2196 	DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
2197 
2198 	hw->phy.ops.identify(hw);
2199 
2200 	switch (hw->phy.type) {
2201 	case ixgbe_phy_tn:
2202 	case ixgbe_phy_cu_unknown:
2203 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
2204 		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
2205 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
2206 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2207 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
2208 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
2209 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
2210 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
2211 		goto out;
2212 	default:
2213 		break;
2214 	}
2215 
2216 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
2217 	case IXGBE_AUTOC_LMS_1G_AN:
2218 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
2219 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
2220 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
2221 			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
2222 			goto out;
2223 		} else
2224 			/* SFI mode so read SFP module */
2225 			goto sfp_check;
2226 		break;
2227 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
2228 		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
2229 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
2230 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
2231 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2232 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
2233 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
2234 		goto out;
2235 		break;
2236 	case IXGBE_AUTOC_LMS_10G_SERIAL:
2237 		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
2238 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2239 			goto out;
2240 		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
2241 			goto sfp_check;
2242 		break;
2243 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
2244 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2245 		if (autoc & IXGBE_AUTOC_KX_SUPP)
2246 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2247 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
2248 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2249 		if (autoc & IXGBE_AUTOC_KR_SUPP)
2250 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2251 		goto out;
2252 		break;
2253 	default:
2254 		goto out;
2255 		break;
2256 	}
2257 
2258 sfp_check:
2259 	/* SFP check must be done last since DA modules are sometimes used to
2260 	 * test KR mode - we need to identify KR mode correctly before the SFP
2261 	 * module.  Call identify_sfp because the pluggable module may have changed. */
2262 	hw->phy.ops.identify_sfp(hw);
2263 	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
2264 		goto out;
2265 
2266 	switch (hw->phy.type) {
2267 	case ixgbe_phy_sfp_passive_tyco:
2268 	case ixgbe_phy_sfp_passive_unknown:
2269 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
2270 		break;
2271 	case ixgbe_phy_sfp_ftl_active:
2272 	case ixgbe_phy_sfp_active_unknown:
2273 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
2274 		break;
2275 	case ixgbe_phy_sfp_avago:
2276 	case ixgbe_phy_sfp_ftl:
2277 	case ixgbe_phy_sfp_intel:
2278 	case ixgbe_phy_sfp_unknown:
2279 		hw->phy.ops.read_i2c_eeprom(hw,
2280 		      IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
2281 		hw->phy.ops.read_i2c_eeprom(hw,
2282 		      IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
2283 		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
2284 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
2285 		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
2286 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
2287 		else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
2288 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
2289 		else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
2290 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
2291 		break;
2292 	default:
2293 		break;
2294 	}
2295 
2296 out:
2297 	return physical_layer;
2298 }
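/*
 * Sketch: the return value is a bitmask, so callers test individual
 * IXGBE_PHYSICAL_LAYER_* bits rather than comparing for equality.
 */
#if 0
static void
example_check_physical_layer(struct ixgbe_hw *hw)
{
	u32 layer = ixgbe_get_supported_physical_layer_82599(hw);

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
		DEBUGOUT("10GBASE-SR capable module detected\n");
}
#endif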
2299 
2300 /**
2301  *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2302  *  @hw: pointer to hardware structure
2303  *  @regval: register value to write to RXCTRL
2304  *
2305  *  Enables the Rx DMA unit for 82599
2306  **/
2307 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2308 {
2309 
2310 	DEBUGFUNC("ixgbe_enable_rx_dma_82599");
2311 
2312 	/*
2313 	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2314 	 * If traffic is incoming before we enable the Rx unit, it could hang
2315 	 * the Rx DMA unit.  Therefore, make sure the security engine is
2316 	 * completely disabled prior to enabling the Rx unit.
2317 	 */
2318 
2319 	hw->mac.ops.disable_sec_rx_path(hw);
2320 
2321 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2322 
2323 	hw->mac.ops.enable_sec_rx_path(hw);
2324 
2325 	return IXGBE_SUCCESS;
2326 }
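/*
 * Sketch of the usual caller pattern: read the current RXCTRL value, set
 * the receive-enable bit, and let this routine handle quiescing the
 * security block as described above.
 */
#if 0
static void
example_enable_rx(struct ixgbe_hw *hw)
{
	u32 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);

	(void)ixgbe_enable_rx_dma_82599(hw, rxctrl | IXGBE_RXCTRL_RXEN);
}
#endif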
2327 
2328 /**
2329  *  ixgbe_verify_fw_version_82599 - verify fw version for 82599
2330  *  @hw: pointer to hardware structure
2331  *
2332  *  Verifies that the installed firmware version is 0.6 or higher
2333  *  for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2334  *
2335  *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2336  *  if the FW version is not supported.
2337  **/
2338 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2339 {
2340 	s32 status = IXGBE_ERR_EEPROM_VERSION;
2341 	u16 fw_offset, fw_ptp_cfg_offset;
2342 	u16 fw_version;
2343 
2344 	DEBUGFUNC("ixgbe_verify_fw_version_82599");
2345 
2346 	/* firmware check is only necessary for SFI devices */
2347 	if (hw->phy.media_type != ixgbe_media_type_fiber) {
2348 		status = IXGBE_SUCCESS;
2349 		goto fw_version_out;
2350 	}
2351 
2352 	/* get the offset to the Firmware Module block */
2353 	if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
2354 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2355 			      "eeprom read at offset %d failed", IXGBE_FW_PTR);
2356 		return IXGBE_ERR_EEPROM_VERSION;
2357 	}
2358 
2359 	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2360 		goto fw_version_out;
2361 
2362 	/* get the offset to the Pass Through Patch Configuration block */
2363 	if (hw->eeprom.ops.read(hw, (fw_offset +
2364 				 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2365 				 &fw_ptp_cfg_offset)) {
2366 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2367 			      "eeprom read at offset %d failed",
2368 			      fw_offset +
2369 			      IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
2370 		return IXGBE_ERR_EEPROM_VERSION;
2371 	}
2372 
2373 	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2374 		goto fw_version_out;
2375 
2376 	/* get the firmware version */
2377 	if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2378 			    IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
2379 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2380 			      "eeprom read at offset %d failed",
2381 			      fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
2382 		return IXGBE_ERR_EEPROM_VERSION;
2383 	}
2384 
2385 	if (fw_version > 0x5)
2386 		status = IXGBE_SUCCESS;
2387 
2388 fw_version_out:
2389 	return status;
2390 }
2391 
2392 /**
2393  *  ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
2394  *  @hw: pointer to hardware structure
2395  *
2396  *  Returns TRUE if the LESM FW module is present and enabled. Otherwise
2397  *  returns FALSE. Smart Speed must be disabled if LESM FW module is enabled.
2398  **/
2399 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2400 {
2401 	bool lesm_enabled = FALSE;
2402 	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
2403 	s32 status;
2404 
2405 	DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
2406 
2407 	/* get the offset to the Firmware Module block */
2408 	status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2409 
2410 	if ((status != IXGBE_SUCCESS) ||
2411 	    (fw_offset == 0) || (fw_offset == 0xFFFF))
2412 		goto out;
2413 
2414 	/* get the offset to the LESM Parameters block */
2415 	status = hw->eeprom.ops.read(hw, (fw_offset +
2416 				     IXGBE_FW_LESM_PARAMETERS_PTR),
2417 				     &fw_lesm_param_offset);
2418 
2419 	if ((status != IXGBE_SUCCESS) ||
2420 	    (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
2421 		goto out;
2422 
2423 	/* get the lesm state word */
2424 	status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2425 				     IXGBE_FW_LESM_STATE_1),
2426 				     &fw_lesm_state);
2427 
2428 	if ((status == IXGBE_SUCCESS) &&
2429 	    (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2430 		lesm_enabled = TRUE;
2431 
2432 out:
2433 	return lesm_enabled;
2434 }
2435 
2436 /**
2437  *  ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
2438  *  fastest available method
2439  *
2440  *  @hw: pointer to hardware structure
2441  *  @offset: offset of word in EEPROM to read
2442  *  @words: number of words
2443  *  @data: word(s) read from the EEPROM
2444  *
2445  *  Retrieves 16 bit word(s) read from EEPROM
2446  **/
2447 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2448 					  u16 words, u16 *data)
2449 {
2450 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2451 	s32 ret_val = IXGBE_ERR_CONFIG;
2452 
2453 	DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
2454 
2455 	/*
2456 	 * If EEPROM is detected and can be addressed using 14 bits,
2457 	 * use EERD, otherwise use bit bang
2458 	 */
2459 	if ((eeprom->type == ixgbe_eeprom_spi) &&
2460 	    (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
2461 		ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
2462 							 data);
2463 	else
2464 		ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
2465 								    words,
2466 								    data);
2467 
2468 	return ret_val;
2469 }
2470 
2471 /**
2472  *  ixgbe_read_eeprom_82599 - Read EEPROM word using
2473  *  fastest available method
2474  *
2475  *  @hw: pointer to hardware structure
2476  *  @offset: offset of word in the EEPROM to read
2477  *  @data: word read from the EEPROM
2478  *
2479  *  Reads a 16 bit word from the EEPROM
2480  **/
2481 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2482 				   u16 offset, u16 *data)
2483 {
2484 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2485 	s32 ret_val = IXGBE_ERR_CONFIG;
2486 
2487 	DEBUGFUNC("ixgbe_read_eeprom_82599");
2488 
2489 	/*
2490 	 * If EEPROM is detected and can be addressed using 14 bits,
2491 	 * use EERD, otherwise use bit bang
2492 	 */
2493 	if ((eeprom->type == ixgbe_eeprom_spi) &&
2494 	    (offset <= IXGBE_EERD_MAX_ADDR))
2495 		ret_val = ixgbe_read_eerd_generic(hw, offset, data);
2496 	else
2497 		ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2498 
2499 	return ret_val;
2500 }
2501 
2502 /**
2503  * ixgbe_reset_pipeline_82599 - perform pipeline reset
2504  *
2505  *  @hw: pointer to hardware structure
2506  *
2507  * Reset pipeline by asserting Restart_AN together with LMS change to ensure
2508  * full pipeline reset
2509  **/
2510 s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
2511 {
2512 	s32 ret_val;
2513 	u32 anlp1_reg = 0;
2514 	u32 i, autoc_reg, autoc2_reg;
2515 
2516 	/* Enable link if disabled in NVM */
2517 	autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2518 	if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
2519 		autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
2520 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
2521 		IXGBE_WRITE_FLUSH(hw);
2522 	}
2523 
2524 	autoc_reg = hw->mac.cached_autoc;
2525 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2526 	/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
2527 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
2528 	/* Wait for AN to leave state 0 */
2529 	for (i = 0; i < 10; i++) {
2530 		msec_delay(4);
2531 		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2532 		if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
2533 			break;
2534 	}
2535 
2536 	if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
2537 		DEBUGOUT("auto negotiation not completed\n");
2538 		ret_val = IXGBE_ERR_RESET_FAILED;
2539 		goto reset_pipeline_out;
2540 	}
2541 
2542 	ret_val = IXGBE_SUCCESS;
2543 
2544 reset_pipeline_out:
2545 	/* Write AUTOC register with original LMS field and Restart_AN */
2546 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2547 	IXGBE_WRITE_FLUSH(hw);
2548 
2549 	return ret_val;
2550 }
2551 
2552 
2553 
2554