/* $NetBSD: ixgbe_82599.c,v 1.20 2018/06/11 10:34:18 msaitoh Exp $ */

/******************************************************************************
  SPDX-License-Identifier: BSD-3-Clause

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82599.c 331224 2018-03-19 20:55:05Z erj $*/

#include "ixgbe_type.h"
#include "ixgbe_82599.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"

#define IXGBE_82599_MAX_TX_QUEUES 128
#define IXGBE_82599_MAX_RX_QUEUES 128
#define IXGBE_82599_RAR_ENTRIES   128
#define IXGBE_82599_MC_TBL_SIZE   128
#define IXGBE_82599_VFT_TBL_SIZE  128
#define IXGBE_82599_RX_PB_SIZE	  512

static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
				   u16 offset, u16 *data);
static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
					  u16 words, u16 *data);
static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
					u8 dev_addr, u8 *data);
static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
					u8 dev_addr, u8 data);

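/**
 *  ixgbe_init_mac_link_ops_82599 - Initialize MAC link function pointers
 *  @hw: pointer to hardware structure
 *
 *  Assigns the Tx laser control and link setup function pointers based on
 *  the media type, multispeed fiber capability, and SmartSpeed support.
 **/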
void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");

	/*
	 * Enable the laser control functions for SFP+ fiber
	 * when MNG is not enabled.
	 */
	if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
	    !ixgbe_mng_enabled(hw)) {
		mac->ops.disable_tx_laser =
				       ixgbe_disable_tx_laser_multispeed_fiber;
		mac->ops.enable_tx_laser =
					ixgbe_enable_tx_laser_multispeed_fiber;
		mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber;

	} else {
		mac->ops.disable_tx_laser = NULL;
		mac->ops.enable_tx_laser = NULL;
		mac->ops.flap_tx_laser = NULL;
	}

	if (hw->phy.multispeed_fiber) {
		/* Set up dual speed SFP+ support */
		mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
		mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
		mac->ops.set_rate_select_speed =
					       ixgbe_set_hard_rate_select_speed;
		if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed)
			mac->ops.set_rate_select_speed =
					       ixgbe_set_soft_rate_select_speed;
	} else {
		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
		     (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
		      hw->phy.smart_speed == ixgbe_smart_speed_on) &&
		      !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
			mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed;
		} else {
			mac->ops.setup_link = ixgbe_setup_mac_link_82599;
		}
	}
}

/**
 *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
 *  @hw: pointer to hardware structure
 *
 *  Initialize any function pointers that were not able to be
 *  set during init_shared_code because the PHY/SFP type was
 *  not known.  Perform the SFP init if necessary.
 *
 **/
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;
	u32 esdp;

	DEBUGFUNC("ixgbe_init_phy_ops_82599");

	if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
		/* Store flag indicating I2C bus access control unit. */
		hw->phy.qsfp_shared_i2c_bus = TRUE;

		/* Initialize access to QSFP+ I2C bus */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp |= IXGBE_ESDP_SDP0_DIR;
		esdp &= ~IXGBE_ESDP_SDP1_DIR;
		esdp &= ~IXGBE_ESDP_SDP0;
		esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
		esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);

		phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599;
		phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599;
	}

	/* Identify the PHY or SFP module */
	ret_val = phy->ops.identify(hw);
	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto init_phy_ops_out;

	/* Setup function pointers based on detected SFP module and speeds */
	ixgbe_init_mac_link_ops_82599(hw);
	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
		hw->phy.ops.reset = NULL;

	/* If copper media, overwrite with copper function pointers */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = ixgbe_setup_copper_link_82599;
		mac->ops.get_link_capabilities =
				  ixgbe_get_copper_link_capabilities_generic;
	}

	/* Set necessary function pointers based on PHY type */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
			     ixgbe_get_phy_firmware_version_tnx;
		break;
	default:
		break;
	}
init_phy_ops_out:
	return ret_val;
}

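/**
 *  ixgbe_setup_sfp_modules_82599 - Set up an SFP+ module
 *  @hw: pointer to hardware structure
 *
 *  Applies the EEPROM SFP init sequence to the CORECTL register under the
 *  SW/FW semaphore, then restarts the DSP in SFI mode via prot_autoc_write.
 **/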
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset, data_value;

	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");

	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
		ixgbe_init_mac_link_ops_82599(hw);

		hw->phy.ops.reset = NULL;

		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS)
			goto setup_sfp_out;

		/* PHY config will finish before releasing the semaphore */
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto setup_sfp_out;
		}

		if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
			goto setup_sfp_err;
		while (data_value != 0xffff) {
			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
			IXGBE_WRITE_FLUSH(hw);
			if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
				goto setup_sfp_err;
		}

		/* Release the semaphore */
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		/* Delay obtaining the semaphore again to allow FW access;
		 * prot_autoc_write uses the semaphore too.
		 */
		msec_delay(hw->eeprom.semaphore_delay);

		/* Restart DSP and set SFI mode */
		ret_val = hw->mac.ops.prot_autoc_write(hw,
			hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
			FALSE);

		if (ret_val) {
			DEBUGOUT("sfp module setup not complete\n");
			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
			goto setup_sfp_out;
		}
	}

setup_sfp_out:
	return ret_val;

setup_sfp_err:
	/* Release the semaphore */
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
	/* Delay obtaining semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", data_offset);
	return IXGBE_ERR_PHY;
}

/**
 *  prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
 *  @hw: pointer to hardware structure
 *  @locked: Returns whether we took the SW/FW lock for this read.
 *  @reg_val: Value we read from AUTOC
 *
 *  For this part (82599) we need to wrap read-modify-writes with a possible
 *  FW/SW lock.  It is assumed this lock will be freed with the next
 *  prot_autoc_write_82599().
 */
s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
{
	s32 ret_val;

	*locked = FALSE;
	/* If LESM is on then we need to hold the SW/FW semaphore. */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
					IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS)
			return IXGBE_ERR_SWFW_SYNC;

		*locked = TRUE;
	}

	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	return IXGBE_SUCCESS;
}

/**
 * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
 * @hw: pointer to hardware structure
 * @autoc: value to write to AUTOC
 * @locked: bool to indicate whether the SW/FW lock was already taken by
 *          a previous prot_autoc_read_82599.
 *
 * This part (82599) may need to hold the SW/FW lock around all writes to
 * AUTOC. Likewise after a write we need to do a pipeline reset.
 */
s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
{
	s32 ret_val = IXGBE_SUCCESS;

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		goto out;

	/* We only need to get the lock if:
	 *  - We didn't do it already (in the read part of a read-modify-write)
	 *  - LESM is enabled.
	 */
	if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
					IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS)
			return IXGBE_ERR_SWFW_SYNC;

		locked = TRUE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	ret_val = ixgbe_reset_pipeline_82599(hw);

out:
	/* Free the SW/FW semaphore as we either grabbed it here or
	 * already had it when this function was called.
	 */
	if (locked)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	return ret_val;
}

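/*
 * A minimal sketch of the read-modify-write pattern the two helpers above
 * are designed for (illustrative only; IXGBE_AUTOC_AN_RESTART stands in
 * for whatever AUTOC bits a caller actually changes):
 *
 *	bool locked;
 *	u32 autoc;
 *
 *	if (hw->mac.ops.prot_autoc_read(hw, &locked, &autoc) == IXGBE_SUCCESS) {
 *		autoc |= IXGBE_AUTOC_AN_RESTART;
 *		(void)hw->mac.ops.prot_autoc_write(hw, autoc, locked);
 *	}
 *
 * The locked flag returned by the read must be passed unchanged to the
 * write so the SW/FW semaphore is released exactly once.
 */
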
/**
 *  ixgbe_init_ops_82599 - Inits func ptrs and MAC type
 *  @hw: pointer to hardware structure
 *
 *  Initialize the function pointers and assign the MAC type for 82599.
 *  Does not touch the hardware.
 **/
s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82599");

	ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.identify = ixgbe_identify_phy_82599;
	phy->ops.init = ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = ixgbe_reset_hw_82599;
	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
	mac->ops.get_media_type = ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
				    ixgbe_get_supported_physical_layer_82599;
	mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
	mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = ixgbe_start_hw_82599;
	mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
	mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
	mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
	mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
	mac->ops.prot_autoc_read = prot_autoc_read_82599;
	mac->ops.prot_autoc_write = prot_autoc_write_82599;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
	mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
	mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = ixgbe_set_vfta_generic;
	mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
	mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
	mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599;
	mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
	mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;

	/* Link */
	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599;
	mac->ops.check_link = ixgbe_check_mac_link_generic;
	mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
	ixgbe_init_mac_link_ops_82599(hw);

	mac->mcft_size		= IXGBE_82599_MC_TBL_SIZE;
	mac->vft_size		= IXGBE_82599_VFT_TBL_SIZE;
	mac->num_rar_entries	= IXGBE_82599_RAR_ENTRIES;
	mac->rx_pb_size		= IXGBE_82599_RX_PB_SIZE;
	mac->max_rx_queues	= IXGBE_82599_MAX_RX_QUEUES;
	mac->max_tx_queues	= IXGBE_82599_MAX_TX_QUEUES;
	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);

	mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
				      & IXGBE_FWSM_MODE_MASK);

	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;

	/* EEPROM */
	eeprom->ops.read = ixgbe_read_eeprom_82599;
	eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;

	mac->ops.bypass_rw = ixgbe_bypass_rw_generic;
	mac->ops.bypass_valid_rd = ixgbe_bypass_valid_rd_generic;
	mac->ops.bypass_set = ixgbe_bypass_set_generic;
	mac->ops.bypass_rd_eep = ixgbe_bypass_rd_eep_generic;

	mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;

	return ret_val;
}

/**
 *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @autoneg: TRUE when autoneg or autotry is enabled
 *
 *  Determines the link capabilities by reading the AUTOC register.
 **/
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed,
				      bool *autoneg)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = 0;

	DEBUGFUNC("ixgbe_get_link_capabilities_82599");

	/* Check if 1G SFP module. */
	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		goto out;
	}

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not
	 * been stored, use the current register values.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_10G_SERIAL:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
		*autoneg = FALSE;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
	}

	if (hw->phy.multispeed_fiber) {
		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
			  IXGBE_LINK_SPEED_1GB_FULL;

		/* QSFP must not enable full auto-negotiation
		 * Limited autoneg is enabled at 1G
		 */
		if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
			*autoneg = FALSE;
		else
			*autoneg = TRUE;
	}

out:
	return status;
}

/**
 *  ixgbe_get_media_type_82599 - Get media type
 *  @hw: pointer to hardware structure
 *
 *  Returns the media type (fiber, copper, backplane)
 **/
enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
{
	enum ixgbe_media_type media_type;

	DEBUGFUNC("ixgbe_get_media_type_82599");

	/* Detect if there is a copper PHY attached. */
	switch (hw->phy.type) {
	case ixgbe_phy_cu_unknown:
	case ixgbe_phy_tn:
		media_type = ixgbe_media_type_copper;
		goto out;
	default:
		break;
	}

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
	case IXGBE_DEV_ID_82599_XAUI_LOM:
		/* Default device ID is mezzanine card KX/KX4 */
		media_type = ixgbe_media_type_backplane;
		break;
	case IXGBE_DEV_ID_82599_SFP:
	case IXGBE_DEV_ID_82599_SFP_FCOE:
	case IXGBE_DEV_ID_82599_SFP_EM:
	case IXGBE_DEV_ID_82599_SFP_SF2:
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599EN_SFP:
		media_type = ixgbe_media_type_fiber;
		break;
	case IXGBE_DEV_ID_82599_CX4:
		media_type = ixgbe_media_type_cx4;
		break;
	case IXGBE_DEV_ID_82599_T3_LOM:
		media_type = ixgbe_media_type_copper;
		break;
	case IXGBE_DEV_ID_82599_LS:
		media_type = ixgbe_media_type_fiber_lco;
		break;
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		media_type = ixgbe_media_type_fiber_qsfp;
		break;
	case IXGBE_DEV_ID_82599_BYPASS:
		media_type = ixgbe_media_type_fiber_fixed;
		hw->phy.multispeed_fiber = TRUE;
		break;
	default:
		media_type = ixgbe_media_type_unknown;
		break;
	}
out:
	return media_type;
}

/**
 *  ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
 *  @hw: pointer to hardware structure
 *
 *  Disables link during D3 power down sequence.
 *
 **/
void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
{
	u32 autoc2_reg;
	u16 ee_ctrl_2 = 0;

	DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
	ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);

	if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
	    ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
		autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
		autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
	}
}

/**
 *  ixgbe_start_mac_link_82599 - Setup MAC link settings
 *  @hw: pointer to hardware structure
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Configures link settings based on values in the ixgbe_hw struct.
 *  Restarts the link.  Performs autonegotiation if needed.
 **/
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
			       bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;
	bool got_lock = FALSE;

	DEBUGFUNC("ixgbe_start_mac_link_82599");

	/* reset_pipeline requires us to hold this lock as it writes to
	 * AUTOC.
	 */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		status = hw->mac.ops.acquire_swfw_sync(hw,
						       IXGBE_GSSR_MAC_CSR_SM);
		if (status != IXGBE_SUCCESS)
			goto out;

		got_lock = TRUE;
	}

	/* Restart link */
	ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autoneg did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noise during initial link setup */
	msec_delay(50);

out:
	return status;
}

/**
 *  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
 *  @hw: pointer to hardware structure
 *
 *  The base drivers may require better control over SFP+ module
 *  PHY states.  This includes selectively shutting down the Tx
 *  laser on the PHY, effectively halting physical link.
 **/
void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return;

	/* Disable Tx laser; allow 100us to go dark per spec */
	esdp_reg |= IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(100);
}

/**
 *  ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
 *  @hw: pointer to hardware structure
 *
 *  The base drivers may require better control over SFP+ module
 *  PHY states.  This includes selectively turning on the Tx
 *  laser on the PHY, effectively starting physical link.
 **/
void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Enable Tx laser; allow 100ms to light up */
	esdp_reg &= ~IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(100);
}

/**
 *  ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
 *  @hw: pointer to hardware structure
 *
 *  When the driver changes the link speeds that it can support,
 *  it sets autotry_restart to TRUE to indicate that we need to
 *  initiate a new autotry session with the link partner.  To do
 *  so, we set the speed then disable and re-enable the Tx laser, to
 *  alert the link partner that it also needs to restart autotry on its
 *  end.  This is consistent with true clause 37 autoneg, which also
 *  involves a loss of signal.
 **/
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return;

	if (hw->mac.autotry_restart) {
		ixgbe_disable_tx_laser_multispeed_fiber(hw);
		ixgbe_enable_tx_laser_multispeed_fiber(hw);
		hw->mac.autotry_restart = FALSE;
	}
}

/**
 *  ixgbe_set_hard_rate_select_speed - Set module link speed
 *  @hw: pointer to hardware structure
 *  @speed: link speed to set
 *
 *  Set module link speed via RS0/RS1 rate select pins.
 */
void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw,
					ixgbe_link_speed speed)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	switch (speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		esdp_reg &= ~IXGBE_ESDP_SDP5;
		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
		break;
	default:
		DEBUGOUT("Invalid fixed module speed\n");
		return;
	}

	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Implements the Intel SmartSpeed algorithm.
 **/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
				    ixgbe_link_speed speed,
				    bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 i, j;
	bool link_up = FALSE;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");

	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate.  This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = FALSE;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed, &link_up,
						  FALSE);
			if (status != IXGBE_SUCCESS)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link.  If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = TRUE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well as for multiple cycles of
	 * parallel detect, both 10g and 1g.  This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Turn SmartSpeed back off. */
	hw->phy.smart_speed_active = FALSE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);

out:
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		DEBUGOUT("Smartspeed has downgraded the link speed "
			 "from the maximum advertised\n");
	return status;
}

/**
 *  ixgbe_setup_mac_link_82599 - Set MAC link speed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Set the link speed in the AUTOC register and restarts link.
 **/
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
			       ixgbe_link_speed speed,
			       bool autoneg_wait_to_complete)
{
	bool autoneg = FALSE;
	s32 status = IXGBE_SUCCESS;
	u32 pma_pmd_1g, link_mode;
	/* Holds the value of the AUTOC register at this point in time */
	u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 orig_autoc = 0;	   /* Cached value of the AUTOC register */
	u32 autoc = current_autoc; /* Temporary variable for comparisons */
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 links_reg;
	u32 i;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;

	DEBUGFUNC("ixgbe_setup_mac_link_82599");

	/* Check to see if speed passed in is supported. */
	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	if (status)
		goto out;

	speed &= link_capabilities;

	if (speed == 0) {
		ixgbe_disable_tx_laser(hw); /* For fiber */
		ixgbe_set_phy_power(hw, false); /* For copper */
	} else {
		/* In case the previous media setting was none (down) */
		ixgbe_enable_tx_laser(hw); /* For fiber */
		ixgbe_set_phy_power(hw, true); /* For copper */
	}

	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support */
	if (hw->mac.orig_link_settings_stored)
		orig_autoc = hw->mac.orig_autoc;
	else
		orig_autoc = autoc;

	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;

	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
		/* Set KX4/KX/KR support according to speed requested */
		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
				autoc |= IXGBE_AUTOC_KX4_SUPP;
			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
			    (hw->phy.smart_speed_active == FALSE))
				autoc |= IXGBE_AUTOC_KR_SUPP;
		}
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
		/* Switch from 1G SFI to 10G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
		}
	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
		/* Switch from 10G SFI to 1G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel)
				autoc |= IXGBE_AUTOC_LMS_1G_AN;
			else
				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
		}
	}

	if (autoc != current_autoc) {
		/* Restart link */
		status = hw->mac.ops.prot_autoc_write(hw, autoc, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		/* Only poll for autoneg to complete if specified to do so */
		if (autoneg_wait_to_complete) {
			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
				links_reg = 0; /* Just in case Autoneg time = 0 */
				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
					links_reg =
					       IXGBE_READ_REG(hw, IXGBE_LINKS);
					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
						break;
					msec_delay(100);
				}
				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
					status =
						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
					DEBUGOUT("Autoneg did not complete.\n");
				}
			}
		}

		/* Add delay to filter out noise during initial link setup */
		msec_delay(50);
	}

out:
	return status;
}

/**
 *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
 *
 *  Restarts link on PHY and MAC based on settings passed in.
 **/
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete)
{
	s32 status;

	DEBUGFUNC("ixgbe_setup_copper_link_82599");

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed,
					      autoneg_wait_to_complete);
	/* Set up MAC */
	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);

	return status;
}

/**
 *  ixgbe_reset_hw_82599 - Perform hardware reset
 *  @hw: pointer to hardware structure
 *
 *  Resets the hardware by resetting the transmit and receive units, masks
 *  and clears all interrupts, performs a PHY reset, and performs a link
 *  (MAC) reset.
 **/
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
	ixgbe_link_speed link_speed;
	s32 status;
	u32 ctrl = 0;
	u32 i, autoc, autoc2;
	u32 curr_lms;
	bool link_up = FALSE;

	DEBUGFUNC("ixgbe_reset_hw_82599");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

	/* PHY ops must be identified and initialized prior to reset */

	/* Identify PHY and related function pointers */
	status = hw->phy.ops.init(hw);

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Setup SFP module if there is one present. */
	if (hw->phy.sfp_setup_needed) {
		status = hw->mac.ops.setup_sfp(hw);
		hw->phy.sfp_setup_needed = FALSE;
	}

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
		hw->phy.ops.reset(hw);

	/* remember AUTOC from before we reset */
	curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
	 * If link reset is used when link is up, it might reset the PHY when
	 * mng is using it.  If link is down or the flag to force full link
	 * reset is set, then perform link reset.
	 */
	ctrl = IXGBE_CTRL_LNK_RST;
	if (!hw->force_full_reset) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
		if (link_up)
			ctrl = IXGBE_CTRL_RST;
	}

	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear, meaning reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	if (ctrl & IXGBE_CTRL_RST_MASK) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to
	 * allow time for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet.  Otherwise restore the stored original
	 * values since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);

	/* Enable link if disabled in NVM */
	if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
		autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		IXGBE_WRITE_FLUSH(hw);
	}

	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = TRUE;
	} else {

		/* If MNG FW is running on a multi-speed device that
		 * doesn't autoneg without our driver support, we need to
		 * leave LMS in the state it was before the MAC reset.
		 * Likewise if we support WoL we don't want to change the
		 * LMS state.
		 */
		if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
		    hw->wol_enabled)
			hw->mac.orig_autoc =
				(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
				curr_lms;

		if (autoc != hw->mac.orig_autoc) {
			status = hw->mac.ops.prot_autoc_write(hw,
							hw->mac.orig_autoc,
							FALSE);
			if (status != IXGBE_SUCCESS)
				goto reset_hw_out;
		}

		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
				   IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		/* Save the SAN MAC RAR index */
		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;

		hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for this RAR */
		hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
				       IXGBE_CLEAR_VMDQ_ALL);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

reset_hw_out:
	return status;
}

/**
 * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
 * @hw: pointer to hardware structure
 * @fdircmd: current value of FDIRCMD register
 */
static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
{
	int i;

	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
		*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
		if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
			return IXGBE_SUCCESS;
		usec_delay(10);
	}

	return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
}

/**
 *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
 *  @hw: pointer to hardware structure
 **/
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
	s32 err;
	int i;
	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	u32 fdircmd;

	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;

	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");

	/*
	 * Before starting reinitialization process,
	 * FDIRCMD.CMD must be zero.
	 */
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
		return err;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * The 82599 adapter's Flow Director init flow cannot simply be
	 * restarted.  Work around this silicon errata by performing the
	 * following steps before re-writing the FDIRCTRL control register
	 * with the same value:
	 * - write 1 to bit 8 of the FDIRCMD register, then
	 * - write 0 to bit 8 of the FDIRCMD register.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * Clear FDIR Hash register to clear any leftover hashes
	 * waiting to be programmed.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll init-done after we write FDIRCTRL register */
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
				   IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}
	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	/* Clear FDIR statistics registers (read to clear) */
	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
 *  @hw: pointer to hardware structure
 *  @fdirctrl: value to write to flow director control register
 **/
static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	int i;

	DEBUGFUNC("ixgbe_fdir_enable_82599");

	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);

	/*
	 * Poll init-done after we write the register.  Estimated times:
	 *      10G: PBALLOC = 11b, timing is 60us
	 *       1G: PBALLOC = 11b, timing is 600us
	 *     100M: PBALLOC = 11b, timing is 6ms
	 *
	 *     Multiply these timings by 4 if under full Rx load
	 *
	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	 * for now.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
				   IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}

	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
		DEBUGOUT("Flow Director poll time exceeded!\n");
}

/**
 *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
 *  @hw: pointer to hardware structure
 *  @fdirctrl: value to write to flow director control register, initially
 *	     contains just the value of the Rx packet buffer allocation
 **/
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	DEBUGFUNC("ixgbe_init_fdir_signature_82599");

	/*
	 * Continue setup of fdirctrl register bits:
	 *  Move the flexible bytes to use the ethertype - shift 6 words
	 *  Set the maximum length per hash bucket to 0xA filters
	 *  Send interrupt when 64 (0x4 * 16) filters are left
	 */
	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
 *  @hw: pointer to hardware structure
 *  @fdirctrl: value to write to flow director control register, initially
 *	     contains just the value of the Rx packet buffer allocation
 *  @cloud_mode: TRUE - cloud mode, FALSE - other mode
 **/
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
			bool cloud_mode)
{
	UNREFERENCED_1PARAMETER(cloud_mode);
	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");

	/*
	 * Continue setup of fdirctrl register bits:
	 *  Turn perfect match filtering on
	 *  Report hash in RSS field of Rx wb descriptor
	 *  Initialize the drop queue to queue 127
	 *  Move the flexible bytes to use the ethertype - shift 6 words
	 *  Set the maximum length per hash bucket to 0xA filters
	 *  Send interrupt when 64 (0x4 * 16) filters are left
	 */
	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
		    IXGBE_FDIRCTRL_REPORT_STATUS |
		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	if (cloud_mode)
		fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD <<
					IXGBE_FDIRCTRL_FILTERMODE_SHIFT);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);

	return IXGBE_SUCCESS;
}

1414 
1415 /**
1416  *  ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue
1417  *  @hw: pointer to hardware structure
1418  *  @dropqueue: Rx queue index used for the dropped packets
1419  **/
1420 void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue)
1421 {
1422 	u32 fdirctrl;
1423 
1424 	DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599");
1425 	/* Clear init done bit and drop queue field */
1426 	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1427 	fdirctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE);
1428 
1429 	/* Set drop queue */
1430 	fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
1431 	if ((hw->mac.type == ixgbe_mac_X550) ||
1432 	    (hw->mac.type == ixgbe_mac_X550EM_x) ||
1433 	    (hw->mac.type == ixgbe_mac_X550EM_a))
1434 		fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH;
1435 
1436 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1437 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
1438 			 IXGBE_FDIRCMD_CLEARHT));
1439 	IXGBE_WRITE_FLUSH(hw);
1440 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1441 			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1442 			 ~IXGBE_FDIRCMD_CLEARHT));
1443 	IXGBE_WRITE_FLUSH(hw);
1444 
1445 	/* write hashes and fdirctrl register, poll for completion */
1446 	ixgbe_fdir_enable_82599(hw, fdirctrl);
1447 }
1448 
1449 /*
1450  * These defines allow us to quickly generate all of the necessary instructions
1451  * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
1452  * for values 0 through 15
1453  */
#define IXGBE_ATR_COMMON_HASH_KEY \
		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
		common_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)

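/*
 * Worked example: for _n = 0 the macro above reduces to
 *
 *	if (IXGBE_ATR_COMMON_HASH_KEY & 0x01)
 *		common_hash ^= lo_hash_dword;
 *	else if (IXGBE_ATR_BUCKET_HASH_KEY & 0x01)
 *		bucket_hash ^= lo_hash_dword;
 *	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & 0x01)
 *		sig_hash ^= lo_hash_dword << 16;
 *
 * plus the same test of key bit 16 against hi_hash_dword, so iterating
 * n = 0..15 consumes all 32 bits of each hash key.
 */
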
/**
 *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
 *  @input: input bitstream to compute the hash on
 *  @common: compressed common input dword
 *
 *  This is a heavily optimized computation of the ATR hashes.  It unwinds
 *  all of the loops, lets the compiler work out all of the conditional ifs
 *  since the keys are static defines, and computes two keys at once since
 *  the hashed dword stream will be the same for both keys.
 **/
u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
				     union ixgbe_atr_hash_dword common)
{
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input.dword);

	/* generate common hash dword */
	hi_hash_dword = IXGBE_NTOHL(common.dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the VLAN until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process the remaining 30 bits of the key */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);

	/* combine common_hash result with signature and bucket hashes */
	bucket_hash ^= common_hash;
	bucket_hash &= IXGBE_ATR_HASH_MASK;

	sig_hash ^= common_hash << 16;
	sig_hash &= IXGBE_ATR_HASH_MASK << 16;

	/* return completed signature hash */
	return sig_hash ^ bucket_hash;
}

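/*
 * The returned dword packs the signature hash into the upper 16 bits and
 * the bucket hash into the lower 16 bits, which is the layout
 * ixgbe_fdir_add_signature_filter_82599() below writes to FDIRHASH.
 */
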
/**
 *  ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
 *  @hw: pointer to hardware structure
 *  @input: unique input dword
 *  @common: compressed common input dword
 *  @queue: queue index to direct traffic to
 *
 * Note that the tunnel bit in input must not be set when the hardware
 * tunneling support does not exist.
 **/
void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
					   union ixgbe_atr_hash_dword input,
					   union ixgbe_atr_hash_dword common,
					   u8 queue)
{
	u64 fdirhashcmd;
	u8 flow_type;
	bool tunnel;
	u32 fdircmd;

	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");

	/*
	 * Get the flow_type in order to program FDIRCMD properly.
	 * The lowest 2 bits are FDIRCMD.L4TYPE, the third lowest bit is
	 * FDIRCMD.IPV6, and the fifth is FDIRCMD.TUNNEL_FILTER.
	 */
1567 	tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
1568 	flow_type = input.formatted.flow_type &
1569 		    (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
1570 	switch (flow_type) {
1571 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
1572 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
1573 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1574 	case IXGBE_ATR_FLOW_TYPE_TCPV6:
1575 	case IXGBE_ATR_FLOW_TYPE_UDPV6:
1576 	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
1577 		break;
1578 	default:
1579 		DEBUGOUT(" Error on flow type input\n");
1580 		return;
1581 	}
1582 
1583 	/* configure FDIRCMD register */
1584 	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1585 		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1586 	fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1587 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1588 	if (tunnel)
1589 		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
1590 
1591 	/*
1592 	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1593 	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
1594 	 */
1595 	fdirhashcmd = (u64)fdircmd << 32;
1596 	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
1597 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1598 
1599 	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1600 
1601 	return;
1602 }
1603 
1604 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
1605 do { \
1606 	u32 n = (_n); \
1607 	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1608 		bucket_hash ^= lo_hash_dword >> n; \
1609 	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1610 		bucket_hash ^= hi_hash_dword >> n; \
1611 } while (0)
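/*
 * Expansion sketch (illustrative only): for _n == 0 the macro reduces to
 *
 *	if (IXGBE_ATR_BUCKET_HASH_KEY & 0x00001)
 *		bucket_hash ^= lo_hash_dword >> 0;
 *	if (IXGBE_ATR_BUCKET_HASH_KEY & 0x10000)
 *		bucket_hash ^= hi_hash_dword >> 0;
 *
 * i.e. key bit n selects the lo dword contribution and key bit n + 16 the
 * hi dword contribution.
 */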
1612 
1613 /**
1614  *  ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
1615  *  @input: input bitstream to compute the hash on
1616  *  @input_mask: mask for the input bitstream
1617  *
1618  *  This function serves two main purposes.  First, it applies the
1619  *  input_mask to the atr_input, resulting in a cleaned-up atr_input data
1620  *  stream.  Second, it computes the hash and stores it in the bkt_hash
1621  *  field at the end of the input byte stream.  This way it is available
1622  *  for future use without needing to recompute the hash.
1623  **/
1624 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1625 					  union ixgbe_atr_input *input_mask)
1626 {
1627 
1628 	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1629 	u32 bucket_hash = 0;
1630 	u32 hi_dword = 0;
1631 	u32 i = 0;
1632 
1633 	/* Apply masks to input data */
1634 	for (i = 0; i < 14; i++)
1635 		input->dword_stream[i]  &= input_mask->dword_stream[i];
1636 
1637 	/* record the flow_vm_vlan bits as they are a key part to the hash */
1638 	flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
1639 
1640 	/* generate common hash dword */
1641 	for (i = 1; i <= 13; i++)
1642 		hi_dword ^= input->dword_stream[i];
1643 	hi_hash_dword = IXGBE_NTOHL(hi_dword);
1644 
1645 	/* low dword is word swapped version of common */
1646 	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1647 
1648 	/* apply flow ID/VM pool/VLAN ID bits to hash words */
1649 	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1650 
1651 	/* Process bits 0 and 16 */
1652 	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
1653 
1654 	/*
1655 	 * Apply the flow ID/VM pool/VLAN ID bits to the lo hash dword.  This
1656 	 * is delayed because bit 0 of the stream must not pick up the VLAN
1657 	 * bits, so they are folded in only after bit 0 has been processed.
1658 	 */
1659 	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1660 
1661 	/* Process the remaining 30 bits of the key */
1662 	for (i = 1; i <= 15; i++)
1663 		IXGBE_COMPUTE_BKT_HASH_ITERATION(i);
1664 
1665 	/*
1666 	 * Limit hash to 13 bits since max bucket count is 8K.
1667 	 * Store result at the end of the input stream.
1668 	 */
1669 	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
1670 }
1671 
1672 /**
1673  *  ixgbe_get_fdirtcpm_82599 - generate a TCP/UDP port mask from atr_input_masks
1674  *  @input_mask: mask to be bit swapped
1675  *
1676  *  The source and destination port masks for flow director are bit swapped
1677  *  in that bit 15 affects bit 0, bit 14 affects bit 1, bit 13 affects
1678  *  bit 2, and so on.  To generate a correctly swapped value we need to
1679  *  bit swap the mask, which is what this function accomplishes.
1680  **/
1681 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1682 {
1683 	u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
1684 	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1685 	mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
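	/*
	 * The four swap steps below reverse the bits within each 16-bit
	 * half.  Worked example: a per-port mask of 0x8000 (bit 15) becomes
	 * 0x0001 (bit 0); the dst half stays in bits 31:16 and the src half
	 * in bits 15:0.
	 */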
1686 	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1687 	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1688 	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1689 	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
1690 }
1691 
1692 /*
1693  * These two macros are meant to address the fact that we have registers
1694  * that are either all or in part big-endian.  As a result on big-endian
1695  * systems we will end up byte swapping the value to little-endian before
1696  * it is byte swapped again and written to the hardware in the original
1697  * big-endian format.
1698  */
1699 #define IXGBE_STORE_AS_BE32(_value) \
1700 	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
1701 	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
1702 
1703 #define IXGBE_WRITE_REG_BE32(a, reg, value) \
1704 	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
1705 
1706 #define IXGBE_STORE_AS_BE16(_value) \
1707 	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
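/*
 * Worked example: IXGBE_STORE_AS_BE32(0x12345678) == 0x78563412, i.e. a
 * full byte swap of the 32-bit value.
 */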
1708 
1709 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
1710 				    union ixgbe_atr_input *input_mask, bool cloud_mode)
1711 {
1712 	/* mask IPv6 since it is currently not supported */
1713 	u32 fdirm = IXGBE_FDIRM_DIPv6;
1714 	u32 fdirtcpm;
1715 	u32 fdirip6m;
1717 	DEBUGFUNC("ixgbe_fdir_set_input_mask_82599");
1718 
1719 	/*
1720 	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
1721 	 * are zero, then assume a full mask for that field.  Also assume that
1722 	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
1723 	 * cannot be masked out in this implementation.
1724 	 *
1725 	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
1726 	 * point in time.
1727 	 */
1728 
1729 	/* verify bucket hash is cleared on hash generation */
1730 	if (input_mask->formatted.bkt_hash)
1731 		DEBUGOUT(" bucket hash should always be 0 in mask\n");
1732 
1733 	/* Program FDIRM and verify partial masks */
1734 	switch (input_mask->formatted.vm_pool & 0x7F) {
1735 	case 0x0:
1736 		fdirm |= IXGBE_FDIRM_POOL;
		/* fall through */
1737 	case 0x7F:
1738 		break;
1739 	default:
1740 		DEBUGOUT(" Error on vm pool mask\n");
1741 		return IXGBE_ERR_CONFIG;
1742 	}
1743 
1744 	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
1745 	case 0x0:
1746 		fdirm |= IXGBE_FDIRM_L4P;
1747 		if (input_mask->formatted.dst_port ||
1748 		    input_mask->formatted.src_port) {
1749 			DEBUGOUT(" Error on src/dst port mask\n");
1750 			return IXGBE_ERR_CONFIG;
1751 		}
		/* fall through */
1752 	case IXGBE_ATR_L4TYPE_MASK:
1753 		break;
1754 	default:
1755 		DEBUGOUT(" Error on flow type mask\n");
1756 		return IXGBE_ERR_CONFIG;
1757 	}
1758 
1759 	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
1760 	case 0x0000:
1761 		/* mask VLAN ID */
1762 		fdirm |= IXGBE_FDIRM_VLANID;
1763 		/* fall through */
1764 	case 0x0FFF:
1765 		/* mask VLAN priority */
1766 		fdirm |= IXGBE_FDIRM_VLANP;
1767 		break;
1768 	case 0xE000:
1769 		/* mask VLAN ID only */
1770 		fdirm |= IXGBE_FDIRM_VLANID;
1771 		/* fall through */
1772 	case 0xEFFF:
1773 		/* no VLAN fields masked */
1774 		break;
1775 	default:
1776 		DEBUGOUT(" Error on VLAN mask\n");
1777 		return IXGBE_ERR_CONFIG;
1778 	}
1779 
1780 	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
1781 	case 0x0000:
1782 		/* Mask Flex Bytes */
1783 		fdirm |= IXGBE_FDIRM_FLEX;
1784 		/* fall through */
1785 	case 0xFFFF:
1786 		break;
1787 	default:
1788 		DEBUGOUT(" Error on flexible byte mask\n");
1789 		return IXGBE_ERR_CONFIG;
1790 	}
1791 
1792 	if (cloud_mode) {
1793 		fdirm |= IXGBE_FDIRM_L3P;
1794 		fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
1795 		fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
1796 
1797 		switch (input_mask->formatted.inner_mac[0] & 0xFF) {
1798 		case 0x00:
1799 			/* Mask inner MAC, fall through */
1800 			fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC;
1801 		case 0xFF:
1802 			break;
1803 		default:
1804 			DEBUGOUT(" Error on inner_mac byte mask\n");
1805 			return IXGBE_ERR_CONFIG;
1806 		}
1807 
1808 		switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) {
1809 		case 0x0:
1810 			/* Mask vxlan id */
1811 			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI;
1812 			break;
1813 		case 0x00FFFFFF:
1814 			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
1815 			break;
1816 		case 0xFFFFFFFF:
1817 			break;
1818 		default:
1819 			DEBUGOUT(" Error on TNI/VNI byte mask\n");
1820 			return IXGBE_ERR_CONFIG;
1821 		}
1822 
1823 		switch (input_mask->formatted.tunnel_type & 0xFFFF) {
1824 		case 0x0:
1825 			/* Mask tunnel type, fall through */
1826 			fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
1827 		case 0xFFFF:
1828 			break;
1829 		default:
1830 			DEBUGOUT(" Error on tunnel type byte mask\n");
1831 			return IXGBE_ERR_CONFIG;
1832 		}
1833 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);
1834 
1835 		/* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSCTPM,
1836 		 * FDIRSIP4M and FDIRDIP4M in cloud mode to allow
1837 		 * L3/L4 packets to tunnel.
1838 		 */
1839 		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
1840 		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
1841 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
1842 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
1843 		switch (hw->mac.type) {
1844 		case ixgbe_mac_X550:
1845 		case ixgbe_mac_X550EM_x:
1846 		case ixgbe_mac_X550EM_a:
1847 			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
1848 			break;
1849 		default:
1850 			break;
1851 		}
1852 	}
1853 
1854 	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
1855 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1856 
1857 	if (!cloud_mode) {
1858 		/* store the TCP/UDP port masks, bit reversed from port
1859 		 * layout */
1860 		fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
1861 
1862 		/* write both the same so that UDP and TCP use the same mask */
1863 		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
1864 		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
1865 		/* also use it for SCTP */
1866 		switch (hw->mac.type) {
1867 		case ixgbe_mac_X550:
1868 		case ixgbe_mac_X550EM_x:
1869 		case ixgbe_mac_X550EM_a:
1870 			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
1871 			break;
1872 		default:
1873 			break;
1874 		}
1875 
1876 		/* store source and destination IP masks (big-endian) */
1877 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1878 				     ~input_mask->formatted.src_ip[0]);
1879 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
1880 				     ~input_mask->formatted.dst_ip[0]);
1881 	}
1882 	return IXGBE_SUCCESS;
1883 }
1884 
1885 s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
1886 					  union ixgbe_atr_input *input,
1887 					  u16 soft_id, u8 queue, bool cloud_mode)
1888 {
1889 	u32 fdirport, fdirvlan, fdirhash, fdircmd;
1890 	u32 addr_low, addr_high;
1891 	u32 cloud_type = 0;
1892 	s32 err;
1894 
1895 	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
1896 	if (!cloud_mode) {
1897 		/* currently IPv6 is not supported, must be programmed with 0 */
1898 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
1899 				     input->formatted.src_ip[0]);
1900 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
1901 				     input->formatted.src_ip[1]);
1902 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
1903 				     input->formatted.src_ip[2]);
1904 
1905 		/* record the source address (big-endian) */
1906 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA,
1907 			input->formatted.src_ip[0]);
1908 
1909 		/* record the first 32 bits of the destination address
1910 		 * (big-endian) */
1911 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA,
1912 			input->formatted.dst_ip[0]);
1913 
1914 		/* record source and destination port (little-endian)*/
1915 		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
1916 		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
1917 		fdirport |= IXGBE_NTOHS(input->formatted.src_port);
1918 		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
1919 	}
1920 
1921 	/* record VLAN (little-endian) and flex_bytes (big-endian) */
1922 	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
1923 	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
1924 	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
1925 	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
1926 
1927 	if (cloud_mode) {
1928 		if (input->formatted.tunnel_type != 0)
1929 			cloud_type = 0x80000000;
1930 
1931 		addr_low = ((u32)input->formatted.inner_mac[0] |
1932 				((u32)input->formatted.inner_mac[1] << 8) |
1933 				((u32)input->formatted.inner_mac[2] << 16) |
1934 				((u32)input->formatted.inner_mac[3] << 24));
1935 		addr_high = ((u32)input->formatted.inner_mac[4] |
1936 				((u32)input->formatted.inner_mac[5] << 8));
1937 		cloud_type |= addr_high;
1938 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low);
1939 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type);
1940 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni);
1941 	}
1942 
1943 	/* configure FDIRHASH register */
1944 	fdirhash = input->formatted.bkt_hash;
1945 	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1946 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1947 
1948 	/*
1949 	 * flush all previous writes to make certain registers are
1950 	 * programmed prior to issuing the command
1951 	 */
1952 	IXGBE_WRITE_FLUSH(hw);
1953 
1954 	/* configure FDIRCMD register */
1955 	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1956 		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1957 	if (queue == IXGBE_FDIR_DROP_QUEUE)
1958 		fdircmd |= IXGBE_FDIRCMD_DROP;
1959 	if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK)
1960 		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
1961 	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1962 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1963 	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
1964 
1965 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1966 	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
1967 	if (err) {
1968 		DEBUGOUT("Flow Director command did not complete!\n");
1969 		return err;
1970 	}
1971 
1972 	return IXGBE_SUCCESS;
1973 }
1974 
1975 s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
1976 					  union ixgbe_atr_input *input,
1977 					  u16 soft_id)
1978 {
1979 	u32 fdirhash;
1980 	u32 fdircmd;
1981 	s32 err;
1982 
1983 	/* configure FDIRHASH register */
1984 	fdirhash = input->formatted.bkt_hash;
1985 	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1986 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1987 
1988 	/* flush hash to HW */
1989 	IXGBE_WRITE_FLUSH(hw);
1990 
1991 	/* Query if filter is present */
1992 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
1993 
1994 	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
1995 	if (err) {
1996 		DEBUGOUT("Flow Director command did not complete!\n");
1997 		return err;
1998 	}
1999 
2000 	/* if filter exists in hardware then remove it */
2001 	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
2002 		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
2003 		IXGBE_WRITE_FLUSH(hw);
2004 		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
2005 				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
2006 	}
2007 
2008 	return IXGBE_SUCCESS;
2009 }
2010 
2011 /**
2012  *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
2013  *  @hw: pointer to hardware structure
2014  *  @input: input bitstream
2015  *  @input_mask: mask for the input bitstream
2016  *  @soft_id: software index for the filters
2017  *  @queue: queue index to direct traffic to
2018  *  @cloud_mode: TRUE when programming a cloud-mode filter
2019  *
2020  *  Note that the caller of this function must hold a lock, since the
2021  *  hardware writes must be protected from one another.
2022  **/
2023 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
2024 					union ixgbe_atr_input *input,
2025 					union ixgbe_atr_input *input_mask,
2026 					u16 soft_id, u8 queue, bool cloud_mode)
2027 {
2028 	s32 err = IXGBE_ERR_CONFIG;
2030 
2031 	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
2032 
2033 	/*
2034 	 * Check flow_type formatting, and bail out before we touch the hardware
2035 	 * if there's a configuration issue
2036 	 */
2037 	switch (input->formatted.flow_type) {
2038 	case IXGBE_ATR_FLOW_TYPE_IPV4:
2039 	case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4:
2040 		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
2041 		if (input->formatted.dst_port || input->formatted.src_port) {
2042 			DEBUGOUT(" Error on src/dst port\n");
2043 			return IXGBE_ERR_CONFIG;
2044 		}
2045 		break;
2046 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
2047 	case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4:
2048 		if (input->formatted.dst_port || input->formatted.src_port) {
2049 			DEBUGOUT(" Error on src/dst port\n");
2050 			return IXGBE_ERR_CONFIG;
2051 		}
2052 		/* fall through */
2053 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
2054 	case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
2055 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
2056 	case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4:
2057 		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2058 						  IXGBE_ATR_L4TYPE_MASK;
2059 		break;
2060 	default:
2061 		DEBUGOUT(" Error on flow type input\n");
2062 		return err;
2063 	}
2064 
2065 	/* program input mask into the HW */
2066 	err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode);
2067 	if (err)
2068 		return err;
2069 
2070 	/* apply mask and compute/store hash */
2071 	ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
2072 
2073 	/* program filters to filter memory */
2074 	return ixgbe_fdir_write_perfect_filter_82599(hw, input,
2075 						     soft_id, queue, cloud_mode);
2076 }
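/*
 * Usage sketch (illustrative only; the field names follow union
 * ixgbe_atr_input and the values are hypothetical, not taken from a real
 * caller): to steer one TCP/IPv4 src/dst pair to queue 3 under soft_id 1,
 * a caller might do roughly:
 *
 *	union ixgbe_atr_input input, mask;
 *
 *	memset(&input, 0, sizeof(input));
 *	memset(&mask, 0, sizeof(mask));
 *	input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
 *	input.formatted.src_ip[0] = src_addr_be32;
 *	input.formatted.dst_ip[0] = dst_addr_be32;
 *	input.formatted.src_port = src_port_be16;
 *	input.formatted.dst_port = dst_port_be16;
 *	mask.formatted.src_ip[0] = 0xFFFFFFFF;
 *	mask.formatted.dst_ip[0] = 0xFFFFFFFF;
 *	mask.formatted.src_port = 0xFFFF;
 *	mask.formatted.dst_port = 0xFFFF;
 *	err = ixgbe_fdir_add_perfect_filter_82599(hw, &input, &mask,
 *						  1, 3, FALSE);
 *
 * with the caller holding the lock noted in the comment above.
 */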
2077 
2078 /**
2079  *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
2080  *  @hw: pointer to hardware structure
2081  *  @reg: analog register to read
2082  *  @val: read value
2083  *
2084  *  Performs a read operation on the specified Omer analog register.
2085  **/
2086 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
2087 {
2088 	u32  core_ctl;
2089 
2090 	DEBUGFUNC("ixgbe_read_analog_reg8_82599");
2091 
2092 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
2093 			(reg << 8));
2094 	IXGBE_WRITE_FLUSH(hw);
2095 	usec_delay(10);
2096 	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
2097 	*val = (u8)core_ctl;
2098 
2099 	return IXGBE_SUCCESS;
2100 }
2101 
2102 /**
2103  *  ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
2104  *  @hw: pointer to hardware structure
2105  *  @reg: analog register to write
2106  *  @val: value to write
2107  *
2108  *  Performs a write operation on the specified Omer analog register.
2109  **/
2110 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
2111 {
2112 	u32  core_ctl;
2113 
2114 	DEBUGFUNC("ixgbe_write_analog_reg8_82599");
2115 
2116 	core_ctl = (reg << 8) | val;
2117 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
2118 	IXGBE_WRITE_FLUSH(hw);
2119 	usec_delay(10);
2120 
2121 	return IXGBE_SUCCESS;
2122 }
2123 
2124 /**
2125  *  ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
2126  *  @hw: pointer to hardware structure
2127  *
2128  *  Starts the hardware using the generic start_hw function
2129  *  and the generation 2 start_hw function.
2130  *  Then performs revision-specific operations, if any.
2131  **/
2132 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
2133 {
2134 	s32 ret_val = IXGBE_SUCCESS;
2135 
2136 	DEBUGFUNC("ixgbe_start_hw_82599");
2137 
2138 	ret_val = ixgbe_start_hw_generic(hw);
2139 	if (ret_val != IXGBE_SUCCESS)
2140 		goto out;
2141 
2142 	ret_val = ixgbe_start_hw_gen2(hw);
2143 	if (ret_val != IXGBE_SUCCESS)
2144 		goto out;
2145 
2146 	/* We need to run link autotry after the driver loads */
2147 	hw->mac.autotry_restart = TRUE;
2148 
2149 	if (ret_val == IXGBE_SUCCESS)
2150 		ret_val = ixgbe_verify_fw_version_82599(hw);
2151 out:
2152 	return ret_val;
2153 }
2154 
2155 /**
2156  *  ixgbe_identify_phy_82599 - Get physical layer module
2157  *  @hw: pointer to hardware structure
2158  *
2159  *  Determines the physical layer module found on the current adapter.
2160  *  If PHY already detected, maintains current PHY type in hw struct,
2161  *  otherwise executes the PHY detection routine.
2162  **/
2163 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
2164 {
2165 	s32 status;
2166 
2167 	DEBUGFUNC("ixgbe_identify_phy_82599");
2168 
2169 	/* Detect the PHY - returns success if it was already detected. */
2170 	status = ixgbe_identify_phy_generic(hw);
2171 	if (status != IXGBE_SUCCESS) {
2172 		/* 82599 10GBASE-T requires an external PHY */
2173 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
2174 			return status;
2175 		else
2176 			status = ixgbe_identify_module_generic(hw);
2177 	}
2178 
2179 	/* Set PHY type none if no PHY detected */
2180 	if (hw->phy.type == ixgbe_phy_unknown) {
2181 		hw->phy.type = ixgbe_phy_none;
2182 		return IXGBE_SUCCESS;
2183 	}
2184 
2185 	/* Return error if SFP module has been detected but is not supported */
2186 	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
2187 		return IXGBE_ERR_SFP_NOT_SUPPORTED;
2188 
2189 	return status;
2190 }
2191 
2192 /**
2193  *  ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
2194  *  @hw: pointer to hardware structure
2195  *
2196  *  Determines physical layer capabilities of the current configuration.
2197  **/
2198 u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
2199 {
2200 	u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
2201 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2202 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2203 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
2204 	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
2205 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
2206 	u16 ext_ability = 0;
2207 
2208 	DEBUGFUNC("ixgbe_get_supported_physical_layer_82599");
2209 
2210 	hw->phy.ops.identify(hw);
2211 
2212 	switch (hw->phy.type) {
2213 	case ixgbe_phy_tn:
2214 	case ixgbe_phy_cu_unknown:
2215 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
2216 		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
2217 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
2218 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2219 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
2220 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
2221 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
2222 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
2223 		goto out;
2224 	default:
2225 		break;
2226 	}
2227 
2228 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
2229 	case IXGBE_AUTOC_LMS_1G_AN:
2230 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
2231 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
2232 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
2233 			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
2234 			goto out;
2235 		} else
2236 			/* SFI mode so read SFP module */
2237 			goto sfp_check;
2239 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
2240 		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
2241 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
2242 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
2243 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2244 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
2245 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
2246 		goto out;
2248 	case IXGBE_AUTOC_LMS_10G_SERIAL:
2249 		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
2250 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2251 			goto out;
2252 		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
2253 			goto sfp_check;
2254 		break;
2255 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
2256 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2257 		if (autoc & IXGBE_AUTOC_KX_SUPP)
2258 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2259 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
2260 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2261 		if (autoc & IXGBE_AUTOC_KR_SUPP)
2262 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2263 		goto out;
2265 	default:
2266 		goto out;
2268 	}
2269 
2270 sfp_check:
2271 	/* SFP check must be done last since DA modules are sometimes used to
2272 	 * test KR mode - we need to identify KR mode correctly before the SFP
2273 	 * check.  Call identify_sfp as the pluggable module may have changed. */
2274 	physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
2275 out:
2276 	return physical_layer;
2277 }
2278 
2279 /**
2280  *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2281  *  @hw: pointer to hardware structure
2282  *  @regval: register value to write to RXCTRL
2283  *
2284  *  Enables the Rx DMA unit for 82599
2285  **/
2286 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2287 {
2288 
2289 	DEBUGFUNC("ixgbe_enable_rx_dma_82599");
2290 
2291 	/*
2292 	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2293 	 * If traffic is incoming before we enable the Rx unit, it could hang
2294 	 * the Rx DMA unit.  Therefore, make sure the security engine is
2295 	 * completely disabled prior to enabling the Rx unit.
2296 	 */
2297 
2298 	hw->mac.ops.disable_sec_rx_path(hw);
2299 
2300 	if (regval & IXGBE_RXCTRL_RXEN)
2301 		ixgbe_enable_rx(hw);
2302 	else
2303 		ixgbe_disable_rx(hw);
2304 
2305 	hw->mac.ops.enable_sec_rx_path(hw);
2306 
2307 	return IXGBE_SUCCESS;
2308 }
2309 
2310 /**
2311  *  ixgbe_verify_fw_version_82599 - verify FW version for 82599
2312  *  @hw: pointer to hardware structure
2313  *
2314  *  Verifies that the installed firmware version is 0.6 or higher
2315  *  for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2316  *
2317  *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2318  *  if the FW version is not supported.
2319  **/
2320 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2321 {
2322 	s32 status = IXGBE_ERR_EEPROM_VERSION;
2323 	u16 fw_offset, fw_ptp_cfg_offset;
2324 	u16 fw_version;
2325 
2326 	DEBUGFUNC("ixgbe_verify_fw_version_82599");
2327 
2328 	/* firmware check is only necessary for SFI devices */
2329 	if (hw->phy.media_type != ixgbe_media_type_fiber) {
2330 		status = IXGBE_SUCCESS;
2331 		goto fw_version_out;
2332 	}
2333 
2334 	/* get the offset to the Firmware Module block */
2335 	if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
2336 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2337 			      "eeprom read at offset %d failed", IXGBE_FW_PTR);
2338 		return IXGBE_ERR_EEPROM_VERSION;
2339 	}
2340 
2341 	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2342 		goto fw_version_out;
2343 
2344 	/* get the offset to the Pass Through Patch Configuration block */
2345 	if (hw->eeprom.ops.read(hw, (fw_offset +
2346 				 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2347 				 &fw_ptp_cfg_offset)) {
2348 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2349 			      "eeprom read at offset %d failed",
2350 			      fw_offset +
2351 			      IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
2352 		return IXGBE_ERR_EEPROM_VERSION;
2353 	}
2354 
2355 	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2356 		goto fw_version_out;
2357 
2358 	/* get the firmware version */
2359 	if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2360 			    IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
2361 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2362 			      "eeprom read at offset %d failed",
2363 			      fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
2364 		return IXGBE_ERR_EEPROM_VERSION;
2365 	}
2366 
2367 	if (fw_version > 0x5)
2368 		status = IXGBE_SUCCESS;
2369 
2370 fw_version_out:
2371 	return status;
2372 }
2373 
2374 /**
2375  *  ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
2376  *  @hw: pointer to hardware structure
2377  *
2378  *  Returns TRUE if the LESM FW module is present and enabled. Otherwise
2379  *  returns FALSE. Smart Speed must be disabled if LESM FW module is enabled.
2380  **/
2381 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2382 {
2383 	bool lesm_enabled = FALSE;
2384 	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
2385 	s32 status;
2386 
2387 	DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
2388 
2389 	/* get the offset to the Firmware Module block */
2390 	status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2391 
2392 	if ((status != IXGBE_SUCCESS) ||
2393 	    (fw_offset == 0) || (fw_offset == 0xFFFF))
2394 		goto out;
2395 
2396 	/* get the offset to the LESM Parameters block */
2397 	status = hw->eeprom.ops.read(hw, (fw_offset +
2398 				     IXGBE_FW_LESM_PARAMETERS_PTR),
2399 				     &fw_lesm_param_offset);
2400 
2401 	if ((status != IXGBE_SUCCESS) ||
2402 	    (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
2403 		goto out;
2404 
2405 	/* get the LESM state word */
2406 	status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2407 				     IXGBE_FW_LESM_STATE_1),
2408 				     &fw_lesm_state);
2409 
2410 	if ((status == IXGBE_SUCCESS) &&
2411 	    (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2412 		lesm_enabled = TRUE;
2413 
2414 out:
2415 	return lesm_enabled;
2416 }
2417 
2418 /**
2419  *  ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
2420  *  fastest available method
2421  *
2422  *  @hw: pointer to hardware structure
2423  *  @offset: offset of word in EEPROM to read
2424  *  @words: number of words
2425  *  @data: word(s) read from the EEPROM
2426  *
2427  *  Retrieves 16 bit word(s) read from EEPROM
2428  **/
2429 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2430 					  u16 words, u16 *data)
2431 {
2432 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2433 	s32 ret_val = IXGBE_ERR_CONFIG;
2434 
2435 	DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
2436 
2437 	/*
2438 	 * If EEPROM is detected and can be addressed using 14 bits,
2439 	 * use EERD otherwise use bit bang
2440 	 */
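	/* (IXGBE_EERD_MAX_ADDR encodes the 14-bit limit, i.e. 0x3FFF.) */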
2441 	if ((eeprom->type == ixgbe_eeprom_spi) &&
2442 	    (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
2443 		ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
2444 							 data);
2445 	else
2446 		ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
2447 								    words,
2448 								    data);
2449 
2450 	return ret_val;
2451 }
2452 
2453 /**
2454  *  ixgbe_read_eeprom_82599 - Read EEPROM word using
2455  *  fastest available method
2456  *
2457  *  @hw: pointer to hardware structure
2458  *  @offset: offset of word in the EEPROM to read
2459  *  @data: word read from the EEPROM
2460  *
2461  *  Reads a 16 bit word from the EEPROM
2462  **/
2463 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2464 				   u16 offset, u16 *data)
2465 {
2466 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2467 	s32 ret_val = IXGBE_ERR_CONFIG;
2468 
2469 	DEBUGFUNC("ixgbe_read_eeprom_82599");
2470 
2471 	/*
2472 	 * If EEPROM is detected and can be addressed using 14 bits,
2473 	 * use EERD otherwise use bit bang
2474 	 */
2475 	if ((eeprom->type == ixgbe_eeprom_spi) &&
2476 	    (offset <= IXGBE_EERD_MAX_ADDR))
2477 		ret_val = ixgbe_read_eerd_generic(hw, offset, data);
2478 	else
2479 		ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2480 
2481 	return ret_val;
2482 }
2483 
2484 /**
2485  * ixgbe_reset_pipeline_82599 - perform pipeline reset
2486  *
2487  *  @hw: pointer to hardware structure
2488  *
2489  * Reset pipeline by asserting Restart_AN together with LMS change to ensure
2490  * full pipeline reset.  This function assumes the SW/FW lock is held.
2491  **/
2492 static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
2493 {
2494 	s32 ret_val;
2495 	u32 anlp1_reg = 0;
2496 	u32 i, autoc_reg, autoc2_reg;
2497 
2498 	/* Enable link if disabled in NVM */
2499 	autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2500 	if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
2501 		autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
2502 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
2503 		IXGBE_WRITE_FLUSH(hw);
2504 	}
2505 
2506 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2507 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2508 	/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
2509 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
2510 			autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
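	/*
	 * XORing with 0x4 toggles bit 2 of the 3-bit LMS field (e.g. an LMS
	 * of 0x3 is driven to 0x7 here); the original value is restored from
	 * autoc_reg on the way out.
	 */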
2511 	/* Wait for AN to leave state 0 */
2512 	for (i = 0; i < 10; i++) {
2513 		msec_delay(4);
2514 		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2515 		if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
2516 			break;
2517 	}
2518 
2519 	if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
2520 		DEBUGOUT("auto negotiation not completed\n");
2521 		ret_val = IXGBE_ERR_RESET_FAILED;
2522 		goto reset_pipeline_out;
2523 	}
2524 
2525 	ret_val = IXGBE_SUCCESS;
2526 
2527 reset_pipeline_out:
2528 	/* Write AUTOC register with original LMS field and Restart_AN */
2529 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2530 	IXGBE_WRITE_FLUSH(hw);
2531 
2532 	return ret_val;
2533 }
2534 
2535 /**
2536  *  ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
2537  *  @hw: pointer to hardware structure
2538  *  @byte_offset: byte offset to read
2539  *  @dev_addr: address to read from
2540  *  @data: value read
2541  *
2542  *  Performs a byte read operation on the SFP module's EEPROM over the I2C
2543  *  interface at the specified device address.
2544  **/
2545 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
2546 				u8 dev_addr, u8 *data)
2547 {
2548 	u32 esdp;
2549 	s32 status;
2550 	s32 timeout = 200;
2551 
2552 	DEBUGFUNC("ixgbe_read_i2c_byte_82599");
2553 
2554 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2555 		/* Acquire I2C bus ownership. */
2556 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2557 		esdp |= IXGBE_ESDP_SDP0;
2558 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2559 		IXGBE_WRITE_FLUSH(hw);
2560 
2561 		while (timeout) {
2562 			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2563 			if (esdp & IXGBE_ESDP_SDP1)
2564 				break;
2565 
2566 			msec_delay(5);
2567 			timeout--;
2568 		}
2569 
2570 		if (!timeout) {
2571 			DEBUGOUT("Driver can't access resource,"
2572 				 " I2C bus acquisition timed out.\n");
2573 			status = IXGBE_ERR_I2C;
2574 			goto release_i2c_access;
2575 		}
2576 	}
2577 
2578 	status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
2579 
2580 release_i2c_access:
2581 
2582 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2583 		/* Release I2C bus ownership. */
2584 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2585 		esdp &= ~IXGBE_ESDP_SDP0;
2586 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2587 		IXGBE_WRITE_FLUSH(hw);
2588 	}
2589 
2590 	return status;
2591 }
2592 
2593 /**
2594  *  ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
2595  *  @hw: pointer to hardware structure
2596  *  @byte_offset: byte offset to write
2597  *  @dev_addr: address to write to
2598  *  @data: value to write
2599  *
2600  *  Performs a byte write operation on the SFP module's EEPROM over the I2C
2601  *  interface at the specified device address.
2602  **/
2603 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
2604 				 u8 dev_addr, u8 data)
2605 {
2606 	u32 esdp;
2607 	s32 status;
2608 	s32 timeout = 200;
2609 
2610 	DEBUGFUNC("ixgbe_write_i2c_byte_82599");
2611 
2612 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2613 		/* Acquire I2C bus ownership. */
2614 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2615 		esdp |= IXGBE_ESDP_SDP0;
2616 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2617 		IXGBE_WRITE_FLUSH(hw);
2618 
2619 		while (timeout) {
2620 			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2621 			if (esdp & IXGBE_ESDP_SDP1)
2622 				break;
2623 
2624 			msec_delay(5);
2625 			timeout--;
2626 		}
2627 
2628 		if (!timeout) {
2629 			DEBUGOUT("Driver can't access resource,"
2630 				 " I2C bus acquisition timed out.\n");
2631 			status = IXGBE_ERR_I2C;
2632 			goto release_i2c_access;
2633 		}
2634 	}
2635 
2636 	status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
2637 
2638 release_i2c_access:
2639 
2640 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2641 		/* Release I2C bus ownership. */
2642 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2643 		esdp &= ~IXGBE_ESDP_SDP0;
2644 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2645 		IXGBE_WRITE_FLUSH(hw);
2646 	}
2647 
2648 	return status;
2649 }
2650