/* $NetBSD: ixgbe_82599.c,v 1.32 2023/10/06 14:48:08 msaitoh Exp $ */

/******************************************************************************
  SPDX-License-Identifier: BSD-3-Clause

  Copyright (c) 2001-2020, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82599.c 331224 2018-03-19 20:55:05Z erj $*/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ixgbe_82599.c,v 1.32 2023/10/06 14:48:08 msaitoh Exp $");

#include "ixgbe_type.h"
#include "ixgbe_82599.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"

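/*
 * 82599 per-part limits.  ixgbe_init_ops_82599() below copies these into
 * the corresponding hw->mac fields (mcft_size, vft_size, num_rar_entries,
 * rx_pb_size, max_rx_queues, max_tx_queues).
 */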
#define IXGBE_82599_MAX_TX_QUEUES 128
#define IXGBE_82599_MAX_RX_QUEUES 128
#define IXGBE_82599_RAR_ENTRIES   128
#define IXGBE_82599_MC_TBL_SIZE   128
#define IXGBE_82599_VFT_TBL_SIZE  128
#define IXGBE_82599_RX_PB_SIZE	  512

static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
				   u16 offset, u16 *data);
static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
					  u16 words, u16 *data);
static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
					u8 dev_addr, u8 *data);
static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
					u8 dev_addr, u8 data);

void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");

	/*
	 * Enable the laser control functions for SFP+ fiber
	 * when MNG is not enabled.
	 */
	if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
	    !ixgbe_mng_enabled(hw)) {
		mac->ops.disable_tx_laser =
				       ixgbe_disable_tx_laser_multispeed_fiber;
		mac->ops.enable_tx_laser =
					ixgbe_enable_tx_laser_multispeed_fiber;
		mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber;

	} else {
		mac->ops.disable_tx_laser = NULL;
		mac->ops.enable_tx_laser = NULL;
		mac->ops.flap_tx_laser = NULL;
	}

	if (hw->phy.multispeed_fiber) {
		/* Set up dual speed SFP+ support */
		mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
		mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
		mac->ops.set_rate_select_speed =
					       ixgbe_set_hard_rate_select_speed;
		if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed)
			mac->ops.set_rate_select_speed =
					       ixgbe_set_soft_rate_select_speed;
	} else {
		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
		     (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
		      hw->phy.smart_speed == ixgbe_smart_speed_on) &&
		      !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
			mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed;
		} else {
			mac->ops.setup_link = ixgbe_setup_mac_link_82599;
		}
	}
}

/**
 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
 * @hw: pointer to hardware structure
 *
 * Initialize any function pointers that were not able to be
 * set during init_shared_code because the PHY/SFP type was
 * not known.  Perform the SFP init if necessary.
 *
 **/
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;
	u32 esdp;

	DEBUGFUNC("ixgbe_init_phy_ops_82599");

	if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
		/* Store flag indicating I2C bus access control unit. */
		hw->phy.qsfp_shared_i2c_bus = TRUE;

		/* Initialize access to QSFP+ I2C bus */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp |= IXGBE_ESDP_SDP0_DIR;
		esdp &= ~IXGBE_ESDP_SDP1_DIR;
		esdp &= ~IXGBE_ESDP_SDP0;
		esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
		esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);

		phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599;
		phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599;
	}
	/* Identify the PHY or SFP module */
	ret_val = phy->ops.identify(hw);
	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto init_phy_ops_out;

	/* Setup function pointers based on detected SFP module and speeds */
	ixgbe_init_mac_link_ops_82599(hw);
	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
		hw->phy.ops.reset = NULL;

	/* If copper media, overwrite with copper function pointers */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = ixgbe_setup_copper_link_82599;
		mac->ops.get_link_capabilities =
				  ixgbe_get_copper_link_capabilities_generic;
	}

	/* Set necessary function pointers based on PHY type */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
			     ixgbe_get_phy_firmware_version_tnx;
		break;
	default:
		break;
	}
init_phy_ops_out:
	return ret_val;
}

s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset, data_value;

	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");

	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
		ixgbe_init_mac_link_ops_82599(hw);

		hw->phy.ops.reset = NULL;

		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS)
			goto setup_sfp_out;

		/* PHY config will finish before releasing the semaphore */
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto setup_sfp_out;
		}

		if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
			goto setup_sfp_err;
		while (data_value != 0xffff) {
			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
			IXGBE_WRITE_FLUSH(hw);
			if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
				goto setup_sfp_err;
		}

		/* Release the semaphore */
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		/* Delay before obtaining the semaphore again to allow FW
		 * access; prot_autoc_write uses the semaphore too.
		 */
		msec_delay(hw->eeprom.semaphore_delay);

		/* Restart DSP and set SFI mode */
		ret_val = hw->mac.ops.prot_autoc_write(hw,
			hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
			FALSE);

		if (ret_val) {
			DEBUGOUT("sfp module setup not complete\n");
			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
			goto setup_sfp_out;
		}

	}

setup_sfp_out:
	return ret_val;

setup_sfp_err:
	/* Release the semaphore */
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
	/* Delay before obtaining the semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", data_offset);
	return IXGBE_ERR_PHY;
}

/**
 * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
 * @hw: pointer to hardware structure
 * @locked: Return whether we took the SW/FW lock for this read.
 * @reg_val: Value we read from AUTOC
 *
 * For this part (82599) we need to wrap read-modify-writes with a possible
 * FW/SW lock.  It is assumed this lock will be freed with the next
 * prot_autoc_write_82599().
 */
s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
{
	s32 ret_val;

	*locked = FALSE;
	 /* If LESM is on then we need to hold the SW/FW semaphore. */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
					IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS)
			return IXGBE_ERR_SWFW_SYNC;

		*locked = TRUE;
	}

	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	return IXGBE_SUCCESS;
}

/**
 * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
 * @hw: pointer to hardware structure
 * @autoc: value to write to AUTOC
 * @locked: bool to indicate whether the SW/FW lock was already taken by
 *          a previous prot_autoc_read_82599.
 *
 * This part (82599) may need to hold the SW/FW lock around all writes to
 * AUTOC.  Likewise after a write we need to do a pipeline reset.
 */
s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
{
	s32 ret_val = IXGBE_SUCCESS;

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		goto out;

	/* We only need to get the lock if:
	 *  - We didn't do it already (in the read part of a read-modify-write)
	 *  - LESM is enabled.
	 */
	if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
					IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS)
			return IXGBE_ERR_SWFW_SYNC;

		locked = TRUE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	ret_val = ixgbe_reset_pipeline_82599(hw);

out:
	/* Free the SW/FW semaphore as we either grabbed it here or
	 * already had it when this function was called.
	 */
	if (locked)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	return ret_val;
}
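
/*
 * Illustrative sketch (not called anywhere in this file): a caller doing a
 * read-modify-write of AUTOC threads the "locked" state from the read into
 * the matching write, so the SW/FW semaphore is taken at most once:
 *
 *	bool locked;
 *	u32 autoc;
 *
 *	if (hw->mac.ops.prot_autoc_read(hw, &locked, &autoc) ==
 *	    IXGBE_SUCCESS) {
 *		autoc |= IXGBE_AUTOC_AN_RESTART;
 *		(void)hw->mac.ops.prot_autoc_write(hw, autoc, locked);
 *	}
 */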

/**
 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
 * @hw: pointer to hardware structure
 *
 * Initialize the function pointers and assign the MAC type for 82599.
 * Does not touch the hardware.
 **/

s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val;
	u16 i;

	DEBUGFUNC("ixgbe_init_ops_82599");

	ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.identify = ixgbe_identify_phy_82599;
	phy->ops.init = ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = ixgbe_reset_hw_82599;
	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
	mac->ops.get_media_type = ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
				    ixgbe_get_supported_physical_layer_82599;
	mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
	mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = ixgbe_start_hw_82599;
	mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
	mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
	mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
	mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
	mac->ops.prot_autoc_read = prot_autoc_read_82599;
	mac->ops.prot_autoc_write = prot_autoc_write_82599;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
	mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
	mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = ixgbe_set_vfta_generic;
	mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
	mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
	mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599;
	mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
	mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;

	/* Link */
	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599;
	mac->ops.check_link = ixgbe_check_mac_link_generic;
	mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
	ixgbe_init_mac_link_ops_82599(hw);

	mac->mcft_size		= IXGBE_82599_MC_TBL_SIZE;
	mac->vft_size		= IXGBE_82599_VFT_TBL_SIZE;
	mac->num_rar_entries	= IXGBE_82599_RAR_ENTRIES;
	mac->rx_pb_size		= IXGBE_82599_RX_PB_SIZE;
	mac->max_rx_queues	= IXGBE_82599_MAX_RX_QUEUES;
	mac->max_tx_queues	= IXGBE_82599_MAX_TX_QUEUES;
	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);

	mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
				      & IXGBE_FWSM_MODE_MASK);

	for (i = 0; i < 64; i++)
		hw->mbx.ops[i].init_params = ixgbe_init_mbx_params_pf;

	/* EEPROM */
	eeprom->ops.read = ixgbe_read_eeprom_82599;
	eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;

	mac->ops.bypass_rw = ixgbe_bypass_rw_generic;
	mac->ops.bypass_valid_rd = ixgbe_bypass_valid_rd_generic;
	mac->ops.bypass_set = ixgbe_bypass_set_generic;
	mac->ops.bypass_rd_eep = ixgbe_bypass_rd_eep_generic;

	mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;

	return ret_val;
}

/**
 * ixgbe_get_link_capabilities_82599 - Determines link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @autoneg: TRUE when autoneg or autotry is enabled
 *
 * Determines the link capabilities by reading the AUTOC register.
 **/
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed,
				      bool *autoneg)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = 0;

	DEBUGFUNC("ixgbe_get_link_capabilities_82599");

	/* Check if 1G SFP module. */
	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		goto out;
	}

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not
	 * been stored, use the current register values.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_10G_SERIAL:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
		*autoneg = FALSE;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
		break;
	}

	if (hw->phy.multispeed_fiber) {
		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
			  IXGBE_LINK_SPEED_1GB_FULL;

		/* QSFP must not enable full auto-negotiation
		 * Limited autoneg is enabled at 1G
		 */
		if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
			*autoneg = FALSE;
		else
			*autoneg = TRUE;
	}

out:
	return status;
}

/**
 * ixgbe_get_media_type_82599 - Get media type
 * @hw: pointer to hardware structure
 *
 * Returns the media type (fiber, copper, backplane)
 **/
enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
{
	enum ixgbe_media_type media_type;

	DEBUGFUNC("ixgbe_get_media_type_82599");

	/* Detect if there is a copper PHY attached. */
	switch (hw->phy.type) {
	case ixgbe_phy_cu_unknown:
	case ixgbe_phy_tn:
		media_type = ixgbe_media_type_copper;
		goto out;
	default:
		break;
	}

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
	case IXGBE_DEV_ID_82599_XAUI_LOM:
		/* Default device ID is mezzanine card KX/KX4 */
		media_type = ixgbe_media_type_backplane;
		break;
	case IXGBE_DEV_ID_82599_SFP:
	case IXGBE_DEV_ID_82599_SFP_FCOE:
	case IXGBE_DEV_ID_82599_SFP_EM:
	case IXGBE_DEV_ID_82599_SFP_SF2:
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599EN_SFP:
		media_type = ixgbe_media_type_fiber;
		break;
	case IXGBE_DEV_ID_82599_CX4:
		media_type = ixgbe_media_type_cx4;
		break;
	case IXGBE_DEV_ID_82599_T3_LOM:
		media_type = ixgbe_media_type_copper;
		break;
	case IXGBE_DEV_ID_82599_LS:
		media_type = ixgbe_media_type_fiber_lco;
		break;
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		media_type = ixgbe_media_type_fiber_qsfp;
		break;
	case IXGBE_DEV_ID_82599_BYPASS:
		media_type = ixgbe_media_type_fiber_fixed;
		hw->phy.multispeed_fiber = TRUE;
		break;
	default:
		media_type = ixgbe_media_type_unknown;
		break;
	}
out:
	return media_type;
}

/**
 * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
 * @hw: pointer to hardware structure
 *
 * Disables link during D3 power down sequence.
 *
 **/
void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
{
	u32 autoc2_reg;
	u16 ee_ctrl_2 = 0;

	DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
	ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);

	if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
	    ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
		autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
		autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
	}
}

/**
 * ixgbe_start_mac_link_82599 - Setup MAC link settings
 * @hw: pointer to hardware structure
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Configures link settings based on values in the ixgbe_hw struct.
 * Restarts the link.  Performs autonegotiation if needed.
 **/
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
			       bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;
	bool got_lock = FALSE;

	DEBUGFUNC("ixgbe_start_mac_link_82599");

	/*  reset_pipeline requires us to hold this lock as it writes to
	 *  AUTOC.
	 */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		status = hw->mac.ops.acquire_swfw_sync(hw,
						       IXGBE_GSSR_MAC_CSR_SM);
		if (status != IXGBE_SUCCESS)
			goto out;

		got_lock = TRUE;
	}

	/* Restart link */
	ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autoneg did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noise during initial link setup */
	msec_delay(50);

out:
	return status;
}

/**
 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
 * @hw: pointer to hardware structure
 *
 * The base drivers may require better control over SFP+ module
 * PHY states.  This includes selectively shutting down the Tx
 * laser on the PHY, effectively halting physical link.
 **/
void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return;

	/* Disable Tx laser; allow 100us to go dark per spec */
	esdp_reg |= IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(100);
}

/**
 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
 * @hw: pointer to hardware structure
 *
 * The base drivers may require better control over SFP+ module
 * PHY states.  This includes selectively turning on the Tx
 * laser on the PHY, effectively starting physical link.
 **/
void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Enable Tx laser; allow 100ms to light up */
	esdp_reg &= ~IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(100);
}

/**
 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
 * @hw: pointer to hardware structure
 *
 * When the driver changes the link speeds that it can support,
 * it sets autotry_restart to TRUE to indicate that we need to
 * initiate a new autotry session with the link partner.  To do
 * so, we set the speed then disable and re-enable the Tx laser, to
 * alert the link partner that it also needs to restart autotry on its
 * end.  This is consistent with true clause 37 autoneg, which also
 * involves a loss of signal.
 **/
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return;

	if (hw->mac.autotry_restart) {
		ixgbe_disable_tx_laser_multispeed_fiber(hw);
		ixgbe_enable_tx_laser_multispeed_fiber(hw);
		hw->mac.autotry_restart = FALSE;
	}
}

/**
 * ixgbe_set_hard_rate_select_speed - Set module link speed
 * @hw: pointer to hardware structure
 * @speed: link speed to set
 *
 * Set module link speed via RS0/RS1 rate select pins.
 */
void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw,
					ixgbe_link_speed speed)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	switch (speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		esdp_reg &= ~IXGBE_ESDP_SDP5;
		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
		break;
	default:
		DEBUGOUT("Invalid fixed module speed\n");
		return;
	}

	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Implements the Intel SmartSpeed algorithm.
 **/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
				    ixgbe_link_speed speed,
				    bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 i, j;
	bool link_up = FALSE;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");

	 /* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate.  This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */
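	/*
	 * Summary of the retry structure below (derived from the code, not
	 * from the SmartSpeed spec): (1) up to IXGBE_SMARTSPEED_MAX_RETRIES
	 * attempts with the full advertisement, (2) one attempt with KR
	 * masked off via hw->phy.smart_speed_active, and (3) a final attempt
	 * with the original advertisement restored if the downshift also
	 * failed to link.
	 */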

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = FALSE;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		/*
		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed, &link_up,
						  FALSE);
			if (status != IXGBE_SUCCESS)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link.  If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = TRUE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * Wait for the controller to acquire link.  600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g. This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Turn SmartSpeed back off. */
	hw->phy.smart_speed_active = FALSE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);

out:
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		DEBUGOUT("Smartspeed has downgraded the link speed "
		"from the maximum advertised\n");
	return status;
}

/**
 * ixgbe_setup_mac_link_82599 - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 **/
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
			       ixgbe_link_speed speed,
			       bool autoneg_wait_to_complete)
{
	bool autoneg = FALSE;
	s32 status = IXGBE_SUCCESS;
	u32 pma_pmd_1g, link_mode;
	u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* AUTOC value at this point in time */
	u32 orig_autoc = 0; /* cached value of the AUTOC register */
	u32 autoc = current_autoc; /* temporary variable used for comparison purposes */
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 links_reg;
	u32 i;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;

	DEBUGFUNC("ixgbe_setup_mac_link_82599");

	/* Check to see if speed passed in is supported. */
	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	if (status)
		goto out;

	speed &= link_capabilities;

	if (speed == 0) {
		ixgbe_disable_tx_laser(hw); /* For fiber */
		ixgbe_set_phy_power(hw, false); /* For copper */
	} else {
		/* In case the previous media setting was none (down) */
		ixgbe_enable_tx_laser(hw); /* For fiber */
		ixgbe_set_phy_power(hw, true); /* For copper */
	}

	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
	if (hw->mac.orig_link_settings_stored)
		orig_autoc = hw->mac.orig_autoc;
	else
		orig_autoc = autoc;

	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;

	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
		/* Set KX4/KX/KR support according to speed requested */
		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
				autoc |= IXGBE_AUTOC_KX4_SUPP;
			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
			    (hw->phy.smart_speed_active == FALSE))
				autoc |= IXGBE_AUTOC_KR_SUPP;
		}
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
		/* Switch from 1G SFI to 10G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
		}
	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
		/* Switch from 10G SFI to 1G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel)
				autoc |= IXGBE_AUTOC_LMS_1G_AN;
			else
				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
		}
	}

	if (autoc != current_autoc) {
		/* Restart link */
		status = hw->mac.ops.prot_autoc_write(hw, autoc, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		/* Only poll for autoneg to complete if specified to do so */
		if (autoneg_wait_to_complete) {
			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
				links_reg = 0; /*Just in case Autoneg time=0*/
				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
					links_reg =
					       IXGBE_READ_REG(hw, IXGBE_LINKS);
					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
						break;
					msec_delay(100);
				}
				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
					status =
						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
					DEBUGOUT("Autoneg did not complete.\n");
				}
			}
		}

		/* Add delay to filter out noise during initial link setup */
		msec_delay(50);
	}

out:
	return status;
}

/**
 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
 *
 * Restarts link on PHY and MAC based on settings passed in.
 **/
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete)
{
	s32 status;

	DEBUGFUNC("ixgbe_setup_copper_link_82599");

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed,
					      autoneg_wait_to_complete);
	/* Set up MAC */
	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);

	return status;
}

/**
 * ixgbe_reset_hw_82599 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
 * reset.
 **/
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
	ixgbe_link_speed link_speed;
	s32 status;
	s32 phy_status = IXGBE_SUCCESS;
	u32 ctrl = 0;
	u32 i, autoc, autoc2;
	u32 curr_lms;
	bool link_up = FALSE;

	DEBUGFUNC("ixgbe_reset_hw_82599");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

	/* PHY ops must be identified and initialized prior to reset */

	/* Identify PHY and related function pointers */
	phy_status = hw->phy.ops.init(hw);

	if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto mac_reset_top;

	/* Setup SFP module if there is one present. */
	if (hw->phy.sfp_setup_needed) {
		phy_status = hw->mac.ops.setup_sfp(hw);
		hw->phy.sfp_setup_needed = FALSE;
	}

	if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto mac_reset_top;

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
		hw->phy.ops.reset(hw);

mac_reset_top:
	/* remember AUTOC from before we reset */
	curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;

mac_reset_retry:
	/*
	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
	 * If link reset is used when link is up, it might reset the PHY when
	 * mng is using it.  If link is down or the flag to force full link
	 * reset is set, then perform link reset.
	 */
	ctrl = IXGBE_CTRL_LNK_RST;
	if (!hw->force_full_reset) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
		if (link_up)
			ctrl = IXGBE_CTRL_RST;
	}

	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear, meaning reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	if (ctrl & IXGBE_CTRL_RST_MASK) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to
	 * allow time for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_retry;
	}

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet.  Otherwise restore the stored original
	 * values since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);

	/* Enable link if disabled in NVM */
	if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
		autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		IXGBE_WRITE_FLUSH(hw);
	}

	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = TRUE;
	} else {

		/* If MNG FW is running on a multi-speed device that
		 * doesn't autoneg without driver support, we need to
		 * leave LMS in the state it was before the MAC reset.
		 * Likewise if we support WoL we don't want to change
		 * the LMS state.
		 */
		if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
		    hw->wol_enabled)
			hw->mac.orig_autoc =
				(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
				curr_lms;

		if (autoc != hw->mac.orig_autoc) {
			status = hw->mac.ops.prot_autoc_write(hw,
							hw->mac.orig_autoc,
							FALSE);
			if (status != IXGBE_SUCCESS)
				goto reset_hw_out;
		}

		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
				   IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		/* Save the SAN MAC RAR index */
		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;

		hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for this RAR */
		hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
				       IXGBE_CLEAR_VMDQ_ALL);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

reset_hw_out:
	if (phy_status != IXGBE_SUCCESS)
		status = phy_status;

	return status;
}

/**
 * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
 * @hw: pointer to hardware structure
 * @fdircmd: current value of FDIRCMD register
 */
static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
{
	int i;

	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
		*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
		if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
			return IXGBE_SUCCESS;
		usec_delay(10);
	}

	return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
}

/**
 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
 * @hw: pointer to hardware structure
 **/
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
	s32 err;
	int i;
	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	u32 fdircmd;
	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;

	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");

	/*
	 * Before starting the reinitialization process,
	 * FDIRCMD.CMD must be zero.
	 */
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
		return err;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * The 82599 adapters' flow director init flow cannot be restarted.
	 * Work around this 82599 silicon errata by performing the following
	 * steps before re-writing the FDIRCTRL control register with the
	 * same value:
	 * - write 1 to bit 8 of the FDIRCMD register, then
	 * - write 0 to bit 8 of the FDIRCMD register
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * Clear FDIR Hash register to clear any leftover hashes
	 * waiting to be programmed.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll init-done after we write FDIRCTRL register */
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
				   IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}
	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	/* Clear FDIR statistics registers (read to clear) */
	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register
 **/
static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	int i;

	DEBUGFUNC("ixgbe_fdir_enable_82599");

	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);

	/*
	 * Poll init-done after we write the register.  Estimated times:
	 *      10G: PBALLOC = 11b, timing is 60us
	 *       1G: PBALLOC = 11b, timing is 600us
	 *     100M: PBALLOC = 11b, timing is 6ms
	 *
	 *     Multiply these timings by 4 if under full Rx load
	 *
	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	 * for now.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
				   IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}

	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
		DEBUGOUT("Flow Director poll time exceeded!\n");
}

/**
 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register, initially
 *	     contains just the value of the Rx packet buffer allocation
 **/
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	DEBUGFUNC("ixgbe_init_fdir_signature_82599");

	/*
	 * Continue setup of fdirctrl register bits:
	 *  Move the flexible bytes to use the ethertype - shift 6 words
	 *  Set the maximum length per hash bucket to 0xA filters
	 *  Send interrupt when 64 filters are left
	 */
	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register, initially
 *	     contains just the value of the Rx packet buffer allocation
 * @cloud_mode: TRUE - cloud mode, FALSE - other mode
 **/
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
			bool cloud_mode)
{
	UNREFERENCED_1PARAMETER(cloud_mode);
	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");

	/*
	 * Continue setup of fdirctrl register bits:
	 *  Turn perfect match filtering on
	 *  Report hash in RSS field of Rx wb descriptor
	 *  Initialize the drop queue to queue 127
	 *  Move the flexible bytes to use the ethertype - shift 6 words
	 *  Set the maximum length per hash bucket to 0xA filters
	 *  Send interrupt when 64 (0x4 * 16) filters are left
	 */
	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
		    IXGBE_FDIRCTRL_REPORT_STATUS |
		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	if (cloud_mode)
		fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD <<
					IXGBE_FDIRCTRL_FILTERMODE_SHIFT);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue
 * @hw: pointer to hardware structure
 * @dropqueue: Rx queue index used for the dropped packets
 **/
void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue)
{
	u32 fdirctrl;

	DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599");
	/* Clear init done bit and drop queue field */
	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	fdirctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE);

	/* Set drop queue */
	fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
	if ((hw->mac.type == ixgbe_mac_X550) ||
	    (hw->mac.type == ixgbe_mac_X550EM_x) ||
	    (hw->mac.type == ixgbe_mac_X550EM_a))
		fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);
}

/*
 * These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15
 */
#define IXGBE_ATR_COMMON_HASH_KEY \
		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
		common_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)
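
/*
 * Reading the macro above: iteration n tests bit n (against the low hash
 * dword) and bit n + 16 (against the high hash dword) of the two static
 * hash keys.  A key bit that is set folds the correspondingly shifted
 * input dword into that key's hash; bits set in both keys feed the shared
 * common_hash instead, so the work is done once and XORed into both
 * results at the end of the function below.  Because the keys are
 * compile-time constants, the conditionals fold away and only the live
 * XOR/shift instructions remain.
 */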

/**
 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
 * @input: input bitstream to compute the hash on
 * @common: compressed common input dword
 *
 * This function is almost identical to ixgbe_atr_compute_perfect_hash_82599
 * below, but contains several optimizations such as unwinding all of the
 * loops, letting the compiler work out all of the conditional ifs since the
 * keys are static defines, and computing two keys at once since the hashed
 * dword stream will be the same for both keys.
 **/
u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
				     union ixgbe_atr_hash_dword common)
{
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input.dword);

	/* generate common hash dword */
	hi_hash_dword = IXGBE_NTOHL(common.dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the VLAN until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process the remaining 30 bits of the key */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);

	/* combine common_hash result with signature and bucket hashes */
	bucket_hash ^= common_hash;
	bucket_hash &= IXGBE_ATR_HASH_MASK;

	sig_hash ^= common_hash << 16;
	sig_hash &= IXGBE_ATR_HASH_MASK << 16;

	/* return completed signature hash */
	return sig_hash ^ bucket_hash;
}
1548 
1549 /**
1550  * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
1551  * @hw: pointer to hardware structure
1552  * @input: unique input dword
1553  * @common: compressed common input dword
1554  * @queue: queue index to direct traffic to
1555  *
1556  * Note that the tunnel bit in input must not be set when the hardware
1557  * tunneling support does not exist.
1558  **/
ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw * hw,union ixgbe_atr_hash_dword input,union ixgbe_atr_hash_dword common,u8 queue)1559 void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1560 					   union ixgbe_atr_hash_dword input,
1561 					   union ixgbe_atr_hash_dword common,
1562 					   u8 queue)
1563 {
1564 	u64 fdirhashcmd;
1565 	u8 flow_type;
1566 	bool tunnel;
1567 	u32 fdircmd;
1568 
1569 	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
1570 
1571 	/*
1572 	 * Get the flow_type in order to program FDIRCMD properly:
1573 	 * the lowest 2 bits are FDIRCMD.L4TYPE, the third lowest bit is
1574 	 * FDIRCMD.IPV6, and the fifth is FDIRCMD.TUNNEL_FILTER
1575 	 */
1576 	tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
1577 	flow_type = input.formatted.flow_type &
1578 		    (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
1579 	switch (flow_type) {
1580 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
1581 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
1582 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1583 	case IXGBE_ATR_FLOW_TYPE_TCPV6:
1584 	case IXGBE_ATR_FLOW_TYPE_UDPV6:
1585 	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
1586 		break;
1587 	default:
1588 		DEBUGOUT(" Error on flow type input\n");
1589 		return;
1590 	}
1591 
1592 	/* configure FDIRCMD register */
1593 	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1594 		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1595 	fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1596 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1597 	if (tunnel)
1598 		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
1599 
1600 	/*
1601 	 * The lower 32 bits of fdirhashcmd are for FDIRHASH, the upper 32 bits
1602 	 * are for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
1603 	 */
1604 	fdirhashcmd = (u64)fdircmd << 32;
1605 	fdirhashcmd |= (u64)ixgbe_atr_compute_sig_hash_82599(input, common);
1606 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1607 
1608 	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1609 
1610 	return;
1611 }
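
/*
 * Editor's note (illustrative): layout of the 64-bit FDIRHASH/FDIRCMD
 * write issued above.
 *
 *	bits 63:32	FDIRCMD (command, flow type, queue, tunnel bit)
 *	bits 31:16	signature hash	\  written together as the
 *	bits 14:0	bucket hash	/  32-bit FDIRHASH register
 */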
1612 
1613 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
1614 do { \
1615 	u32 n = (_n); \
1616 	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1617 		bucket_hash ^= lo_hash_dword >> n; \
1618 	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1619 		bucket_hash ^= hi_hash_dword >> n; \
1620 } while (0)
1621 
1622 /**
1623  * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
1624  * @input: input bitstream to compute the hash on
1625  * @input_mask: mask for the input bitstream
1626  *
1627  * This function serves two main purposes.  First it applies the input_mask
1628  * to the atr_input resulting in a cleaned up atr_input data stream.
1629  * Secondly it computes the hash and stores it in the bkt_hash field at
1630  * the end of the input byte stream.  This way it will be available for
1631  * future use without needing to recompute the hash.
1632  **/
1633 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1634 					  union ixgbe_atr_input *input_mask)
1635 {
1636 
1637 	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1638 	u32 bucket_hash = 0;
1639 	u32 hi_dword = 0;
1640 	u32 i = 0;
1641 
1642 	/* Apply masks to input data */
1643 	for (i = 0; i < 14; i++)
1644 		input->dword_stream[i]  &= input_mask->dword_stream[i];
1645 
1646 	/* record the flow_vm_vlan bits as they are a key part to the hash */
1647 	flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
1648 
1649 	/* generate common hash dword */
1650 	for (i = 1; i <= 13; i++)
1651 		hi_dword ^= input->dword_stream[i];
1652 	hi_hash_dword = IXGBE_NTOHL(hi_dword);
1653 
1654 	/* low dword is word swapped version of common */
1655 	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1656 
1657 	/* apply flow ID/VM pool/VLAN ID bits to hash words */
1658 	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1659 
1660 	/* Process bits 0 and 16 */
1661 	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
1662 
1663 	/*
1664 	 * apply flow ID/VM pool/VLAN ID bits to the lo hash dword; we had to
1665 	 * delay this because bit 0 of the stream should not be processed,
1666 	 * so we do not add the VLAN until after bit 0 has been processed
1667 	 */
1668 	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1669 
1670 	/* Process the remaining 30 bits of the key */
1671 	for (i = 1; i <= 15; i++)
1672 		IXGBE_COMPUTE_BKT_HASH_ITERATION(i);
1673 
1674 	/*
1675 	 * Limit hash to 13 bits since max bucket count is 8K.
1676 	 * Store result at the end of the input stream.
1677 	 */
1678 	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
1679 }
1680 
1681 /**
1682  * ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks
1683  * @input_mask: mask to be bit swapped
1684  *
1685  * The source and destination port masks for flow director are bit swapped
1686  * in that bit 15 affects bit 0, bit 14 affects bit 1, bit 13 affects
1687  * bit 2, and so on.  To generate a correctly swapped value we need to
1688  * bit swap the mask, and that is what this function accomplishes.
1689  **/
1690 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1691 {
1692 	u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
1693 	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1694 	mask |= (u32)IXGBE_NTOHS(input_mask->formatted.src_port);
1695 	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1696 	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1697 	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1698 	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
1699 }
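
/*
 * Editor's note (illustrative): the shift ladder above reverses the bits
 * within each 16-bit half while leaving the halves in place.  For
 * example, a src_port mask of 0x8000 (bit 15) becomes 0x0001 (bit 0)
 * after the four swap steps, while a full mask of 0xFFFF is unchanged.
 */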
1700 
1701 /*
1702  * These two macros are meant to address the fact that we have registers
1703  * that are either all or in part big-endian.  As a result on big-endian
1704  * systems we will end up byte swapping the value to little-endian before
1705  * it is byte swapped again and written to the hardware in the original
1706  * big-endian format.
1707  */
1708 #define IXGBE_STORE_AS_BE32(_value) \
1709 	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
1710 	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
1711 
1712 #define IXGBE_WRITE_REG_BE32(a, reg, value) \
1713 	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
1714 
1715 #define IXGBE_STORE_AS_BE16(_value) \
1716 	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
1717 
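/*
 * Editor's note (illustrative): worked example of the BE32 helpers.
 * Suppose a big-endian field holds the bytes 11 22 33 44.  On a
 * little-endian host the u32 reads as 0x44332211; IXGBE_NTOHL gives
 * 0x11223344 and IXGBE_STORE_AS_BE32 returns 0x44332211.  On a
 * big-endian host the u32 reads as 0x11223344; IXGBE_NTOHL is a no-op
 * and IXGBE_STORE_AS_BE32 again returns 0x44332211.  Either way
 * IXGBE_WRITE_REG receives the same value, so the register ends up in
 * the original big-endian format as the block comment above describes.
 */
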
1718 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
1719 				    union ixgbe_atr_input *input_mask, bool cloud_mode)
1720 {
1721 	/* mask IPv6 since it is currently not supported */
1722 	u32 fdirm = IXGBE_FDIRM_DIPv6;
1723 	u32 fdirtcpm;
1724 	u32 fdirip6m;
1725 	UNREFERENCED_1PARAMETER(cloud_mode);
1726 	DEBUGFUNC("ixgbe_fdir_set_input_mask_82599");
1727 
1728 	/*
1729 	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
1730 	 * are zero, then assume a full mask for that field.  Also assume that
1731 	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
1732 	 * cannot be masked out in this implementation.
1733 	 *
1734 	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
1735 	 * point in time.
1736 	 */
1737 
1738 	/* verify bucket hash is cleared on hash generation */
1739 	if (input_mask->formatted.bkt_hash)
1740 		DEBUGOUT(" bucket hash should always be 0 in mask\n");
1741 
1742 	/* Program FDIRM and verify partial masks */
1743 	switch (input_mask->formatted.vm_pool & 0x7F) {
1744 	case 0x0:
1745 		fdirm |= IXGBE_FDIRM_POOL;	/* fall through */
1746 	case 0x7F:
1747 		break;
1748 	default:
1749 		DEBUGOUT(" Error on vm pool mask\n");
1750 		return IXGBE_ERR_CONFIG;
1751 	}
1752 
1753 	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
1754 	case 0x0:
1755 		fdirm |= IXGBE_FDIRM_L4P;
1756 		if (input_mask->formatted.dst_port ||
1757 		    input_mask->formatted.src_port) {
1758 			DEBUGOUT(" Error on src/dst port mask\n");
1759 			return IXGBE_ERR_CONFIG;
1760 		}
1761 	case IXGBE_ATR_L4TYPE_MASK:
1762 		break;
1763 	default:
1764 		DEBUGOUT(" Error on flow type mask\n");
1765 		return IXGBE_ERR_CONFIG;
1766 	}
1767 
1768 	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
1769 	case 0x0000:
1770 		/* mask VLAN ID */
1771 		fdirm |= IXGBE_FDIRM_VLANID;
1772 		/* fall through */
1773 	case 0x0FFF:
1774 		/* mask VLAN priority */
1775 		fdirm |= IXGBE_FDIRM_VLANP;
1776 		break;
1777 	case 0xE000:
1778 		/* mask VLAN ID only */
1779 		fdirm |= IXGBE_FDIRM_VLANID;
1780 		/* fall through */
1781 	case 0xEFFF:
1782 		/* no VLAN fields masked */
1783 		break;
1784 	default:
1785 		DEBUGOUT(" Error on VLAN mask\n");
1786 		return IXGBE_ERR_CONFIG;
1787 	}
1788 
1789 	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
1790 	case 0x0000:
1791 		/* Mask Flex Bytes */
1792 		fdirm |= IXGBE_FDIRM_FLEX;
1793 		/* fall through */
1794 	case 0xFFFF:
1795 		break;
1796 	default:
1797 		DEBUGOUT(" Error on flexible byte mask\n");
1798 		return IXGBE_ERR_CONFIG;
1799 	}
1800 
1801 	if (cloud_mode) {
1802 		fdirm |= IXGBE_FDIRM_L3P;
1803 		fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
1804 		fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
1805 
1806 		switch (input_mask->formatted.inner_mac[0] & 0xFF) {
1807 		case 0x00:
1808 			/* Mask inner MAC, fall through */
1809 			fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC;
1810 		case 0xFF:
1811 			break;
1812 		default:
1813 			DEBUGOUT(" Error on inner_mac byte mask\n");
1814 			return IXGBE_ERR_CONFIG;
1815 		}
1816 
1817 		switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) {
1818 		case 0x0:
1819 			/* Mask vxlan id */
1820 			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI;
1821 			break;
1822 		case 0x00FFFFFF:
1823 			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
1824 			break;
1825 		case 0xFFFFFFFF:
1826 			break;
1827 		default:
1828 			DEBUGOUT(" Error on TNI/VNI byte mask\n");
1829 			return IXGBE_ERR_CONFIG;
1830 		}
1831 
1832 		switch (input_mask->formatted.tunnel_type & 0xFFFF) {
1833 		case 0x0:
1834 			/* Mask tunnel type, fall through */
1835 			fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
1836 		case 0xFFFF:
1837 			break;
1838 		default:
1839 			DEBUGOUT(" Error on tunnel type byte mask\n");
1840 			return IXGBE_ERR_CONFIG;
1841 		}
1842 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);
1843 
1844 		/* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSCTPM,
1845 		 * FDIRSIP4M and FDIRDIP4M in cloud mode to allow
1846 		 * L3/L3 packets to tunnel.
1847 		 */
1848 		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
1849 		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
1850 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
1851 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
1852 		switch (hw->mac.type) {
1853 		case ixgbe_mac_X550:
1854 		case ixgbe_mac_X550EM_x:
1855 		case ixgbe_mac_X550EM_a:
1856 			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
1857 			break;
1858 		default:
1859 			break;
1860 		}
1861 	}
1862 
1863 	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
1864 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1865 
1866 	if (!cloud_mode) {
1867 		/* store the TCP/UDP port masks, bit reversed from port
1868 		 * layout */
1869 		fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
1870 
1871 		/* write both the same so that UDP and TCP use the same mask */
1872 		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
1873 		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
1874 		/* also use it for SCTP */
1875 		switch (hw->mac.type) {
1876 		case ixgbe_mac_X550:
1877 		case ixgbe_mac_X550EM_x:
1878 		case ixgbe_mac_X550EM_a:
1879 			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
1880 			break;
1881 		default:
1882 			break;
1883 		}
1884 
1885 		/* store source and destination IP masks (big-endian) */
1886 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1887 				     ~input_mask->formatted.src_ip[0]);
1888 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
1889 				     ~input_mask->formatted.dst_ip[0]);
1890 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, 0xFFFFFFFF);
1891 	}
1892 	return IXGBE_SUCCESS;
1893 }
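
/*
 * Editor's sketch (compiled out): a minimal caller that masks on L4 type
 * and both ports only.  The field values are illustrative; a real caller
 * derives them from its filter specification.
 */
#if 0
	union ixgbe_atr_input mask;
	s32 rc;

	memset(&mask, 0, sizeof(mask));
	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_MASK; /* match L4 type */
	mask.formatted.src_port = 0xFFFF;	/* match source port exactly */
	mask.formatted.dst_port = 0xFFFF;	/* match dest port exactly */
	rc = ixgbe_fdir_set_input_mask_82599(hw, &mask, FALSE);
#endif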
1894 
1895 s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
1896 					  union ixgbe_atr_input *input,
1897 					  u16 soft_id, u8 queue, bool cloud_mode)
1898 {
1899 	u32 fdirport, fdirvlan, fdirhash, fdircmd;
1900 	u32 addr_low, addr_high;
1901 	u32 cloud_type = 0;
1902 	s32 err;
1903 	UNREFERENCED_1PARAMETER(cloud_mode);
1904 
1905 	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
1906 	if (!cloud_mode) {
1907 		/* currently IPv6 is not supported, must be programmed with 0 */
1908 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
1909 				     input->formatted.src_ip[0]);
1910 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
1911 				     input->formatted.src_ip[1]);
1912 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
1913 				     input->formatted.src_ip[2]);
1914 
1915 		/* record the source address (big-endian) */
1916 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA,
1917 			input->formatted.src_ip[0]);
1918 
1919 		/* record the first 32 bits of the destination address
1920 		 * (big-endian) */
1921 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA,
1922 			input->formatted.dst_ip[0]);
1923 
1924 		/* record source and destination port (little-endian) */
1925 		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
1926 		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
1927 		fdirport |= (u32)IXGBE_NTOHS(input->formatted.src_port);
1928 		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
1929 	}
1930 
1931 	/* record VLAN (little-endian) and flex_bytes (big-endian) */
1932 	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
1933 	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
1934 	fdirvlan |= (u32)IXGBE_NTOHS(input->formatted.vlan_id);
1935 	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
1936 
1937 	if (cloud_mode) {
1938 		if (input->formatted.tunnel_type != 0)
1939 			cloud_type = 0x80000000;
1940 
1941 		addr_low = ((u32)input->formatted.inner_mac[0] |
1942 				((u32)input->formatted.inner_mac[1] << 8) |
1943 				((u32)input->formatted.inner_mac[2] << 16) |
1944 				((u32)input->formatted.inner_mac[3] << 24));
1945 		addr_high = ((u32)input->formatted.inner_mac[4] |
1946 				((u32)input->formatted.inner_mac[5] << 8));
1947 		cloud_type |= addr_high;
1948 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low);
1949 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type);
1950 		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni);
1951 	}
1952 
1953 	/* configure FDIRHASH register */
1954 	fdirhash = input->formatted.bkt_hash;
1955 	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1956 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1957 
1958 	/*
1959 	 * flush all previous writes to make certain registers are
1960 	 * programmed prior to issuing the command
1961 	 */
1962 	IXGBE_WRITE_FLUSH(hw);
1963 
1964 	/* configure FDIRCMD register */
1965 	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1966 		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1967 	if (queue == IXGBE_FDIR_DROP_QUEUE)
1968 		fdircmd |= IXGBE_FDIRCMD_DROP;
1969 	if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK)
1970 		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
1971 	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1972 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1973 	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
1974 
1975 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1976 	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
1977 	if (err) {
1978 		DEBUGOUT("Flow Director command did not complete!\n");
1979 		return err;
1980 	}
1981 
1982 	return IXGBE_SUCCESS;
1983 }
1984 
1985 s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
1986 					  union ixgbe_atr_input *input,
1987 					  u16 soft_id)
1988 {
1989 	u32 fdirhash;
1990 	u32 fdircmd;
1991 	s32 err;
1992 
1993 	/* configure FDIRHASH register */
1994 	fdirhash = input->formatted.bkt_hash;
1995 	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1996 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1997 
1998 	/* flush hash to HW */
1999 	IXGBE_WRITE_FLUSH(hw);
2000 
2001 	/* Query if filter is present */
2002 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
2003 
2004 	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
2005 	if (err) {
2006 		DEBUGOUT("Flow Director command did not complete!\n");
2007 		return err;
2008 	}
2009 
2010 	/* if filter exists in hardware then remove it */
2011 	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
2012 		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
2013 		IXGBE_WRITE_FLUSH(hw);
2014 		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
2015 				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
2016 	}
2017 
2018 	return IXGBE_SUCCESS;
2019 }
2020 
2021 /**
2022  * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
2023  * @hw: pointer to hardware structure
2024  * @input: input bitstream
2025  * @input_mask: mask for the input bitstream
2026  * @soft_id: software index for the filters
2027  * @queue: queue index to direct traffic to
2028  * @cloud_mode: cloud/tunnel mode flag, passed through to the mask and write helpers
2029  *
2030  * Note that the caller to this function must lock before calling, since the
2031  * hardware writes must be protected from one another.
2032  **/
2033 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
2034 					union ixgbe_atr_input *input,
2035 					union ixgbe_atr_input *input_mask,
2036 					u16 soft_id, u8 queue, bool cloud_mode)
2037 {
2038 	s32 err = IXGBE_ERR_CONFIG;
2039 	UNREFERENCED_1PARAMETER(cloud_mode);
2040 
2041 	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
2042 
2043 	/*
2044 	 * Check flow_type formatting, and bail out before we touch the hardware
2045 	 * if there's a configuration issue
2046 	 */
2047 	switch (input->formatted.flow_type) {
2048 	case IXGBE_ATR_FLOW_TYPE_IPV4:
2049 	case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4:
2050 		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
2051 		if (input->formatted.dst_port || input->formatted.src_port) {
2052 			DEBUGOUT(" Error on src/dst port\n");
2053 			return IXGBE_ERR_CONFIG;
2054 		}
2055 		break;
2056 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
2057 	case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4:
2058 		if (input->formatted.dst_port || input->formatted.src_port) {
2059 			DEBUGOUT(" Error on src/dst port\n");
2060 			return IXGBE_ERR_CONFIG;
2061 		}
2062 		/* fall through */
2063 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
2064 	case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
2065 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
2066 	case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4:
2067 		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2068 						  IXGBE_ATR_L4TYPE_MASK;
2069 		break;
2070 	default:
2071 		DEBUGOUT(" Error on flow type input\n");
2072 		return err;
2073 	}
2074 
2075 	/* program input mask into the HW */
2076 	err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode);
2077 	if (err)
2078 		return err;
2079 
2080 	/* apply mask and compute/store hash */
2081 	ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
2082 
2083 	/* program filters to filter memory */
2084 	return ixgbe_fdir_write_perfect_filter_82599(hw, input,
2085 						     soft_id, queue, cloud_mode);
2086 }
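
/*
 * Editor's sketch (compiled out): steering TCPv4 traffic with
 * destination port 80 to queue 5.  soft_id 1 is an arbitrary software
 * index, and the caller must hold the appropriate lock (see the block
 * comment above).  htons() stands in for whatever byte-order helper the
 * caller has available.
 */
#if 0
	union ixgbe_atr_input in, msk;

	memset(&in, 0, sizeof(in));
	memset(&msk, 0, sizeof(msk));
	in.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
	in.formatted.dst_port = htons(80);	/* network byte order */
	msk.formatted.dst_port = 0xFFFF;	/* match dest port exactly */
	(void)ixgbe_fdir_add_perfect_filter_82599(hw, &in, &msk,
	    1 /* soft_id */, 5 /* queue */, FALSE);
#endif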
2087 
2088 /**
2089  * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
2090  * @hw: pointer to hardware structure
2091  * @reg: analog register to read
2092  * @val: read value
2093  *
2094  * Performs read operation to Omer analog register specified.
2095  **/
2096 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
2097 {
2098 	u32  core_ctl;
2099 
2100 	DEBUGFUNC("ixgbe_read_analog_reg8_82599");
2101 
2102 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
2103 			(reg << 8));
2104 	IXGBE_WRITE_FLUSH(hw);
2105 	usec_delay(10);
2106 	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
2107 	*val = (u8)core_ctl;
2108 
2109 	return IXGBE_SUCCESS;
2110 }
2111 
2112 /**
2113  * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
2114  * @hw: pointer to hardware structure
2115  * @reg: atlas register to write
2116  * @val: value to write
2117  *
2118  * Performs write operation to Omer analog register specified.
2119  **/
2120 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
2121 {
2122 	u32  core_ctl;
2123 
2124 	DEBUGFUNC("ixgbe_write_analog_reg8_82599");
2125 
2126 	core_ctl = (reg << 8) | val;
2127 	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
2128 	IXGBE_WRITE_FLUSH(hw);
2129 	usec_delay(10);
2130 
2131 	return IXGBE_SUCCESS;
2132 }
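
/*
 * Editor's note (illustrative): the CORECTL protocol used by the two
 * functions above packs the analog register address into bits 15:8 and
 * the data byte into bits 7:0.  Writing 0xA5 to analog register 0x1B
 * thus issues CORECTL = 0x1BA5; a read of register 0x1B issues
 * CORECTL = IXGBE_CORECTL_WRITE_CMD | 0x1B00 and then returns the value
 * in the low byte of CORECTL.
 */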
2133 
2134 /**
2135  * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
2136  * @hw: pointer to hardware structure
2137  *
2138  * Starts the hardware using the generic start_hw function
2139  * and the generation-2 (gen2) start_hw function.
2140  * Then performs revision-specific operations, if any.
2141  **/
2142 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
2143 {
2144 	s32 ret_val = IXGBE_SUCCESS;
2145 
2146 	DEBUGFUNC("ixgbe_start_hw_82599");
2147 
2148 	ret_val = ixgbe_start_hw_generic(hw);
2149 	if (ret_val != IXGBE_SUCCESS)
2150 		goto out;
2151 
2152 	ixgbe_start_hw_gen2(hw);
2153 
2154 	/* We need to run link autotry after the driver loads */
2155 	hw->mac.autotry_restart = TRUE;
2156 
2157 	if (ret_val == IXGBE_SUCCESS)
2158 		ret_val = ixgbe_verify_fw_version_82599(hw);
2159 out:
2160 	return ret_val;
2161 }
2162 
2163 /**
2164  * ixgbe_identify_phy_82599 - Get physical layer module
2165  * @hw: pointer to hardware structure
2166  *
2167  * Determines the physical layer module found on the current adapter.
2168  * If PHY already detected, maintains current PHY type in hw struct,
2169  * otherwise executes the PHY detection routine.
2170  **/
2171 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
2172 {
2173 	s32 status;
2174 
2175 	DEBUGFUNC("ixgbe_identify_phy_82599");
2176 
2177 	/* Detect PHY if not unknown - returns success if already detected. */
2178 	status = ixgbe_identify_phy_generic(hw);
2179 	if (status != IXGBE_SUCCESS) {
2180 		/* 82599 10GBASE-T requires an external PHY */
2181 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
2182 			return status;
2183 		else
2184 			status = ixgbe_identify_module_generic(hw);
2185 	}
2186 
2187 	/* Set PHY type none if no PHY detected */
2188 	if (hw->phy.type == ixgbe_phy_unknown) {
2189 		hw->phy.type = ixgbe_phy_none;
2190 		return IXGBE_SUCCESS;
2191 	}
2192 
2193 	/* Return error if SFP module has been detected but is not supported */
2194 	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
2195 		return IXGBE_ERR_SFP_NOT_SUPPORTED;
2196 
2197 	return status;
2198 }
2199 
2200 /**
2201  * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
2202  * @hw: pointer to hardware structure
2203  *
2204  * Determines physical layer capabilities of the current configuration.
2205  **/
2206 u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
2207 {
2208 	u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
2209 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2210 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2211 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
2212 	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
2213 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
2214 	u16 ext_ability = 0;
2215 
2216 	DEBUGFUNC("ixgbe_get_supported_physical_layer_82599");
2217 
2218 	hw->phy.ops.identify(hw);
2219 
2220 	switch (hw->phy.type) {
2221 	case ixgbe_phy_tn:
2222 	case ixgbe_phy_cu_unknown:
2223 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
2224 		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
2225 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
2226 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2227 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
2228 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
2229 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
2230 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
2231 		goto out;
2232 	default:
2233 		break;
2234 	}
2235 
2236 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
2237 	case IXGBE_AUTOC_LMS_1G_AN:
2238 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
2239 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
2240 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
2241 			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
2242 			goto out;
2243 		} else
2244 			/* SFI mode so read SFP module */
2245 			goto sfp_check;
2246 		break;
2247 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
2248 		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
2249 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
2250 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
2251 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2252 		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
2253 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
2254 		goto out;
2255 		break;
2256 	case IXGBE_AUTOC_LMS_10G_SERIAL:
2257 		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
2258 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2259 			goto out;
2260 		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
2261 			goto sfp_check;
2262 		break;
2263 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
2264 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2265 		if (autoc & IXGBE_AUTOC_KX_SUPP)
2266 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2267 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
2268 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2269 		if (autoc & IXGBE_AUTOC_KR_SUPP)
2270 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2271 		goto out;
2272 		break;
2273 	default:
2274 		goto out;
2275 		break;
2276 	}
2277 
2278 sfp_check:
2279 	/* SFP check must be done last since DA modules are sometimes used to
2280 	 * test KR mode - we need to identify KR mode before the SFP module.
2281 	 * Call identify_sfp because the pluggable module may have changed. */
2282 	physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
2283 out:
2284 	return physical_layer;
2285 }
2286 
2287 /**
2288  * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2289  * @hw: pointer to hardware structure
2290  * @regval: register value to write to RXCTRL
2291  *
2292  * Enables the Rx DMA unit for 82599
2293  **/
2294 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2295 {
2296 
2297 	DEBUGFUNC("ixgbe_enable_rx_dma_82599");
2298 
2299 	/*
2300 	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2301 	 * If traffic is incoming before we enable the Rx unit, it could hang
2302 	 * the Rx DMA unit.  Therefore, make sure the security engine is
2303 	 * completely disabled prior to enabling the Rx unit.
2304 	 */
2305 
2306 	hw->mac.ops.disable_sec_rx_path(hw);
2307 
2308 	if (regval & IXGBE_RXCTRL_RXEN)
2309 		ixgbe_enable_rx(hw);
2310 	else
2311 		ixgbe_disable_rx(hw);
2312 
2313 	hw->mac.ops.enable_sec_rx_path(hw);
2314 
2315 	return IXGBE_SUCCESS;
2316 }
2317 
2318 /**
2319  * ixgbe_verify_fw_version_82599 - verify FW version for 82599
2320  * @hw: pointer to hardware structure
2321  *
2322  * Verifies that the installed firmware version is 0.6 or higher
2323  * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2324  *
2325  * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2326  * if the FW version is not supported.
2327  **/
2328 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2329 {
2330 	s32 status = IXGBE_ERR_EEPROM_VERSION;
2331 	u16 fw_offset, fw_ptp_cfg_offset;
2332 	u16 fw_version;
2333 
2334 	DEBUGFUNC("ixgbe_verify_fw_version_82599");
2335 
2336 	/* firmware check is only necessary for SFI devices */
2337 	if (hw->phy.media_type != ixgbe_media_type_fiber) {
2338 		status = IXGBE_SUCCESS;
2339 		goto fw_version_out;
2340 	}
2341 
2342 	/* get the offset to the Firmware Module block */
2343 	if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
2344 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2345 			      "eeprom read at offset %d failed", IXGBE_FW_PTR);
2346 		return IXGBE_ERR_EEPROM_VERSION;
2347 	}
2348 
2349 	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2350 		goto fw_version_out;
2351 
2352 	/* get the offset to the Pass Through Patch Configuration block */
2353 	if (hw->eeprom.ops.read(hw, (fw_offset +
2354 				 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2355 				 &fw_ptp_cfg_offset)) {
2356 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2357 			      "eeprom read at offset %d failed",
2358 			      fw_offset +
2359 			      IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
2360 		return IXGBE_ERR_EEPROM_VERSION;
2361 	}
2362 
2363 	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2364 		goto fw_version_out;
2365 
2366 	/* get the firmware version */
2367 	if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2368 			    IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
2369 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2370 			      "eeprom read at offset %d failed",
2371 			      fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
2372 		return IXGBE_ERR_EEPROM_VERSION;
2373 	}
2374 
2375 	if (fw_version > 0x5)
2376 		status = IXGBE_SUCCESS;
2377 
2378 fw_version_out:
2379 	return status;
2380 }
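
/*
 * Editor's sketch (compiled out): the EEPROM pointer-chase step used
 * repeatedly above, factored into a hypothetical helper.  A pointer word
 * of 0 or 0xFFFF means the block is not programmed.
 */
#if 0
static s32 eeprom_follow_ptr(struct ixgbe_hw *hw, u16 ptr_offset, u16 *out)
{
	u16 val;

	if (hw->eeprom.ops.read(hw, ptr_offset, &val))
		return IXGBE_ERR_EEPROM_VERSION;	/* read failed */
	if (val == 0 || val == 0xFFFF)
		return IXGBE_ERR_EEPROM_VERSION;	/* unprogrammed */
	*out = val;
	return IXGBE_SUCCESS;
}
#endif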
2381 
2382 /**
2383  * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
2384  * @hw: pointer to hardware structure
2385  *
2386  * Returns TRUE if the LESM FW module is present and enabled. Otherwise
2387  * returns FALSE. Smart Speed must be disabled if LESM FW module is enabled.
2388  **/
2389 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2390 {
2391 	bool lesm_enabled = FALSE;
2392 	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
2393 	s32 status;
2394 
2395 	DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
2396 
2397 	/* get the offset to the Firmware Module block */
2398 	status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2399 
2400 	if ((status != IXGBE_SUCCESS) ||
2401 	    (fw_offset == 0) || (fw_offset == 0xFFFF))
2402 		goto out;
2403 
2404 	/* get the offset to the LESM Parameters block */
2405 	status = hw->eeprom.ops.read(hw, (fw_offset +
2406 				     IXGBE_FW_LESM_PARAMETERS_PTR),
2407 				     &fw_lesm_param_offset);
2408 
2409 	if ((status != IXGBE_SUCCESS) ||
2410 	    (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
2411 		goto out;
2412 
2413 	/* get the LESM state word */
2414 	status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2415 				     IXGBE_FW_LESM_STATE_1),
2416 				     &fw_lesm_state);
2417 
2418 	if ((status == IXGBE_SUCCESS) &&
2419 	    (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2420 		lesm_enabled = TRUE;
2421 
2422 out:
2423 	return lesm_enabled;
2424 }
2425 
2426 /**
2427  * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
2428  * the fastest available method
2429  *
2430  * @hw: pointer to hardware structure
2431  * @offset: offset of word in EEPROM to read
2432  * @words: number of words
2433  * @data: word(s) read from the EEPROM
2434  *
2435  * Retrieves 16 bit word(s) read from EEPROM
2436  **/
2437 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2438 					  u16 words, u16 *data)
2439 {
2440 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2441 	s32 ret_val = IXGBE_ERR_CONFIG;
2442 
2443 	DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
2444 
2445 	/*
2446 	 * If EEPROM is detected and can be addressed using 14 bits,
2447 	 * use EERD; otherwise use bit bang
2448 	 */
2449 	if ((eeprom->type == ixgbe_eeprom_spi) &&
2450 	    (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
2451 		ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
2452 							 data);
2453 	else
2454 		ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
2455 								    words,
2456 								    data);
2457 
2458 	return ret_val;
2459 }
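
/*
 * Editor's note (illustrative): IXGBE_EERD_MAX_ADDR is the last word
 * reachable through the 14-bit EERD address field, so a read of 0x100
 * words at offset 0x3F00 ends at word 0x3FFF and can use EERD, while
 * one more word would force the bit-bang path.
 */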
2460 
2461 /**
2462  * ixgbe_read_eeprom_82599 - Read EEPROM word using
2463  * the fastest available method
2464  *
2465  * @hw: pointer to hardware structure
2466  * @offset: offset of word in the EEPROM to read
2467  * @data: word read from the EEPROM
2468  *
2469  * Reads a 16 bit word from the EEPROM
2470  **/
2471 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2472 				   u16 offset, u16 *data)
2473 {
2474 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2475 	s32 ret_val = IXGBE_ERR_CONFIG;
2476 
2477 	DEBUGFUNC("ixgbe_read_eeprom_82599");
2478 
2479 	/*
2480 	 * If EEPROM is detected and can be addressed using 14 bits,
2481 	 * use EERD otherwise use bit bang
2482 	 */
2483 	if ((eeprom->type == ixgbe_eeprom_spi) &&
2484 	    (offset <= IXGBE_EERD_MAX_ADDR))
2485 		ret_val = ixgbe_read_eerd_generic(hw, offset, data);
2486 	else
2487 		ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2488 
2489 	return ret_val;
2490 }
2491 
2492 /**
2493  * ixgbe_reset_pipeline_82599 - perform pipeline reset
2494  *
2495  * @hw: pointer to hardware structure
2496  *
2497  * Reset pipeline by asserting Restart_AN together with LMS change to ensure
2498  * full pipeline reset.  This function assumes the SW/FW lock is held.
2499  **/
2500 static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
2501 {
2502 	s32 ret_val;
2503 	u32 anlp1_reg = 0;
2504 	u32 i, autoc_reg, autoc2_reg;
2505 
2506 	/* Enable link if disabled in NVM */
2507 	autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2508 	if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
2509 		autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
2510 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
2511 		IXGBE_WRITE_FLUSH(hw);
2512 	}
2513 
2514 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2515 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2516 	/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
2517 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
2518 			autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
2519 	/* Wait for AN to leave state 0 */
2520 	for (i = 0; i < 10; i++) {
2521 		msec_delay(4);
2522 		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2523 		if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
2524 			break;
2525 	}
2526 
2527 	if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
2528 		DEBUGOUT("auto negotiation not completed\n");
2529 		ret_val = IXGBE_ERR_RESET_FAILED;
2530 		goto reset_pipeline_out;
2531 	}
2532 
2533 	ret_val = IXGBE_SUCCESS;
2534 
2535 reset_pipeline_out:
2536 	/* Write AUTOC register with original LMS field and Restart_AN */
2537 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2538 	IXGBE_WRITE_FLUSH(hw);
2539 
2540 	return ret_val;
2541 }
2542 
2543 /**
2544  * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
2545  * @hw: pointer to hardware structure
2546  * @byte_offset: byte offset to read
2547  * @dev_addr: address to read from
2548  * @data: value read
2549  *
2550  * Performs byte read operation to SFP module's EEPROM over I2C interface at
2551  * a specified device address.
2552  **/
2553 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
2554 				u8 dev_addr, u8 *data)
2555 {
2556 	u32 esdp;
2557 	s32 status;
2558 	s32 timeout = 200;
2559 
2560 	DEBUGFUNC("ixgbe_read_i2c_byte_82599");
2561 
2562 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2563 		/* Acquire I2C bus ownership. */
2564 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2565 		esdp |= IXGBE_ESDP_SDP0;
2566 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2567 		IXGBE_WRITE_FLUSH(hw);
2568 
2569 		while (timeout) {
2570 			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2571 			if (esdp & IXGBE_ESDP_SDP1)
2572 				break;
2573 
2574 			msec_delay(5);
2575 			timeout--;
2576 		}
2577 
2578 		if (!timeout) {
2579 			DEBUGOUT("Driver can't access resource,"
2580 				 " acquiring I2C bus timeout.\n");
2581 			status = IXGBE_ERR_I2C;
2582 			goto release_i2c_access;
2583 		}
2584 	}
2585 
2586 	status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
2587 
2588 release_i2c_access:
2589 
2590 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2591 		/* Release I2C bus ownership. */
2592 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2593 		esdp &= ~IXGBE_ESDP_SDP0;
2594 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2595 		IXGBE_WRITE_FLUSH(hw);
2596 	}
2597 
2598 	return status;
2599 }
2600 
2601 /**
2602  * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
2603  * @hw: pointer to hardware structure
2604  * @byte_offset: byte offset to write
2605  * @dev_addr: address to read from
2606  * @data: value to write
2607  *
2608  * Performs byte write operation to SFP module's EEPROM over I2C interface at
2609  * a specified device address.
2610  **/
2611 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
2612 				 u8 dev_addr, u8 data)
2613 {
2614 	u32 esdp;
2615 	s32 status;
2616 	s32 timeout = 200;
2617 
2618 	DEBUGFUNC("ixgbe_write_i2c_byte_82599");
2619 
2620 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2621 		/* Acquire I2C bus ownership. */
2622 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2623 		esdp |= IXGBE_ESDP_SDP0;
2624 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2625 		IXGBE_WRITE_FLUSH(hw);
2626 
2627 		while (timeout) {
2628 			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2629 			if (esdp & IXGBE_ESDP_SDP1)
2630 				break;
2631 
2632 			msec_delay(5);
2633 			timeout--;
2634 		}
2635 
2636 		if (!timeout) {
2637 			DEBUGOUT("Driver can't access resource,"
2638 				 " acquiring I2C bus timeout.\n");
2639 			status = IXGBE_ERR_I2C;
2640 			goto release_i2c_access;
2641 		}
2642 	}
2643 
2644 	status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
2645 
2646 release_i2c_access:
2647 
2648 	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2649 		/* Release I2C bus ownership. */
2650 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2651 		esdp &= ~IXGBE_ESDP_SDP0;
2652 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2653 		IXGBE_WRITE_FLUSH(hw);
2654 	}
2655 
2656 	return status;
2657 }
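
/*
 * Editor's sketch (compiled out): the SDP0/SDP1 handshake duplicated in
 * the two functions above, factored into acquire/release helpers.
 * Hypothetical names; not part of the driver API.
 */
#if 0
static s32 i2c_bus_acquire_82599(struct ixgbe_hw *hw)
{
	u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
	s32 timeout = 200;

	/* Request ownership by driving SDP0, then poll SDP1 for grant. */
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp | IXGBE_ESDP_SDP0);
	IXGBE_WRITE_FLUSH(hw);
	while (timeout--) {
		if (IXGBE_READ_REG(hw, IXGBE_ESDP) & IXGBE_ESDP_SDP1)
			return IXGBE_SUCCESS;
		msec_delay(5);
	}
	return IXGBE_ERR_I2C;
}

static void i2c_bus_release_82599(struct ixgbe_hw *hw)
{
	u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* Drop SDP0 to hand the shared bus back. */
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp & ~IXGBE_ESDP_SDP0);
	IXGBE_WRITE_FLUSH(hw);
}
#endif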
2658