xref: /netbsd-src/sys/dev/pci/ixgbe/ixgbe_common.c (revision 82d56013d7b633d116a93943de88e08335357a7c)
1 /* $NetBSD: ixgbe_common.c,v 1.31 2021/04/30 06:55:32 msaitoh Exp $ */
2 
3 /******************************************************************************
4   SPDX-License-Identifier: BSD-3-Clause
5 
6   Copyright (c) 2001-2017, Intel Corporation
7   All rights reserved.
8 
9   Redistribution and use in source and binary forms, with or without
10   modification, are permitted provided that the following conditions are met:
11 
12    1. Redistributions of source code must retain the above copyright notice,
13       this list of conditions and the following disclaimer.
14 
15    2. Redistributions in binary form must reproduce the above copyright
16       notice, this list of conditions and the following disclaimer in the
17       documentation and/or other materials provided with the distribution.
18 
19    3. Neither the name of the Intel Corporation nor the names of its
20       contributors may be used to endorse or promote products derived from
21       this software without specific prior written permission.
22 
23   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
27   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33   POSSIBILITY OF SUCH DAMAGE.
34 
35 ******************************************************************************/
36 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_common.c 331224 2018-03-19 20:55:05Z erj $*/
37 
38 #include <sys/cdefs.h>
39 __KERNEL_RCSID(0, "$NetBSD: ixgbe_common.c,v 1.31 2021/04/30 06:55:32 msaitoh Exp $");
40 
41 #include "ixgbe_common.h"
42 #include "ixgbe_phy.h"
43 #include "ixgbe_dcb.h"
44 #include "ixgbe_dcb_82599.h"
45 #include "ixgbe_api.h"
46 
47 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
48 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
49 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
50 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
51 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
52 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
53 					u16 count);
54 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
55 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
56 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
57 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
58 
59 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
60 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
61 					 u16 *san_mac_offset);
62 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
63 					     u16 words, u16 *data);
64 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
65 					      u16 words, u16 *data);
66 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
67 						 u16 offset);
68 
69 /**
70  *  ixgbe_init_ops_generic - Inits function ptrs
71  *  @hw: pointer to the hardware structure
72  *
73  *  Initialize the function pointers.
74  **/
/**
 *  ixgbe_init_ops_generic - Inits function ptrs
 *  @hw: pointer to the hardware structure
 *
 *  Initialize the function pointers.  Populates the EEPROM, MAC, LED,
 *  filtering, flow-control and link op tables with the generic
 *  implementations; ops left NULL here must be filled in by the
 *  MAC-specific init (e.g. 82598/82599/X540/X550 code).
 *
 *  Always returns IXGBE_SUCCESS.
 **/
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	struct ixgbe_mac_info *mac = &hw->mac;
	/* Sample the EEPROM control register to pick a read method below */
	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	DEBUGFUNC("ixgbe_init_ops_generic");

	/* EEPROM */
	eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
	if (eec & IXGBE_EEC_PRES) {
		eeprom->ops.read = ixgbe_read_eerd_generic;
		eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
	} else {
		eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
		eeprom->ops.read_buffer =
				 ixgbe_read_eeprom_buffer_bit_bang_generic;
	}
	/* Writes always go through the bit-bang path */
	eeprom->ops.write = ixgbe_write_eeprom_generic;
	eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
	eeprom->ops.validate_checksum =
				      ixgbe_validate_eeprom_checksum_generic;
	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;

	/* MAC */
	mac->ops.init_hw = ixgbe_init_hw_generic;
	mac->ops.reset_hw = NULL;		/* MAC-specific */
	mac->ops.start_hw = ixgbe_start_hw_generic;
	mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
	mac->ops.get_media_type = NULL;		/* MAC-specific */
	mac->ops.get_supported_physical_layer = NULL;	/* MAC-specific */
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
	mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
	mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
	mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
	mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
	mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
	mac->ops.prot_autoc_read = prot_autoc_read_generic;
	mac->ops.prot_autoc_write = prot_autoc_write_generic;

	/* LEDs */
	mac->ops.led_on = ixgbe_led_on_generic;
	mac->ops.led_off = ixgbe_led_off_generic;
	mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
	mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
	mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic;

	/* RAR, Multicast, VLAN */
	mac->ops.set_rar = ixgbe_set_rar_generic;
	mac->ops.clear_rar = ixgbe_clear_rar_generic;
	mac->ops.insert_mac_addr = NULL;	/* MAC-specific */
	mac->ops.set_vmdq = NULL;		/* MAC-specific */
	mac->ops.clear_vmdq = NULL;		/* MAC-specific */
	mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
	mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
	mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
	mac->ops.enable_mc = ixgbe_enable_mc_generic;
	mac->ops.disable_mc = ixgbe_disable_mc_generic;
	mac->ops.clear_vfta = NULL;		/* MAC-specific */
	mac->ops.set_vfta = NULL;		/* MAC-specific */
	mac->ops.set_vlvf = NULL;		/* MAC-specific */
	mac->ops.init_uta_tables = NULL;	/* MAC-specific */
	mac->ops.enable_rx = ixgbe_enable_rx_generic;
	mac->ops.disable_rx = ixgbe_disable_rx_generic;
	mac->ops.toggle_txdctl = ixgbe_toggle_txdctl_generic;

	/* Flow Control */
	mac->ops.fc_enable = ixgbe_fc_enable_generic;
	mac->ops.setup_fc = ixgbe_setup_fc_generic;
	mac->ops.fc_autoneg = ixgbe_fc_autoneg;

	/* Link - all MAC-specific, filled in later */
	mac->ops.get_link_capabilities = NULL;
	mac->ops.setup_link = NULL;
	mac->ops.check_link = NULL;
	mac->ops.dmac_config = NULL;
	mac->ops.dmac_update_tcs = NULL;
	mac->ops.dmac_config_tcs = NULL;

	return IXGBE_SUCCESS;
}
159 
160 /**
161  * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
162  * of flow control
163  * @hw: pointer to hardware structure
164  *
165  * This function returns TRUE if the device supports flow control
166  * autonegotiation, and FALSE if it does not.
167  *
168  **/
169 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
170 {
171 	bool supported = FALSE;
172 	ixgbe_link_speed speed;
173 	bool link_up;
174 
175 	DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
176 
177 	switch (hw->phy.media_type) {
178 	case ixgbe_media_type_fiber_fixed:
179 	case ixgbe_media_type_fiber_qsfp:
180 	case ixgbe_media_type_fiber:
181 		/* flow control autoneg black list */
182 		switch (hw->device_id) {
183 		case IXGBE_DEV_ID_X550EM_A_SFP:
184 		case IXGBE_DEV_ID_X550EM_A_SFP_N:
185 		case IXGBE_DEV_ID_X550EM_A_QSFP:
186 		case IXGBE_DEV_ID_X550EM_A_QSFP_N:
187 			supported = FALSE;
188 			break;
189 		default:
190 			hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
191 			/* if link is down, assume supported */
192 			if (link_up)
193 				supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
194 				    TRUE : FALSE;
195 			else
196 				supported = TRUE;
197 		}
198 
199 		break;
200 	case ixgbe_media_type_backplane:
201 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI)
202 			supported = FALSE;
203 		else
204 			supported = TRUE;
205 		break;
206 	case ixgbe_media_type_copper:
207 		/* only some copper devices support flow control autoneg */
208 		switch (hw->device_id) {
209 		case IXGBE_DEV_ID_82599_T3_LOM:
210 		case IXGBE_DEV_ID_X540T:
211 		case IXGBE_DEV_ID_X540T1:
212 		case IXGBE_DEV_ID_X540_BYPASS:
213 		case IXGBE_DEV_ID_X550T:
214 		case IXGBE_DEV_ID_X550T1:
215 		case IXGBE_DEV_ID_X550EM_X_10G_T:
216 		case IXGBE_DEV_ID_X550EM_A_10G_T:
217 		case IXGBE_DEV_ID_X550EM_A_1G_T:
218 		case IXGBE_DEV_ID_X550EM_A_1G_T_L:
219 			supported = TRUE;
220 			break;
221 		default:
222 			supported = FALSE;
223 		}
224 	default:
225 		break;
226 	}
227 
228 	if (!supported)
229 		ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
230 			      "Device %x does not support flow control autoneg",
231 			      hw->device_id);
232 
233 	return supported;
234 }
235 
236 /**
237  *  ixgbe_setup_fc_generic - Set up flow control
238  *  @hw: pointer to hardware structure
239  *
240  *  Called at init time to set up flow control.
241  **/
242 s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
243 {
244 	s32 ret_val = IXGBE_SUCCESS;
245 	u32 reg = 0, reg_bp = 0;
246 	u16 reg_cu = 0;
247 	bool locked = FALSE;
248 
249 	DEBUGFUNC("ixgbe_setup_fc_generic");
250 
251 	/* Validate the requested mode */
252 	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
253 		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
254 			   "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
255 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
256 		goto out;
257 	}
258 
259 	/*
260 	 * 10gig parts do not have a word in the EEPROM to determine the
261 	 * default flow control setting, so we explicitly set it to full.
262 	 */
263 	if (hw->fc.requested_mode == ixgbe_fc_default)
264 		hw->fc.requested_mode = ixgbe_fc_full;
265 
266 	/*
267 	 * Set up the 1G and 10G flow control advertisement registers so the
268 	 * HW will be able to do fc autoneg once the cable is plugged in.  If
269 	 * we link at 10G, the 1G advertisement is harmless and vice versa.
270 	 */
271 	switch (hw->phy.media_type) {
272 	case ixgbe_media_type_backplane:
273 		/* some MAC's need RMW protection on AUTOC */
274 		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
275 		if (ret_val != IXGBE_SUCCESS)
276 			goto out;
277 
278 		/* fall through - only backplane uses autoc */
279 	case ixgbe_media_type_fiber_fixed:
280 	case ixgbe_media_type_fiber_qsfp:
281 	case ixgbe_media_type_fiber:
282 		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
283 
284 		break;
285 	case ixgbe_media_type_copper:
286 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
287 				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
288 		break;
289 	default:
290 		break;
291 	}
292 
293 	/*
294 	 * The possible values of fc.requested_mode are:
295 	 * 0: Flow control is completely disabled
296 	 * 1: Rx flow control is enabled (we can receive pause frames,
297 	 *    but not send pause frames).
298 	 * 2: Tx flow control is enabled (we can send pause frames but
299 	 *    we do not support receiving pause frames).
300 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
301 	 * other: Invalid.
302 	 */
303 	switch (hw->fc.requested_mode) {
304 	case ixgbe_fc_none:
305 		/* Flow control completely disabled by software override. */
306 		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
307 		if (hw->phy.media_type == ixgbe_media_type_backplane)
308 			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
309 				    IXGBE_AUTOC_ASM_PAUSE);
310 		else if (hw->phy.media_type == ixgbe_media_type_copper)
311 			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
312 		break;
313 	case ixgbe_fc_tx_pause:
314 		/*
315 		 * Tx Flow control is enabled, and Rx Flow control is
316 		 * disabled by software override.
317 		 */
318 		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
319 		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
320 		if (hw->phy.media_type == ixgbe_media_type_backplane) {
321 			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
322 			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
323 		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
324 			reg_cu |= IXGBE_TAF_ASM_PAUSE;
325 			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
326 		}
327 		break;
328 	case ixgbe_fc_rx_pause:
329 		/*
330 		 * Rx Flow control is enabled and Tx Flow control is
331 		 * disabled by software override. Since there really
332 		 * isn't a way to advertise that we are capable of RX
333 		 * Pause ONLY, we will advertise that we support both
334 		 * symmetric and asymmetric Rx PAUSE, as such we fall
335 		 * through to the fc_full statement.  Later, we will
336 		 * disable the adapter's ability to send PAUSE frames.
337 		 */
338 	case ixgbe_fc_full:
339 		/* Flow control (both Rx and Tx) is enabled by SW override. */
340 		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
341 		if (hw->phy.media_type == ixgbe_media_type_backplane)
342 			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
343 				  IXGBE_AUTOC_ASM_PAUSE;
344 		else if (hw->phy.media_type == ixgbe_media_type_copper)
345 			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
346 		break;
347 	default:
348 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
349 			     "Flow control param set incorrectly\n");
350 		ret_val = IXGBE_ERR_CONFIG;
351 		goto out;
352 		break;
353 	}
354 
355 	if (hw->mac.type < ixgbe_mac_X540) {
356 		/*
357 		 * Enable auto-negotiation between the MAC & PHY;
358 		 * the MAC will advertise clause 37 flow control.
359 		 */
360 		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
361 		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
362 
363 		/* Disable AN timeout */
364 		if (hw->fc.strict_ieee)
365 			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
366 
367 		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
368 		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
369 	}
370 
371 	/*
372 	 * AUTOC restart handles negotiation of 1G and 10G on backplane
373 	 * and copper. There is no need to set the PCS1GCTL register.
374 	 *
375 	 */
376 	if (hw->phy.media_type == ixgbe_media_type_backplane) {
377 		reg_bp |= IXGBE_AUTOC_AN_RESTART;
378 		ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
379 		if (ret_val)
380 			goto out;
381 	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
382 		    (ixgbe_device_supports_autoneg_fc(hw))) {
383 		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
384 				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
385 	}
386 
387 	DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
388 out:
389 	return ret_val;
390 }
391 
392 /**
393  *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
394  *  @hw: pointer to hardware structure
395  *
396  *  Starts the hardware by filling the bus info structure and media type, clears
397  *  all on chip counters, initializes receive address registers, multicast
398  *  table, VLAN filter table, calls routine to set up link and flow control
399  *  settings, and leaves transmit and receive units disabled and uninitialized
400  **/
/**
 *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
 *  @hw: pointer to hardware structure
 *
 *  Starts the hardware by filling the bus info structure and media type, clears
 *  all on chip counters, initializes receive address registers, multicast
 *  table, VLAN filter table, calls routine to set up link and flow control
 *  settings, and leaves transmit and receive units disabled and uninitialized
 *
 *  Returns IXGBE_SUCCESS, or the flow control setup error (other than
 *  IXGBE_NOT_IMPLEMENTED, which is tolerated).
 **/
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
	s32 ret_val;
	u32 ctrl_ext;
	u16 device_caps;

	DEBUGFUNC("ixgbe_start_hw_generic");

	/* Set the media type */
	hw->phy.media_type = hw->mac.ops.get_media_type(hw);

	/* PHY ops initialization must be done in reset_hw() */

	/* Clear the VLAN filter table */
	hw->mac.ops.clear_vfta(hw);

	/* Clear statistics registers (clear-on-read) */
	hw->mac.ops.clear_hw_cntrs(hw);

	/* Set No Snoop Disable */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	/* Setup flow control; NOT_IMPLEMENTED is acceptable (no FC support) */
	ret_val = ixgbe_setup_fc(hw);
	if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) {
		DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
		return ret_val;
	}

	/* Cache bit indicating need for crosstalk fix */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* device caps word advertises whether the workaround applies */
		hw->mac.ops.get_device_caps(hw, &device_caps);
		if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
			hw->need_crosstalk_fix = FALSE;
		else
			hw->need_crosstalk_fix = TRUE;
		break;
	default:
		hw->need_crosstalk_fix = FALSE;
		break;
	}

	/* Clear adapter stopped flag */
	hw->adapter_stopped = FALSE;

	return IXGBE_SUCCESS;
}
454 
455 /**
456  *  ixgbe_start_hw_gen2 - Init sequence for common device family
457  *  @hw: pointer to hw structure
458  *
459  * Performs the init sequence common to the second generation
460  * of 10 GbE devices.
461  * Devices in the second generation:
462  *     82599
463  *     X540
464  **/
/**
 *  ixgbe_start_hw_gen2 - Init sequence for common device family
 *  @hw: pointer to hw structure
 *
 * Performs the init sequence common to the second generation
 * of 10 GbE devices.
 * Devices in the second generation:
 *     82599
 *     X540
 *
 * Always returns IXGBE_SUCCESS.
 **/
s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
	u32 i;
	u32 regval;

	DEBUGFUNC("ixgbe_start_hw_gen2");

	/* Clear the rate limiters (select each Tx queue via RTTDQSEL,
	 * then zero its rate control register) */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
	}
	IXGBE_WRITE_FLUSH(hw);

	/* Disable relaxed ordering for descriptor/data write-back
	 * (NOTE(review): presumably a hardware erratum workaround on
	 * these MACs - confirm against the 82599/X540 specs) */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
	}

	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

	return IXGBE_SUCCESS;
}
495 
496 /**
497  *  ixgbe_init_hw_generic - Generic hardware initialization
498  *  @hw: pointer to hardware structure
499  *
500  *  Initialize the hardware by resetting the hardware, filling the bus info
501  *  structure and media type, clears all on chip counters, initializes receive
502  *  address registers, multicast table, VLAN filter table, calls routine to set
503  *  up link and flow control settings, and leaves transmit and receive units
504  *  disabled and uninitialized
505  **/
506 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
507 {
508 	s32 status;
509 
510 	DEBUGFUNC("ixgbe_init_hw_generic");
511 
512 	/* Reset the hardware */
513 	status = hw->mac.ops.reset_hw(hw);
514 
515 	if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) {
516 		/* Start the HW */
517 		status = hw->mac.ops.start_hw(hw);
518 	}
519 
520 	/* Initialize the LED link active for LED blink support */
521 	if (hw->mac.ops.init_led_link_act)
522 		hw->mac.ops.init_led_link_act(hw);
523 
524 	if (status != IXGBE_SUCCESS)
525 		DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
526 
527 	return status;
528 }
529 
530 /**
531  *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
532  *  @hw: pointer to hardware structure
533  *
534  *  Clears all hardware statistics counters by reading them from the hardware
535  *  Statistics counters are clear on read.
536  **/
/**
 *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 *  @hw: pointer to hardware structure
 *
 *  Clears all hardware statistics counters by reading them from the hardware
 *  Statistics counters are clear on read.  Return values are deliberately
 *  discarded; the reads exist only for their clearing side effect.
 *
 *  Always returns IXGBE_SUCCESS.
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");

	/* Error counters */
	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	if (hw->mac.type >= ixgbe_mac_X550)
		IXGBE_READ_REG(hw, IXGBE_MBSDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	/* Link-level flow control counters; 82599+ use the *CNT variants */
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* Per-priority (PFC) flow control counters, 8 traffic classes */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	/* Rx packet-size histogram and good/broadcast/multicast counters */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	/* RNBC exists only on 82598 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	/* Tx packet-size histogram */
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	/* Per-queue counters, 16 queues */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/* X540/X550 keep extra PHY-side counters; clear them via MDIO
	 * (i is reused as a scratch u16 destination for the reads) */
	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			ixgbe_identify_phy(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
	}

	return IXGBE_SUCCESS;
}
645 
646 /**
647  *  ixgbe_read_pba_string_generic - Reads part number string from EEPROM
648  *  @hw: pointer to hardware structure
649  *  @pba_num: stores the part number string from the EEPROM
650  *  @pba_num_size: part number string buffer length
651  *
652  *  Reads the part number string from the EEPROM.
653  **/
654 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
655 				  u32 pba_num_size)
656 {
657 	s32 ret_val;
658 	u16 data;
659 	u16 pba_ptr;
660 	u16 offset;
661 	u16 length;
662 
663 	DEBUGFUNC("ixgbe_read_pba_string_generic");
664 
665 	if (pba_num == NULL) {
666 		DEBUGOUT("PBA string buffer was null\n");
667 		return IXGBE_ERR_INVALID_ARGUMENT;
668 	}
669 
670 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
671 	if (ret_val) {
672 		DEBUGOUT("NVM Read Error\n");
673 		return ret_val;
674 	}
675 
676 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
677 	if (ret_val) {
678 		DEBUGOUT("NVM Read Error\n");
679 		return ret_val;
680 	}
681 
682 	/*
683 	 * if data is not ptr guard the PBA must be in legacy format which
684 	 * means pba_ptr is actually our second data word for the PBA number
685 	 * and we can decode it into an ascii string
686 	 */
687 	if (data != IXGBE_PBANUM_PTR_GUARD) {
688 		DEBUGOUT("NVM PBA number is not stored as string\n");
689 
690 		/* we will need 11 characters to store the PBA */
691 		if (pba_num_size < 11) {
692 			DEBUGOUT("PBA string buffer too small\n");
693 			return IXGBE_ERR_NO_SPACE;
694 		}
695 
696 		/* extract hex string from data and pba_ptr */
697 		pba_num[0] = (data >> 12) & 0xF;
698 		pba_num[1] = (data >> 8) & 0xF;
699 		pba_num[2] = (data >> 4) & 0xF;
700 		pba_num[3] = data & 0xF;
701 		pba_num[4] = (pba_ptr >> 12) & 0xF;
702 		pba_num[5] = (pba_ptr >> 8) & 0xF;
703 		pba_num[6] = '-';
704 		pba_num[7] = 0;
705 		pba_num[8] = (pba_ptr >> 4) & 0xF;
706 		pba_num[9] = pba_ptr & 0xF;
707 
708 		/* put a null character on the end of our string */
709 		pba_num[10] = '\0';
710 
711 		/* switch all the data but the '-' to hex char */
712 		for (offset = 0; offset < 10; offset++) {
713 			if (pba_num[offset] < 0xA)
714 				pba_num[offset] += '0';
715 			else if (pba_num[offset] < 0x10)
716 				pba_num[offset] += 'A' - 0xA;
717 		}
718 
719 		return IXGBE_SUCCESS;
720 	}
721 
722 	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
723 	if (ret_val) {
724 		DEBUGOUT("NVM Read Error\n");
725 		return ret_val;
726 	}
727 
728 	if (length == 0xFFFF || length == 0) {
729 		DEBUGOUT("NVM PBA number section invalid length\n");
730 		return IXGBE_ERR_PBA_SECTION;
731 	}
732 
733 	/* check if pba_num buffer is big enough */
734 	if (pba_num_size  < (((u32)length * 2) - 1)) {
735 		DEBUGOUT("PBA string buffer too small\n");
736 		return IXGBE_ERR_NO_SPACE;
737 	}
738 
739 	/* trim pba length from start of string */
740 	pba_ptr++;
741 	length--;
742 
743 	for (offset = 0; offset < length; offset++) {
744 		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
745 		if (ret_val) {
746 			DEBUGOUT("NVM Read Error\n");
747 			return ret_val;
748 		}
749 		pba_num[offset * 2] = (u8)(data >> 8);
750 		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
751 	}
752 	pba_num[offset * 2] = '\0';
753 
754 	return IXGBE_SUCCESS;
755 }
756 
757 /**
758  *  ixgbe_read_pba_num_generic - Reads part number from EEPROM
759  *  @hw: pointer to hardware structure
760  *  @pba_num: stores the part number from the EEPROM
761  *
762  *  Reads the part number from the EEPROM.
763  **/
764 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
765 {
766 	s32 ret_val;
767 	u16 data;
768 
769 	DEBUGFUNC("ixgbe_read_pba_num_generic");
770 
771 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
772 	if (ret_val) {
773 		DEBUGOUT("NVM Read Error\n");
774 		return ret_val;
775 	} else if (data == IXGBE_PBANUM_PTR_GUARD) {
776 		DEBUGOUT("NVM Not supported\n");
777 		return IXGBE_NOT_IMPLEMENTED;
778 	}
779 	*pba_num = (u32)(data << 16);
780 
781 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
782 	if (ret_val) {
783 		DEBUGOUT("NVM Read Error\n");
784 		return ret_val;
785 	}
786 	*pba_num |= data;
787 
788 	return IXGBE_SUCCESS;
789 }
790 
791 /**
792  *  ixgbe_read_pba_raw
793  *  @hw: pointer to the HW structure
794  *  @eeprom_buf: optional pointer to EEPROM image
795  *  @eeprom_buf_size: size of EEPROM image in words
796  *  @max_pba_block_size: PBA block size limit
797  *  @pba: pointer to output PBA structure
798  *
799  *  Reads PBA from EEPROM image when eeprom_buf is not NULL.
800  *  Reads PBA from physical EEPROM device when eeprom_buf is NULL.
801  *
802  **/
/**
 *  ixgbe_read_pba_raw
 *  @hw: pointer to the HW structure
 *  @eeprom_buf: optional pointer to EEPROM image
 *  @eeprom_buf_size: size of EEPROM image in words
 *  @max_pba_block_size: PBA block size limit
 *  @pba: pointer to output PBA structure
 *
 *  Reads PBA from EEPROM image when eeprom_buf is not NULL.
 *  Reads PBA from physical EEPROM device when eeprom_buf is NULL.
 *
 *  Fills pba->word[] with the two PBA header words; when they indicate the
 *  string format (guard word), also copies the PBA block into
 *  pba->pba_block (caller-provided, checked against max_pba_block_size).
 *
 *  Returns IXGBE_SUCCESS, IXGBE_ERR_PARAM on a bad argument or undersized
 *  buffer, or an EEPROM read error.
 **/
s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
		       u32 eeprom_buf_size, u16 max_pba_block_size,
		       struct ixgbe_pba *pba)
{
	s32 ret_val;
	u16 pba_block_size;

	if (pba == NULL)
		return IXGBE_ERR_PARAM;

	/* Fetch the two header words from the device or the image */
	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						     &pba->word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
			pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	/* Guard word => string format: word[1] points at the PBA block */
	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
		if (pba->pba_block == NULL)
			return IXGBE_ERR_PARAM;

		ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
						   eeprom_buf_size,
						   &pba_block_size);
		if (ret_val)
			return ret_val;

		if (pba_block_size > max_pba_block_size)
			return IXGBE_ERR_PARAM;

		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
							     pba_block_size,
							     pba->pba_block);
			if (ret_val)
				return ret_val;
		} else {
			/* bounds-check the block before copying from image */
			if (eeprom_buf_size > (u32)(pba->word[1] +
					      pba_block_size)) {
				memcpy(pba->pba_block,
				       &eeprom_buf[pba->word[1]],
				       pba_block_size * sizeof(u16));
			} else {
				return IXGBE_ERR_PARAM;
			}
		}
	}

	return IXGBE_SUCCESS;
}
860 
861 /**
862  *  ixgbe_write_pba_raw
863  *  @hw: pointer to the HW structure
864  *  @eeprom_buf: optional pointer to EEPROM image
865  *  @eeprom_buf_size: size of EEPROM image in words
866  *  @pba: pointer to PBA structure
867  *
868  *  Writes PBA to EEPROM image when eeprom_buf is not NULL.
869  *  Writes PBA to physical EEPROM device when eeprom_buf is NULL.
870  *
871  **/
/**
 *  ixgbe_write_pba_raw
 *  @hw: pointer to the HW structure
 *  @eeprom_buf: optional pointer to EEPROM image
 *  @eeprom_buf_size: size of EEPROM image in words
 *  @pba: pointer to PBA structure
 *
 *  Writes PBA to EEPROM image when eeprom_buf is not NULL.
 *  Writes PBA to physical EEPROM device when eeprom_buf is NULL.
 *
 *  Mirror of ixgbe_read_pba_raw: writes the two header words and, for the
 *  string format, the PBA block whose length (in words) is carried in
 *  pba->pba_block[0].
 *
 *  Returns IXGBE_SUCCESS, IXGBE_ERR_PARAM on a bad argument or undersized
 *  image, or an EEPROM write error.
 **/
s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
			u32 eeprom_buf_size, struct ixgbe_pba *pba)
{
	s32 ret_val;

	if (pba == NULL)
		return IXGBE_ERR_PARAM;

	/* Write the two header words to the device or the image */
	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						      &pba->word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
			eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	/* Guard word => string format: also write the PBA block itself
	 * (pba_block[0] is the block length word, including itself) */
	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
		if (pba->pba_block == NULL)
			return IXGBE_ERR_PARAM;

		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
							      pba->pba_block[0],
							      pba->pba_block);
			if (ret_val)
				return ret_val;
		} else {
			/* bounds-check the destination range in the image */
			if (eeprom_buf_size > (u32)(pba->word[1] +
					      pba->pba_block[0])) {
				memcpy(&eeprom_buf[pba->word[1]],
				       pba->pba_block,
				       pba->pba_block[0] * sizeof(u16));
			} else {
				return IXGBE_ERR_PARAM;
			}
		}
	}

	return IXGBE_SUCCESS;
}
918 
919 /**
920  *  ixgbe_get_pba_block_size
921  *  @hw: pointer to the HW structure
922  *  @eeprom_buf: optional pointer to EEPROM image
923  *  @eeprom_buf_size: size of EEPROM image in words
924  *  @pba_data_size: pointer to output variable
925  *
926  *  Returns the size of the PBA block in words. Function operates on EEPROM
927  *  image if the eeprom_buf pointer is not NULL otherwise it accesses physical
928  *  EEPROM device.
929  *
930  **/
/**
 *  ixgbe_get_pba_block_size
 *  @hw: pointer to the HW structure
 *  @eeprom_buf: optional pointer to EEPROM image
 *  @eeprom_buf_size: size of EEPROM image in words
 *  @pba_block_size: pointer to output variable (may be NULL)
 *
 *  Returns the size of the PBA block in words. Function operates on EEPROM
 *  image if the eeprom_buf pointer is not NULL otherwise it accesses physical
 *  EEPROM device.
 *
 *  A legacy-format PBA (no guard word) has no block, reported as size 0.
 *
 *  Returns IXGBE_SUCCESS, IXGBE_ERR_PARAM for an undersized image,
 *  IXGBE_ERR_PBA_SECTION for an invalid length word, or an EEPROM read
 *  error.
 **/
s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
			     u32 eeprom_buf_size, u16 *pba_block_size)
{
	s32 ret_val;
	u16 pba_word[2];
	u16 length;

	DEBUGFUNC("ixgbe_get_pba_block_size");

	/* Fetch the two PBA header words */
	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						     &pba_word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
			pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
		/* String format: the first word of the block is its length */
		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
						      &length);
			if (ret_val)
				return ret_val;
		} else {
			if (eeprom_buf_size > pba_word[1])
				length = eeprom_buf[pba_word[1] + 0];
			else
				return IXGBE_ERR_PARAM;
		}

		if (length == 0xFFFF || length == 0)
			return IXGBE_ERR_PBA_SECTION;
	} else {
		/* PBA number in legacy format, there is no PBA Block. */
		length = 0;
	}

	if (pba_block_size != NULL)
		*pba_block_size = length;

	return IXGBE_SUCCESS;
}
979 
980 /**
981  *  ixgbe_get_mac_addr_generic - Generic get MAC address
982  *  @hw: pointer to hardware structure
983  *  @mac_addr: Adapter MAC address
984  *
985  *  Reads the adapter's MAC address from first Receive Address Register (RAR0)
986  *  A reset of the adapter must be performed prior to calling this function
987  *  in order for the MAC address to have been loaded from the EEPROM into RAR0
988  **/
989 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
990 {
991 	u32 rar_high;
992 	u32 rar_low;
993 	u16 i;
994 
995 	DEBUGFUNC("ixgbe_get_mac_addr_generic");
996 
997 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
998 	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
999 
1000 	for (i = 0; i < 4; i++)
1001 		mac_addr[i] = (u8)(rar_low >> (i*8));
1002 
1003 	for (i = 0; i < 2; i++)
1004 		mac_addr[i+4] = (u8)(rar_high >> (i*8));
1005 
1006 	return IXGBE_SUCCESS;
1007 }
1008 
1009 /**
1010  *  ixgbe_set_pci_config_data_generic - Generic store PCI bus info
1011  *  @hw: pointer to hardware structure
1012  *  @link_status: the link status returned by the PCI config space
1013  *
1014  *  Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
1015  **/
1016 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
1017 {
1018 	struct ixgbe_mac_info *mac = &hw->mac;
1019 
1020 	if (hw->bus.type == ixgbe_bus_type_unknown)
1021 		hw->bus.type = ixgbe_bus_type_pci_express;
1022 
1023 	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
1024 	case IXGBE_PCI_LINK_WIDTH_1:
1025 		hw->bus.width = ixgbe_bus_width_pcie_x1;
1026 		break;
1027 	case IXGBE_PCI_LINK_WIDTH_2:
1028 		hw->bus.width = ixgbe_bus_width_pcie_x2;
1029 		break;
1030 	case IXGBE_PCI_LINK_WIDTH_4:
1031 		hw->bus.width = ixgbe_bus_width_pcie_x4;
1032 		break;
1033 	case IXGBE_PCI_LINK_WIDTH_8:
1034 		hw->bus.width = ixgbe_bus_width_pcie_x8;
1035 		break;
1036 	default:
1037 		hw->bus.width = ixgbe_bus_width_unknown;
1038 		break;
1039 	}
1040 
1041 	switch (link_status & IXGBE_PCI_LINK_SPEED) {
1042 	case IXGBE_PCI_LINK_SPEED_2500:
1043 		hw->bus.speed = ixgbe_bus_speed_2500;
1044 		break;
1045 	case IXGBE_PCI_LINK_SPEED_5000:
1046 		hw->bus.speed = ixgbe_bus_speed_5000;
1047 		break;
1048 	case IXGBE_PCI_LINK_SPEED_8000:
1049 		hw->bus.speed = ixgbe_bus_speed_8000;
1050 		break;
1051 	default:
1052 		hw->bus.speed = ixgbe_bus_speed_unknown;
1053 		break;
1054 	}
1055 
1056 	mac->ops.set_lan_id(hw);
1057 }
1058 
1059 /**
1060  *  ixgbe_get_bus_info_generic - Generic set PCI bus info
1061  *  @hw: pointer to hardware structure
1062  *
1063  *  Gets the PCI bus info (speed, width, type) then calls helper function to
1064  *  store this data within the ixgbe_hw structure.
1065  **/
1066 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
1067 {
1068 	u16 link_status;
1069 
1070 	DEBUGFUNC("ixgbe_get_bus_info_generic");
1071 
1072 	/* Get the negotiated link width and speed from PCI config space */
1073 	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
1074 
1075 	ixgbe_set_pci_config_data_generic(hw, link_status);
1076 
1077 	return IXGBE_SUCCESS;
1078 }
1079 
1080 /**
1081  *  ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
1082  *  @hw: pointer to the HW structure
1083  *
1084  *  Determines the LAN function id by reading memory-mapped registers and swaps
1085  *  the port value if requested, and set MAC instance for devices that share
1086  *  CS4227.
1087  **/
1088 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
1089 {
1090 	struct ixgbe_bus_info *bus = &hw->bus;
1091 	u32 reg;
1092 	u16 ee_ctrl_4;
1093 
1094 	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
1095 
1096 	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
1097 	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
1098 	bus->lan_id = (u8)bus->func;
1099 
1100 	/* check for a port swap */
1101 	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
1102 	if (reg & IXGBE_FACTPS_LFS)
1103 		bus->func ^= 0x1;
1104 
1105 	/* Get MAC instance from EEPROM for configuring CS4227 */
1106 	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
1107 		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
1108 		bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
1109 				   IXGBE_EE_CTRL_4_INST_ID_SHIFT;
1110 	}
1111 }
1112 
/**
 *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
 *  @hw: pointer to hardware structure
 *
 *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 *  disables transmit and receive units. The adapter_stopped flag is used by
 *  the shared code and drivers to determine if the adapter is in a stopped
 *  state and should not touch the hardware.
 *
 *  Returns the status of ixgbe_disable_pcie_master(), i.e. IXGBE_SUCCESS
 *  once PCIe master access has been disabled with no requests pending.
 **/
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	u32 reg_val;
	u16 i;

	DEBUGFUNC("ixgbe_stop_adapter_generic");

	/*
	 * Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = TRUE;

	/* Disable the receive unit */
	ixgbe_disable_rx(hw);

	/* Clear interrupt mask to stop interrupts from being generated */
	/*
	 * XXX
	 * This function is called in the state of both interrupt disabled
	 * and interrupt enabled, e.g.
	 * + interrupt disabled case:
	 *   - ixgbe_stop_locked()
	 *     - ixgbe_disable_intr() // interrupt disabled here
	 *     - ixgbe_stop_adapter()
	 *       - hw->mac.ops.stop_adapter()
	 *         == this function
	 * + interrupt enabled case:
	 *   - ixgbe_local_timer1()
	 *     - ixgbe_init_locked()
	 *       - ixgbe_stop_adapter()
	 *         - hw->mac.ops.stop_adapter()
	 *           == this function
	 * Therefore, it causes nest status breaking to nest the status
	 * (that is, que->im_nest++) at all times. So, this function must
	 * use ixgbe_ensure_disabled_intr() instead of ixgbe_disable_intr().
	 */
	ixgbe_ensure_disabled_intr(hw->back);

	/* Clear any pending interrupts, flush previous writes */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	for (i = 0; i < hw->mac.max_tx_queues; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		reg_val &= ~IXGBE_RXDCTL_ENABLE;
		reg_val |= IXGBE_RXDCTL_SWFLSH;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
	}

	/* flush all queues disables */
	IXGBE_WRITE_FLUSH(hw);
	/* Give in-flight descriptors time to drain after the flush. */
	msec_delay(2);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests
	 */
	return ixgbe_disable_pcie_master(hw);
}
1186 
1187 /**
1188  *  ixgbe_init_led_link_act_generic - Store the LED index link/activity.
1189  *  @hw: pointer to hardware structure
1190  *
1191  *  Store the index for the link active LED. This will be used to support
1192  *  blinking the LED.
1193  **/
1194 s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
1195 {
1196 	struct ixgbe_mac_info *mac = &hw->mac;
1197 	u32 led_reg, led_mode;
1198 	u8 i;
1199 
1200 	led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1201 
1202 	/* Get LED link active from the LEDCTL register */
1203 	for (i = 0; i < 4; i++) {
1204 		led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);
1205 
1206 		if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
1207 		     IXGBE_LED_LINK_ACTIVE) {
1208 			mac->led_link_act = i;
1209 			return IXGBE_SUCCESS;
1210 		}
1211 	}
1212 
1213 	/*
1214 	 * If LEDCTL register does not have the LED link active set, then use
1215 	 * known MAC defaults.
1216 	 */
1217 	switch (hw->mac.type) {
1218 	case ixgbe_mac_X550EM_a:
1219 	case ixgbe_mac_X550EM_x:
1220 		mac->led_link_act = 1;
1221 		break;
1222 	default:
1223 		mac->led_link_act = 2;
1224 	}
1225 	return IXGBE_SUCCESS;
1226 }
1227 
1228 /**
1229  *  ixgbe_led_on_generic - Turns on the software controllable LEDs.
1230  *  @hw: pointer to hardware structure
1231  *  @index: led number to turn on
1232  **/
1233 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
1234 {
1235 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1236 
1237 	DEBUGFUNC("ixgbe_led_on_generic");
1238 
1239 	if (index > 3)
1240 		return IXGBE_ERR_PARAM;
1241 
1242 	/* To turn on the LED, set mode to ON. */
1243 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
1244 	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
1245 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1246 	IXGBE_WRITE_FLUSH(hw);
1247 
1248 	return IXGBE_SUCCESS;
1249 }
1250 
1251 /**
1252  *  ixgbe_led_off_generic - Turns off the software controllable LEDs.
1253  *  @hw: pointer to hardware structure
1254  *  @index: led number to turn off
1255  **/
1256 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
1257 {
1258 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1259 
1260 	DEBUGFUNC("ixgbe_led_off_generic");
1261 
1262 	if (index > 3)
1263 		return IXGBE_ERR_PARAM;
1264 
1265 	/* To turn off the LED, set mode to OFF. */
1266 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
1267 	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
1268 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1269 	IXGBE_WRITE_FLUSH(hw);
1270 
1271 	return IXGBE_SUCCESS;
1272 }
1273 
/**
 *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
 *  @hw: pointer to hardware structure
 *
 *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
 *  ixgbe_hw struct in order to set up EEPROM access.
 *  Only does the work once: subsequent calls return immediately because
 *  eeprom->type is no longer ixgbe_eeprom_uninitialized.
 *
 *  Always returns IXGBE_SUCCESS.
 **/
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 eec;
	u16 eeprom_size;

	DEBUGFUNC("ixgbe_init_eeprom_params_generic");

	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->type = ixgbe_eeprom_none;
		/* Set default semaphore delay to 10ms which is a well
		 * tested value */
		eeprom->semaphore_delay = 10;
		/* Clear EEPROM page size, it will be initialized as needed */
		eeprom->word_page_size = 0;

		/*
		 * Check for EEPROM present first.
		 * If not present leave as none
		 */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
		if (eec & IXGBE_EEC_PRES) {
			eeprom->type = ixgbe_eeprom_spi;

			/*
			 * SPI EEPROM is assumed here.  This code would need to
			 * change if a future EEPROM is not SPI.
			 */
			/* word_size = 2^(encoded size + base shift) words */
			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
					    IXGBE_EEC_SIZE_SHIFT);
			eeprom->word_size = 1 << (eeprom_size +
					     IXGBE_EEPROM_WORD_SIZE_SHIFT);
		}

		/*
		 * Address width is decoded from EEC even when no EEPROM was
		 * detected above, so address_bits is always set here.
		 */
		if (eec & IXGBE_EEC_ADDR_SIZE)
			eeprom->address_bits = 16;
		else
			eeprom->address_bits = 8;
		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
			  "%d\n", eeprom->type, eeprom->word_size,
			  eeprom->address_bits);
	}

	return IXGBE_SUCCESS;
}
1326 
/**
 *  ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to write
 *  @words: number of word(s)
 *  @data: 16 bit word(s) to write to EEPROM
 *
 *  Writes 16 bit word(s) to EEPROM through bit-bang method.  The buffer is
 *  split into chunks of IXGBE_EEPROM_RD_BUFFER_MAX_COUNT words (the same
 *  limit the read path uses) so the EEPROM semaphore is never held for the
 *  whole transfer.
 **/
s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
					       u16 words, u16 *data)
{
	s32 status = IXGBE_SUCCESS;
	u16 i, count;

	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	/* The entire range must fit inside the EEPROM. */
	if (offset + words > hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/*
	 * The EEPROM page size cannot be queried from the chip. We do lazy
	 * initialization. It is worth to do that when we write large buffer.
	 */
	if ((hw->eeprom.word_page_size == 0) &&
	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
		ixgbe_detect_eeprom_page_size_generic(hw, offset);

	/*
	 * We cannot hold synchronization semaphores for too long
	 * to avoid other entity starvation. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		/* count = min(words - i, IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) */
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
							    count, &data[i]);

		if (status != IXGBE_SUCCESS)
			break;
	}

out:
	return status;
}
1382 
/**
 *  ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be written to
 *  @words: number of word(s)
 *  @data: 16 bit word(s) to be written to the EEPROM
 *
 *  If ixgbe_eeprom_update_checksum is not called after this function, the
 *  EEPROM will most likely contain an invalid checksum.
 *
 *  Returns IXGBE_SUCCESS, the acquire error from ixgbe_acquire_eeprom(),
 *  or IXGBE_ERR_EEPROM if the device never reported ready.
 **/
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status;
	u16 word;
	u16 page_size;
	u16 i;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for writing  */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		/*
		 * Note: the inner do-while below also advances 'i', so each
		 * outer iteration starts a fresh SPI write transaction at the
		 * first word not yet covered by the previous burst.
		 */
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);

			/*  Send the WRITE ENABLE command (8 bit opcode )  */
			ixgbe_shift_out_eeprom_bits(hw,
						   IXGBE_EEPROM_WREN_OPCODE_SPI,
						   IXGBE_EEPROM_OPCODE_BITS);

			ixgbe_standby_eeprom(hw);

			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode.  The A8 bit stays set for all later
			 * transactions of this call, which is correct because
			 * (offset + i) only grows.
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the Write command (8-bit opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* Byte address: each EEPROM word is two bytes. */
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			page_size = hw->eeprom.word_page_size;

			/* Send the data in burst via SPI*/
			do {
				word = data[i];
				/* Swap to the byte order the device expects. */
				word = (word >> 8) | (word << 8);
				ixgbe_shift_out_eeprom_bits(hw, word, 16);

				/* Unknown page size: one word per command. */
				if (page_size == 0)
					break;

				/* do not wrap around page */
				if (((offset + i) & (page_size - 1)) ==
				    (page_size - 1))
					break;
			} while (++i < words);

			ixgbe_standby_eeprom(hw);
			/* Let the device's internal write cycle complete. */
			msec_delay(10);
		}
		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1465 
1466 /**
1467  *  ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1468  *  @hw: pointer to hardware structure
1469  *  @offset: offset within the EEPROM to be written to
1470  *  @data: 16 bit word to be written to the EEPROM
1471  *
1472  *  If ixgbe_eeprom_update_checksum is not called after this function, the
1473  *  EEPROM will most likely contain an invalid checksum.
1474  **/
1475 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1476 {
1477 	s32 status;
1478 
1479 	DEBUGFUNC("ixgbe_write_eeprom_generic");
1480 
1481 	hw->eeprom.ops.init_params(hw);
1482 
1483 	if (offset >= hw->eeprom.word_size) {
1484 		status = IXGBE_ERR_EEPROM;
1485 		goto out;
1486 	}
1487 
1488 	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1489 
1490 out:
1491 	return status;
1492 }
1493 
1494 /**
1495  *  ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1496  *  @hw: pointer to hardware structure
1497  *  @offset: offset within the EEPROM to be read
1498  *  @data: read 16 bit words(s) from EEPROM
1499  *  @words: number of word(s)
1500  *
1501  *  Reads 16 bit word(s) from EEPROM through bit-bang method
1502  **/
1503 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1504 					      u16 words, u16 *data)
1505 {
1506 	s32 status = IXGBE_SUCCESS;
1507 	u16 i, count;
1508 
1509 	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1510 
1511 	hw->eeprom.ops.init_params(hw);
1512 
1513 	if (words == 0) {
1514 		status = IXGBE_ERR_INVALID_ARGUMENT;
1515 		goto out;
1516 	}
1517 
1518 	if (offset + words > hw->eeprom.word_size) {
1519 		status = IXGBE_ERR_EEPROM;
1520 		goto out;
1521 	}
1522 
1523 	/*
1524 	 * We cannot hold synchronization semaphores for too long
1525 	 * to avoid other entity starvation. However it is more efficient
1526 	 * to read in bursts than synchronizing access for each word.
1527 	 */
1528 	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1529 		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1530 			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1531 
1532 		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1533 							   count, &data[i]);
1534 
1535 		if (status != IXGBE_SUCCESS)
1536 			break;
1537 	}
1538 
1539 out:
1540 	return status;
1541 }
1542 
/**
 *  ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be read
 *  @words: number of word(s)
 *  @data: read 16 bit word(s) from EEPROM
 *
 *  Reads 16 bit word(s) from EEPROM through bit-bang method
 *
 *  Returns IXGBE_SUCCESS, the acquire error from ixgbe_acquire_eeprom(),
 *  or IXGBE_ERR_EEPROM if the device never reported ready.
 **/
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data)
{
	s32 status;
	u16 word_in;
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
	u16 i;

	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for reading  */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);
			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the READ command (opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* Byte address: each EEPROM word is two bytes. */
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			/* Read the data. */
			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
			/* Swap the two bytes back into host word order. */
			data[i] = (word_in >> 8) | (word_in << 8);
		}

		/* End this read operation */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1600 
1601 /**
1602  *  ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1603  *  @hw: pointer to hardware structure
1604  *  @offset: offset within the EEPROM to be read
1605  *  @data: read 16 bit value from EEPROM
1606  *
1607  *  Reads 16 bit value from EEPROM through bit-bang method
1608  **/
1609 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1610 				       u16 *data)
1611 {
1612 	s32 status;
1613 
1614 	DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1615 
1616 	hw->eeprom.ops.init_params(hw);
1617 
1618 	if (offset >= hw->eeprom.word_size) {
1619 		status = IXGBE_ERR_EEPROM;
1620 		goto out;
1621 	}
1622 
1623 	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1624 
1625 out:
1626 	return status;
1627 }
1628 
1629 /**
1630  *  ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1631  *  @hw: pointer to hardware structure
1632  *  @offset: offset of word in the EEPROM to read
1633  *  @words: number of word(s)
1634  *  @data: 16 bit word(s) from the EEPROM
1635  *
1636  *  Reads a 16 bit word(s) from the EEPROM using the EERD register.
1637  **/
1638 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1639 				   u16 words, u16 *data)
1640 {
1641 	u32 eerd;
1642 	s32 status = IXGBE_SUCCESS;
1643 	u32 i;
1644 
1645 	DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1646 
1647 	hw->eeprom.ops.init_params(hw);
1648 
1649 	if (words == 0) {
1650 		status = IXGBE_ERR_INVALID_ARGUMENT;
1651 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1652 		goto out;
1653 	}
1654 
1655 	if (offset >= hw->eeprom.word_size) {
1656 		status = IXGBE_ERR_EEPROM;
1657 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1658 		goto out;
1659 	}
1660 
1661 	for (i = 0; i < words; i++) {
1662 		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1663 		       IXGBE_EEPROM_RW_REG_START;
1664 
1665 		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1666 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1667 
1668 		if (status == IXGBE_SUCCESS) {
1669 			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1670 				   IXGBE_EEPROM_RW_REG_DATA);
1671 		} else {
1672 			DEBUGOUT("Eeprom read timed out\n");
1673 			goto out;
1674 		}
1675 	}
1676 out:
1677 	return status;
1678 }
1679 
/**
 *  ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be used as a scratch pad
 *
 *  Discover EEPROM page size by writing marching data at given offset.
 *  This function is called only when we are writing a new large buffer
 *  at given offset so the data would be overwritten anyway.
 *
 *  On success the detected page size (in words) is stored in
 *  hw->eeprom.word_page_size.
 **/
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset)
{
	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
	s32 status = IXGBE_SUCCESS;
	u16 i;

	DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");

	/* Marching pattern data[i] == i, so word 0 reveals any wrap-around. */
	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
		data[i] = i;

	/*
	 * Temporarily claim the maximum page size so the whole pattern goes
	 * out as one burst, then reset to "unknown" before inspecting it.
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
					     IXGBE_EEPROM_PAGE_SIZE_MAX, data);
	hw->eeprom.word_page_size = 0;
	if (status != IXGBE_SUCCESS)
		goto out;

	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * When writing in burst more than the actual page size
	 * EEPROM address wraps around current page.
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];

	DEBUGOUT1("Detected EEPROM page size = %d words.",
		  hw->eeprom.word_page_size);
out:
	return status;
}
1723 
/**
 *  ixgbe_read_eerd_generic - Read EEPROM word using EERD
 *  @hw: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to read
 *  @data: word read from the EEPROM
 *
 *  Reads a 16 bit word from the EEPROM using the EERD register.
 *  Thin single-word wrapper around ixgbe_read_eerd_buffer_generic().
 **/
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
}
1736 
1737 /**
1738  *  ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1739  *  @hw: pointer to hardware structure
1740  *  @offset: offset of  word in the EEPROM to write
1741  *  @words: number of word(s)
1742  *  @data: word(s) write to the EEPROM
1743  *
1744  *  Write a 16 bit word(s) to the EEPROM using the EEWR register.
1745  **/
1746 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1747 				    u16 words, u16 *data)
1748 {
1749 	u32 eewr;
1750 	s32 status = IXGBE_SUCCESS;
1751 	u16 i;
1752 
1753 	DEBUGFUNC("ixgbe_write_eewr_generic");
1754 
1755 	hw->eeprom.ops.init_params(hw);
1756 
1757 	if (words == 0) {
1758 		status = IXGBE_ERR_INVALID_ARGUMENT;
1759 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1760 		goto out;
1761 	}
1762 
1763 	if (offset >= hw->eeprom.word_size) {
1764 		status = IXGBE_ERR_EEPROM;
1765 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1766 		goto out;
1767 	}
1768 
1769 	for (i = 0; i < words; i++) {
1770 		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1771 			(data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1772 			IXGBE_EEPROM_RW_REG_START;
1773 
1774 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1775 		if (status != IXGBE_SUCCESS) {
1776 			DEBUGOUT("Eeprom write EEWR timed out\n");
1777 			goto out;
1778 		}
1779 
1780 		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1781 
1782 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1783 		if (status != IXGBE_SUCCESS) {
1784 			DEBUGOUT("Eeprom write EEWR timed out\n");
1785 			goto out;
1786 		}
1787 	}
1788 
1789 out:
1790 	return status;
1791 }
1792 
/**
 *  ixgbe_write_eewr_generic - Write EEPROM word using EEWR
 *  @hw: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to write
 *  @data: word write to the EEPROM
 *
 *  Write a 16 bit word to the EEPROM using the EEWR register.
 *  Thin single-word wrapper around ixgbe_write_eewr_buffer_generic().
 **/
s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
}
1805 
1806 /**
1807  *  ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1808  *  @hw: pointer to hardware structure
1809  *  @ee_reg: EEPROM flag for polling
1810  *
1811  *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1812  *  read or write is done respectively.
1813  **/
1814 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1815 {
1816 	u32 i;
1817 	u32 reg;
1818 	s32 status = IXGBE_ERR_EEPROM;
1819 
1820 	DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1821 
1822 	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1823 		if (ee_reg == IXGBE_NVM_POLL_READ)
1824 			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1825 		else
1826 			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1827 
1828 		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1829 			status = IXGBE_SUCCESS;
1830 			break;
1831 		}
1832 		usec_delay(5);
1833 	}
1834 
1835 	if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1836 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
1837 			     "EEPROM read/write done polling timed out");
1838 
1839 	return status;
1840 }
1841 
/**
 *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
 *  @hw: pointer to hardware structure
 *
 *  Prepares EEPROM for access using bit-bang method. This function should
 *  be called before issuing a command to the EEPROM.
 *
 *  Takes the SW/FW EEPROM semaphore, then requests the hardware access
 *  grant (EEC.REQ/EEC.GNT).  Returns IXGBE_SUCCESS, IXGBE_ERR_SWFW_SYNC
 *  if the semaphore could not be taken, or IXGBE_ERR_EEPROM if the grant
 *  never arrived (the semaphore is released again in that case).
 **/
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 eec;
	u32 i;

	DEBUGFUNC("ixgbe_acquire_eeprom");

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
	    != IXGBE_SUCCESS)
		status = IXGBE_ERR_SWFW_SYNC;

	if (status == IXGBE_SUCCESS) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

		/* Request EEPROM Access */
		eec |= IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);

		/* Poll for the hardware grant bit. */
		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
			eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
			if (eec & IXGBE_EEC_GNT)
				break;
			usec_delay(5);
		}

		/* Release if grant not acquired */
		if (!(eec & IXGBE_EEC_GNT)) {
			eec &= ~IXGBE_EEC_REQ;
			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
			DEBUGOUT("Could not acquire EEPROM grant\n");

			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
			status = IXGBE_ERR_EEPROM;
		}

		/* Setup EEPROM for Read/Write */
		if (status == IXGBE_SUCCESS) {
			/* Clear CS and SK */
			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
			IXGBE_WRITE_FLUSH(hw);
			usec_delay(1);
		}
	}
	return status;
}
1896 
/**
 *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
 *  @hw: pointer to hardware structure
 *
 *  Sets the hardware semaphores so EEPROM access can occur for bit-bang method
 *
 *  Two-level handshake: first the SMBI bit (semaphore between device
 *  drivers), then the SWESMBI bit (semaphore between software and
 *  firmware).  Returns IXGBE_SUCCESS with both bits held, otherwise
 *  IXGBE_ERR_EEPROM.
 **/
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout = 2000;	/* poll iterations, 50us apart */
	u32 i;
	u32 swsm;

	DEBUGFUNC("ixgbe_get_eeprom_semaphore");


	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(50);
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
			 "not granted.\n");
		/*
		 * this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		usec_delay(50);
		/*
		 * one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI))
			status = IXGBE_SUCCESS;
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));

			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.
			 */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			usec_delay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			ERROR_REPORT1(IXGBE_ERROR_POLLING,
			    "SWESMBI Software EEPROM semaphore not granted.\n");
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			     "Software semaphore SMBI between device drivers "
			     "not granted.\n");
	}

	return status;
}
1987 
1988 /**
1989  *  ixgbe_release_eeprom_semaphore - Release hardware semaphore
1990  *  @hw: pointer to hardware structure
1991  *
1992  *  This function clears hardware semaphore bits.
1993  **/
1994 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1995 {
1996 	u32 swsm;
1997 
1998 	DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1999 
2000 	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
2001 
2002 	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
2003 	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
2004 	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
2005 	IXGBE_WRITE_FLUSH(hw);
2006 }
2007 
2008 /**
2009  *  ixgbe_ready_eeprom - Polls for EEPROM ready
2010  *  @hw: pointer to hardware structure
2011  **/
2012 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
2013 {
2014 	s32 status = IXGBE_SUCCESS;
2015 	u16 i;
2016 	u8 spi_stat_reg;
2017 
2018 	DEBUGFUNC("ixgbe_ready_eeprom");
2019 
2020 	/*
2021 	 * Read "Status Register" repeatedly until the LSB is cleared.  The
2022 	 * EEPROM will signal that the command has been completed by clearing
2023 	 * bit 0 of the internal status register.  If it's not cleared within
2024 	 * 5 milliseconds, then error out.
2025 	 */
2026 	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
2027 		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
2028 					    IXGBE_EEPROM_OPCODE_BITS);
2029 		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
2030 		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
2031 			break;
2032 
2033 		usec_delay(5);
2034 		ixgbe_standby_eeprom(hw);
2035 	}
2036 
2037 	/*
2038 	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
2039 	 * devices (and only 0-5mSec on 5V devices)
2040 	 */
2041 	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
2042 		DEBUGOUT("SPI EEPROM Status error\n");
2043 		status = IXGBE_ERR_EEPROM;
2044 	}
2045 
2046 	return status;
2047 }
2048 
2049 /**
2050  *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
2051  *  @hw: pointer to hardware structure
2052  **/
2053 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
2054 {
2055 	u32 eec;
2056 
2057 	DEBUGFUNC("ixgbe_standby_eeprom");
2058 
2059 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2060 
2061 	/* Toggle CS to flush commands */
2062 	eec |= IXGBE_EEC_CS;
2063 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2064 	IXGBE_WRITE_FLUSH(hw);
2065 	usec_delay(1);
2066 	eec &= ~IXGBE_EEC_CS;
2067 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2068 	IXGBE_WRITE_FLUSH(hw);
2069 	usec_delay(1);
2070 }
2071 
2072 /**
2073  *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
2074  *  @hw: pointer to hardware structure
2075  *  @data: data to send to the EEPROM
2076  *  @count: number of bits to shift out
2077  **/
2078 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
2079 					u16 count)
2080 {
2081 	u32 eec;
2082 	u32 mask;
2083 	u32 i;
2084 
2085 	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
2086 
2087 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2088 
2089 	/*
2090 	 * Mask is used to shift "count" bits of "data" out to the EEPROM
2091 	 * one bit at a time.  Determine the starting bit based on count
2092 	 */
2093 	mask = 0x01 << (count - 1);
2094 
2095 	for (i = 0; i < count; i++) {
2096 		/*
2097 		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
2098 		 * "1", and then raising and then lowering the clock (the SK
2099 		 * bit controls the clock input to the EEPROM).  A "0" is
2100 		 * shifted out to the EEPROM by setting "DI" to "0" and then
2101 		 * raising and then lowering the clock.
2102 		 */
2103 		if (data & mask)
2104 			eec |= IXGBE_EEC_DI;
2105 		else
2106 			eec &= ~IXGBE_EEC_DI;
2107 
2108 		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2109 		IXGBE_WRITE_FLUSH(hw);
2110 
2111 		usec_delay(1);
2112 
2113 		ixgbe_raise_eeprom_clk(hw, &eec);
2114 		ixgbe_lower_eeprom_clk(hw, &eec);
2115 
2116 		/*
2117 		 * Shift mask to signify next bit of data to shift in to the
2118 		 * EEPROM
2119 		 */
2120 		mask = mask >> 1;
2121 	}
2122 
2123 	/* We leave the "DI" bit set to "0" when we leave this routine. */
2124 	eec &= ~IXGBE_EEC_DI;
2125 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2126 	IXGBE_WRITE_FLUSH(hw);
2127 }
2128 
2129 /**
2130  *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
2131  *  @hw: pointer to hardware structure
2132  *  @count: number of bits to shift
2133  **/
2134 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
2135 {
2136 	u32 eec;
2137 	u32 i;
2138 	u16 data = 0;
2139 
2140 	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
2141 
2142 	/*
2143 	 * In order to read a register from the EEPROM, we need to shift
2144 	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
2145 	 * the clock input to the EEPROM (setting the SK bit), and then reading
2146 	 * the value of the "DO" bit.  During this "shifting in" process the
2147 	 * "DI" bit should always be clear.
2148 	 */
2149 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2150 
2151 	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
2152 
2153 	for (i = 0; i < count; i++) {
2154 		data = data << 1;
2155 		ixgbe_raise_eeprom_clk(hw, &eec);
2156 
2157 		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2158 
2159 		eec &= ~(IXGBE_EEC_DI);
2160 		if (eec & IXGBE_EEC_DO)
2161 			data |= 1;
2162 
2163 		ixgbe_lower_eeprom_clk(hw, &eec);
2164 	}
2165 
2166 	return data;
2167 }
2168 
2169 /**
2170  *  ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
2171  *  @hw: pointer to hardware structure
2172  *  @eec: EEC register's current value
2173  **/
2174 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2175 {
2176 	DEBUGFUNC("ixgbe_raise_eeprom_clk");
2177 
2178 	/*
2179 	 * Raise the clock input to the EEPROM
2180 	 * (setting the SK bit), then delay
2181 	 */
2182 	*eec = *eec | IXGBE_EEC_SK;
2183 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2184 	IXGBE_WRITE_FLUSH(hw);
2185 	usec_delay(1);
2186 }
2187 
2188 /**
2189  *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
2190  *  @hw: pointer to hardware structure
2191  *  @eec: EEC's current value
2192  **/
2193 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2194 {
2195 	DEBUGFUNC("ixgbe_lower_eeprom_clk");
2196 
2197 	/*
2198 	 * Lower the clock input to the EEPROM (clearing the SK bit), then
2199 	 * delay
2200 	 */
2201 	*eec = *eec & ~IXGBE_EEC_SK;
2202 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2203 	IXGBE_WRITE_FLUSH(hw);
2204 	usec_delay(1);
2205 }
2206 
2207 /**
2208  *  ixgbe_release_eeprom - Release EEPROM, release semaphores
2209  *  @hw: pointer to hardware structure
2210  **/
2211 static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
2212 {
2213 	u32 eec;
2214 
2215 	DEBUGFUNC("ixgbe_release_eeprom");
2216 
2217 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2218 
2219 	eec |= IXGBE_EEC_CS;  /* Pull CS high */
2220 	eec &= ~IXGBE_EEC_SK; /* Lower SCK */
2221 
2222 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2223 	IXGBE_WRITE_FLUSH(hw);
2224 
2225 	usec_delay(1);
2226 
2227 	/* Stop requesting EEPROM access */
2228 	eec &= ~IXGBE_EEC_REQ;
2229 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2230 
2231 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
2232 
2233 	/* Delay before attempt to obtain semaphore again to allow FW access */
2234 	msec_delay(hw->eeprom.semaphore_delay);
2235 }
2236 
2237 /**
2238  *  ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
2239  *  @hw: pointer to hardware structure
2240  *
2241  *  Returns a negative error code on error, or the 16-bit checksum
2242  **/
2243 s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
2244 {
2245 	u16 i;
2246 	u16 j;
2247 	u16 checksum = 0;
2248 	u16 length = 0;
2249 	u16 pointer = 0;
2250 	u16 word = 0;
2251 
2252 	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
2253 
2254 	/* Include 0x0-0x3F in the checksum */
2255 	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
2256 		if (hw->eeprom.ops.read(hw, i, &word)) {
2257 			DEBUGOUT("EEPROM read failed\n");
2258 			return IXGBE_ERR_EEPROM;
2259 		}
2260 		checksum += word;
2261 	}
2262 
2263 	/* Include all data from pointers except for the fw pointer */
2264 	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
2265 		if (hw->eeprom.ops.read(hw, i, &pointer)) {
2266 			DEBUGOUT("EEPROM read failed\n");
2267 			return IXGBE_ERR_EEPROM;
2268 		}
2269 
2270 		/* If the pointer seems invalid */
2271 		if (pointer == 0xFFFF || pointer == 0)
2272 			continue;
2273 
2274 		if (hw->eeprom.ops.read(hw, pointer, &length)) {
2275 			DEBUGOUT("EEPROM read failed\n");
2276 			return IXGBE_ERR_EEPROM;
2277 		}
2278 
2279 		if (length == 0xFFFF || length == 0)
2280 			continue;
2281 
2282 		for (j = pointer + 1; j <= pointer + length; j++) {
2283 			if (hw->eeprom.ops.read(hw, j, &word)) {
2284 				DEBUGOUT("EEPROM read failed\n");
2285 				return IXGBE_ERR_EEPROM;
2286 			}
2287 			checksum += word;
2288 		}
2289 	}
2290 
2291 	checksum = (u16)IXGBE_EEPROM_SUM - checksum;
2292 
2293 	return (s32)checksum;
2294 }
2295 
2296 /**
2297  *  ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
2298  *  @hw: pointer to hardware structure
2299  *  @checksum_val: calculated checksum
2300  *
2301  *  Performs checksum calculation and validates the EEPROM checksum.  If the
2302  *  caller does not need checksum_val, the value can be NULL.
2303  **/
2304 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
2305 					   u16 *checksum_val)
2306 {
2307 	s32 status;
2308 	u16 checksum;
2309 	u16 read_checksum = 0;
2310 
2311 	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
2312 
2313 	/* Read the first word from the EEPROM. If this times out or fails, do
2314 	 * not continue or we could be in for a very long wait while every
2315 	 * EEPROM read fails
2316 	 */
2317 	status = hw->eeprom.ops.read(hw, 0, &checksum);
2318 	if (status) {
2319 		DEBUGOUT("EEPROM read failed\n");
2320 		return status;
2321 	}
2322 
2323 	status = hw->eeprom.ops.calc_checksum(hw);
2324 	if (status < 0)
2325 		return status;
2326 
2327 	checksum = (u16)(status & 0xffff);
2328 
2329 	status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
2330 	if (status) {
2331 		DEBUGOUT("EEPROM read failed\n");
2332 		return status;
2333 	}
2334 
2335 	/* Verify read checksum from EEPROM is the same as
2336 	 * calculated checksum
2337 	 */
2338 	if (read_checksum != checksum)
2339 		status = IXGBE_ERR_EEPROM_CHECKSUM;
2340 
2341 	/* If the user cares, return the calculated checksum */
2342 	if (checksum_val)
2343 		*checksum_val = checksum;
2344 
2345 	return status;
2346 }
2347 
2348 /**
2349  *  ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
2350  *  @hw: pointer to hardware structure
2351  **/
2352 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
2353 {
2354 	s32 status;
2355 	u16 checksum;
2356 
2357 	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
2358 
2359 	/* Read the first word from the EEPROM. If this times out or fails, do
2360 	 * not continue or we could be in for a very long wait while every
2361 	 * EEPROM read fails
2362 	 */
2363 	status = hw->eeprom.ops.read(hw, 0, &checksum);
2364 	if (status) {
2365 		DEBUGOUT("EEPROM read failed\n");
2366 		return status;
2367 	}
2368 
2369 	status = hw->eeprom.ops.calc_checksum(hw);
2370 	if (status < 0)
2371 		return status;
2372 
2373 	checksum = (u16)(status & 0xffff);
2374 
2375 	status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
2376 
2377 	return status;
2378 }
2379 
2380 /**
2381  *  ixgbe_validate_mac_addr - Validate MAC address
2382  *  @mac_addr: pointer to MAC address.
2383  *
2384  *  Tests a MAC address to ensure it is a valid Individual Address.
2385  **/
2386 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
2387 {
2388 	s32 status = IXGBE_SUCCESS;
2389 
2390 	DEBUGFUNC("ixgbe_validate_mac_addr");
2391 
2392 	/* Make sure it is not a multicast address */
2393 	if (IXGBE_IS_MULTICAST(mac_addr)) {
2394 		status = IXGBE_ERR_INVALID_MAC_ADDR;
2395 	/* Not a broadcast address */
2396 	} else if (IXGBE_IS_BROADCAST(mac_addr)) {
2397 		status = IXGBE_ERR_INVALID_MAC_ADDR;
2398 	/* Reject the zero address */
2399 	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
2400 		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
2401 		status = IXGBE_ERR_INVALID_MAC_ADDR;
2402 	}
2403 	return status;
2404 }
2405 
2406 /**
2407  *  ixgbe_set_rar_generic - Set Rx address register
2408  *  @hw: pointer to hardware structure
2409  *  @index: Receive address register to write
2410  *  @addr: Address to put into receive address register
2411  *  @vmdq: VMDq "set" or "pool" index
2412  *  @enable_addr: set flag that address is active
2413  *
2414  *  Puts an ethernet address into a receive address register.
2415  **/
2416 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
2417 			  u32 enable_addr)
2418 {
2419 	u32 rar_low, rar_high;
2420 	u32 rar_entries = hw->mac.num_rar_entries;
2421 
2422 	DEBUGFUNC("ixgbe_set_rar_generic");
2423 
2424 	/* Make sure we are using a valid rar index range */
2425 	if (index >= rar_entries) {
2426 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2427 			     "RAR index %d is out of range.\n", index);
2428 		return IXGBE_ERR_INVALID_ARGUMENT;
2429 	}
2430 
2431 	/* setup VMDq pool selection before this RAR gets enabled */
2432 	hw->mac.ops.set_vmdq(hw, index, vmdq);
2433 
2434 	/*
2435 	 * HW expects these in little endian so we reverse the byte
2436 	 * order from network order (big endian) to little endian
2437 	 */
2438 	rar_low = ((u32)addr[0] |
2439 		   ((u32)addr[1] << 8) |
2440 		   ((u32)addr[2] << 16) |
2441 		   ((u32)addr[3] << 24));
2442 	/*
2443 	 * Some parts put the VMDq setting in the extra RAH bits,
2444 	 * so save everything except the lower 16 bits that hold part
2445 	 * of the address and the address valid bit.
2446 	 */
2447 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2448 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2449 	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
2450 
2451 	if (enable_addr != 0)
2452 		rar_high |= IXGBE_RAH_AV;
2453 
2454 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
2455 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2456 
2457 	return IXGBE_SUCCESS;
2458 }
2459 
2460 /**
2461  *  ixgbe_clear_rar_generic - Remove Rx address register
2462  *  @hw: pointer to hardware structure
2463  *  @index: Receive address register to write
2464  *
2465  *  Clears an ethernet address from a receive address register.
2466  **/
2467 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2468 {
2469 	u32 rar_high;
2470 	u32 rar_entries = hw->mac.num_rar_entries;
2471 
2472 	DEBUGFUNC("ixgbe_clear_rar_generic");
2473 
2474 	/* Make sure we are using a valid rar index range */
2475 	if (index >= rar_entries) {
2476 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2477 			     "RAR index %d is out of range.\n", index);
2478 		return IXGBE_ERR_INVALID_ARGUMENT;
2479 	}
2480 
2481 	/*
2482 	 * Some parts put the VMDq setting in the extra RAH bits,
2483 	 * so save everything except the lower 16 bits that hold part
2484 	 * of the address and the address valid bit.
2485 	 */
2486 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2487 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2488 
2489 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2490 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2491 
2492 	/* clear VMDq pool/queue selection for this RAR */
2493 	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2494 
2495 	return IXGBE_SUCCESS;
2496 }
2497 
2498 /**
2499  *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
2500  *  @hw: pointer to hardware structure
2501  *
2502  *  Places the MAC address in receive address register 0 and clears the rest
2503  *  of the receive address registers. Clears the multicast table. Assumes
2504  *  the receiver is in reset when the routine is called.
2505  **/
2506 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
2507 {
2508 	u32 i;
2509 	u32 rar_entries = hw->mac.num_rar_entries;
2510 
2511 	DEBUGFUNC("ixgbe_init_rx_addrs_generic");
2512 
2513 	/*
2514 	 * If the current mac address is valid, assume it is a software override
2515 	 * to the permanent address.
2516 	 * Otherwise, use the permanent address from the eeprom.
2517 	 */
2518 	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
2519 	    IXGBE_ERR_INVALID_MAC_ADDR) {
2520 		/* Get the MAC address from the RAR0 for later reference */
2521 		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2522 
2523 		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
2524 			  hw->mac.addr[0], hw->mac.addr[1],
2525 			  hw->mac.addr[2]);
2526 		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2527 			  hw->mac.addr[4], hw->mac.addr[5]);
2528 	} else {
2529 		/* Setup the receive address. */
2530 		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
2531 		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
2532 			  hw->mac.addr[0], hw->mac.addr[1],
2533 			  hw->mac.addr[2]);
2534 		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2535 			  hw->mac.addr[4], hw->mac.addr[5]);
2536 
2537 		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2538 	}
2539 
2540 	/* clear VMDq pool/queue selection for RAR 0 */
2541 	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
2542 
2543 	hw->addr_ctrl.overflow_promisc = 0;
2544 
2545 	hw->addr_ctrl.rar_used_count = 1;
2546 
2547 	/* Zero out the other receive addresses. */
2548 	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
2549 	for (i = 1; i < rar_entries; i++) {
2550 		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
2551 		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
2552 	}
2553 
2554 	/* Clear the MTA */
2555 	hw->addr_ctrl.mta_in_use = 0;
2556 	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2557 
2558 	DEBUGOUT(" Clearing MTA\n");
2559 	for (i = 0; i < hw->mac.mcft_size; i++)
2560 		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
2561 
2562 	ixgbe_init_uta_tables(hw);
2563 
2564 	return IXGBE_SUCCESS;
2565 }
2566 
2567 /**
2568  *  ixgbe_add_uc_addr - Adds a secondary unicast address.
2569  *  @hw: pointer to hardware structure
2570  *  @addr: new address
2571  *  @vmdq: VMDq "set" or "pool" index
2572  *
2573  *  Adds it to unused receive address register or goes into promiscuous mode.
2574  **/
2575 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2576 {
2577 	u32 rar_entries = hw->mac.num_rar_entries;
2578 	u32 rar;
2579 
2580 	DEBUGFUNC("ixgbe_add_uc_addr");
2581 
2582 	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2583 		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2584 
2585 	/*
2586 	 * Place this address in the RAR if there is room,
2587 	 * else put the controller into promiscuous mode
2588 	 */
2589 	if (hw->addr_ctrl.rar_used_count < rar_entries) {
2590 		rar = hw->addr_ctrl.rar_used_count;
2591 		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2592 		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2593 		hw->addr_ctrl.rar_used_count++;
2594 	} else {
2595 		hw->addr_ctrl.overflow_promisc++;
2596 	}
2597 
2598 	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2599 }
2600 
2601 /**
2602  *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
2603  *  @hw: pointer to hardware structure
2604  *  @addr_list: the list of new addresses
2605  *  @addr_count: number of addresses
2606  *  @next: iterator function to walk the address list
2607  *
2608  *  The given list replaces any existing list.  Clears the secondary addrs from
2609  *  receive address registers.  Uses unused receive address registers for the
2610  *  first secondary addresses, and falls back to promiscuous mode as needed.
2611  *
2612  *  Drivers using secondary unicast addresses must set user_set_promisc when
2613  *  manually putting the device into promiscuous mode.
2614  **/
2615 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
2616 				      u32 addr_count, ixgbe_mc_addr_itr next)
2617 {
2618 	u8 *addr;
2619 	u32 i;
2620 	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
2621 	u32 uc_addr_in_use;
2622 	u32 fctrl;
2623 	u32 vmdq;
2624 
2625 	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
2626 
2627 	/*
2628 	 * Clear accounting of old secondary address list,
2629 	 * don't count RAR[0]
2630 	 */
2631 	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
2632 	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
2633 	hw->addr_ctrl.overflow_promisc = 0;
2634 
2635 	/* Zero out the other receive addresses */
2636 	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
2637 	for (i = 0; i < uc_addr_in_use; i++) {
2638 		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
2639 		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
2640 	}
2641 
2642 	/* Add the new addresses */
2643 	for (i = 0; i < addr_count; i++) {
2644 		DEBUGOUT(" Adding the secondary addresses:\n");
2645 		addr = next(hw, &addr_list, &vmdq);
2646 		ixgbe_add_uc_addr(hw, addr, vmdq);
2647 	}
2648 
2649 	if (hw->addr_ctrl.overflow_promisc) {
2650 		/* enable promisc if not already in overflow or set by user */
2651 		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2652 			DEBUGOUT(" Entering address overflow promisc mode\n");
2653 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2654 			fctrl |= IXGBE_FCTRL_UPE;
2655 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2656 		}
2657 	} else {
2658 		/* only disable if set by overflow, not by user */
2659 		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2660 			DEBUGOUT(" Leaving address overflow promisc mode\n");
2661 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2662 			fctrl &= ~IXGBE_FCTRL_UPE;
2663 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2664 		}
2665 	}
2666 
2667 	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
2668 	return IXGBE_SUCCESS;
2669 }
2670 
2671 /**
2672  *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
2673  *  @hw: pointer to hardware structure
2674  *  @mc_addr: the multicast address
2675  *
2676  *  Extracts the 12 bits, from a multicast address, to determine which
2677  *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
2678  *  incoming rx multicast addresses, to determine the bit-vector to check in
2679  *  the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
2680  *  by the MO field of the MCSTCTRL. The MO field is set during initialization
2681  *  to mc_filter_type.
2682  **/
2683 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2684 {
2685 	u32 vector = 0;
2686 
2687 	DEBUGFUNC("ixgbe_mta_vector");
2688 
2689 	switch (hw->mac.mc_filter_type) {
2690 	case 0:   /* use bits [47:36] of the address */
2691 		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2692 		break;
2693 	case 1:   /* use bits [46:35] of the address */
2694 		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2695 		break;
2696 	case 2:   /* use bits [45:34] of the address */
2697 		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2698 		break;
2699 	case 3:   /* use bits [43:32] of the address */
2700 		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2701 		break;
2702 	default:  /* Invalid mc_filter_type */
2703 		DEBUGOUT("MC filter type param set incorrectly\n");
2704 		ASSERT(0);
2705 		break;
2706 	}
2707 
2708 	/* vector can only be 12-bits or boundary will be exceeded */
2709 	vector &= 0xFFF;
2710 	return vector;
2711 }
2712 
2713 /**
2714  *  ixgbe_set_mta - Set bit-vector in multicast table
2715  *  @hw: pointer to hardware structure
2716  *  @mc_addr: Multicast address
2717  *
2718  *  Sets the bit-vector in the multicast table.
2719  **/
2720 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
2721 {
2722 	u32 vector;
2723 	u32 vector_bit;
2724 	u32 vector_reg;
2725 
2726 	DEBUGFUNC("ixgbe_set_mta");
2727 
2728 	hw->addr_ctrl.mta_in_use++;
2729 
2730 	vector = ixgbe_mta_vector(hw, mc_addr);
2731 	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
2732 
2733 	/*
2734 	 * The MTA is a register array of 128 32-bit registers. It is treated
2735 	 * like an array of 4096 bits.  We want to set bit
2736 	 * BitArray[vector_value]. So we figure out what register the bit is
2737 	 * in, read it, OR in the new bit, then write back the new value.  The
2738 	 * register is determined by the upper 7 bits of the vector value and
2739 	 * the bit within that register are determined by the lower 5 bits of
2740 	 * the value.
2741 	 */
2742 	vector_reg = (vector >> 5) & 0x7F;
2743 	vector_bit = vector & 0x1F;
2744 	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2745 }
2746 
2747 /**
2748  *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2749  *  @hw: pointer to hardware structure
2750  *  @mc_addr_list: the list of new multicast addresses
2751  *  @mc_addr_count: number of addresses
2752  *  @next: iterator function to walk the multicast address list
2753  *  @clear: flag, when set clears the table beforehand
2754  *
2755  *  When the clear flag is set, the given list replaces any existing list.
2756  *  Hashes the given addresses into the multicast table.
2757  **/
2758 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2759 				      u32 mc_addr_count, ixgbe_mc_addr_itr next,
2760 				      bool clear)
2761 {
2762 	u32 i;
2763 	u32 vmdq;
2764 
2765 	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2766 
2767 	/*
2768 	 * Set the new number of MC addresses that we are being requested to
2769 	 * use.
2770 	 */
2771 	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2772 	hw->addr_ctrl.mta_in_use = 0;
2773 
2774 	/* Clear mta_shadow */
2775 	if (clear) {
2776 		DEBUGOUT(" Clearing MTA\n");
2777 		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2778 	}
2779 
2780 	/* Update mta_shadow */
2781 	for (i = 0; i < mc_addr_count; i++) {
2782 		DEBUGOUT(" Adding the multicast addresses:\n");
2783 		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
2784 	}
2785 
2786 	/* Enable mta */
2787 	for (i = 0; i < hw->mac.mcft_size; i++)
2788 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2789 				      hw->mac.mta_shadow[i]);
2790 
2791 	if (hw->addr_ctrl.mta_in_use > 0)
2792 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2793 				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2794 
2795 	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2796 	return IXGBE_SUCCESS;
2797 }
2798 
2799 /**
2800  *  ixgbe_enable_mc_generic - Enable multicast address in RAR
2801  *  @hw: pointer to hardware structure
2802  *
2803  *  Enables multicast address in RAR and the use of the multicast hash table.
2804  **/
2805 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2806 {
2807 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2808 
2809 	DEBUGFUNC("ixgbe_enable_mc_generic");
2810 
2811 	if (a->mta_in_use > 0)
2812 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2813 				hw->mac.mc_filter_type);
2814 
2815 	return IXGBE_SUCCESS;
2816 }
2817 
2818 /**
2819  *  ixgbe_disable_mc_generic - Disable multicast address in RAR
2820  *  @hw: pointer to hardware structure
2821  *
2822  *  Disables multicast address in RAR and the use of the multicast hash table.
2823  **/
2824 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2825 {
2826 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2827 
2828 	DEBUGFUNC("ixgbe_disable_mc_generic");
2829 
2830 	if (a->mta_in_use > 0)
2831 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2832 
2833 	return IXGBE_SUCCESS;
2834 }
2835 
2836 /**
2837  *  ixgbe_fc_enable_generic - Enable flow control
2838  *  @hw: pointer to hardware structure
2839  *
2840  *  Enable flow control according to the current settings.
2841  **/
2842 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2843 {
2844 	s32 ret_val = IXGBE_SUCCESS;
2845 	u32 mflcn_reg, fccfg_reg;
2846 	u32 reg;
2847 	u32 fcrtl, fcrth;
2848 	int i;
2849 
2850 	DEBUGFUNC("ixgbe_fc_enable_generic");
2851 
2852 	/* Validate the water mark configuration */
2853 	if (!hw->fc.pause_time) {
2854 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2855 		goto out;
2856 	}
2857 
2858 	/* Low water mark of zero causes XOFF floods */
2859 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2860 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2861 		    hw->fc.high_water[i]) {
2862 			if (!hw->fc.low_water[i] ||
2863 			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2864 				DEBUGOUT("Invalid water mark configuration\n");
2865 				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2866 				goto out;
2867 			}
2868 		}
2869 	}
2870 
2871 	/* Negotiate the fc mode to use */
2872 	hw->mac.ops.fc_autoneg(hw);
2873 
2874 	/* Disable any previous flow control settings */
2875 	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2876 	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
2877 
2878 	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2879 	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2880 
2881 	/*
2882 	 * The possible values of fc.current_mode are:
2883 	 * 0: Flow control is completely disabled
2884 	 * 1: Rx flow control is enabled (we can receive pause frames,
2885 	 *    but not send pause frames).
2886 	 * 2: Tx flow control is enabled (we can send pause frames but
2887 	 *    we do not support receiving pause frames).
2888 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2889 	 * other: Invalid.
2890 	 */
2891 	switch (hw->fc.current_mode) {
2892 	case ixgbe_fc_none:
2893 		/*
2894 		 * Flow control is disabled by software override or autoneg.
2895 		 * The code below will actually disable it in the HW.
2896 		 */
2897 		break;
2898 	case ixgbe_fc_rx_pause:
2899 		/*
2900 		 * Rx Flow control is enabled and Tx Flow control is
2901 		 * disabled by software override. Since there really
2902 		 * isn't a way to advertise that we are capable of RX
2903 		 * Pause ONLY, we will advertise that we support both
2904 		 * symmetric and asymmetric Rx PAUSE.  Later, we will
2905 		 * disable the adapter's ability to send PAUSE frames.
2906 		 */
2907 		mflcn_reg |= IXGBE_MFLCN_RFCE;
2908 		break;
2909 	case ixgbe_fc_tx_pause:
2910 		/*
2911 		 * Tx Flow control is enabled, and Rx Flow control is
2912 		 * disabled by software override.
2913 		 */
2914 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2915 		break;
2916 	case ixgbe_fc_full:
2917 		/* Flow control (both Rx and Tx) is enabled by SW override. */
2918 		mflcn_reg |= IXGBE_MFLCN_RFCE;
2919 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2920 		break;
2921 	default:
2922 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
2923 			     "Flow control param set incorrectly\n");
2924 		ret_val = IXGBE_ERR_CONFIG;
2925 		goto out;
2926 		break;
2927 	}
2928 
2929 	/* Set 802.3x based flow control settings. */
2930 	mflcn_reg |= IXGBE_MFLCN_DPF;
2931 	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2932 	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2933 
2934 
2935 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
2936 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2937 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2938 		    hw->fc.high_water[i]) {
2939 			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2940 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2941 			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2942 		} else {
2943 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2944 			/*
2945 			 * In order to prevent Tx hangs when the internal Tx
2946 			 * switch is enabled we must set the high water mark
2947 			 * to the Rx packet buffer size - 24KB.  This allows
2948 			 * the Tx switch to function even under heavy Rx
2949 			 * workloads.
2950 			 */
2951 			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
2952 		}
2953 
2954 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2955 	}
2956 
2957 	/* Configure pause time (2 TCs per register) */
2958 	reg = (u32)hw->fc.pause_time * 0x00010001;
2959 	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2960 		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2961 
2962 	/* Configure flow control refresh threshold value */
2963 	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2964 
2965 out:
2966 	return ret_val;
2967 }
2968 
2969 /**
2970  *  ixgbe_negotiate_fc - Negotiate flow control
2971  *  @hw: pointer to hardware structure
2972  *  @adv_reg: flow control advertised settings
2973  *  @lp_reg: link partner's flow control settings
2974  *  @adv_sym: symmetric pause bit in advertisement
2975  *  @adv_asm: asymmetric pause bit in advertisement
2976  *  @lp_sym: symmetric pause bit in link partner advertisement
2977  *  @lp_asm: asymmetric pause bit in link partner advertisement
2978  *
2979  *  Find the intersection between advertised settings and link partner's
2980  *  advertised settings
2981  **/
2982 s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2983 		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2984 {
2985 	if ((!(adv_reg)) ||  (!(lp_reg))) {
2986 		ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2987 			     "Local or link partner's advertised flow control "
2988 			     "settings are NULL. Local: %x, link partner: %x\n",
2989 			     adv_reg, lp_reg);
2990 		return IXGBE_ERR_FC_NOT_NEGOTIATED;
2991 	}
2992 
2993 	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2994 		/*
2995 		 * Now we need to check if the user selected Rx ONLY
2996 		 * of pause frames.  In this case, we had to advertise
2997 		 * FULL flow control because we could not advertise RX
2998 		 * ONLY. Hence, we must now check to see if we need to
2999 		 * turn OFF the TRANSMISSION of PAUSE frames.
3000 		 */
3001 		if (hw->fc.requested_mode == ixgbe_fc_full) {
3002 			hw->fc.current_mode = ixgbe_fc_full;
3003 			DEBUGOUT("Flow Control = FULL.\n");
3004 		} else {
3005 			hw->fc.current_mode = ixgbe_fc_rx_pause;
3006 			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
3007 		}
3008 	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
3009 		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
3010 		hw->fc.current_mode = ixgbe_fc_tx_pause;
3011 		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
3012 	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
3013 		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
3014 		hw->fc.current_mode = ixgbe_fc_rx_pause;
3015 		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
3016 	} else {
3017 		hw->fc.current_mode = ixgbe_fc_none;
3018 		DEBUGOUT("Flow Control = NONE.\n");
3019 	}
3020 	return IXGBE_SUCCESS;
3021 }
3022 
3023 /**
3024  *  ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
3025  *  @hw: pointer to hardware structure
3026  *
3027  *  Enable flow control according on 1 gig fiber.
3028  **/
3029 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
3030 {
3031 	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
3032 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3033 
3034 	/*
3035 	 * On multispeed fiber at 1g, bail out if
3036 	 * - link is up but AN did not complete, or if
3037 	 * - link is up and AN completed but timed out
3038 	 */
3039 
3040 	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
3041 	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
3042 	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
3043 		DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
3044 		goto out;
3045 	}
3046 
3047 	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
3048 	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
3049 
3050 	ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
3051 				      pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
3052 				      IXGBE_PCS1GANA_ASM_PAUSE,
3053 				      IXGBE_PCS1GANA_SYM_PAUSE,
3054 				      IXGBE_PCS1GANA_ASM_PAUSE);
3055 
3056 out:
3057 	return ret_val;
3058 }
3059 
3060 /**
3061  *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
3062  *  @hw: pointer to hardware structure
3063  *
3064  *  Enable flow control according to IEEE clause 37.
3065  **/
3066 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
3067 {
3068 	u32 links2, anlp1_reg, autoc_reg, links;
3069 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3070 
3071 	/*
3072 	 * On backplane, bail out if
3073 	 * - backplane autoneg was not completed, or if
3074 	 * - we are 82599 and link partner is not AN enabled
3075 	 */
3076 	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
3077 	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
3078 		DEBUGOUT("Auto-Negotiation did not complete\n");
3079 		goto out;
3080 	}
3081 
3082 	if (hw->mac.type == ixgbe_mac_82599EB) {
3083 		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
3084 		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
3085 			DEBUGOUT("Link partner is not AN enabled\n");
3086 			goto out;
3087 		}
3088 	}
3089 	/*
3090 	 * Read the 10g AN autoc and LP ability registers and resolve
3091 	 * local flow control settings accordingly
3092 	 */
3093 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3094 	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
3095 
3096 	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
3097 		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
3098 		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
3099 
3100 out:
3101 	return ret_val;
3102 }
3103 
3104 /**
3105  *  ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
3106  *  @hw: pointer to hardware structure
3107  *
3108  *  Enable flow control according to IEEE clause 37.
3109  **/
3110 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
3111 {
3112 	u16 technology_ability_reg = 0;
3113 	u16 lp_technology_ability_reg = 0;
3114 
3115 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
3116 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3117 			     &technology_ability_reg);
3118 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
3119 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3120 			     &lp_technology_ability_reg);
3121 
3122 	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
3123 				  (u32)lp_technology_ability_reg,
3124 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
3125 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
3126 }
3127 
3128 /**
3129  *  ixgbe_fc_autoneg - Configure flow control
3130  *  @hw: pointer to hardware structure
3131  *
3132  *  Compares our advertised flow control capabilities to those advertised by
3133  *  our link partner, and determines the proper flow control mode to use.
3134  **/
3135 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
3136 {
3137 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3138 	ixgbe_link_speed speed;
3139 	bool link_up;
3140 
3141 	DEBUGFUNC("ixgbe_fc_autoneg");
3142 
3143 	/*
3144 	 * AN should have completed when the cable was plugged in.
3145 	 * Look for reasons to bail out.  Bail out if:
3146 	 * - FC autoneg is disabled, or if
3147 	 * - link is not up.
3148 	 */
3149 	if (hw->fc.disable_fc_autoneg) {
3150 		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
3151 			     "Flow control autoneg is disabled");
3152 		goto out;
3153 	}
3154 
3155 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
3156 	if (!link_up) {
3157 		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
3158 		goto out;
3159 	}
3160 
3161 	switch (hw->phy.media_type) {
3162 	/* Autoneg flow control on fiber adapters */
3163 	case ixgbe_media_type_fiber_fixed:
3164 	case ixgbe_media_type_fiber_qsfp:
3165 	case ixgbe_media_type_fiber:
3166 		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
3167 			ret_val = ixgbe_fc_autoneg_fiber(hw);
3168 		break;
3169 
3170 	/* Autoneg flow control on backplane adapters */
3171 	case ixgbe_media_type_backplane:
3172 		ret_val = ixgbe_fc_autoneg_backplane(hw);
3173 		break;
3174 
3175 	/* Autoneg flow control on copper adapters */
3176 	case ixgbe_media_type_copper:
3177 		if (ixgbe_device_supports_autoneg_fc(hw))
3178 			ret_val = ixgbe_fc_autoneg_copper(hw);
3179 		break;
3180 
3181 	default:
3182 		break;
3183 	}
3184 
3185 out:
3186 	if (ret_val == IXGBE_SUCCESS) {
3187 		hw->fc.fc_was_autonegged = TRUE;
3188 	} else {
3189 		hw->fc.fc_was_autonegged = FALSE;
3190 		hw->fc.current_mode = hw->fc.requested_mode;
3191 	}
3192 }
3193 
3194 /*
3195  * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
3196  * @hw: pointer to hardware structure
3197  *
3198  * System-wide timeout range is encoded in PCIe Device Control2 register.
3199  *
3200  * Add 10% to specified maximum and return the number of times to poll for
3201  * completion timeout, in units of 100 microsec.  Never return less than
3202  * 800 = 80 millisec.
3203  */
3204 static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
3205 {
3206 	s16 devctl2;
3207 	u32 pollcnt;
3208 
3209 	devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
3210 	devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
3211 
3212 	switch (devctl2) {
3213 	case IXGBE_PCIDEVCTRL2_65_130ms:
3214 		pollcnt = 1300;		/* 130 millisec */
3215 		break;
3216 	case IXGBE_PCIDEVCTRL2_260_520ms:
3217 		pollcnt = 5200;		/* 520 millisec */
3218 		break;
3219 	case IXGBE_PCIDEVCTRL2_1_2s:
3220 		pollcnt = 20000;	/* 2 sec */
3221 		break;
3222 	case IXGBE_PCIDEVCTRL2_4_8s:
3223 		pollcnt = 80000;	/* 8 sec */
3224 		break;
3225 	case IXGBE_PCIDEVCTRL2_17_34s:
3226 		pollcnt = 34000;	/* 34 sec */
3227 		break;
3228 	case IXGBE_PCIDEVCTRL2_50_100us:	/* 100 microsecs */
3229 	case IXGBE_PCIDEVCTRL2_1_2ms:		/* 2 millisecs */
3230 	case IXGBE_PCIDEVCTRL2_16_32ms:		/* 32 millisec */
3231 	case IXGBE_PCIDEVCTRL2_16_32ms_def:	/* 32 millisec default */
3232 	default:
3233 		pollcnt = 800;		/* 80 millisec minimum */
3234 		break;
3235 	}
3236 
3237 	/* add 10% to spec maximum */
3238 	return (pollcnt * 11) / 10;
3239 }
3240 
3241 /**
3242  *  ixgbe_disable_pcie_master - Disable PCI-express master access
3243  *  @hw: pointer to hardware structure
3244  *
3245  *  Disables PCI-Express master access and verifies there are no pending
3246  *  requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
3247  *  bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
3248  *  is returned signifying master requests disabled.
3249  **/
3250 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
3251 {
3252 	s32 status = IXGBE_SUCCESS;
3253 	u32 i, poll;
3254 	u16 value;
3255 
3256 	DEBUGFUNC("ixgbe_disable_pcie_master");
3257 
3258 	/* Always set this bit to ensure any future transactions are blocked */
3259 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
3260 
3261 	/* Exit if master requests are blocked */
3262 	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
3263 	    IXGBE_REMOVED(hw->hw_addr))
3264 		goto out;
3265 
3266 	/* Poll for master request bit to clear */
3267 	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
3268 		usec_delay(100);
3269 		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
3270 			goto out;
3271 	}
3272 
3273 	/*
3274 	 * Two consecutive resets are required via CTRL.RST per datasheet
3275 	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
3276 	 * of this need.  The first reset prevents new master requests from
3277 	 * being issued by our device.  We then must wait 1usec or more for any
3278 	 * remaining completions from the PCIe bus to trickle in, and then reset
3279 	 * again to clear out any effects they may have had on our device.
3280 	 */
3281 	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
3282 	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
3283 
3284 	if (hw->mac.type >= ixgbe_mac_X550)
3285 		goto out;
3286 
3287 	/*
3288 	 * Before proceeding, make sure that the PCIe block does not have
3289 	 * transactions pending.
3290 	 */
3291 	poll = ixgbe_pcie_timeout_poll(hw);
3292 	for (i = 0; i < poll; i++) {
3293 		usec_delay(100);
3294 		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
3295 		if (IXGBE_REMOVED(hw->hw_addr))
3296 			goto out;
3297 		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
3298 			goto out;
3299 	}
3300 
3301 	ERROR_REPORT1(IXGBE_ERROR_POLLING,
3302 		     "PCIe transaction pending bit also did not clear.\n");
3303 	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
3304 
3305 out:
3306 	return status;
3307 }
3308 
3309 /**
3310  *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
3311  *  @hw: pointer to hardware structure
3312  *  @mask: Mask to specify which semaphore to acquire
3313  *
3314  *  Acquires the SWFW semaphore through the GSSR register for the specified
3315  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
3316  **/
3317 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3318 {
3319 	u32 gssr = 0;
3320 	u32 swmask = mask;
3321 	u32 fwmask = mask << 5;
3322 	u32 timeout = 200;
3323 	u32 i;
3324 
3325 	DEBUGFUNC("ixgbe_acquire_swfw_sync");
3326 
3327 	for (i = 0; i < timeout; i++) {
3328 		/*
3329 		 * SW NVM semaphore bit is used for access to all
3330 		 * SW_FW_SYNC bits (not just NVM)
3331 		 */
3332 		if (ixgbe_get_eeprom_semaphore(hw))
3333 			return IXGBE_ERR_SWFW_SYNC;
3334 
3335 		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3336 		if (!(gssr & (fwmask | swmask))) {
3337 			gssr |= swmask;
3338 			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3339 			ixgbe_release_eeprom_semaphore(hw);
3340 			return IXGBE_SUCCESS;
3341 		} else {
3342 			/* Resource is currently in use by FW or SW */
3343 			ixgbe_release_eeprom_semaphore(hw);
3344 			msec_delay(5);
3345 		}
3346 	}
3347 
3348 	/* If time expired clear the bits holding the lock and retry */
3349 	if (gssr & (fwmask | swmask))
3350 		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
3351 
3352 	msec_delay(5);
3353 	return IXGBE_ERR_SWFW_SYNC;
3354 }
3355 
3356 /**
3357  *  ixgbe_release_swfw_sync - Release SWFW semaphore
3358  *  @hw: pointer to hardware structure
3359  *  @mask: Mask to specify which semaphore to release
3360  *
3361  *  Releases the SWFW semaphore through the GSSR register for the specified
3362  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
3363  **/
3364 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3365 {
3366 	u32 gssr;
3367 	u32 swmask = mask;
3368 
3369 	DEBUGFUNC("ixgbe_release_swfw_sync");
3370 
3371 	ixgbe_get_eeprom_semaphore(hw);
3372 
3373 	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3374 	gssr &= ~swmask;
3375 	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3376 
3377 	ixgbe_release_eeprom_semaphore(hw);
3378 }
3379 
3380 /**
3381  *  ixgbe_disable_sec_rx_path_generic - Stops the receive data path
3382  *  @hw: pointer to hardware structure
3383  *
3384  *  Stops the receive data path and waits for the HW to internally empty
3385  *  the Rx security block
3386  **/
3387 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
3388 {
3389 #define IXGBE_MAX_SECRX_POLL 4000
3390 
3391 	int i;
3392 	int secrxreg;
3393 
3394 	DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
3395 
3396 
3397 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3398 	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
3399 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3400 	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
3401 		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
3402 		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
3403 			break;
3404 		else
3405 			/* Use interrupt-safe sleep just in case */
3406 			usec_delay(10);
3407 	}
3408 
3409 	/* For informational purposes only */
3410 	if (i >= IXGBE_MAX_SECRX_POLL)
3411 		DEBUGOUT("Rx unit being enabled before security "
3412 			 "path fully disabled.  Continuing with init.\n");
3413 
3414 	return IXGBE_SUCCESS;
3415 }
3416 
3417 /**
3418  *  prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
3419  *  @hw: pointer to hardware structure
3420  *  @locked: bool to indicate whether the SW/FW lock was taken
3421  *  @reg_val: Value we read from AUTOC
3422  *
3423  *  The default case requires no protection so just to the register read.
3424  */
3425 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
3426 {
3427 	*locked = FALSE;
3428 	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3429 	return IXGBE_SUCCESS;
3430 }
3431 
3432 /**
3433  * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
3434  * @hw: pointer to hardware structure
3435  * @reg_val: value to write to AUTOC
3436  * @locked: bool to indicate whether the SW/FW lock was already taken by
3437  *           previous read.
3438  *
3439  * The default case requires no protection so just to the register write.
3440  */
3441 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
3442 {
3443 	UNREFERENCED_1PARAMETER(locked);
3444 
3445 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
3446 	return IXGBE_SUCCESS;
3447 }
3448 
3449 /**
3450  *  ixgbe_enable_sec_rx_path_generic - Enables the receive data path
3451  *  @hw: pointer to hardware structure
3452  *
3453  *  Enables the receive data path.
3454  **/
3455 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
3456 {
3457 	u32 secrxreg;
3458 
3459 	DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
3460 
3461 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3462 	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3463 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3464 	IXGBE_WRITE_FLUSH(hw);
3465 
3466 	return IXGBE_SUCCESS;
3467 }
3468 
3469 /**
3470  *  ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
3471  *  @hw: pointer to hardware structure
3472  *  @regval: register value to write to RXCTRL
3473  *
3474  *  Enables the Rx DMA unit
3475  **/
3476 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
3477 {
3478 	DEBUGFUNC("ixgbe_enable_rx_dma_generic");
3479 
3480 	if (regval & IXGBE_RXCTRL_RXEN)
3481 		ixgbe_enable_rx(hw);
3482 	else
3483 		ixgbe_disable_rx(hw);
3484 
3485 	return IXGBE_SUCCESS;
3486 }
3487 
3488 /**
3489  *  ixgbe_blink_led_start_generic - Blink LED based on index.
3490  *  @hw: pointer to hardware structure
3491  *  @index: led number to blink
3492  **/
3493 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
3494 {
3495 	ixgbe_link_speed speed = 0;
3496 	bool link_up = 0;
3497 	u32 autoc_reg = 0;
3498 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3499 	s32 ret_val = IXGBE_SUCCESS;
3500 	bool locked = FALSE;
3501 
3502 	DEBUGFUNC("ixgbe_blink_led_start_generic");
3503 
3504 	if (index > 3)
3505 		return IXGBE_ERR_PARAM;
3506 
3507 	/*
3508 	 * Link must be up to auto-blink the LEDs;
3509 	 * Force it if link is down.
3510 	 */
3511 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
3512 
3513 	if (!link_up) {
3514 		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3515 		if (ret_val != IXGBE_SUCCESS)
3516 			goto out;
3517 
3518 		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3519 		autoc_reg |= IXGBE_AUTOC_FLU;
3520 
3521 		ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3522 		if (ret_val != IXGBE_SUCCESS)
3523 			goto out;
3524 
3525 		IXGBE_WRITE_FLUSH(hw);
3526 		msec_delay(10);
3527 	}
3528 
3529 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
3530 	led_reg |= IXGBE_LED_BLINK(index);
3531 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3532 	IXGBE_WRITE_FLUSH(hw);
3533 
3534 out:
3535 	return ret_val;
3536 }
3537 
3538 /**
3539  *  ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
3540  *  @hw: pointer to hardware structure
3541  *  @index: led number to stop blinking
3542  **/
3543 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
3544 {
3545 	u32 autoc_reg = 0;
3546 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3547 	s32 ret_val = IXGBE_SUCCESS;
3548 	bool locked = FALSE;
3549 
3550 	DEBUGFUNC("ixgbe_blink_led_stop_generic");
3551 
3552 	if (index > 3)
3553 		return IXGBE_ERR_PARAM;
3554 
3555 	ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3556 	if (ret_val != IXGBE_SUCCESS)
3557 		goto out;
3558 
3559 	autoc_reg &= ~IXGBE_AUTOC_FLU;
3560 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3561 
3562 	ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3563 	if (ret_val != IXGBE_SUCCESS)
3564 		goto out;
3565 
3566 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
3567 	led_reg &= ~IXGBE_LED_BLINK(index);
3568 	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
3569 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3570 	IXGBE_WRITE_FLUSH(hw);
3571 
3572 out:
3573 	return ret_val;
3574 }
3575 
3576 /**
3577  *  ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3578  *  @hw: pointer to hardware structure
3579  *  @san_mac_offset: SAN MAC address offset
3580  *
3581  *  This function will read the EEPROM location for the SAN MAC address
3582  *  pointer, and returns the value at that location.  This is used in both
3583  *  get and set mac_addr routines.
3584  **/
3585 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3586 					 u16 *san_mac_offset)
3587 {
3588 	s32 ret_val;
3589 
3590 	DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3591 
3592 	/*
3593 	 * First read the EEPROM pointer to see if the MAC addresses are
3594 	 * available.
3595 	 */
3596 	ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
3597 				      san_mac_offset);
3598 	if (ret_val) {
3599 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3600 			      "eeprom at offset %d failed",
3601 			      IXGBE_SAN_MAC_ADDR_PTR);
3602 	}
3603 
3604 	return ret_val;
3605 }
3606 
3607 /**
3608  *  ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3609  *  @hw: pointer to hardware structure
3610  *  @san_mac_addr: SAN MAC address
3611  *
3612  *  Reads the SAN MAC address from the EEPROM, if it's available.  This is
3613  *  per-port, so set_lan_id() must be called before reading the addresses.
3614  *  set_lan_id() is called by identify_sfp(), but this cannot be relied
3615  *  upon for non-SFP connections, so we must call it here.
3616  **/
3617 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3618 {
3619 	u16 san_mac_data, san_mac_offset;
3620 	u8 i;
3621 	s32 ret_val;
3622 
3623 	DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3624 
3625 	/*
3626 	 * First read the EEPROM pointer to see if the MAC addresses are
3627 	 * available.  If they're not, no point in calling set_lan_id() here.
3628 	 */
3629 	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3630 	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3631 		goto san_mac_addr_out;
3632 
3633 	/* make sure we know which port we need to program */
3634 	hw->mac.ops.set_lan_id(hw);
3635 	/* apply the port offset to the address offset */
3636 	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3637 			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3638 	for (i = 0; i < 3; i++) {
3639 		ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
3640 					      &san_mac_data);
3641 		if (ret_val) {
3642 			ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3643 				      "eeprom read at offset %d failed",
3644 				      san_mac_offset);
3645 			goto san_mac_addr_out;
3646 		}
3647 		san_mac_addr[i * 2] = (u8)(san_mac_data);
3648 		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3649 		san_mac_offset++;
3650 	}
3651 	return IXGBE_SUCCESS;
3652 
3653 san_mac_addr_out:
3654 	/*
3655 	 * No addresses available in this EEPROM.  It's not an
3656 	 * error though, so just wipe the local address and return.
3657 	 */
3658 	for (i = 0; i < 6; i++)
3659 		san_mac_addr[i] = 0xFF;
3660 	return IXGBE_SUCCESS;
3661 }
3662 
3663 /**
3664  *  ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3665  *  @hw: pointer to hardware structure
3666  *  @san_mac_addr: SAN MAC address
3667  *
3668  *  Write a SAN MAC address to the EEPROM.
3669  **/
3670 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3671 {
3672 	s32 ret_val;
3673 	u16 san_mac_data, san_mac_offset;
3674 	u8 i;
3675 
3676 	DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3677 
3678 	/* Look for SAN mac address pointer.  If not defined, return */
3679 	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3680 	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3681 		return IXGBE_ERR_NO_SAN_ADDR_PTR;
3682 
3683 	/* Make sure we know which port we need to write */
3684 	hw->mac.ops.set_lan_id(hw);
3685 	/* Apply the port offset to the address offset */
3686 	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3687 			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3688 
3689 	for (i = 0; i < 3; i++) {
3690 		san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3691 		san_mac_data |= (u16)(san_mac_addr[i * 2]);
3692 		hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3693 		san_mac_offset++;
3694 	}
3695 
3696 	return IXGBE_SUCCESS;
3697 }
3698 
3699 /**
3700  *  ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3701  *  @hw: pointer to hardware structure
3702  *
3703  *  Read PCIe configuration space, and get the MSI-X vector count from
3704  *  the capabilities table.
3705  **/
3706 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3707 {
3708 	u16 msix_count = 1;
3709 	u16 max_msix_count;
3710 	u16 pcie_offset;
3711 
3712 	switch (hw->mac.type) {
3713 	case ixgbe_mac_82598EB:
3714 		pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3715 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3716 		break;
3717 	case ixgbe_mac_82599EB:
3718 	case ixgbe_mac_X540:
3719 	case ixgbe_mac_X550:
3720 	case ixgbe_mac_X550EM_x:
3721 	case ixgbe_mac_X550EM_a:
3722 		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3723 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3724 		break;
3725 	default:
3726 		return msix_count;
3727 	}
3728 
3729 	DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3730 	msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3731 	if (IXGBE_REMOVED(hw->hw_addr))
3732 		msix_count = 0;
3733 	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3734 
3735 	/* MSI-X count is zero-based in HW */
3736 	msix_count++;
3737 
3738 	if (msix_count > max_msix_count)
3739 		msix_count = max_msix_count;
3740 
3741 	return msix_count;
3742 }
3743 
3744 /**
3745  *  ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
3746  *  @hw: pointer to hardware structure
3747  *  @addr: Address to put into receive address register
3748  *  @vmdq: VMDq pool to assign
3749  *
3750  *  Puts an ethernet address into a receive address register, or
3751  *  finds the rar that it is already in; adds to the pool list
3752  **/
3753 s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
3754 {
3755 	static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
3756 	u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
3757 	u32 rar;
3758 	u32 rar_low, rar_high;
3759 	u32 addr_low, addr_high;
3760 
3761 	DEBUGFUNC("ixgbe_insert_mac_addr_generic");
3762 
3763 	/* swap bytes for HW little endian */
3764 	addr_low  = addr[0] | (addr[1] << 8)
3765 			    | (addr[2] << 16)
3766 			    | (addr[3] << 24);
3767 	addr_high = addr[4] | (addr[5] << 8);
3768 
3769 	/*
3770 	 * Either find the mac_id in rar or find the first empty space.
3771 	 * rar_highwater points to just after the highest currently used
3772 	 * rar in order to shorten the search.  It grows when we add a new
3773 	 * rar to the top.
3774 	 */
3775 	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
3776 		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
3777 
3778 		if (((IXGBE_RAH_AV & rar_high) == 0)
3779 		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
3780 			first_empty_rar = rar;
3781 		} else if ((rar_high & 0xFFFF) == addr_high) {
3782 			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
3783 			if (rar_low == addr_low)
3784 				break;    /* found it already in the rars */
3785 		}
3786 	}
3787 
3788 	if (rar < hw->mac.rar_highwater) {
3789 		/* already there so just add to the pool bits */
3790 		ixgbe_set_vmdq(hw, rar, vmdq);
3791 	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3792 		/* stick it into first empty RAR slot we found */
3793 		rar = first_empty_rar;
3794 		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3795 	} else if (rar == hw->mac.rar_highwater) {
3796 		/* add it to the top of the list and inc the highwater mark */
3797 		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3798 		hw->mac.rar_highwater++;
3799 	} else if (rar >= hw->mac.num_rar_entries) {
3800 		return IXGBE_ERR_INVALID_MAC_ADDR;
3801 	}
3802 
3803 	/*
3804 	 * If we found rar[0], make sure the default pool bit (we use pool 0)
3805 	 * remains cleared to be sure default pool packets will get delivered
3806 	 */
3807 	if (rar == 0)
3808 		ixgbe_clear_vmdq(hw, rar, 0);
3809 
3810 	return rar;
3811 }
3812 
3813 /**
3814  *  ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3815  *  @hw: pointer to hardware struct
3816  *  @rar: receive address register index to disassociate
3817  *  @vmdq: VMDq pool index to remove from the rar
3818  **/
3819 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3820 {
3821 	u32 mpsar_lo, mpsar_hi;
3822 	u32 rar_entries = hw->mac.num_rar_entries;
3823 
3824 	DEBUGFUNC("ixgbe_clear_vmdq_generic");
3825 
3826 	/* Make sure we are using a valid rar index range */
3827 	if (rar >= rar_entries) {
3828 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3829 			     "RAR index %d is out of range.\n", rar);
3830 		return IXGBE_ERR_INVALID_ARGUMENT;
3831 	}
3832 
3833 	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3834 	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3835 
3836 	if (IXGBE_REMOVED(hw->hw_addr))
3837 		goto done;
3838 
3839 	if (!mpsar_lo && !mpsar_hi)
3840 		goto done;
3841 
3842 	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3843 		if (mpsar_lo) {
3844 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3845 			mpsar_lo = 0;
3846 		}
3847 		if (mpsar_hi) {
3848 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3849 			mpsar_hi = 0;
3850 		}
3851 	} else if (vmdq < 32) {
3852 		mpsar_lo &= ~(1 << vmdq);
3853 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3854 	} else {
3855 		mpsar_hi &= ~(1 << (vmdq - 32));
3856 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3857 	}
3858 
3859 	/* was that the last pool using this rar? */
3860 	if (mpsar_lo == 0 && mpsar_hi == 0 &&
3861 	    rar != 0 && rar != hw->mac.san_mac_rar_index)
3862 		hw->mac.ops.clear_rar(hw, rar);
3863 done:
3864 	return IXGBE_SUCCESS;
3865 }
3866 
3867 /**
3868  *  ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3869  *  @hw: pointer to hardware struct
3870  *  @rar: receive address register index to associate with a VMDq index
3871  *  @vmdq: VMDq pool index
3872  **/
3873 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3874 {
3875 	u32 mpsar;
3876 	u32 rar_entries = hw->mac.num_rar_entries;
3877 
3878 	DEBUGFUNC("ixgbe_set_vmdq_generic");
3879 
3880 	/* Make sure we are using a valid rar index range */
3881 	if (rar >= rar_entries) {
3882 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3883 			     "RAR index %d is out of range.\n", rar);
3884 		return IXGBE_ERR_INVALID_ARGUMENT;
3885 	}
3886 
3887 	if (vmdq < 32) {
3888 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3889 		mpsar |= 1 << vmdq;
3890 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3891 	} else {
3892 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3893 		mpsar |= 1 << (vmdq - 32);
3894 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3895 	}
3896 	return IXGBE_SUCCESS;
3897 }
3898 
3899 /**
3900  *  This function should only be involved in the IOV mode.
3901  *  In IOV mode, Default pool is next pool after the number of
3902  *  VFs advertized and not 0.
3903  *  MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3904  *
3905  *  ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3906  *  @hw: pointer to hardware struct
3907  *  @vmdq: VMDq pool index
3908  **/
3909 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3910 {
3911 	u32 rar = hw->mac.san_mac_rar_index;
3912 
3913 	DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3914 
3915 	if (vmdq < 32) {
3916 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3917 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3918 	} else {
3919 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3920 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3921 	}
3922 
3923 	return IXGBE_SUCCESS;
3924 }
3925 
3926 /**
3927  *  ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3928  *  @hw: pointer to hardware structure
3929  **/
3930 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3931 {
3932 	int i;
3933 
3934 	DEBUGFUNC("ixgbe_init_uta_tables_generic");
3935 	DEBUGOUT(" Clearing UTA\n");
3936 
3937 	for (i = 0; i < 128; i++)
3938 		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3939 
3940 	return IXGBE_SUCCESS;
3941 }
3942 
3943 /**
3944  *  ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3945  *  @hw: pointer to hardware structure
3946  *  @vlan: VLAN id to write to VLAN filter
3947  *  @vlvf_bypass: TRUE to find vlanid only, FALSE returns first empty slot if
3948  *		  vlanid not found
3949  *
3950  *
3951  *  return the VLVF index where this VLAN id should be placed
3952  *
3953  **/
3954 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
3955 {
3956 	s32 regindex, first_empty_slot;
3957 	u32 bits;
3958 
3959 	/* short cut the special case */
3960 	if (vlan == 0)
3961 		return 0;
3962 
3963 	/* if vlvf_bypass is set we don't want to use an empty slot, we
3964 	 * will simply bypass the VLVF if there are no entries present in the
3965 	 * VLVF that contain our VLAN
3966 	 */
3967 	first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
3968 
3969 	/* add VLAN enable bit for comparison */
3970 	vlan |= IXGBE_VLVF_VIEN;
3971 
3972 	/* Search for the vlan id in the VLVF entries. Save off the first empty
3973 	 * slot found along the way.
3974 	 *
3975 	 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
3976 	 */
3977 	for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
3978 		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3979 		if (bits == vlan)
3980 			return regindex;
3981 		if (!first_empty_slot && !bits)
3982 			first_empty_slot = regindex;
3983 	}
3984 
3985 	/* If we are here then we didn't find the VLAN.  Return first empty
3986 	 * slot we found during our search, else error.
3987 	 */
3988 	if (!first_empty_slot)
3989 		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n");
3990 
3991 	return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE;
3992 }
3993 
/**
 *  ixgbe_set_vfta_generic - Set VLAN filter table
 *  @hw: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter (0..4095)
 *  @vind: VMDq output index that maps queue to VLAN id in VLVFB (0..63)
 *  @vlan_on: boolean flag to turn on/off VLAN
 *  @vlvf_bypass: boolean flag indicating updating default pool is okay
 *
 *  Turn on/off specified VLAN in the VLAN filter table.
 *  Returns IXGBE_SUCCESS, IXGBE_ERR_PARAM on out-of-range arguments, or
 *  the ixgbe_set_vlvf_generic() error when the VLVF update fails and
 *  @vlvf_bypass is FALSE.
 **/
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			   bool vlan_on, bool vlvf_bypass)
{
	u32 regidx, vfta_delta, vfta;
	s32 ret_val;

	DEBUGFUNC("ixgbe_set_vfta_generic");

	if (vlan > 4095 || vind > 63)
		return IXGBE_ERR_PARAM;

	/*
	 * this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if VT Mode is set
	 * We don't write the VFTA until we know the VLVF part succeeded.
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit in the register
	 */
	regidx = vlan / 32;
	vfta_delta = (u32)1 << (vlan % 32);
	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));

	/*
	 * vfta_delta represents the difference between the current value
	 * of vfta and the value we want in the register.  Since the diff
	 * is an XOR mask we can just update the vfta using an XOR
	 */
	vfta_delta &= vlan_on ? ~vfta : vfta;	/* zero if bit already correct */
	vfta ^= vfta_delta;

	/* Part 2
	 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
	 * (it may zero vfta_delta through the pointer, or write the VFTA
	 * itself when it is about to disable a VLVF entry)
	 */
	ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta,
					 vfta, vlvf_bypass);
	if (ret_val != IXGBE_SUCCESS) {
		/* with vlvf_bypass the VFTA is still updated despite the
		 * VLVF failure; otherwise bail without touching the VFTA */
		if (vlvf_bypass)
			goto vfta_update;
		return ret_val;
	}

vfta_update:
	/* Update VFTA now that we are ready for traffic */
	if (vfta_delta)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);

	return IXGBE_SUCCESS;
}
4057 
/**
 *  ixgbe_set_vlvf_generic - Set VLAN Pool Filter
 *  @hw: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter (0..4095)
 *  @vind: VMDq output index that maps queue to VLAN id in VLVFB (0..63)
 *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
 *  @vfta_delta: pointer to the difference between the current value of VFTA
 *		 and the desired value; may be zeroed on return to tell the
 *		 caller not to touch the VFTA, or consumed here when the
 *		 VFTA must be cleared before disabling a VLVF entry
 *  @vfta: the desired value of the VFTA
 *  @vlvf_bypass: boolean flag indicating updating default pool is okay
 *
 *  Turn on/off specified bit in VLVF table.
 **/
s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			   bool vlan_on, u32 *vfta_delta, u32 vfta,
			   bool vlvf_bypass)
{
	u32 bits;
	s32 vlvf_index;

	DEBUGFUNC("ixgbe_set_vlvf_generic");

	if (vlan > 4095 || vind > 63)
		return IXGBE_ERR_PARAM;

	/* If VT Mode is set
	 *   Either vlan_on
	 *     make sure the vlan is in VLVF
	 *     set the vind bit in the matching VLVFB
	 *   Or !vlan_on
	 *     clear the pool bit and possibly the vind
	 */
	if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
		return IXGBE_SUCCESS;

	/* negative result is an error code (no slot / bypass with no match) */
	vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
	if (vlvf_index < 0)
		return vlvf_index;

	/* each VLVF slot owns a pair of VLVFB registers = 64 pool bits */
	bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));

	/* set the pool bit */
	/* NOTE(review): "1 << 31" when vind % 32 == 31 shifts into the sign
	 * bit of a signed int (UB); consider (u32)1 as used elsewhere in
	 * this file -- confirm before changing */
	bits |= 1 << (vind % 32);
	if (vlan_on)
		goto vlvf_update;

	/* clear the pool bit (undoes the set above) */
	bits ^= 1 << (vind % 32);

	/* check whether any pool bit remains in either VLVFB of the pair */
	if (!bits &&
	    !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
		/* Clear VFTA first, then disable VLVF.  Otherwise
		 * we run the risk of stray packets leaking into
		 * the PF via the default pool
		 */
		if (*vfta_delta)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta);

		/* disable VLVF and clear remaining bit from pool */
		IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);

		return IXGBE_SUCCESS;
	}

	/* If there are still bits set in the VLVFB registers
	 * for the VLAN ID indicated we need to see if the
	 * caller is requesting that we clear the VFTA entry bit.
	 * If the caller has requested that we clear the VFTA
	 * entry bit but there are still pools/VFs using this VLAN
	 * ID entry then ignore the request.  We're not worried
	 * about the case where we're turning the VFTA VLAN ID
	 * entry bit on, only when requested to turn it off as
	 * there may be multiple pools and/or VFs using the
	 * VLAN ID entry.  In that case we cannot clear the
	 * VFTA bit until all pools/VFs using that VLAN ID have also
	 * been cleared.  This will be indicated by "bits" being
	 * zero.
	 */
	*vfta_delta = 0;

vlvf_update:
	/* record pool change and enable VLAN ID if not already enabled */
	IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
	IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);

	return IXGBE_SUCCESS;
}
4146 
4147 /**
4148  *  ixgbe_clear_vfta_generic - Clear VLAN filter table
4149  *  @hw: pointer to hardware structure
4150  *
4151  *  Clears the VLAN filer table, and the VMDq index associated with the filter
4152  **/
4153 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
4154 {
4155 	u32 offset;
4156 
4157 	DEBUGFUNC("ixgbe_clear_vfta_generic");
4158 
4159 	for (offset = 0; offset < hw->mac.vft_size; offset++)
4160 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
4161 
4162 	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
4163 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
4164 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
4165 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
4166 	}
4167 
4168 	return IXGBE_SUCCESS;
4169 }
4170 
4171 /**
4172  *  ixgbe_toggle_txdctl_generic - Toggle VF's queues
4173  *  @hw: pointer to hardware structure
4174  *  @vf_number: VF index
4175  *
4176  *  Enable and disable each queue in VF.
4177  */
4178 s32 ixgbe_toggle_txdctl_generic(struct ixgbe_hw *hw, u32 vf_number)
4179 {
4180 	u8  queue_count, i;
4181 	u32 offset, reg;
4182 
4183 	if (vf_number > 63)
4184 		return IXGBE_ERR_PARAM;
4185 
4186 	/*
4187 	 * Determine number of queues by checking
4188 	 * number of virtual functions
4189 	 */
4190 	reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
4191 	switch (reg & IXGBE_GCR_EXT_VT_MODE_MASK) {
4192 	case IXGBE_GCR_EXT_VT_MODE_64:
4193 		queue_count = 2;
4194 		break;
4195 	case IXGBE_GCR_EXT_VT_MODE_32:
4196 		queue_count = 4;
4197 		break;
4198 	case IXGBE_GCR_EXT_VT_MODE_16:
4199 		queue_count = 8;
4200 		break;
4201 	default:
4202 		return IXGBE_ERR_CONFIG;
4203 	}
4204 
4205 	/* Toggle queues */
4206 	for (i = 0; i < queue_count; ++i) {
4207 		/* Calculate offset of current queue */
4208 		offset = queue_count * vf_number + i;
4209 
4210 		/* Enable queue */
4211 		reg = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(offset));
4212 		reg |= IXGBE_TXDCTL_ENABLE;
4213 		IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(offset), reg);
4214 		IXGBE_WRITE_FLUSH(hw);
4215 
4216 		/* Disable queue */
4217 		reg = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(offset));
4218 		reg &= ~IXGBE_TXDCTL_ENABLE;
4219 		IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(offset), reg);
4220 		IXGBE_WRITE_FLUSH(hw);
4221 	}
4222 
4223 	return IXGBE_SUCCESS;
4224 }
4225 
4226 /**
4227  *  ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
4228  *  @hw: pointer to hardware structure
4229  *
4230  *  Contains the logic to identify if we need to verify link for the
4231  *  crosstalk fix
4232  **/
4233 static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
4234 {
4235 
4236 	/* Does FW say we need the fix */
4237 	if (!hw->need_crosstalk_fix)
4238 		return FALSE;
4239 
4240 	/* Only consider SFP+ PHYs i.e. media type fiber */
4241 	switch (hw->mac.ops.get_media_type(hw)) {
4242 	case ixgbe_media_type_fiber:
4243 	case ixgbe_media_type_fiber_qsfp:
4244 		break;
4245 	default:
4246 		return FALSE;
4247 	}
4248 
4249 	return TRUE;
4250 }
4251 
/**
 *  ixgbe_check_mac_link_generic - Determine link and speed status
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed; set to IXGBE_LINK_SPEED_UNKNOWN when the
 *	    LINKS speed field cannot be decoded
 *  @link_up: TRUE when link is up
 *  @link_up_wait_to_complete: when TRUE, poll LINKS in 100 ms steps for up
 *	    to hw->mac.max_link_up_time iterations waiting for link
 *
 *  Reads the links register to determine if link is up and the current speed
 **/
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
				 bool *link_up, bool link_up_wait_to_complete)
{
	u32 links_reg, links_orig;
	u32 i;

	DEBUGFUNC("ixgbe_check_mac_link_generic");

	/* If Crosstalk fix enabled do the sanity check of making sure
	 * the SFP+ cage is full.
	 */
	if (ixgbe_need_crosstalk_fix(hw)) {
		/* empty cage: report link down without reading LINKS */
		if ((hw->mac.type != ixgbe_mac_82598EB) &&
		    !ixgbe_sfp_cage_full(hw)) {
			*link_up = FALSE;
			*speed = IXGBE_LINK_SPEED_UNKNOWN;
			return IXGBE_SUCCESS;
		}
	}

	/* clear the old state */
	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);

	/* second read: the value actually used below */
	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);

	if (links_orig != links_reg) {
		DEBUGOUT2("LINKS changed from %08X to %08X\n",
			  links_orig, links_reg);
	}

	if (link_up_wait_to_complete) {
		/* poll for link-up, 100 ms per iteration */
		for (i = 0; i < hw->mac.max_link_up_time; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}

	/* decode the speed field; on X550 and later the NON_STD bit
	 * reinterprets 10G as 2.5G and 100M as 5G */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		if (hw->mac.type >= ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (hw->mac.type >= ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_10_X550EM_A:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		/* only the X550EM_A 1G devices report a valid 10 Mb speed */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
			*speed = IXGBE_LINK_SPEED_10_FULL;
		break;
	default:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
	}

	return IXGBE_SUCCESS;
}
4339 
/**
 *  ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
 *  the EEPROM
 *  @hw: pointer to hardware structure
 *  @wwnn_prefix: the alternative WWNN prefix
 *  @wwpn_prefix: the alternative WWPN prefix
 *
 *  This function will read the EEPROM from the alternative SAN MAC address
 *  block to check the support for the alternative WWNN/WWPN prefix support.
 *  Both outputs are preset to 0xFFFF; on any read failure or when the
 *  feature is absent they keep that value.  Always returns IXGBE_SUCCESS,
 *  even on the error path.
 **/
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
				 u16 *wwpn_prefix)
{
	u16 offset, caps;
	u16 alt_san_mac_blk_offset;

	DEBUGFUNC("ixgbe_get_wwn_prefix_generic");

	/* clear output first */
	*wwnn_prefix = 0xFFFF;
	*wwpn_prefix = 0xFFFF;

	/* check if alternative SAN MAC is supported */
	offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
	if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
		goto wwn_prefix_err;

	/* 0 or 0xFFFF means the block pointer is unprogrammed */
	if ((alt_san_mac_blk_offset == 0) ||
	    (alt_san_mac_blk_offset == 0xFFFF))
		goto wwn_prefix_out;

	/* check capability in alternative san mac address block */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, &caps))
		goto wwn_prefix_err;
	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
		goto wwn_prefix_out;

	/* get the corresponding prefix for WWNN/WWPN */
	/* a failed WWNN read is only reported; we still attempt the WWPN */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			      "eeprom read at offset %d failed", offset);
	}

	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
		goto wwn_prefix_err;

wwn_prefix_out:
	return IXGBE_SUCCESS;

wwn_prefix_err:
	/* report the failure but still return success; outputs stay 0xFFFF */
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", offset);
	return IXGBE_SUCCESS;
}
4397 
4398 /**
4399  *  ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
4400  *  @hw: pointer to hardware structure
4401  *  @bs: the fcoe boot status
4402  *
4403  *  This function will read the FCOE boot status from the iSCSI FCOE block
4404  **/
4405 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
4406 {
4407 	u16 offset, caps, flags;
4408 	s32 status;
4409 
4410 	DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
4411 
4412 	/* clear output first */
4413 	*bs = ixgbe_fcoe_bootstatus_unavailable;
4414 
4415 	/* check if FCOE IBA block is present */
4416 	offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
4417 	status = hw->eeprom.ops.read(hw, offset, &caps);
4418 	if (status != IXGBE_SUCCESS)
4419 		goto out;
4420 
4421 	if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
4422 		goto out;
4423 
4424 	/* check if iSCSI FCOE block is populated */
4425 	status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
4426 	if (status != IXGBE_SUCCESS)
4427 		goto out;
4428 
4429 	if ((offset == 0) || (offset == 0xFFFF))
4430 		goto out;
4431 
4432 	/* read fcoe flags in iSCSI FCOE block */
4433 	offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
4434 	status = hw->eeprom.ops.read(hw, offset, &flags);
4435 	if (status != IXGBE_SUCCESS)
4436 		goto out;
4437 
4438 	if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
4439 		*bs = ixgbe_fcoe_bootstatus_enabled;
4440 	else
4441 		*bs = ixgbe_fcoe_bootstatus_disabled;
4442 
4443 out:
4444 	return status;
4445 }
4446 
4447 /**
4448  *  ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
4449  *  @hw: pointer to hardware structure
4450  *  @enable: enable or disable switch for MAC anti-spoofing
4451  *  @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
4452  *
4453  **/
4454 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4455 {
4456 	int vf_target_reg = vf >> 3;
4457 	int vf_target_shift = vf % 8;
4458 	u32 pfvfspoof;
4459 
4460 	if (hw->mac.type == ixgbe_mac_82598EB)
4461 		return;
4462 
4463 	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4464 	if (enable)
4465 		pfvfspoof |= (1 << vf_target_shift);
4466 	else
4467 		pfvfspoof &= ~(1 << vf_target_shift);
4468 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4469 }
4470 
4471 /**
4472  *  ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
4473  *  @hw: pointer to hardware structure
4474  *  @enable: enable or disable switch for VLAN anti-spoofing
4475  *  @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
4476  *
4477  **/
4478 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4479 {
4480 	int vf_target_reg = vf >> 3;
4481 	int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
4482 	u32 pfvfspoof;
4483 
4484 	if (hw->mac.type == ixgbe_mac_82598EB)
4485 		return;
4486 
4487 	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4488 	if (enable)
4489 		pfvfspoof |= (1 << vf_target_shift);
4490 	else
4491 		pfvfspoof &= ~(1 << vf_target_shift);
4492 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4493 }
4494 
4495 /**
4496  *  ixgbe_get_device_caps_generic - Get additional device capabilities
4497  *  @hw: pointer to hardware structure
4498  *  @device_caps: the EEPROM word with the extra device capabilities
4499  *
4500  *  This function will read the EEPROM location for the device capabilities,
4501  *  and return the word through device_caps.
4502  **/
4503 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
4504 {
4505 	DEBUGFUNC("ixgbe_get_device_caps_generic");
4506 
4507 	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
4508 
4509 	return IXGBE_SUCCESS;
4510 }
4511 
4512 /**
4513  *  ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
4514  *  @hw: pointer to hardware structure
4515  *
4516  **/
4517 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
4518 {
4519 	u32 regval;
4520 	u32 i;
4521 
4522 	DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
4523 
4524 	/* Enable relaxed ordering */
4525 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
4526 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
4527 		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4528 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
4529 	}
4530 
4531 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
4532 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
4533 		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
4534 			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
4535 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
4536 	}
4537 
4538 }
4539 
4540 /**
4541  *  ixgbe_calculate_checksum - Calculate checksum for buffer
4542  *  @buffer: pointer to EEPROM
4543  *  @length: size of EEPROM to calculate a checksum for
4544  *  Calculates the checksum for some buffer on a specified length.  The
4545  *  checksum calculated is returned.
4546  **/
4547 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
4548 {
4549 	u32 i;
4550 	u8 sum = 0;
4551 
4552 	DEBUGFUNC("ixgbe_calculate_checksum");
4553 
4554 	if (!buffer)
4555 		return 0;
4556 
4557 	for (i = 0; i < length; i++)
4558 		sum += buffer[i];
4559 
4560 	return (u8) (0 - sum);
4561 }
4562 
/**
 *  ixgbe_hic_unlocked - Issue command to manageability block unlocked
 *  @hw: pointer to the HW structure
 *  @buffer: command to write and where the return status will be placed
 *  @length: length of buffer, must be multiple of 4 bytes
 *  @timeout: time in ms to wait for command completion; 0 means no wait
 *
 *  Communicates with the manageability block. On success return IXGBE_SUCCESS
 *  else returns semaphore error when encountering an error acquiring
 *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
 *
 *  This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
 *  by the caller.
 **/
s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
		       u32 timeout)
{
	u32 hicr, i, fwsts;
	u16 dword_len;

	DEBUGFUNC("ixgbe_hic_unlocked");

	/* reject empty or oversized commands up front */
	if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Set bit 9 of FWSTS clearing FW reset indication */
	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);

	/* Check that the host interface is enabled. */
	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
	if (!(hicr & IXGBE_HICR_EN)) {
		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Calculate length in DWORDs. We must be DWORD aligned */
	if (length % sizeof(u32)) {
		DEBUGOUT("Buffer length failure, not aligned to dword");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	dword_len = length >> 2;

	/* The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
				      i, IXGBE_CPU_TO_LE32(buffer[i]));

	/* Setting this bit tells the ARC that a new command is pending. */
	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);

	/* poll (1 ms per iteration) until FW clears the command bit */
	for (i = 0; i < timeout; i++) {
		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
		if (!(hicr & IXGBE_HICR_C))
			break;
		msec_delay(1);
	}

	/* For each command except "Apply Update" perform
	 * status checks in the HICR registry.
	 */
	if ((buffer[0] & IXGBE_HOST_INTERFACE_MASK_CMD) ==
	    IXGBE_HOST_INTERFACE_APPLY_UPDATE_CMD)
		return IXGBE_SUCCESS;

	/* Check command completion: fail on poll timeout (only when a
	 * timeout was requested) or when FW did not set status-valid */
	if ((timeout && i == timeout) ||
	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
			      "Command has failed with no status valid.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	return IXGBE_SUCCESS;
}
4643 
/**
 *  ixgbe_host_interface_command - Issue command to manageability block
 *  @hw: pointer to the HW structure
 *  @buffer: contains the command to write and where the return status will
 *   be placed
 *  @length: length of buffer, must be multiple of 4 bytes
 *  @timeout: time in ms to wait for command completion
 *  @return_data: read and return data from the buffer (TRUE) or not (FALSE)
 *   Needed because FW structures are big endian and decoding of
 *   these fields can be 8 bit or 16 bit based on command. Decoding
 *   is not easily understood without making a table of commands.
 *   So we will leave this up to the caller to read back the data
 *   in these cases.
 *
 *  Communicates with the manageability block. On success return IXGBE_SUCCESS
 *  else returns semaphore error when encountering an error acquiring
 *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
 **/
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
				 u32 length, u32 timeout, bool return_data)
{
	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
	struct ixgbe_hic_hdr *resp = (struct ixgbe_hic_hdr *)buffer;
	u16 buf_len;
	s32 status;
	u32 bi;
	u32 dword_len;

	DEBUGFUNC("ixgbe_host_interface_command");

	if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Take management host interface semaphore */
	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
	if (status)
		return status;

	/* issue the command with the semaphore held */
	status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
	if (status)
		goto rel_out;

	if (!return_data)
		goto rel_out;

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS(&buffer[bi]);
	}

	/*
	 * If there is any thing in data position pull it in
	 * Read Flash command requires reading buffer length from
	 * two byes instead of one byte
	 */
	if (resp->cmd == 0x30 || resp->cmd == 0x31) {
		/* read two extra dwords that carry the extended length */
		for (; bi < dword_len + 2; bi++) {
			buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
							  bi);
			IXGBE_LE32_TO_CPUS(&buffer[bi]);
		}
		/* 12-bit length: high nibble comes from ret_status, low
		 * byte from buf_len */
		buf_len = (((u16)(resp->cmd_or_resp.ret_status) << 3)
				  & 0xF00) | resp->buf_len;
		hdr_size += (2 << 2);
	} else {
		buf_len = resp->buf_len;
	}
	if (!buf_len)
		goto rel_out;

	/* caller's buffer must hold header plus the reported payload */
	if (length < buf_len + hdr_size) {
		DEBUGOUT("Buffer not large enough for reply message.\n");
		status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto rel_out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off) */
	/* NOTE(review): the "<=" bound reads dwords bi..dword_len inclusive,
	 * one more than the 0..dword_len-1 pattern used above -- verify this
	 * is intentional before changing */
	for (; bi <= dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS(&buffer[bi]);
	}

rel_out:
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);

	return status;
}
4740 
4741 /**
4742  *  ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
4743  *  @hw: pointer to the HW structure
4744  *  @maj: driver version major number
4745  *  @minr: driver version minor number
4746  *  @build: driver version build number
4747  *  @sub: driver version sub build number
4748  *  @len: unused
4749  *  @driver_ver: unused
4750  *
4751  *  Sends driver version number to firmware through the manageability
4752  *  block.  On success return IXGBE_SUCCESS
4753  *  else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4754  *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4755  **/
4756 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 minr,
4757 				 u8 build, u8 sub, u16 len,
4758 				 const char *driver_ver)
4759 {
4760 	struct ixgbe_hic_drv_info fw_cmd;
4761 	int i;
4762 	s32 ret_val = IXGBE_SUCCESS;
4763 
4764 	DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
4765 	UNREFERENCED_2PARAMETER(len, driver_ver);
4766 
4767 	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4768 	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4769 	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4770 	fw_cmd.port_num = (u8)hw->bus.func;
4771 	fw_cmd.ver_maj = maj;
4772 	fw_cmd.ver_min = minr;
4773 	fw_cmd.ver_build = build;
4774 	fw_cmd.ver_sub = sub;
4775 	fw_cmd.hdr.checksum = 0;
4776 	fw_cmd.pad = 0;
4777 	fw_cmd.pad2 = 0;
4778 	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4779 				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4780 
4781 	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4782 		ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4783 						       sizeof(fw_cmd),
4784 						       IXGBE_HI_COMMAND_TIMEOUT,
4785 						       TRUE);
4786 		if (ret_val != IXGBE_SUCCESS)
4787 			continue;
4788 
4789 		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4790 		    FW_CEM_RESP_STATUS_SUCCESS)
4791 			ret_val = IXGBE_SUCCESS;
4792 		else
4793 			ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4794 
4795 		break;
4796 	}
4797 
4798 	return ret_val;
4799 }
4800 
/**
 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
 * @hw: pointer to hardware structure
 * @num_pb: number of packet buffers to allocate; treated as 1 when 0
 * @headroom: reserve n KB of headroom (subtracted from rx_pb_size before
 *	      dividing the remainder among the packet buffers)
 * @strategy: packet buffer allocation strategy (PBA_STRATEGY_WEIGHTED or
 *	      PBA_STRATEGY_EQUAL; anything else leaves RXPBSIZE untouched)
 **/
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
			     int strategy)
{
	u32 pbsize = hw->mac.rx_pb_size;
	int i = 0;
	u32 rxpktsize, txpktsize, txpbthresh;

	/* Reserve headroom */
	pbsize -= headroom;

	if (!num_pb)
		num_pb = 1;

	/* Divide remaining packet buffer space amongst the number of packet
	 * buffers requested using supplied strategy.
	 */
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
		/* ixgbe_dcb_pba_80_48 strategy weight first half of packet
		 * buffer with 5/8 of the packet buffer space.
		 */
		rxpktsize = (pbsize * 5) / (num_pb * 4);
		pbsize -= rxpktsize * (num_pb / 2);
		/* RXPBSIZE expects the size in its own units, hence shift */
		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
		for (; i < (num_pb / 2); i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* fall through - configure remaining packet buffers */
	case PBA_STRATEGY_EQUAL:
		/* split what is left evenly among the remaining buffers
		 * (i is num_pb/2 after the weighted half, else 0) */
		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
		for (; i < num_pb; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	default:
		break;
	}

	/* Only support an equally distributed Tx packet buffer strategy. */
	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
	for (i = 0; i < num_pb; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
	}

	/* Clear unused TCs, if any, to zero buffer size*/
	for (; i < IXGBE_MAX_PB; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
	}
}
4859 
/**
 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
 * @hw: pointer to the hardware structure
 *
 * The 82599 and x540 MACs can experience issues if TX work is still pending
 * when a reset occurs.  This function prevents this by flushing the PCIe
 * buffers on the system.
 **/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
	u32 gcr_ext, hlreg0, i, poll;
	u16 value;

	/*
	 * If double reset is not requested then all transactions should
	 * already be clear and as such there is no work to do
	 */
	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
		return;

	/*
	 * Set loopback enable to prevent any transmits from being sent
	 * should the link come up.  This assumes that the RXCTRL.RXEN bit
	 * has already been cleared.
	 */
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);

	/* Wait for a last completion before clearing buffers */
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(3);

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.  Poll the Device Status register in 100us
	 * steps; the iteration count comes from the PCIe completion
	 * timeout helper.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
		/* Surprise removal: config reads return all-ones, stop */
		if (IXGBE_REMOVED(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}
	/* Poll exhausted: proceed with the buffer clear regardless */

out:
	/* initiate cleaning flow for buffers in the PCIe transaction layer */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);

	/* Flush all writes and allow 20usec for all transactions to clear */
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(20);

	/* restore previous register values */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
}
4920 
/**
 *  ixgbe_bypass_rw_generic - Bit bang data into by_pass FW
 *
 *  @hw: pointer to hardware structure
 *  @cmd: Command we send to the FW
 *  @status: The reply from the FW
 *
 *  Bit-bangs the cmd to the by_pass FW status points to what is returned.
 *  Uses three software-definable pins (SDP) as a 3-wire serial bus:
 *  clock (sck), data out to FW (sdi) and data in from FW (sdo).
 *  Returns IXGBE_ERR_PARAM for a NULL status pointer and
 *  IXGBE_ERR_DEVICE_NOT_SUPPORTED for MACs without bypass pins.
 **/
#define IXGBE_BYPASS_BB_WAIT 1
s32 ixgbe_bypass_rw_generic(struct ixgbe_hw *hw, u32 cmd, u32 *status)
{
	int i;
	u32 sck, sdi, sdo, dir_sck, dir_sdi, dir_sdo;
	u32 esdp;

	if (!status)
		return IXGBE_ERR_PARAM;

	*status = 0;

	/* SDP vary by MAC type */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		sck = IXGBE_ESDP_SDP7;
		sdi = IXGBE_ESDP_SDP0;
		sdo = IXGBE_ESDP_SDP6;
		dir_sck = IXGBE_ESDP_SDP7_DIR;
		dir_sdi = IXGBE_ESDP_SDP0_DIR;
		dir_sdo = IXGBE_ESDP_SDP6_DIR;
		break;
	case ixgbe_mac_X540:
		sck = IXGBE_ESDP_SDP2;
		sdi = IXGBE_ESDP_SDP0;
		sdo = IXGBE_ESDP_SDP1;
		dir_sck = IXGBE_ESDP_SDP2_DIR;
		dir_sdi = IXGBE_ESDP_SDP0_DIR;
		dir_sdo = IXGBE_ESDP_SDP1_DIR;
		break;
	default:
		return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
	}

	/* Set SDP pins direction */
	esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
	esdp |= dir_sck;	/* SCK as output */
	esdp |= dir_sdi;	/* SDI as output */
	esdp &= ~dir_sdo;	/* SDO as input */
	esdp |= sck;
	esdp |= sdi;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(IXGBE_BYPASS_BB_WAIT);

	/* Generate start condition: SDI falls while SCK is high */
	esdp &= ~sdi;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(IXGBE_BYPASS_BB_WAIT);

	esdp &= ~sck;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(IXGBE_BYPASS_BB_WAIT);

	/* Clock out the new control word and clock in the status */
	for (i = 0; i < 32; i++) {
		/* Drive SDI with the next command bit, MSB first */
		if ((cmd >> (31 - i)) & 0x01) {
			esdp |= sdi;
			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		} else {
			esdp &= ~sdi;
			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		}
		IXGBE_WRITE_FLUSH(hw);
		msec_delay(IXGBE_BYPASS_BB_WAIT);

		/* Pulse SCK high then low to latch the bit */
		esdp |= sck;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);
		msec_delay(IXGBE_BYPASS_BB_WAIT);

		esdp &= ~sck;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);
		msec_delay(IXGBE_BYPASS_BB_WAIT);

		/* Sample SDO and shift the reply bit into *status */
		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & sdo)
			*status = (*status << 1) | 0x01;
		else
			*status = (*status << 1) | 0x00;
		msec_delay(IXGBE_BYPASS_BB_WAIT);
	}

	/* stop condition */
	esdp |= sck;
	esdp &= ~sdi;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(IXGBE_BYPASS_BB_WAIT);

	esdp |= sdi;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);

	/* set the page bits to match the cmd that the status it belongs to */
	*status = (*status & 0x3fffffff) | (cmd & 0xc0000000);

	return IXGBE_SUCCESS;
}
5032 
5033 /**
5034  * ixgbe_bypass_valid_rd_generic - Verify valid return from bit-bang.
5035  *
5036  * If we send a write we can't be sure it took until we can read back
5037  * that same register.  It can be a problem as some of the feilds may
5038  * for valid reasons change inbetween the time wrote the register and
5039  * we read it again to verify.  So this function check everything we
5040  * can check and then assumes it worked.
5041  *
5042  * @u32 in_reg - The register cmd for the bit-bang read.
5043  * @u32 out_reg - The register returned from a bit-bang read.
5044  **/
5045 bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg)
5046 {
5047 	u32 mask;
5048 
5049 	/* Page must match for all control pages */
5050 	if ((in_reg & BYPASS_PAGE_M) != (out_reg & BYPASS_PAGE_M))
5051 		return FALSE;
5052 
5053 	switch (in_reg & BYPASS_PAGE_M) {
5054 	case BYPASS_PAGE_CTL0:
5055 		/* All the following can't change since the last write
5056 		 *  - All the event actions
5057 		 *  - The timeout value
5058 		 */
5059 		mask = BYPASS_AUX_ON_M | BYPASS_MAIN_ON_M |
5060 		       BYPASS_MAIN_OFF_M | BYPASS_AUX_OFF_M |
5061 		       BYPASS_WDTIMEOUT_M |
5062 		       BYPASS_WDT_VALUE_M;
5063 		if ((out_reg & mask) != (in_reg & mask))
5064 			return FALSE;
5065 
5066 		/* 0x0 is never a valid value for bypass status */
5067 		if (!(out_reg & BYPASS_STATUS_OFF_M))
5068 			return FALSE;
5069 		break;
5070 	case BYPASS_PAGE_CTL1:
5071 		/* All the following can't change since the last write
5072 		 *  - time valid bit
5073 		 *  - time we last sent
5074 		 */
5075 		mask = BYPASS_CTL1_VALID_M | BYPASS_CTL1_TIME_M;
5076 		if ((out_reg & mask) != (in_reg & mask))
5077 			return FALSE;
5078 		break;
5079 	case BYPASS_PAGE_CTL2:
5080 		/* All we can check in this page is control number
5081 		 * which is already done above.
5082 		 */
5083 		break;
5084 	}
5085 
5086 	/* We are as sure as we can be return TRUE */
5087 	return TRUE;
5088 }
5089 
/**
 *  ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Register.
 *
 *  @hw: pointer to hardware structure
 *  @ctrl: The control word we are setting.
 *  @event: The event we are setting in the FW.  This also happens to
 *	    be the mask for the event we are setting (handy)
 *  @action: The action we set the event to in the FW. This is in a
 *	     bit field that happens to be what we want to put in
 *	     the event spot (also handy)
 *
 *  Returns IXGBE_SUCCESS, IXGBE_ERR_INVALID_ARGUMENT when the bit-bang
 *  I/O fails, or IXGBE_BYPASS_FW_WRITE_FAILURE when a page-0 write does
 *  not verify after several read-back attempts.
 **/
s32 ixgbe_bypass_set_generic(struct ixgbe_hw *hw, u32 ctrl, u32 event,
			     u32 action)
{
	u32 by_ctl = 0;
	u32 cmd, verify;
	u32 count = 0;

	/* Get current values */
	cmd = ctrl;	/* just reading only need control number */
	if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
		return IXGBE_ERR_INVALID_ARGUMENT;

	/* Set to new action: clear the event field, keep the rest */
	cmd = (by_ctl & ~event) | BYPASS_WE | action;
	if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
		return IXGBE_ERR_INVALID_ARGUMENT;

	/* Page 0 forces a FW eeprom write which is slow, so verify it */
	if ((cmd & BYPASS_PAGE_M) == BYPASS_PAGE_CTL0) {
		verify = BYPASS_PAGE_CTL0;
		do {
			/* Give up after several failed read-backs */
			if (count++ > 5)
				return IXGBE_BYPASS_FW_WRITE_FAILURE;

			if (ixgbe_bypass_rw_generic(hw, verify, &by_ctl))
				return IXGBE_ERR_INVALID_ARGUMENT;
		} while (!ixgbe_bypass_valid_rd_generic(cmd, by_ctl));
	} else {
		/* We have to give the FW time for the write to stick */
		msec_delay(100);
	}

	return IXGBE_SUCCESS;
}
5135 
5136 /**
5137  *  ixgbe_bypass_rd_eep_generic - Read the bypass FW eeprom addres.
5138  *
5139  *  @hw: pointer to hardware structure
5140  *  @addr: The bypass eeprom address to read.
5141  *  @value: The 8b of data at the address above.
5142  **/
5143 s32 ixgbe_bypass_rd_eep_generic(struct ixgbe_hw *hw, u32 addr, u8 *value)
5144 {
5145 	u32 cmd;
5146 	u32 status;
5147 
5148 
5149 	/* send the request */
5150 	cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
5151 	cmd |= (addr << BYPASS_CTL2_OFFSET_SHIFT) & BYPASS_CTL2_OFFSET_M;
5152 	if (ixgbe_bypass_rw_generic(hw, cmd, &status))
5153 		return IXGBE_ERR_INVALID_ARGUMENT;
5154 
5155 	/* We have give the FW time for the write to stick */
5156 	msec_delay(100);
5157 
5158 	/* now read the results */
5159 	cmd &= ~BYPASS_WE;
5160 	if (ixgbe_bypass_rw_generic(hw, cmd, &status))
5161 		return IXGBE_ERR_INVALID_ARGUMENT;
5162 
5163 	*value = status & BYPASS_CTL2_DATA_M;
5164 
5165 	return IXGBE_SUCCESS;
5166 }
5167 
5168 /**
5169  *  ixgbe_get_orom_version - Return option ROM from EEPROM
5170  *
5171  *  @hw: pointer to hardware structure
5172  *  @nvm_ver: pointer to output structure
5173  *
5174  *  if valid option ROM version, nvm_ver->or_valid set to TRUE
5175  *  else nvm_ver->or_valid is FALSE.
5176  **/
5177 void ixgbe_get_orom_version(struct ixgbe_hw *hw,
5178 			    struct ixgbe_nvm_version *nvm_ver)
5179 {
5180 	u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;
5181 
5182 	nvm_ver->or_valid = FALSE;
5183 	/* Option Rom may or may not be present.  Start with pointer */
5184 	hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);
5185 
5186 	/* make sure offset is valid */
5187 	if ((offset == 0x0) || (offset == NVM_INVALID_PTR))
5188 		return;
5189 
5190 	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
5191 	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);
5192 
5193 	/* option rom exists and is valid */
5194 	if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
5195 	    eeprom_cfg_blkl == NVM_VER_INVALID ||
5196 	    eeprom_cfg_blkh == NVM_VER_INVALID)
5197 		return;
5198 
5199 	nvm_ver->or_valid = TRUE;
5200 	nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
5201 	nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
5202 			    (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
5203 	nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
5204 }
5205 
5206 /**
5207  *  ixgbe_get_oem_prod_version - Return OEM Product version
5208  *
5209  *  @hw: pointer to hardware structure
5210  *  @nvm_ver: pointer to output structure
5211  *
5212  *  if valid OEM product version, nvm_ver->oem_valid set to TRUE
5213  *  else nvm_ver->oem_valid is FALSE.
5214  **/
5215 void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
5216 				struct ixgbe_nvm_version *nvm_ver)
5217 {
5218 	u16 rel_num, prod_ver, mod_len, cap, offset;
5219 
5220 	nvm_ver->oem_valid = FALSE;
5221 	hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);
5222 
5223 	/* Return if offset to OEM Product Version block is invalid */
5224 	if (offset == 0x0 || offset == NVM_INVALID_PTR)
5225 		return;
5226 
5227 	/* Read product version block */
5228 	hw->eeprom.ops.read(hw, offset, &mod_len);
5229 	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);
5230 
5231 	/* Return if OEM product version block is invalid */
5232 	if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
5233 	    (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
5234 		return;
5235 
5236 	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
5237 	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);
5238 
5239 	/* Return if version is invalid */
5240 	if ((rel_num | prod_ver) == 0x0 ||
5241 	    rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
5242 		return;
5243 
5244 	nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
5245 	nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
5246 	nvm_ver->oem_release = rel_num;
5247 	nvm_ver->oem_valid = TRUE;
5248 }
5249 
5250 /**
5251  *  ixgbe_get_etk_id - Return Etrack ID from EEPROM
5252  *
5253  *  @hw: pointer to hardware structure
5254  *  @nvm_ver: pointer to output structure
5255  *
5256  *  word read errors will return 0xFFFF
5257  **/
5258 void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver)
5259 {
5260 	u16 etk_id_l, etk_id_h;
5261 
5262 	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
5263 		etk_id_l = NVM_VER_INVALID;
5264 	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
5265 		etk_id_h = NVM_VER_INVALID;
5266 
5267 	/* The word order for the version format is determined by high order
5268 	 * word bit 15.
5269 	 */
5270 	if ((etk_id_h & NVM_ETK_VALID) == 0) {
5271 		nvm_ver->etk_id = etk_id_h;
5272 		nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
5273 	} else {
5274 		nvm_ver->etk_id = etk_id_l;
5275 		nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
5276 	}
5277 }
5278 
5279 
5280 /**
5281  * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
5282  * @hw: pointer to hardware structure
5283  * @map: pointer to u8 arr for returning map
5284  *
5285  * Read the rtrup2tc HW register and resolve its content into map
5286  **/
5287 void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
5288 {
5289 	u32 reg, i;
5290 
5291 	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
5292 	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
5293 		map[i] = IXGBE_RTRUP2TC_UP_MASK &
5294 			(reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
5295 	return;
5296 }
5297 
5298 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
5299 {
5300 	u32 pfdtxgswc;
5301 	u32 rxctrl;
5302 
5303 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5304 	if (rxctrl & IXGBE_RXCTRL_RXEN) {
5305 		if (hw->mac.type != ixgbe_mac_82598EB) {
5306 			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
5307 			if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
5308 				pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
5309 				IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
5310 				hw->mac.set_lben = TRUE;
5311 			} else {
5312 				hw->mac.set_lben = FALSE;
5313 			}
5314 		}
5315 		rxctrl &= ~IXGBE_RXCTRL_RXEN;
5316 		IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
5317 	}
5318 }
5319 
5320 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
5321 {
5322 	u32 pfdtxgswc;
5323 	u32 rxctrl;
5324 
5325 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5326 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
5327 
5328 	if (hw->mac.type != ixgbe_mac_82598EB) {
5329 		if (hw->mac.set_lben) {
5330 			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
5331 			pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
5332 			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
5333 			hw->mac.set_lben = FALSE;
5334 		}
5335 	}
5336 }
5337 
5338 /**
5339  * ixgbe_mng_present - returns TRUE when management capability is present
5340  * @hw: pointer to hardware structure
5341  */
5342 bool ixgbe_mng_present(struct ixgbe_hw *hw)
5343 {
5344 	u32 fwsm;
5345 
5346 	if (hw->mac.type < ixgbe_mac_82599EB)
5347 		return FALSE;
5348 
5349 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
5350 	return !!(fwsm & IXGBE_FWSM_FW_MODE_PT);
5351 }
5352 
5353 /**
5354  * ixgbe_mng_enabled - Is the manageability engine enabled?
5355  * @hw: pointer to hardware structure
5356  *
5357  * Returns TRUE if the manageability engine is enabled.
5358  **/
5359 bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
5360 {
5361 	u32 fwsm, manc, factps;
5362 
5363 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
5364 	if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
5365 		return FALSE;
5366 
5367 	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
5368 	if (!(manc & IXGBE_MANC_RCV_TCO_EN))
5369 		return FALSE;
5370 
5371 	if (hw->mac.type <= ixgbe_mac_X540) {
5372 		factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
5373 		if (factps & IXGBE_FACTPS_MNGCG)
5374 			return FALSE;
5375 	}
5376 
5377 	return TRUE;
5378 }
5379 
/**
 *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Set the link speed in the MAC and/or PHY register and restarts link.
 *  Tries each requested speed in descending order; if none links, calls
 *  itself once more with only the highest speed that was attempted.
 **/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					  ixgbe_link_speed speed,
					  bool autoneg_wait_to_complete)
{
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 status = IXGBE_SUCCESS;
	u32 speedcnt = 0;
	u32 i = 0;
	bool autoneg, link_up = FALSE;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
	if (status != IXGBE_SUCCESS)
		return status;

	speed &= link_speed;

	/* Try each speed one by one, highest priority first.  We do this in
	 * software because 10Gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber_fixed:
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_10GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects MAC link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_10GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted.  82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
						  &link_up, FALSE);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		/* Track the best speed attempted for the retry below */
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber_fixed:
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_1GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_1GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	if (speed == 0) {
		/* Disable the Tx laser for media none */
		ixgbe_disable_tx_laser(hw);

		goto out;
	}

	/* We didn't get link.  Configure back to the highest speed we tried,
	 * (if there was more than one).  We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
						      highest_link_speed,
						      autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}
5533 
5534 /**
5535  *  ixgbe_set_soft_rate_select_speed - Set module link speed
5536  *  @hw: pointer to hardware structure
5537  *  @speed: link speed to set
5538  *
5539  *  Set module link speed via the soft rate select.
5540  */
5541 void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
5542 					ixgbe_link_speed speed)
5543 {
5544 	s32 status;
5545 	u8 rs, eeprom_data;
5546 
5547 	switch (speed) {
5548 	case IXGBE_LINK_SPEED_10GB_FULL:
5549 		/* one bit mask same as setting on */
5550 		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
5551 		break;
5552 	case IXGBE_LINK_SPEED_1GB_FULL:
5553 		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
5554 		break;
5555 	default:
5556 		DEBUGOUT("Invalid fixed module speed\n");
5557 		return;
5558 	}
5559 
5560 	/* Set RS0 */
5561 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5562 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
5563 					   &eeprom_data);
5564 	if (status) {
5565 		DEBUGOUT("Failed to read Rx Rate Select RS0\n");
5566 		goto out;
5567 	}
5568 
5569 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5570 
5571 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5572 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
5573 					    eeprom_data);
5574 	if (status) {
5575 		DEBUGOUT("Failed to write Rx Rate Select RS0\n");
5576 		goto out;
5577 	}
5578 
5579 	/* Set RS1 */
5580 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5581 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
5582 					   &eeprom_data);
5583 	if (status) {
5584 		DEBUGOUT("Failed to read Rx Rate Select RS1\n");
5585 		goto out;
5586 	}
5587 
5588 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5589 
5590 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5591 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
5592 					    eeprom_data);
5593 	if (status) {
5594 		DEBUGOUT("Failed to write Rx Rate Select RS1\n");
5595 		goto out;
5596 	}
5597 out:
5598 	return;
5599 }
5600