xref: /netbsd-src/sys/dev/pci/ixgbe/ixgbe_common.c (revision e6c7e151de239c49d2e38720a061ed9d1fa99309)
1 /* $NetBSD: ixgbe_common.c,v 1.27 2020/02/05 07:45:46 msaitoh Exp $ */
2 
3 /******************************************************************************
4   SPDX-License-Identifier: BSD-3-Clause
5 
6   Copyright (c) 2001-2017, Intel Corporation
7   All rights reserved.
8 
9   Redistribution and use in source and binary forms, with or without
10   modification, are permitted provided that the following conditions are met:
11 
12    1. Redistributions of source code must retain the above copyright notice,
13       this list of conditions and the following disclaimer.
14 
15    2. Redistributions in binary form must reproduce the above copyright
16       notice, this list of conditions and the following disclaimer in the
17       documentation and/or other materials provided with the distribution.
18 
19    3. Neither the name of the Intel Corporation nor the names of its
20       contributors may be used to endorse or promote products derived from
21       this software without specific prior written permission.
22 
23   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
27   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33   POSSIBILITY OF SUCH DAMAGE.
34 
35 ******************************************************************************/
36 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_common.c 331224 2018-03-19 20:55:05Z erj $*/
37 
38 #include "ixgbe_common.h"
39 #include "ixgbe_phy.h"
40 #include "ixgbe_dcb.h"
41 #include "ixgbe_dcb_82599.h"
42 #include "ixgbe_api.h"
43 
44 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
45 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
46 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
47 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
48 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
49 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
50 					u16 count);
51 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
52 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
53 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
54 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
55 
56 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
57 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
58 					 u16 *san_mac_offset);
59 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
60 					     u16 words, u16 *data);
61 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
62 					      u16 words, u16 *data);
63 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
64 						 u16 offset);
65 
66 /**
67  *  ixgbe_init_ops_generic - Inits function ptrs
68  *  @hw: pointer to the hardware structure
69  *
70  *  Initialize the function pointers.
71  **/
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	struct ixgbe_mac_info *mac = &hw->mac;
	/* EEC tells us whether a physical EEPROM is present (bit 8). */
	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	DEBUGFUNC("ixgbe_init_ops_generic");

	/* EEPROM */
	eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
	if (eec & IXGBE_EEC_PRES) {
		eeprom->ops.read = ixgbe_read_eerd_generic;
		eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
	} else {
		eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
		eeprom->ops.read_buffer =
				 ixgbe_read_eeprom_buffer_bit_bang_generic;
	}
	/* Writes always go through the bit-bang path. */
	eeprom->ops.write = ixgbe_write_eeprom_generic;
	eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
	eeprom->ops.validate_checksum =
				      ixgbe_validate_eeprom_checksum_generic;
	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;

	/*
	 * MAC.  Ops left NULL here are MAC-generation specific and are
	 * filled in by the per-family init (82598/82599/X540/X550...).
	 */
	mac->ops.init_hw = ixgbe_init_hw_generic;
	mac->ops.reset_hw = NULL;
	mac->ops.start_hw = ixgbe_start_hw_generic;
	mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
	mac->ops.get_media_type = NULL;
	mac->ops.get_supported_physical_layer = NULL;
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
	mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
	mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
	mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
	mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
	mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
	mac->ops.prot_autoc_read = prot_autoc_read_generic;
	mac->ops.prot_autoc_write = prot_autoc_write_generic;

	/* LEDs */
	mac->ops.led_on = ixgbe_led_on_generic;
	mac->ops.led_off = ixgbe_led_off_generic;
	mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
	mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
	mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic;

	/* RAR, Multicast, VLAN */
	mac->ops.set_rar = ixgbe_set_rar_generic;
	mac->ops.clear_rar = ixgbe_clear_rar_generic;
	mac->ops.insert_mac_addr = NULL;
	mac->ops.set_vmdq = NULL;
	mac->ops.clear_vmdq = NULL;
	mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
	mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
	mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
	mac->ops.enable_mc = ixgbe_enable_mc_generic;
	mac->ops.disable_mc = ixgbe_disable_mc_generic;
	mac->ops.clear_vfta = NULL;
	mac->ops.set_vfta = NULL;
	mac->ops.set_vlvf = NULL;
	mac->ops.init_uta_tables = NULL;
	mac->ops.enable_rx = ixgbe_enable_rx_generic;
	mac->ops.disable_rx = ixgbe_disable_rx_generic;
	mac->ops.toggle_txdctl = ixgbe_toggle_txdctl_generic;

	/* Flow Control */
	mac->ops.fc_enable = ixgbe_fc_enable_generic;
	mac->ops.setup_fc = ixgbe_setup_fc_generic;
	mac->ops.fc_autoneg = ixgbe_fc_autoneg;

	/* Link - MAC-generation specific, filled in later */
	mac->ops.get_link_capabilities = NULL;
	mac->ops.setup_link = NULL;
	mac->ops.check_link = NULL;
	mac->ops.dmac_config = NULL;
	mac->ops.dmac_update_tcs = NULL;
	mac->ops.dmac_config_tcs = NULL;

	return IXGBE_SUCCESS;
}
156 
157 /**
158  * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
159  * of flow control
160  * @hw: pointer to hardware structure
161  *
162  * This function returns TRUE if the device supports flow control
163  * autonegotiation, and FALSE if it does not.
164  *
165  **/
166 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
167 {
168 	bool supported = FALSE;
169 	ixgbe_link_speed speed;
170 	bool link_up;
171 
172 	DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
173 
174 	switch (hw->phy.media_type) {
175 	case ixgbe_media_type_fiber_fixed:
176 	case ixgbe_media_type_fiber_qsfp:
177 	case ixgbe_media_type_fiber:
178 		/* flow control autoneg black list */
179 		switch (hw->device_id) {
180 		case IXGBE_DEV_ID_X550EM_A_SFP:
181 		case IXGBE_DEV_ID_X550EM_A_SFP_N:
182 		case IXGBE_DEV_ID_X550EM_A_QSFP:
183 		case IXGBE_DEV_ID_X550EM_A_QSFP_N:
184 			supported = FALSE;
185 			break;
186 		default:
187 			hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
188 			/* if link is down, assume supported */
189 			if (link_up)
190 				supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
191 				    TRUE : FALSE;
192 			else
193 				supported = TRUE;
194 		}
195 
196 		break;
197 	case ixgbe_media_type_backplane:
198 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI)
199 			supported = FALSE;
200 		else
201 			supported = TRUE;
202 		break;
203 	case ixgbe_media_type_copper:
204 		/* only some copper devices support flow control autoneg */
205 		switch (hw->device_id) {
206 		case IXGBE_DEV_ID_82599_T3_LOM:
207 		case IXGBE_DEV_ID_X540T:
208 		case IXGBE_DEV_ID_X540T1:
209 		case IXGBE_DEV_ID_X540_BYPASS:
210 		case IXGBE_DEV_ID_X550T:
211 		case IXGBE_DEV_ID_X550T1:
212 		case IXGBE_DEV_ID_X550EM_X_10G_T:
213 		case IXGBE_DEV_ID_X550EM_A_10G_T:
214 		case IXGBE_DEV_ID_X550EM_A_1G_T:
215 		case IXGBE_DEV_ID_X550EM_A_1G_T_L:
216 			supported = TRUE;
217 			break;
218 		default:
219 			supported = FALSE;
220 		}
221 	default:
222 		break;
223 	}
224 
225 	if (!supported)
226 		ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
227 			      "Device %x does not support flow control autoneg",
228 			      hw->device_id);
229 
230 	return supported;
231 }
232 
233 /**
234  *  ixgbe_setup_fc_generic - Set up flow control
235  *  @hw: pointer to hardware structure
236  *
237  *  Called at init time to set up flow control.
238  **/
s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	/* reg = PCS1GANA (1G advertisement), reg_bp = AUTOC (backplane) */
	u32 reg = 0, reg_bp = 0;
	/* reg_cu = copper PHY MDIO auto-neg advertisement word */
	u16 reg_cu = 0;
	bool locked = FALSE;

	DEBUGFUNC("ixgbe_setup_fc_generic");

	/*
	 * Validate the requested mode.  Strict IEEE mode cannot advertise
	 * "Rx pause only" (asymmetric-toward-us is expressed by
	 * advertising both SYM and ASM, see ixgbe_fc_rx_pause below).
	 */
	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
			   "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/*
	 * 10gig parts do not have a word in the EEPROM to determine the
	 * default flow control setting, so we explicitly set it to full.
	 */
	if (hw->fc.requested_mode == ixgbe_fc_default)
		hw->fc.requested_mode = ixgbe_fc_full;

	/*
	 * Set up the 1G and 10G flow control advertisement registers so the
	 * HW will be able to do fc autoneg once the cable is plugged in.  If
	 * we link at 10G, the 1G advertisement is harmless and vice versa.
	 */
	switch (hw->phy.media_type) {
	case ixgbe_media_type_backplane:
		/* some MAC's need RMW protection on AUTOC */
		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
		if (ret_val != IXGBE_SUCCESS)
			goto out;

		/* fall through - only backplane uses autoc */
	case ixgbe_media_type_fiber_fixed:
	case ixgbe_media_type_fiber_qsfp:
	case ixgbe_media_type_fiber:
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);

		break;
	case ixgbe_media_type_copper:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
		break;
	default:
		break;
	}

	/*
	 * The possible values of fc.requested_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.requested_mode) {
	case ixgbe_fc_none:
		/* Flow control completely disabled by software override. */
		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
				    IXGBE_AUTOC_ASM_PAUSE);
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane) {
			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
			reg_cu |= IXGBE_TAF_ASM_PAUSE;
			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
		}
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE, as such we fall
		 * through to the fc_full statement.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
				  IXGBE_AUTOC_ASM_PAUSE;
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
		break;
	default:
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
			     "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* Pre-X540 MACs negotiate clause 37 FC through the PCS block. */
	if (hw->mac.type < ixgbe_mac_X540) {
		/*
		 * Enable auto-negotiation between the MAC & PHY;
		 * the MAC will advertise clause 37 flow control.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);

		/* Disable AN timeout */
		if (hw->fc.strict_ieee)
			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;

		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
	}

	/*
	 * AUTOC restart handles negotiation of 1G and 10G on backplane
	 * and copper. There is no need to set the PCS1GCTL register.
	 *
	 */
	if (hw->phy.media_type == ixgbe_media_type_backplane) {
		reg_bp |= IXGBE_AUTOC_AN_RESTART;
		ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
		if (ret_val)
			goto out;
	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
		    (ixgbe_device_supports_autoneg_fc(hw))) {
		/* Push the advertisement to the copper PHY over MDIO. */
		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
	}

	/* NOTE(review): on >= X540 "reg" was never written; this trace
	 * line then repeats the stale/zero value -- debug output only. */
	DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
out:
	return ret_val;
}
388 
389 /**
390  *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
391  *  @hw: pointer to hardware structure
392  *
393  *  Starts the hardware by filling the bus info structure and media type, clears
394  *  all on chip counters, initializes receive address registers, multicast
395  *  table, VLAN filter table, calls routine to set up link and flow control
396  *  settings, and leaves transmit and receive units disabled and uninitialized
397  **/
398 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
399 {
400 	s32 ret_val;
401 	u32 ctrl_ext;
402 	u16 device_caps;
403 
404 	DEBUGFUNC("ixgbe_start_hw_generic");
405 
406 	/* Set the media type */
407 	hw->phy.media_type = hw->mac.ops.get_media_type(hw);
408 
409 	/* PHY ops initialization must be done in reset_hw() */
410 
411 	/* Clear the VLAN filter table */
412 	hw->mac.ops.clear_vfta(hw);
413 
414 	/* Clear statistics registers */
415 	hw->mac.ops.clear_hw_cntrs(hw);
416 
417 	/* Set No Snoop Disable */
418 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
419 	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
420 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
421 	IXGBE_WRITE_FLUSH(hw);
422 
423 	/* Setup flow control */
424 	ret_val = ixgbe_setup_fc(hw);
425 	if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) {
426 		DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
427 		return ret_val;
428 	}
429 
430 	/* Cache bit indicating need for crosstalk fix */
431 	switch (hw->mac.type) {
432 	case ixgbe_mac_82599EB:
433 	case ixgbe_mac_X550EM_x:
434 	case ixgbe_mac_X550EM_a:
435 		hw->mac.ops.get_device_caps(hw, &device_caps);
436 		if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
437 			hw->need_crosstalk_fix = FALSE;
438 		else
439 			hw->need_crosstalk_fix = TRUE;
440 		break;
441 	default:
442 		hw->need_crosstalk_fix = FALSE;
443 		break;
444 	}
445 
446 	/* Clear adapter stopped flag */
447 	hw->adapter_stopped = FALSE;
448 
449 	return IXGBE_SUCCESS;
450 }
451 
452 /**
453  *  ixgbe_start_hw_gen2 - Init sequence for common device family
454  *  @hw: pointer to hw structure
455  *
456  * Performs the init sequence common to the second generation
457  * of 10 GbE devices.
458  * Devices in the second generation:
459  *     82599
460  *     X540
461  **/
462 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
463 {
464 	u32 i;
465 	u32 regval;
466 
467 	DEBUGFUNC("ixgbe_start_hw_gen2");
468 
469 	/* Clear the rate limiters */
470 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
471 		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
472 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
473 	}
474 	IXGBE_WRITE_FLUSH(hw);
475 
476 	/* Disable relaxed ordering */
477 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
478 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
479 		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
480 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
481 	}
482 
483 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
484 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
485 		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
486 			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
487 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
488 	}
489 
490 	return IXGBE_SUCCESS;
491 }
492 
493 /**
494  *  ixgbe_init_hw_generic - Generic hardware initialization
495  *  @hw: pointer to hardware structure
496  *
497  *  Initialize the hardware by resetting the hardware, filling the bus info
498  *  structure and media type, clears all on chip counters, initializes receive
499  *  address registers, multicast table, VLAN filter table, calls routine to set
500  *  up link and flow control settings, and leaves transmit and receive units
501  *  disabled and uninitialized
502  **/
503 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
504 {
505 	s32 status;
506 
507 	DEBUGFUNC("ixgbe_init_hw_generic");
508 
509 	/* Reset the hardware */
510 	status = hw->mac.ops.reset_hw(hw);
511 
512 	if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) {
513 		/* Start the HW */
514 		status = hw->mac.ops.start_hw(hw);
515 	}
516 
517 	/* Initialize the LED link active for LED blink support */
518 	if (hw->mac.ops.init_led_link_act)
519 		hw->mac.ops.init_led_link_act(hw);
520 
521 	if (status != IXGBE_SUCCESS)
522 		DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
523 
524 	return status;
525 }
526 
527 /**
528  *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
529  *  @hw: pointer to hardware structure
530  *
531  *  Clears all hardware statistics counters by reading them from the hardware
532  *  Statistics counters are clear on read.
533  **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");

	/*
	 * The statistics registers are clear-on-read: each IXGBE_READ_REG
	 * below is issued purely for its side effect and the value is
	 * deliberately discarded.
	 */
	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	if (hw->mac.type >= ixgbe_mac_X550)
		IXGBE_READ_REG(hw, IXGBE_MBSDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	/* 82599 and later moved the link XON/XOFF Rx counters. */
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* Per-priority (8 TCs) pause counters. */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	/* Rx/Tx packet-size histogram and totals. */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	/* Per-queue (16) Rx/Tx counters. */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/*
	 * X540/X550 keep some FEC/CRC error counters in the PHY; read
	 * them through MDIO to clear them too.  Note "i" is reused here
	 * merely as a scratch destination for the discarded values.
	 */
	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			ixgbe_identify_phy(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
	}

	return IXGBE_SUCCESS;
}
642 
643 /**
644  *  ixgbe_read_pba_string_generic - Reads part number string from EEPROM
645  *  @hw: pointer to hardware structure
646  *  @pba_num: stores the part number string from the EEPROM
647  *  @pba_num_size: part number string buffer length
648  *
649  *  Reads the part number string from the EEPROM.
650  **/
651 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
652 				  u32 pba_num_size)
653 {
654 	s32 ret_val;
655 	u16 data;
656 	u16 pba_ptr;
657 	u16 offset;
658 	u16 length;
659 
660 	DEBUGFUNC("ixgbe_read_pba_string_generic");
661 
662 	if (pba_num == NULL) {
663 		DEBUGOUT("PBA string buffer was null\n");
664 		return IXGBE_ERR_INVALID_ARGUMENT;
665 	}
666 
667 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
668 	if (ret_val) {
669 		DEBUGOUT("NVM Read Error\n");
670 		return ret_val;
671 	}
672 
673 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
674 	if (ret_val) {
675 		DEBUGOUT("NVM Read Error\n");
676 		return ret_val;
677 	}
678 
679 	/*
680 	 * if data is not ptr guard the PBA must be in legacy format which
681 	 * means pba_ptr is actually our second data word for the PBA number
682 	 * and we can decode it into an ascii string
683 	 */
684 	if (data != IXGBE_PBANUM_PTR_GUARD) {
685 		DEBUGOUT("NVM PBA number is not stored as string\n");
686 
687 		/* we will need 11 characters to store the PBA */
688 		if (pba_num_size < 11) {
689 			DEBUGOUT("PBA string buffer too small\n");
690 			return IXGBE_ERR_NO_SPACE;
691 		}
692 
693 		/* extract hex string from data and pba_ptr */
694 		pba_num[0] = (data >> 12) & 0xF;
695 		pba_num[1] = (data >> 8) & 0xF;
696 		pba_num[2] = (data >> 4) & 0xF;
697 		pba_num[3] = data & 0xF;
698 		pba_num[4] = (pba_ptr >> 12) & 0xF;
699 		pba_num[5] = (pba_ptr >> 8) & 0xF;
700 		pba_num[6] = '-';
701 		pba_num[7] = 0;
702 		pba_num[8] = (pba_ptr >> 4) & 0xF;
703 		pba_num[9] = pba_ptr & 0xF;
704 
705 		/* put a null character on the end of our string */
706 		pba_num[10] = '\0';
707 
708 		/* switch all the data but the '-' to hex char */
709 		for (offset = 0; offset < 10; offset++) {
710 			if (pba_num[offset] < 0xA)
711 				pba_num[offset] += '0';
712 			else if (pba_num[offset] < 0x10)
713 				pba_num[offset] += 'A' - 0xA;
714 		}
715 
716 		return IXGBE_SUCCESS;
717 	}
718 
719 	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
720 	if (ret_val) {
721 		DEBUGOUT("NVM Read Error\n");
722 		return ret_val;
723 	}
724 
725 	if (length == 0xFFFF || length == 0) {
726 		DEBUGOUT("NVM PBA number section invalid length\n");
727 		return IXGBE_ERR_PBA_SECTION;
728 	}
729 
730 	/* check if pba_num buffer is big enough */
731 	if (pba_num_size  < (((u32)length * 2) - 1)) {
732 		DEBUGOUT("PBA string buffer too small\n");
733 		return IXGBE_ERR_NO_SPACE;
734 	}
735 
736 	/* trim pba length from start of string */
737 	pba_ptr++;
738 	length--;
739 
740 	for (offset = 0; offset < length; offset++) {
741 		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
742 		if (ret_val) {
743 			DEBUGOUT("NVM Read Error\n");
744 			return ret_val;
745 		}
746 		pba_num[offset * 2] = (u8)(data >> 8);
747 		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
748 	}
749 	pba_num[offset * 2] = '\0';
750 
751 	return IXGBE_SUCCESS;
752 }
753 
754 /**
755  *  ixgbe_read_pba_num_generic - Reads part number from EEPROM
756  *  @hw: pointer to hardware structure
757  *  @pba_num: stores the part number from the EEPROM
758  *
759  *  Reads the part number from the EEPROM.
760  **/
761 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
762 {
763 	s32 ret_val;
764 	u16 data;
765 
766 	DEBUGFUNC("ixgbe_read_pba_num_generic");
767 
768 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
769 	if (ret_val) {
770 		DEBUGOUT("NVM Read Error\n");
771 		return ret_val;
772 	} else if (data == IXGBE_PBANUM_PTR_GUARD) {
773 		DEBUGOUT("NVM Not supported\n");
774 		return IXGBE_NOT_IMPLEMENTED;
775 	}
776 	*pba_num = (u32)(data << 16);
777 
778 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
779 	if (ret_val) {
780 		DEBUGOUT("NVM Read Error\n");
781 		return ret_val;
782 	}
783 	*pba_num |= data;
784 
785 	return IXGBE_SUCCESS;
786 }
787 
788 /**
789  *  ixgbe_read_pba_raw
790  *  @hw: pointer to the HW structure
791  *  @eeprom_buf: optional pointer to EEPROM image
792  *  @eeprom_buf_size: size of EEPROM image in words
793  *  @max_pba_block_size: PBA block size limit
794  *  @pba: pointer to output PBA structure
795  *
796  *  Reads PBA from EEPROM image when eeprom_buf is not NULL.
797  *  Reads PBA from physical EEPROM device when eeprom_buf is NULL.
798  *
799  **/
800 s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
801 		       u32 eeprom_buf_size, u16 max_pba_block_size,
802 		       struct ixgbe_pba *pba)
803 {
804 	s32 ret_val;
805 	u16 pba_block_size;
806 
807 	if (pba == NULL)
808 		return IXGBE_ERR_PARAM;
809 
810 	if (eeprom_buf == NULL) {
811 		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
812 						     &pba->word[0]);
813 		if (ret_val)
814 			return ret_val;
815 	} else {
816 		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
817 			pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
818 			pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
819 		} else {
820 			return IXGBE_ERR_PARAM;
821 		}
822 	}
823 
824 	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
825 		if (pba->pba_block == NULL)
826 			return IXGBE_ERR_PARAM;
827 
828 		ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
829 						   eeprom_buf_size,
830 						   &pba_block_size);
831 		if (ret_val)
832 			return ret_val;
833 
834 		if (pba_block_size > max_pba_block_size)
835 			return IXGBE_ERR_PARAM;
836 
837 		if (eeprom_buf == NULL) {
838 			ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
839 							     pba_block_size,
840 							     pba->pba_block);
841 			if (ret_val)
842 				return ret_val;
843 		} else {
844 			if (eeprom_buf_size > (u32)(pba->word[1] +
845 					      pba_block_size)) {
846 				memcpy(pba->pba_block,
847 				       &eeprom_buf[pba->word[1]],
848 				       pba_block_size * sizeof(u16));
849 			} else {
850 				return IXGBE_ERR_PARAM;
851 			}
852 		}
853 	}
854 
855 	return IXGBE_SUCCESS;
856 }
857 
858 /**
859  *  ixgbe_write_pba_raw
860  *  @hw: pointer to the HW structure
861  *  @eeprom_buf: optional pointer to EEPROM image
862  *  @eeprom_buf_size: size of EEPROM image in words
863  *  @pba: pointer to PBA structure
864  *
865  *  Writes PBA to EEPROM image when eeprom_buf is not NULL.
866  *  Writes PBA to physical EEPROM device when eeprom_buf is NULL.
867  *
868  **/
869 s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
870 			u32 eeprom_buf_size, struct ixgbe_pba *pba)
871 {
872 	s32 ret_val;
873 
874 	if (pba == NULL)
875 		return IXGBE_ERR_PARAM;
876 
877 	if (eeprom_buf == NULL) {
878 		ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
879 						      &pba->word[0]);
880 		if (ret_val)
881 			return ret_val;
882 	} else {
883 		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
884 			eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
885 			eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
886 		} else {
887 			return IXGBE_ERR_PARAM;
888 		}
889 	}
890 
891 	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
892 		if (pba->pba_block == NULL)
893 			return IXGBE_ERR_PARAM;
894 
895 		if (eeprom_buf == NULL) {
896 			ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
897 							      pba->pba_block[0],
898 							      pba->pba_block);
899 			if (ret_val)
900 				return ret_val;
901 		} else {
902 			if (eeprom_buf_size > (u32)(pba->word[1] +
903 					      pba->pba_block[0])) {
904 				memcpy(&eeprom_buf[pba->word[1]],
905 				       pba->pba_block,
906 				       pba->pba_block[0] * sizeof(u16));
907 			} else {
908 				return IXGBE_ERR_PARAM;
909 			}
910 		}
911 	}
912 
913 	return IXGBE_SUCCESS;
914 }
915 
916 /**
917  *  ixgbe_get_pba_block_size
918  *  @hw: pointer to the HW structure
919  *  @eeprom_buf: optional pointer to EEPROM image
920  *  @eeprom_buf_size: size of EEPROM image in words
921  *  @pba_data_size: pointer to output variable
922  *
923  *  Returns the size of the PBA block in words. Function operates on EEPROM
924  *  image if the eeprom_buf pointer is not NULL otherwise it accesses physical
925  *  EEPROM device.
926  *
927  **/
928 s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
929 			     u32 eeprom_buf_size, u16 *pba_block_size)
930 {
931 	s32 ret_val;
932 	u16 pba_word[2];
933 	u16 length;
934 
935 	DEBUGFUNC("ixgbe_get_pba_block_size");
936 
937 	if (eeprom_buf == NULL) {
938 		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
939 						     &pba_word[0]);
940 		if (ret_val)
941 			return ret_val;
942 	} else {
943 		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
944 			pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
945 			pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
946 		} else {
947 			return IXGBE_ERR_PARAM;
948 		}
949 	}
950 
951 	if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
952 		if (eeprom_buf == NULL) {
953 			ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
954 						      &length);
955 			if (ret_val)
956 				return ret_val;
957 		} else {
958 			if (eeprom_buf_size > pba_word[1])
959 				length = eeprom_buf[pba_word[1] + 0];
960 			else
961 				return IXGBE_ERR_PARAM;
962 		}
963 
964 		if (length == 0xFFFF || length == 0)
965 			return IXGBE_ERR_PBA_SECTION;
966 	} else {
967 		/* PBA number in legacy format, there is no PBA Block. */
968 		length = 0;
969 	}
970 
971 	if (pba_block_size != NULL)
972 		*pba_block_size = length;
973 
974 	return IXGBE_SUCCESS;
975 }
976 
977 /**
978  *  ixgbe_get_mac_addr_generic - Generic get MAC address
979  *  @hw: pointer to hardware structure
980  *  @mac_addr: Adapter MAC address
981  *
982  *  Reads the adapter's MAC address from first Receive Address Register (RAR0)
983  *  A reset of the adapter must be performed prior to calling this function
984  *  in order for the MAC address to have been loaded from the EEPROM into RAR0
985  **/
986 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
987 {
988 	u32 rar_high;
989 	u32 rar_low;
990 	u16 i;
991 
992 	DEBUGFUNC("ixgbe_get_mac_addr_generic");
993 
994 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
995 	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
996 
997 	for (i = 0; i < 4; i++)
998 		mac_addr[i] = (u8)(rar_low >> (i*8));
999 
1000 	for (i = 0; i < 2; i++)
1001 		mac_addr[i+4] = (u8)(rar_high >> (i*8));
1002 
1003 	return IXGBE_SUCCESS;
1004 }
1005 
1006 /**
1007  *  ixgbe_set_pci_config_data_generic - Generic store PCI bus info
1008  *  @hw: pointer to hardware structure
1009  *  @link_status: the link status returned by the PCI config space
1010  *
1011  *  Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
1012  **/
1013 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
1014 {
1015 	struct ixgbe_mac_info *mac = &hw->mac;
1016 
1017 	if (hw->bus.type == ixgbe_bus_type_unknown)
1018 		hw->bus.type = ixgbe_bus_type_pci_express;
1019 
1020 	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
1021 	case IXGBE_PCI_LINK_WIDTH_1:
1022 		hw->bus.width = ixgbe_bus_width_pcie_x1;
1023 		break;
1024 	case IXGBE_PCI_LINK_WIDTH_2:
1025 		hw->bus.width = ixgbe_bus_width_pcie_x2;
1026 		break;
1027 	case IXGBE_PCI_LINK_WIDTH_4:
1028 		hw->bus.width = ixgbe_bus_width_pcie_x4;
1029 		break;
1030 	case IXGBE_PCI_LINK_WIDTH_8:
1031 		hw->bus.width = ixgbe_bus_width_pcie_x8;
1032 		break;
1033 	default:
1034 		hw->bus.width = ixgbe_bus_width_unknown;
1035 		break;
1036 	}
1037 
1038 	switch (link_status & IXGBE_PCI_LINK_SPEED) {
1039 	case IXGBE_PCI_LINK_SPEED_2500:
1040 		hw->bus.speed = ixgbe_bus_speed_2500;
1041 		break;
1042 	case IXGBE_PCI_LINK_SPEED_5000:
1043 		hw->bus.speed = ixgbe_bus_speed_5000;
1044 		break;
1045 	case IXGBE_PCI_LINK_SPEED_8000:
1046 		hw->bus.speed = ixgbe_bus_speed_8000;
1047 		break;
1048 	default:
1049 		hw->bus.speed = ixgbe_bus_speed_unknown;
1050 		break;
1051 	}
1052 
1053 	mac->ops.set_lan_id(hw);
1054 }
1055 
1056 /**
1057  *  ixgbe_get_bus_info_generic - Generic set PCI bus info
1058  *  @hw: pointer to hardware structure
1059  *
1060  *  Gets the PCI bus info (speed, width, type) then calls helper function to
1061  *  store this data within the ixgbe_hw structure.
1062  **/
1063 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
1064 {
1065 	u16 link_status;
1066 
1067 	DEBUGFUNC("ixgbe_get_bus_info_generic");
1068 
1069 	/* Get the negotiated link width and speed from PCI config space */
1070 	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
1071 
1072 	ixgbe_set_pci_config_data_generic(hw, link_status);
1073 
1074 	return IXGBE_SUCCESS;
1075 }
1076 
1077 /**
1078  *  ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
1079  *  @hw: pointer to the HW structure
1080  *
1081  *  Determines the LAN function id by reading memory-mapped registers and swaps
1082  *  the port value if requested, and set MAC instance for devices that share
1083  *  CS4227.
1084  **/
1085 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
1086 {
1087 	struct ixgbe_bus_info *bus = &hw->bus;
1088 	u32 reg;
1089 	u16 ee_ctrl_4;
1090 
1091 	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
1092 
1093 	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
1094 	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
1095 	bus->lan_id = (u8)bus->func;
1096 
1097 	/* check for a port swap */
1098 	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
1099 	if (reg & IXGBE_FACTPS_LFS)
1100 		bus->func ^= 0x1;
1101 
1102 	/* Get MAC instance from EEPROM for configuring CS4227 */
1103 	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
1104 		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
1105 		bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
1106 				   IXGBE_EE_CTRL_4_INST_ID_SHIFT;
1107 	}
1108 }
1109 
1110 /**
1111  *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
1112  *  @hw: pointer to hardware structure
1113  *
1114  *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
1115  *  disables transmit and receive units. The adapter_stopped flag is used by
1116  *  the shared code and drivers to determine if the adapter is in a stopped
1117  *  state and should not touch the hardware.
1118  **/
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	u32 reg_val;
	u16 i;

	DEBUGFUNC("ixgbe_stop_adapter_generic");

	/*
	 * Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = TRUE;

	/* Disable the receive unit */
	ixgbe_disable_rx(hw);

	/* Clear interrupt mask to stop interrupts from being generated */
	/*
	 * XXX
	 * This function is called in the state of both interrupt disabled
	 * and interrupt enabled, e.g.
	 * + interrupt disabled case:
	 *   - ixgbe_stop()
	 *     - ixgbe_disable_intr() // interrupt disabled here
	 *     - ixgbe_stop_adapter()
	 *       - hw->mac.ops.stop_adapter()
	 *         == this function
	 * + interrupt enabled case:
	 *   - ixgbe_local_timer1()
	 *     - ixgbe_init_locked()
	 *       - ixgbe_stop_adapter()
	 *         - hw->mac.ops.stop_adapter()
	 *           == this function
	 * Therefore, it causes nest status breaking to nest the status
	 * (that is, que->im_nest++) at all times. So, this function must
	 * use ixgbe_ensure_disabled_intr() instead of ixgbe_disable_intr().
	 */
	ixgbe_ensure_disabled_intr(hw->back);

	/* Clear any pending interrupts, flush previous writes */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	for (i = 0; i < hw->mac.max_tx_queues; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		reg_val &= ~IXGBE_RXDCTL_ENABLE;
		reg_val |= IXGBE_RXDCTL_SWFLSH;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
	}

	/* flush all queues disables */
	IXGBE_WRITE_FLUSH(hw);
	/* Brief settle delay so the queue disables take effect before the
	 * PCIe master disable below. */
	msec_delay(2);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests
	 */
	return ixgbe_disable_pcie_master(hw);
}
1183 
1184 /**
1185  *  ixgbe_init_led_link_act_generic - Store the LED index link/activity.
1186  *  @hw: pointer to hardware structure
1187  *
1188  *  Store the index for the link active LED. This will be used to support
1189  *  blinking the LED.
1190  **/
1191 s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
1192 {
1193 	struct ixgbe_mac_info *mac = &hw->mac;
1194 	u32 led_reg, led_mode;
1195 	u8 i;
1196 
1197 	led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1198 
1199 	/* Get LED link active from the LEDCTL register */
1200 	for (i = 0; i < 4; i++) {
1201 		led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);
1202 
1203 		if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
1204 		     IXGBE_LED_LINK_ACTIVE) {
1205 			mac->led_link_act = i;
1206 			return IXGBE_SUCCESS;
1207 		}
1208 	}
1209 
1210 	/*
1211 	 * If LEDCTL register does not have the LED link active set, then use
1212 	 * known MAC defaults.
1213 	 */
1214 	switch (hw->mac.type) {
1215 	case ixgbe_mac_X550EM_a:
1216 	case ixgbe_mac_X550EM_x:
1217 		mac->led_link_act = 1;
1218 		break;
1219 	default:
1220 		mac->led_link_act = 2;
1221 	}
1222 	return IXGBE_SUCCESS;
1223 }
1224 
1225 /**
1226  *  ixgbe_led_on_generic - Turns on the software controllable LEDs.
1227  *  @hw: pointer to hardware structure
1228  *  @index: led number to turn on
1229  **/
1230 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
1231 {
1232 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1233 
1234 	DEBUGFUNC("ixgbe_led_on_generic");
1235 
1236 	if (index > 3)
1237 		return IXGBE_ERR_PARAM;
1238 
1239 	/* To turn on the LED, set mode to ON. */
1240 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
1241 	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
1242 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1243 	IXGBE_WRITE_FLUSH(hw);
1244 
1245 	return IXGBE_SUCCESS;
1246 }
1247 
1248 /**
1249  *  ixgbe_led_off_generic - Turns off the software controllable LEDs.
1250  *  @hw: pointer to hardware structure
1251  *  @index: led number to turn off
1252  **/
1253 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
1254 {
1255 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1256 
1257 	DEBUGFUNC("ixgbe_led_off_generic");
1258 
1259 	if (index > 3)
1260 		return IXGBE_ERR_PARAM;
1261 
1262 	/* To turn off the LED, set mode to OFF. */
1263 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
1264 	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
1265 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1266 	IXGBE_WRITE_FLUSH(hw);
1267 
1268 	return IXGBE_SUCCESS;
1269 }
1270 
1271 /**
1272  *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
1273  *  @hw: pointer to hardware structure
1274  *
1275  *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
1276  *  ixgbe_hw struct in order to set up EEPROM access.
1277  **/
1278 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
1279 {
1280 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1281 	u32 eec;
1282 	u16 eeprom_size;
1283 
1284 	DEBUGFUNC("ixgbe_init_eeprom_params_generic");
1285 
1286 	if (eeprom->type == ixgbe_eeprom_uninitialized) {
1287 		eeprom->type = ixgbe_eeprom_none;
1288 		/* Set default semaphore delay to 10ms which is a well
1289 		 * tested value */
1290 		eeprom->semaphore_delay = 10;
1291 		/* Clear EEPROM page size, it will be initialized as needed */
1292 		eeprom->word_page_size = 0;
1293 
1294 		/*
1295 		 * Check for EEPROM present first.
1296 		 * If not present leave as none
1297 		 */
1298 		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1299 		if (eec & IXGBE_EEC_PRES) {
1300 			eeprom->type = ixgbe_eeprom_spi;
1301 
1302 			/*
1303 			 * SPI EEPROM is assumed here.  This code would need to
1304 			 * change if a future EEPROM is not SPI.
1305 			 */
1306 			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1307 					    IXGBE_EEC_SIZE_SHIFT);
1308 			eeprom->word_size = 1 << (eeprom_size +
1309 					     IXGBE_EEPROM_WORD_SIZE_SHIFT);
1310 		}
1311 
1312 		if (eec & IXGBE_EEC_ADDR_SIZE)
1313 			eeprom->address_bits = 16;
1314 		else
1315 			eeprom->address_bits = 8;
1316 		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
1317 			  "%d\n", eeprom->type, eeprom->word_size,
1318 			  eeprom->address_bits);
1319 	}
1320 
1321 	return IXGBE_SUCCESS;
1322 }
1323 
1324 /**
1325  *  ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
1326  *  @hw: pointer to hardware structure
1327  *  @offset: offset within the EEPROM to write
1328  *  @words: number of word(s)
1329  *  @data: 16 bit word(s) to write to EEPROM
1330  *
 *  Writes 16 bit word(s) to EEPROM through bit-bang method
1332  **/
1333 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1334 					       u16 words, u16 *data)
1335 {
1336 	s32 status = IXGBE_SUCCESS;
1337 	u16 i, count;
1338 
1339 	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
1340 
1341 	hw->eeprom.ops.init_params(hw);
1342 
1343 	if (words == 0) {
1344 		status = IXGBE_ERR_INVALID_ARGUMENT;
1345 		goto out;
1346 	}
1347 
1348 	if (offset + words > hw->eeprom.word_size) {
1349 		status = IXGBE_ERR_EEPROM;
1350 		goto out;
1351 	}
1352 
1353 	/*
1354 	 * The EEPROM page size cannot be queried from the chip. We do lazy
1355 	 * initialization. It is worth to do that when we write large buffer.
1356 	 */
1357 	if ((hw->eeprom.word_page_size == 0) &&
1358 	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
1359 		ixgbe_detect_eeprom_page_size_generic(hw, offset);
1360 
1361 	/*
1362 	 * We cannot hold synchronization semaphores for too long
1363 	 * to avoid other entity starvation. However it is more efficient
1364 	 * to read in bursts than synchronizing access for each word.
1365 	 */
1366 	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1367 		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1368 			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1369 		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
1370 							    count, &data[i]);
1371 
1372 		if (status != IXGBE_SUCCESS)
1373 			break;
1374 	}
1375 
1376 out:
1377 	return status;
1378 }
1379 
1380 /**
1381  *  ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
1382  *  @hw: pointer to hardware structure
1383  *  @offset: offset within the EEPROM to be written to
1384  *  @words: number of word(s)
1385  *  @data: 16 bit word(s) to be written to the EEPROM
1386  *
1387  *  If ixgbe_eeprom_update_checksum is not called after this function, the
1388  *  EEPROM will most likely contain an invalid checksum.
1389  **/
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status;
	u16 word;
	u16 page_size;
	u16 i;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for writing  */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		/* Bus acquired but device not ready: release and fail. */
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);

			/*  Send the WRITE ENABLE command (8 bit opcode )  */
			ixgbe_shift_out_eeprom_bits(hw,
						   IXGBE_EEPROM_WREN_OPCODE_SPI,
						   IXGBE_EEPROM_OPCODE_BITS);

			ixgbe_standby_eeprom(hw);

			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			/*
			 * Once set, the A8 bit is deliberately kept: offsets
			 * only increase in this loop, so every later word
			 * also lies at or above word 128.
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the Write command (8-bit opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* Byte address: word offset times two. */
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			page_size = hw->eeprom.word_page_size;

			/* Send the data in burst via SPI*/
			do {
				/* Swap the two bytes of the word before
				 * shifting it out. */
				word = data[i];
				word = (word >> 8) | (word << 8);
				ixgbe_shift_out_eeprom_bits(hw, word, 16);

				/* Unknown page size: one word per command. */
				if (page_size == 0)
					break;

				/* do not wrap around page */
				if (((offset + i) & (page_size - 1)) ==
				    (page_size - 1))
					break;
			} while (++i < words);
			/*
			 * NOTE: the inner do/while advances 'i', so the
			 * outer loop resumes after the burst just written.
			 */

			ixgbe_standby_eeprom(hw);
			/* Give the device time to commit the write before
			 * the next command. */
			msec_delay(10);
		}
		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1462 
1463 /**
1464  *  ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1465  *  @hw: pointer to hardware structure
1466  *  @offset: offset within the EEPROM to be written to
1467  *  @data: 16 bit word to be written to the EEPROM
1468  *
1469  *  If ixgbe_eeprom_update_checksum is not called after this function, the
1470  *  EEPROM will most likely contain an invalid checksum.
1471  **/
1472 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1473 {
1474 	s32 status;
1475 
1476 	DEBUGFUNC("ixgbe_write_eeprom_generic");
1477 
1478 	hw->eeprom.ops.init_params(hw);
1479 
1480 	if (offset >= hw->eeprom.word_size) {
1481 		status = IXGBE_ERR_EEPROM;
1482 		goto out;
1483 	}
1484 
1485 	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1486 
1487 out:
1488 	return status;
1489 }
1490 
1491 /**
1492  *  ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1493  *  @hw: pointer to hardware structure
1494  *  @offset: offset within the EEPROM to be read
 *  @words: number of word(s)
 *  @data: read 16 bit word(s) from EEPROM
1497  *
1498  *  Reads 16 bit word(s) from EEPROM through bit-bang method
1499  **/
1500 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1501 					      u16 words, u16 *data)
1502 {
1503 	s32 status = IXGBE_SUCCESS;
1504 	u16 i, count;
1505 
1506 	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1507 
1508 	hw->eeprom.ops.init_params(hw);
1509 
1510 	if (words == 0) {
1511 		status = IXGBE_ERR_INVALID_ARGUMENT;
1512 		goto out;
1513 	}
1514 
1515 	if (offset + words > hw->eeprom.word_size) {
1516 		status = IXGBE_ERR_EEPROM;
1517 		goto out;
1518 	}
1519 
1520 	/*
1521 	 * We cannot hold synchronization semaphores for too long
1522 	 * to avoid other entity starvation. However it is more efficient
1523 	 * to read in bursts than synchronizing access for each word.
1524 	 */
1525 	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1526 		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1527 			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1528 
1529 		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1530 							   count, &data[i]);
1531 
1532 		if (status != IXGBE_SUCCESS)
1533 			break;
1534 	}
1535 
1536 out:
1537 	return status;
1538 }
1539 
1540 /**
1541  *  ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1542  *  @hw: pointer to hardware structure
1543  *  @offset: offset within the EEPROM to be read
1544  *  @words: number of word(s)
1545  *  @data: read 16 bit word(s) from EEPROM
1546  *
1547  *  Reads 16 bit word(s) from EEPROM through bit-bang method
1548  **/
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data)
{
	s32 status;
	u16 word_in;
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
	u16 i;

	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for reading  */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		/* Bus acquired but device not ready: release and fail. */
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);
			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			/*
			 * Once set, the A8 bit is deliberately kept: offsets
			 * only increase within this loop.
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the READ command (opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* Byte address: word offset times two. */
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			/* Read the data. */
			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
			/* Swap the two bytes of the shifted-in word. */
			data[i] = (word_in >> 8) | (word_in << 8);
		}

		/* End this read operation */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1597 
1598 /**
1599  *  ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1600  *  @hw: pointer to hardware structure
1601  *  @offset: offset within the EEPROM to be read
1602  *  @data: read 16 bit value from EEPROM
1603  *
1604  *  Reads 16 bit value from EEPROM through bit-bang method
1605  **/
1606 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1607 				       u16 *data)
1608 {
1609 	s32 status;
1610 
1611 	DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1612 
1613 	hw->eeprom.ops.init_params(hw);
1614 
1615 	if (offset >= hw->eeprom.word_size) {
1616 		status = IXGBE_ERR_EEPROM;
1617 		goto out;
1618 	}
1619 
1620 	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1621 
1622 out:
1623 	return status;
1624 }
1625 
1626 /**
1627  *  ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1628  *  @hw: pointer to hardware structure
1629  *  @offset: offset of word in the EEPROM to read
1630  *  @words: number of word(s)
1631  *  @data: 16 bit word(s) from the EEPROM
1632  *
1633  *  Reads a 16 bit word(s) from the EEPROM using the EERD register.
1634  **/
1635 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1636 				   u16 words, u16 *data)
1637 {
1638 	u32 eerd;
1639 	s32 status = IXGBE_SUCCESS;
1640 	u32 i;
1641 
1642 	DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1643 
1644 	hw->eeprom.ops.init_params(hw);
1645 
1646 	if (words == 0) {
1647 		status = IXGBE_ERR_INVALID_ARGUMENT;
1648 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1649 		goto out;
1650 	}
1651 
1652 	if (offset >= hw->eeprom.word_size) {
1653 		status = IXGBE_ERR_EEPROM;
1654 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1655 		goto out;
1656 	}
1657 
1658 	for (i = 0; i < words; i++) {
1659 		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1660 		       IXGBE_EEPROM_RW_REG_START;
1661 
1662 		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1663 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1664 
1665 		if (status == IXGBE_SUCCESS) {
1666 			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1667 				   IXGBE_EEPROM_RW_REG_DATA);
1668 		} else {
1669 			DEBUGOUT("Eeprom read timed out\n");
1670 			goto out;
1671 		}
1672 	}
1673 out:
1674 	return status;
1675 }
1676 
1677 /**
1678  *  ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1679  *  @hw: pointer to hardware structure
1680  *  @offset: offset within the EEPROM to be used as a scratch pad
1681  *
1682  *  Discover EEPROM page size by writing marching data at given offset.
1683  *  This function is called only when we are writing a new large buffer
1684  *  at given offset so the data would be overwritten anyway.
1685  **/
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset)
{
	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
	s32 status = IXGBE_SUCCESS;
	u16 i;

	DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");

	/* Marching pattern 0,1,2,... makes any wrap-around detectable. */
	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
		data[i] = i;

	/* Temporarily assume the maximum page size for the probe write,
	 * then reset it to "unknown" until the probe result is in. */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
					     IXGBE_EEPROM_PAGE_SIZE_MAX, data);
	hw->eeprom.word_page_size = 0;
	if (status != IXGBE_SUCCESS)
		goto out;

	/* Read back the first word written. */
	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * When writing in burst more than the actual page size
	 * EEPROM address wraps around current page.
	 */
	/*
	 * If the page holds P words, the burst wrapped and word 'offset'
	 * now contains value MAX - P, so P is recovered as MAX - data[0].
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];

	DEBUGOUT1("Detected EEPROM page size = %d words.",
		  hw->eeprom.word_page_size);
out:
	return status;
}
1720 
1721 /**
1722  *  ixgbe_read_eerd_generic - Read EEPROM word using EERD
1723  *  @hw: pointer to hardware structure
1724  *  @offset: offset of  word in the EEPROM to read
1725  *  @data: word read from the EEPROM
1726  *
1727  *  Reads a 16 bit word from the EEPROM using the EERD register.
1728  **/
1729 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1730 {
1731 	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1732 }
1733 
1734 /**
1735  *  ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1736  *  @hw: pointer to hardware structure
1737  *  @offset: offset of  word in the EEPROM to write
1738  *  @words: number of word(s)
1739  *  @data: word(s) write to the EEPROM
1740  *
1741  *  Write a 16 bit word(s) to the EEPROM using the EEWR register.
1742  **/
1743 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1744 				    u16 words, u16 *data)
1745 {
1746 	u32 eewr;
1747 	s32 status = IXGBE_SUCCESS;
1748 	u16 i;
1749 
1750 	DEBUGFUNC("ixgbe_write_eewr_generic");
1751 
1752 	hw->eeprom.ops.init_params(hw);
1753 
1754 	if (words == 0) {
1755 		status = IXGBE_ERR_INVALID_ARGUMENT;
1756 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1757 		goto out;
1758 	}
1759 
1760 	if (offset >= hw->eeprom.word_size) {
1761 		status = IXGBE_ERR_EEPROM;
1762 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1763 		goto out;
1764 	}
1765 
1766 	for (i = 0; i < words; i++) {
1767 		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1768 			(data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1769 			IXGBE_EEPROM_RW_REG_START;
1770 
1771 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1772 		if (status != IXGBE_SUCCESS) {
1773 			DEBUGOUT("Eeprom write EEWR timed out\n");
1774 			goto out;
1775 		}
1776 
1777 		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1778 
1779 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1780 		if (status != IXGBE_SUCCESS) {
1781 			DEBUGOUT("Eeprom write EEWR timed out\n");
1782 			goto out;
1783 		}
1784 	}
1785 
1786 out:
1787 	return status;
1788 }
1789 
1790 /**
1791  *  ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1792  *  @hw: pointer to hardware structure
1793  *  @offset: offset of  word in the EEPROM to write
1794  *  @data: word write to the EEPROM
1795  *
1796  *  Write a 16 bit word to the EEPROM using the EEWR register.
1797  **/
1798 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1799 {
1800 	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1801 }
1802 
1803 /**
1804  *  ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1805  *  @hw: pointer to hardware structure
1806  *  @ee_reg: EEPROM flag for polling
1807  *
1808  *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1809  *  read or write is done respectively.
1810  **/
1811 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1812 {
1813 	u32 i;
1814 	u32 reg;
1815 	s32 status = IXGBE_ERR_EEPROM;
1816 
1817 	DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1818 
1819 	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1820 		if (ee_reg == IXGBE_NVM_POLL_READ)
1821 			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1822 		else
1823 			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1824 
1825 		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1826 			status = IXGBE_SUCCESS;
1827 			break;
1828 		}
1829 		usec_delay(5);
1830 	}
1831 
1832 	if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1833 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
1834 			     "EEPROM read/write done polling timed out");
1835 
1836 	return status;
1837 }
1838 
1839 /**
1840  *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1841  *  @hw: pointer to hardware structure
1842  *
1843  *  Prepares EEPROM for access using bit-bang method. This function should
1844  *  be called before issuing a command to the EEPROM.
1845  **/
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 eec;
	u32 i;

	DEBUGFUNC("ixgbe_acquire_eeprom");

	/* First take the SW/FW semaphore protecting the EEPROM. */
	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
	    != IXGBE_SUCCESS)
		status = IXGBE_ERR_SWFW_SYNC;

	if (status == IXGBE_SUCCESS) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

		/* Request EEPROM Access */
		eec |= IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);

		/* Wait for hardware to grant the request (GNT bit). */
		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
			eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
			if (eec & IXGBE_EEC_GNT)
				break;
			usec_delay(5);
		}

		/* Release if grant not acquired */
		if (!(eec & IXGBE_EEC_GNT)) {
			eec &= ~IXGBE_EEC_REQ;
			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
			DEBUGOUT("Could not acquire EEPROM grant\n");

			/* Drop the semaphore taken above before failing. */
			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
			status = IXGBE_ERR_EEPROM;
		}

		/* Setup EEPROM for Read/Write */
		if (status == IXGBE_SUCCESS) {
			/* Clear CS and SK */
			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
			IXGBE_WRITE_FLUSH(hw);
			usec_delay(1);
		}
	}
	return status;
}
1893 
1894 /**
1895  *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
1896  *  @hw: pointer to hardware structure
1897  *
1898  *  Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1899  **/
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	DEBUGFUNC("ixgbe_get_eeprom_semaphore");


	/* Two-stage acquisition: SMBI (between drivers), then SWESMBI
	 * (between software and firmware). */
	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(50);
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
			 "not granted.\n");
		/*
		 * this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		usec_delay(50);
		/*
		 * one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI))
			status = IXGBE_SUCCESS;
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));

			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.
			 */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			usec_delay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			ERROR_REPORT1(IXGBE_ERROR_POLLING,
			    "SWESMBI Software EEPROM semaphore not granted.\n");
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			     "Software semaphore SMBI between device drivers "
			     "not granted.\n");
	}

	return status;
}
1984 
1985 /**
1986  *  ixgbe_release_eeprom_semaphore - Release hardware semaphore
1987  *  @hw: pointer to hardware structure
1988  *
1989  *  This function clears hardware semaphore bits.
1990  **/
1991 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1992 {
1993 	u32 swsm;
1994 
1995 	DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1996 
1997 	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1998 
1999 	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
2000 	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
2001 	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
2002 	IXGBE_WRITE_FLUSH(hw);
2003 }
2004 
2005 /**
2006  *  ixgbe_ready_eeprom - Polls for EEPROM ready
2007  *  @hw: pointer to hardware structure
2008  **/
2009 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
2010 {
2011 	s32 status = IXGBE_SUCCESS;
2012 	u16 i;
2013 	u8 spi_stat_reg;
2014 
2015 	DEBUGFUNC("ixgbe_ready_eeprom");
2016 
2017 	/*
2018 	 * Read "Status Register" repeatedly until the LSB is cleared.  The
2019 	 * EEPROM will signal that the command has been completed by clearing
2020 	 * bit 0 of the internal status register.  If it's not cleared within
2021 	 * 5 milliseconds, then error out.
2022 	 */
2023 	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
2024 		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
2025 					    IXGBE_EEPROM_OPCODE_BITS);
2026 		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
2027 		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
2028 			break;
2029 
2030 		usec_delay(5);
2031 		ixgbe_standby_eeprom(hw);
2032 	}
2033 
2034 	/*
2035 	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
2036 	 * devices (and only 0-5mSec on 5V devices)
2037 	 */
2038 	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
2039 		DEBUGOUT("SPI EEPROM Status error\n");
2040 		status = IXGBE_ERR_EEPROM;
2041 	}
2042 
2043 	return status;
2044 }
2045 
2046 /**
2047  *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
2048  *  @hw: pointer to hardware structure
2049  **/
2050 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
2051 {
2052 	u32 eec;
2053 
2054 	DEBUGFUNC("ixgbe_standby_eeprom");
2055 
2056 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2057 
2058 	/* Toggle CS to flush commands */
2059 	eec |= IXGBE_EEC_CS;
2060 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2061 	IXGBE_WRITE_FLUSH(hw);
2062 	usec_delay(1);
2063 	eec &= ~IXGBE_EEC_CS;
2064 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2065 	IXGBE_WRITE_FLUSH(hw);
2066 	usec_delay(1);
2067 }
2068 
2069 /**
2070  *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
2071  *  @hw: pointer to hardware structure
2072  *  @data: data to send to the EEPROM
2073  *  @count: number of bits to shift out
2074  **/
2075 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
2076 					u16 count)
2077 {
2078 	u32 eec;
2079 	u32 mask;
2080 	u32 i;
2081 
2082 	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
2083 
2084 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2085 
2086 	/*
2087 	 * Mask is used to shift "count" bits of "data" out to the EEPROM
2088 	 * one bit at a time.  Determine the starting bit based on count
2089 	 */
2090 	mask = 0x01 << (count - 1);
2091 
2092 	for (i = 0; i < count; i++) {
2093 		/*
2094 		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
2095 		 * "1", and then raising and then lowering the clock (the SK
2096 		 * bit controls the clock input to the EEPROM).  A "0" is
2097 		 * shifted out to the EEPROM by setting "DI" to "0" and then
2098 		 * raising and then lowering the clock.
2099 		 */
2100 		if (data & mask)
2101 			eec |= IXGBE_EEC_DI;
2102 		else
2103 			eec &= ~IXGBE_EEC_DI;
2104 
2105 		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2106 		IXGBE_WRITE_FLUSH(hw);
2107 
2108 		usec_delay(1);
2109 
2110 		ixgbe_raise_eeprom_clk(hw, &eec);
2111 		ixgbe_lower_eeprom_clk(hw, &eec);
2112 
2113 		/*
2114 		 * Shift mask to signify next bit of data to shift in to the
2115 		 * EEPROM
2116 		 */
2117 		mask = mask >> 1;
2118 	}
2119 
2120 	/* We leave the "DI" bit set to "0" when we leave this routine. */
2121 	eec &= ~IXGBE_EEC_DI;
2122 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2123 	IXGBE_WRITE_FLUSH(hw);
2124 }
2125 
2126 /**
2127  *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
2128  *  @hw: pointer to hardware structure
2129  *  @count: number of bits to shift
2130  **/
2131 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
2132 {
2133 	u32 eec;
2134 	u32 i;
2135 	u16 data = 0;
2136 
2137 	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
2138 
2139 	/*
2140 	 * In order to read a register from the EEPROM, we need to shift
2141 	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
2142 	 * the clock input to the EEPROM (setting the SK bit), and then reading
2143 	 * the value of the "DO" bit.  During this "shifting in" process the
2144 	 * "DI" bit should always be clear.
2145 	 */
2146 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2147 
2148 	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
2149 
2150 	for (i = 0; i < count; i++) {
2151 		data = data << 1;
2152 		ixgbe_raise_eeprom_clk(hw, &eec);
2153 
2154 		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2155 
2156 		eec &= ~(IXGBE_EEC_DI);
2157 		if (eec & IXGBE_EEC_DO)
2158 			data |= 1;
2159 
2160 		ixgbe_lower_eeprom_clk(hw, &eec);
2161 	}
2162 
2163 	return data;
2164 }
2165 
2166 /**
2167  *  ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
2168  *  @hw: pointer to hardware structure
2169  *  @eec: EEC register's current value
2170  **/
2171 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2172 {
2173 	DEBUGFUNC("ixgbe_raise_eeprom_clk");
2174 
2175 	/*
2176 	 * Raise the clock input to the EEPROM
2177 	 * (setting the SK bit), then delay
2178 	 */
2179 	*eec = *eec | IXGBE_EEC_SK;
2180 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2181 	IXGBE_WRITE_FLUSH(hw);
2182 	usec_delay(1);
2183 }
2184 
2185 /**
2186  *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
2187  *  @hw: pointer to hardware structure
2188  *  @eec: EEC's current value
2189  **/
2190 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2191 {
2192 	DEBUGFUNC("ixgbe_lower_eeprom_clk");
2193 
2194 	/*
2195 	 * Lower the clock input to the EEPROM (clearing the SK bit), then
2196 	 * delay
2197 	 */
2198 	*eec = *eec & ~IXGBE_EEC_SK;
2199 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2200 	IXGBE_WRITE_FLUSH(hw);
2201 	usec_delay(1);
2202 }
2203 
2204 /**
2205  *  ixgbe_release_eeprom - Release EEPROM, release semaphores
2206  *  @hw: pointer to hardware structure
2207  **/
2208 static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
2209 {
2210 	u32 eec;
2211 
2212 	DEBUGFUNC("ixgbe_release_eeprom");
2213 
2214 	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2215 
2216 	eec |= IXGBE_EEC_CS;  /* Pull CS high */
2217 	eec &= ~IXGBE_EEC_SK; /* Lower SCK */
2218 
2219 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2220 	IXGBE_WRITE_FLUSH(hw);
2221 
2222 	usec_delay(1);
2223 
2224 	/* Stop requesting EEPROM access */
2225 	eec &= ~IXGBE_EEC_REQ;
2226 	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2227 
2228 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
2229 
2230 	/* Delay before attempt to obtain semaphore again to allow FW access */
2231 	msec_delay(hw->eeprom.semaphore_delay);
2232 }
2233 
2234 /**
2235  *  ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
2236  *  @hw: pointer to hardware structure
2237  *
2238  *  Returns a negative error code on error, or the 16-bit checksum
2239  **/
2240 s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
2241 {
2242 	u16 i;
2243 	u16 j;
2244 	u16 checksum = 0;
2245 	u16 length = 0;
2246 	u16 pointer = 0;
2247 	u16 word = 0;
2248 
2249 	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
2250 
2251 	/* Include 0x0-0x3F in the checksum */
2252 	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
2253 		if (hw->eeprom.ops.read(hw, i, &word)) {
2254 			DEBUGOUT("EEPROM read failed\n");
2255 			return IXGBE_ERR_EEPROM;
2256 		}
2257 		checksum += word;
2258 	}
2259 
2260 	/* Include all data from pointers except for the fw pointer */
2261 	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
2262 		if (hw->eeprom.ops.read(hw, i, &pointer)) {
2263 			DEBUGOUT("EEPROM read failed\n");
2264 			return IXGBE_ERR_EEPROM;
2265 		}
2266 
2267 		/* If the pointer seems invalid */
2268 		if (pointer == 0xFFFF || pointer == 0)
2269 			continue;
2270 
2271 		if (hw->eeprom.ops.read(hw, pointer, &length)) {
2272 			DEBUGOUT("EEPROM read failed\n");
2273 			return IXGBE_ERR_EEPROM;
2274 		}
2275 
2276 		if (length == 0xFFFF || length == 0)
2277 			continue;
2278 
2279 		for (j = pointer + 1; j <= pointer + length; j++) {
2280 			if (hw->eeprom.ops.read(hw, j, &word)) {
2281 				DEBUGOUT("EEPROM read failed\n");
2282 				return IXGBE_ERR_EEPROM;
2283 			}
2284 			checksum += word;
2285 		}
2286 	}
2287 
2288 	checksum = (u16)IXGBE_EEPROM_SUM - checksum;
2289 
2290 	return (s32)checksum;
2291 }
2292 
2293 /**
2294  *  ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
2295  *  @hw: pointer to hardware structure
2296  *  @checksum_val: calculated checksum
2297  *
2298  *  Performs checksum calculation and validates the EEPROM checksum.  If the
2299  *  caller does not need checksum_val, the value can be NULL.
2300  **/
2301 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
2302 					   u16 *checksum_val)
2303 {
2304 	s32 status;
2305 	u16 checksum;
2306 	u16 read_checksum = 0;
2307 
2308 	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
2309 
2310 	/* Read the first word from the EEPROM. If this times out or fails, do
2311 	 * not continue or we could be in for a very long wait while every
2312 	 * EEPROM read fails
2313 	 */
2314 	status = hw->eeprom.ops.read(hw, 0, &checksum);
2315 	if (status) {
2316 		DEBUGOUT("EEPROM read failed\n");
2317 		return status;
2318 	}
2319 
2320 	status = hw->eeprom.ops.calc_checksum(hw);
2321 	if (status < 0)
2322 		return status;
2323 
2324 	checksum = (u16)(status & 0xffff);
2325 
2326 	status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
2327 	if (status) {
2328 		DEBUGOUT("EEPROM read failed\n");
2329 		return status;
2330 	}
2331 
2332 	/* Verify read checksum from EEPROM is the same as
2333 	 * calculated checksum
2334 	 */
2335 	if (read_checksum != checksum)
2336 		status = IXGBE_ERR_EEPROM_CHECKSUM;
2337 
2338 	/* If the user cares, return the calculated checksum */
2339 	if (checksum_val)
2340 		*checksum_val = checksum;
2341 
2342 	return status;
2343 }
2344 
2345 /**
2346  *  ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
2347  *  @hw: pointer to hardware structure
2348  **/
2349 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
2350 {
2351 	s32 status;
2352 	u16 checksum;
2353 
2354 	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
2355 
2356 	/* Read the first word from the EEPROM. If this times out or fails, do
2357 	 * not continue or we could be in for a very long wait while every
2358 	 * EEPROM read fails
2359 	 */
2360 	status = hw->eeprom.ops.read(hw, 0, &checksum);
2361 	if (status) {
2362 		DEBUGOUT("EEPROM read failed\n");
2363 		return status;
2364 	}
2365 
2366 	status = hw->eeprom.ops.calc_checksum(hw);
2367 	if (status < 0)
2368 		return status;
2369 
2370 	checksum = (u16)(status & 0xffff);
2371 
2372 	status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
2373 
2374 	return status;
2375 }
2376 
2377 /**
2378  *  ixgbe_validate_mac_addr - Validate MAC address
2379  *  @mac_addr: pointer to MAC address.
2380  *
2381  *  Tests a MAC address to ensure it is a valid Individual Address.
2382  **/
2383 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
2384 {
2385 	s32 status = IXGBE_SUCCESS;
2386 
2387 	DEBUGFUNC("ixgbe_validate_mac_addr");
2388 
2389 	/* Make sure it is not a multicast address */
2390 	if (IXGBE_IS_MULTICAST(mac_addr)) {
2391 		status = IXGBE_ERR_INVALID_MAC_ADDR;
2392 	/* Not a broadcast address */
2393 	} else if (IXGBE_IS_BROADCAST(mac_addr)) {
2394 		status = IXGBE_ERR_INVALID_MAC_ADDR;
2395 	/* Reject the zero address */
2396 	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
2397 		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
2398 		status = IXGBE_ERR_INVALID_MAC_ADDR;
2399 	}
2400 	return status;
2401 }
2402 
2403 /**
2404  *  ixgbe_set_rar_generic - Set Rx address register
2405  *  @hw: pointer to hardware structure
2406  *  @index: Receive address register to write
2407  *  @addr: Address to put into receive address register
2408  *  @vmdq: VMDq "set" or "pool" index
2409  *  @enable_addr: set flag that address is active
2410  *
2411  *  Puts an ethernet address into a receive address register.
2412  **/
2413 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
2414 			  u32 enable_addr)
2415 {
2416 	u32 rar_low, rar_high;
2417 	u32 rar_entries = hw->mac.num_rar_entries;
2418 
2419 	DEBUGFUNC("ixgbe_set_rar_generic");
2420 
2421 	/* Make sure we are using a valid rar index range */
2422 	if (index >= rar_entries) {
2423 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2424 			     "RAR index %d is out of range.\n", index);
2425 		return IXGBE_ERR_INVALID_ARGUMENT;
2426 	}
2427 
2428 	/* setup VMDq pool selection before this RAR gets enabled */
2429 	hw->mac.ops.set_vmdq(hw, index, vmdq);
2430 
2431 	/*
2432 	 * HW expects these in little endian so we reverse the byte
2433 	 * order from network order (big endian) to little endian
2434 	 */
2435 	rar_low = ((u32)addr[0] |
2436 		   ((u32)addr[1] << 8) |
2437 		   ((u32)addr[2] << 16) |
2438 		   ((u32)addr[3] << 24));
2439 	/*
2440 	 * Some parts put the VMDq setting in the extra RAH bits,
2441 	 * so save everything except the lower 16 bits that hold part
2442 	 * of the address and the address valid bit.
2443 	 */
2444 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2445 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2446 	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
2447 
2448 	if (enable_addr != 0)
2449 		rar_high |= IXGBE_RAH_AV;
2450 
2451 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
2452 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2453 
2454 	return IXGBE_SUCCESS;
2455 }
2456 
2457 /**
2458  *  ixgbe_clear_rar_generic - Remove Rx address register
2459  *  @hw: pointer to hardware structure
2460  *  @index: Receive address register to write
2461  *
2462  *  Clears an ethernet address from a receive address register.
2463  **/
2464 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2465 {
2466 	u32 rar_high;
2467 	u32 rar_entries = hw->mac.num_rar_entries;
2468 
2469 	DEBUGFUNC("ixgbe_clear_rar_generic");
2470 
2471 	/* Make sure we are using a valid rar index range */
2472 	if (index >= rar_entries) {
2473 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2474 			     "RAR index %d is out of range.\n", index);
2475 		return IXGBE_ERR_INVALID_ARGUMENT;
2476 	}
2477 
2478 	/*
2479 	 * Some parts put the VMDq setting in the extra RAH bits,
2480 	 * so save everything except the lower 16 bits that hold part
2481 	 * of the address and the address valid bit.
2482 	 */
2483 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2484 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2485 
2486 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2487 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2488 
2489 	/* clear VMDq pool/queue selection for this RAR */
2490 	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2491 
2492 	return IXGBE_SUCCESS;
2493 }
2494 
2495 /**
2496  *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
2497  *  @hw: pointer to hardware structure
2498  *
2499  *  Places the MAC address in receive address register 0 and clears the rest
2500  *  of the receive address registers. Clears the multicast table. Assumes
2501  *  the receiver is in reset when the routine is called.
2502  **/
2503 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
2504 {
2505 	u32 i;
2506 	u32 rar_entries = hw->mac.num_rar_entries;
2507 
2508 	DEBUGFUNC("ixgbe_init_rx_addrs_generic");
2509 
2510 	/*
2511 	 * If the current mac address is valid, assume it is a software override
2512 	 * to the permanent address.
2513 	 * Otherwise, use the permanent address from the eeprom.
2514 	 */
2515 	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
2516 	    IXGBE_ERR_INVALID_MAC_ADDR) {
2517 		/* Get the MAC address from the RAR0 for later reference */
2518 		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2519 
2520 		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
2521 			  hw->mac.addr[0], hw->mac.addr[1],
2522 			  hw->mac.addr[2]);
2523 		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2524 			  hw->mac.addr[4], hw->mac.addr[5]);
2525 	} else {
2526 		/* Setup the receive address. */
2527 		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
2528 		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
2529 			  hw->mac.addr[0], hw->mac.addr[1],
2530 			  hw->mac.addr[2]);
2531 		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2532 			  hw->mac.addr[4], hw->mac.addr[5]);
2533 
2534 		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2535 	}
2536 
2537 	/* clear VMDq pool/queue selection for RAR 0 */
2538 	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
2539 
2540 	hw->addr_ctrl.overflow_promisc = 0;
2541 
2542 	hw->addr_ctrl.rar_used_count = 1;
2543 
2544 	/* Zero out the other receive addresses. */
2545 	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
2546 	for (i = 1; i < rar_entries; i++) {
2547 		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
2548 		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
2549 	}
2550 
2551 	/* Clear the MTA */
2552 	hw->addr_ctrl.mta_in_use = 0;
2553 	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2554 
2555 	DEBUGOUT(" Clearing MTA\n");
2556 	for (i = 0; i < hw->mac.mcft_size; i++)
2557 		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
2558 
2559 	ixgbe_init_uta_tables(hw);
2560 
2561 	return IXGBE_SUCCESS;
2562 }
2563 
2564 /**
2565  *  ixgbe_add_uc_addr - Adds a secondary unicast address.
2566  *  @hw: pointer to hardware structure
2567  *  @addr: new address
2568  *  @vmdq: VMDq "set" or "pool" index
2569  *
2570  *  Adds it to unused receive address register or goes into promiscuous mode.
2571  **/
2572 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2573 {
2574 	u32 rar_entries = hw->mac.num_rar_entries;
2575 	u32 rar;
2576 
2577 	DEBUGFUNC("ixgbe_add_uc_addr");
2578 
2579 	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2580 		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2581 
2582 	/*
2583 	 * Place this address in the RAR if there is room,
2584 	 * else put the controller into promiscuous mode
2585 	 */
2586 	if (hw->addr_ctrl.rar_used_count < rar_entries) {
2587 		rar = hw->addr_ctrl.rar_used_count;
2588 		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2589 		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2590 		hw->addr_ctrl.rar_used_count++;
2591 	} else {
2592 		hw->addr_ctrl.overflow_promisc++;
2593 	}
2594 
2595 	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2596 }
2597 
2598 /**
2599  *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
2600  *  @hw: pointer to hardware structure
2601  *  @addr_list: the list of new addresses
2602  *  @addr_count: number of addresses
2603  *  @next: iterator function to walk the address list
2604  *
2605  *  The given list replaces any existing list.  Clears the secondary addrs from
2606  *  receive address registers.  Uses unused receive address registers for the
2607  *  first secondary addresses, and falls back to promiscuous mode as needed.
2608  *
2609  *  Drivers using secondary unicast addresses must set user_set_promisc when
2610  *  manually putting the device into promiscuous mode.
2611  **/
2612 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
2613 				      u32 addr_count, ixgbe_mc_addr_itr next)
2614 {
2615 	u8 *addr;
2616 	u32 i;
2617 	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
2618 	u32 uc_addr_in_use;
2619 	u32 fctrl;
2620 	u32 vmdq;
2621 
2622 	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
2623 
2624 	/*
2625 	 * Clear accounting of old secondary address list,
2626 	 * don't count RAR[0]
2627 	 */
2628 	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
2629 	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
2630 	hw->addr_ctrl.overflow_promisc = 0;
2631 
2632 	/* Zero out the other receive addresses */
2633 	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
2634 	for (i = 0; i < uc_addr_in_use; i++) {
2635 		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
2636 		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
2637 	}
2638 
2639 	/* Add the new addresses */
2640 	for (i = 0; i < addr_count; i++) {
2641 		DEBUGOUT(" Adding the secondary addresses:\n");
2642 		addr = next(hw, &addr_list, &vmdq);
2643 		ixgbe_add_uc_addr(hw, addr, vmdq);
2644 	}
2645 
2646 	if (hw->addr_ctrl.overflow_promisc) {
2647 		/* enable promisc if not already in overflow or set by user */
2648 		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2649 			DEBUGOUT(" Entering address overflow promisc mode\n");
2650 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2651 			fctrl |= IXGBE_FCTRL_UPE;
2652 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2653 		}
2654 	} else {
2655 		/* only disable if set by overflow, not by user */
2656 		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2657 			DEBUGOUT(" Leaving address overflow promisc mode\n");
2658 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2659 			fctrl &= ~IXGBE_FCTRL_UPE;
2660 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2661 		}
2662 	}
2663 
2664 	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
2665 	return IXGBE_SUCCESS;
2666 }
2667 
2668 /**
2669  *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
2670  *  @hw: pointer to hardware structure
2671  *  @mc_addr: the multicast address
2672  *
2673  *  Extracts the 12 bits, from a multicast address, to determine which
2674  *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
2675  *  incoming rx multicast addresses, to determine the bit-vector to check in
2676  *  the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
2677  *  by the MO field of the MCSTCTRL. The MO field is set during initialization
2678  *  to mc_filter_type.
2679  **/
2680 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2681 {
2682 	u32 vector = 0;
2683 
2684 	DEBUGFUNC("ixgbe_mta_vector");
2685 
2686 	switch (hw->mac.mc_filter_type) {
2687 	case 0:   /* use bits [47:36] of the address */
2688 		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2689 		break;
2690 	case 1:   /* use bits [46:35] of the address */
2691 		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2692 		break;
2693 	case 2:   /* use bits [45:34] of the address */
2694 		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2695 		break;
2696 	case 3:   /* use bits [43:32] of the address */
2697 		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2698 		break;
2699 	default:  /* Invalid mc_filter_type */
2700 		DEBUGOUT("MC filter type param set incorrectly\n");
2701 		ASSERT(0);
2702 		break;
2703 	}
2704 
2705 	/* vector can only be 12-bits or boundary will be exceeded */
2706 	vector &= 0xFFF;
2707 	return vector;
2708 }
2709 
2710 /**
2711  *  ixgbe_set_mta - Set bit-vector in multicast table
2712  *  @hw: pointer to hardware structure
2713  *  @mc_addr: Multicast address
2714  *
2715  *  Sets the bit-vector in the multicast table.
2716  **/
2717 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
2718 {
2719 	u32 vector;
2720 	u32 vector_bit;
2721 	u32 vector_reg;
2722 
2723 	DEBUGFUNC("ixgbe_set_mta");
2724 
2725 	hw->addr_ctrl.mta_in_use++;
2726 
2727 	vector = ixgbe_mta_vector(hw, mc_addr);
2728 	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
2729 
2730 	/*
2731 	 * The MTA is a register array of 128 32-bit registers. It is treated
2732 	 * like an array of 4096 bits.  We want to set bit
2733 	 * BitArray[vector_value]. So we figure out what register the bit is
2734 	 * in, read it, OR in the new bit, then write back the new value.  The
2735 	 * register is determined by the upper 7 bits of the vector value and
2736 	 * the bit within that register are determined by the lower 5 bits of
2737 	 * the value.
2738 	 */
2739 	vector_reg = (vector >> 5) & 0x7F;
2740 	vector_bit = vector & 0x1F;
2741 	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2742 }
2743 
2744 /**
2745  *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2746  *  @hw: pointer to hardware structure
2747  *  @mc_addr_list: the list of new multicast addresses
2748  *  @mc_addr_count: number of addresses
2749  *  @next: iterator function to walk the multicast address list
2750  *  @clear: flag, when set clears the table beforehand
2751  *
2752  *  When the clear flag is set, the given list replaces any existing list.
2753  *  Hashes the given addresses into the multicast table.
2754  **/
2755 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2756 				      u32 mc_addr_count, ixgbe_mc_addr_itr next,
2757 				      bool clear)
2758 {
2759 	u32 i;
2760 	u32 vmdq;
2761 
2762 	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2763 
2764 	/*
2765 	 * Set the new number of MC addresses that we are being requested to
2766 	 * use.
2767 	 */
2768 	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2769 	hw->addr_ctrl.mta_in_use = 0;
2770 
2771 	/* Clear mta_shadow */
2772 	if (clear) {
2773 		DEBUGOUT(" Clearing MTA\n");
2774 		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2775 	}
2776 
2777 	/* Update mta_shadow */
2778 	for (i = 0; i < mc_addr_count; i++) {
2779 		DEBUGOUT(" Adding the multicast addresses:\n");
2780 		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
2781 	}
2782 
2783 	/* Enable mta */
2784 	for (i = 0; i < hw->mac.mcft_size; i++)
2785 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2786 				      hw->mac.mta_shadow[i]);
2787 
2788 	if (hw->addr_ctrl.mta_in_use > 0)
2789 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2790 				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2791 
2792 	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2793 	return IXGBE_SUCCESS;
2794 }
2795 
2796 /**
2797  *  ixgbe_enable_mc_generic - Enable multicast address in RAR
2798  *  @hw: pointer to hardware structure
2799  *
2800  *  Enables multicast address in RAR and the use of the multicast hash table.
2801  **/
2802 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2803 {
2804 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2805 
2806 	DEBUGFUNC("ixgbe_enable_mc_generic");
2807 
2808 	if (a->mta_in_use > 0)
2809 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2810 				hw->mac.mc_filter_type);
2811 
2812 	return IXGBE_SUCCESS;
2813 }
2814 
2815 /**
2816  *  ixgbe_disable_mc_generic - Disable multicast address in RAR
2817  *  @hw: pointer to hardware structure
2818  *
2819  *  Disables multicast address in RAR and the use of the multicast hash table.
2820  **/
2821 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2822 {
2823 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2824 
2825 	DEBUGFUNC("ixgbe_disable_mc_generic");
2826 
2827 	if (a->mta_in_use > 0)
2828 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2829 
2830 	return IXGBE_SUCCESS;
2831 }
2832 
2833 /**
2834  *  ixgbe_fc_enable_generic - Enable flow control
2835  *  @hw: pointer to hardware structure
2836  *
2837  *  Enable flow control according to the current settings.
2838  **/
2839 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2840 {
2841 	s32 ret_val = IXGBE_SUCCESS;
2842 	u32 mflcn_reg, fccfg_reg;
2843 	u32 reg;
2844 	u32 fcrtl, fcrth;
2845 	int i;
2846 
2847 	DEBUGFUNC("ixgbe_fc_enable_generic");
2848 
2849 	/* Validate the water mark configuration */
2850 	if (!hw->fc.pause_time) {
2851 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2852 		goto out;
2853 	}
2854 
2855 	/* Low water mark of zero causes XOFF floods */
2856 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2857 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2858 		    hw->fc.high_water[i]) {
2859 			if (!hw->fc.low_water[i] ||
2860 			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2861 				DEBUGOUT("Invalid water mark configuration\n");
2862 				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2863 				goto out;
2864 			}
2865 		}
2866 	}
2867 
2868 	/* Negotiate the fc mode to use */
2869 	hw->mac.ops.fc_autoneg(hw);
2870 
2871 	/* Disable any previous flow control settings */
2872 	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2873 	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
2874 
2875 	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2876 	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2877 
2878 	/*
2879 	 * The possible values of fc.current_mode are:
2880 	 * 0: Flow control is completely disabled
2881 	 * 1: Rx flow control is enabled (we can receive pause frames,
2882 	 *    but not send pause frames).
2883 	 * 2: Tx flow control is enabled (we can send pause frames but
2884 	 *    we do not support receiving pause frames).
2885 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2886 	 * other: Invalid.
2887 	 */
2888 	switch (hw->fc.current_mode) {
2889 	case ixgbe_fc_none:
2890 		/*
2891 		 * Flow control is disabled by software override or autoneg.
2892 		 * The code below will actually disable it in the HW.
2893 		 */
2894 		break;
2895 	case ixgbe_fc_rx_pause:
2896 		/*
2897 		 * Rx Flow control is enabled and Tx Flow control is
2898 		 * disabled by software override. Since there really
2899 		 * isn't a way to advertise that we are capable of RX
2900 		 * Pause ONLY, we will advertise that we support both
2901 		 * symmetric and asymmetric Rx PAUSE.  Later, we will
2902 		 * disable the adapter's ability to send PAUSE frames.
2903 		 */
2904 		mflcn_reg |= IXGBE_MFLCN_RFCE;
2905 		break;
2906 	case ixgbe_fc_tx_pause:
2907 		/*
2908 		 * Tx Flow control is enabled, and Rx Flow control is
2909 		 * disabled by software override.
2910 		 */
2911 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2912 		break;
2913 	case ixgbe_fc_full:
2914 		/* Flow control (both Rx and Tx) is enabled by SW override. */
2915 		mflcn_reg |= IXGBE_MFLCN_RFCE;
2916 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2917 		break;
2918 	default:
2919 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
2920 			     "Flow control param set incorrectly\n");
2921 		ret_val = IXGBE_ERR_CONFIG;
2922 		goto out;
2923 		break;
2924 	}
2925 
2926 	/* Set 802.3x based flow control settings. */
2927 	mflcn_reg |= IXGBE_MFLCN_DPF;
2928 	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2929 	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2930 
2931 
2932 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
2933 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2934 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2935 		    hw->fc.high_water[i]) {
2936 			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2937 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2938 			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2939 		} else {
2940 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2941 			/*
2942 			 * In order to prevent Tx hangs when the internal Tx
2943 			 * switch is enabled we must set the high water mark
2944 			 * to the Rx packet buffer size - 24KB.  This allows
2945 			 * the Tx switch to function even under heavy Rx
2946 			 * workloads.
2947 			 */
2948 			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
2949 		}
2950 
2951 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2952 	}
2953 
2954 	/* Configure pause time (2 TCs per register) */
2955 	reg = (u32)hw->fc.pause_time * 0x00010001;
2956 	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2957 		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2958 
2959 	/* Configure flow control refresh threshold value */
2960 	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2961 
2962 out:
2963 	return ret_val;
2964 }
2965 
2966 /**
2967  *  ixgbe_negotiate_fc - Negotiate flow control
2968  *  @hw: pointer to hardware structure
2969  *  @adv_reg: flow control advertised settings
2970  *  @lp_reg: link partner's flow control settings
2971  *  @adv_sym: symmetric pause bit in advertisement
2972  *  @adv_asm: asymmetric pause bit in advertisement
2973  *  @lp_sym: symmetric pause bit in link partner advertisement
2974  *  @lp_asm: asymmetric pause bit in link partner advertisement
2975  *
2976  *  Find the intersection between advertised settings and link partner's
2977  *  advertised settings
2978  **/
2979 s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2980 		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2981 {
2982 	if ((!(adv_reg)) ||  (!(lp_reg))) {
2983 		ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2984 			     "Local or link partner's advertised flow control "
2985 			     "settings are NULL. Local: %x, link partner: %x\n",
2986 			     adv_reg, lp_reg);
2987 		return IXGBE_ERR_FC_NOT_NEGOTIATED;
2988 	}
2989 
2990 	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2991 		/*
2992 		 * Now we need to check if the user selected Rx ONLY
2993 		 * of pause frames.  In this case, we had to advertise
2994 		 * FULL flow control because we could not advertise RX
2995 		 * ONLY. Hence, we must now check to see if we need to
2996 		 * turn OFF the TRANSMISSION of PAUSE frames.
2997 		 */
2998 		if (hw->fc.requested_mode == ixgbe_fc_full) {
2999 			hw->fc.current_mode = ixgbe_fc_full;
3000 			DEBUGOUT("Flow Control = FULL.\n");
3001 		} else {
3002 			hw->fc.current_mode = ixgbe_fc_rx_pause;
3003 			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
3004 		}
3005 	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
3006 		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
3007 		hw->fc.current_mode = ixgbe_fc_tx_pause;
3008 		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
3009 	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
3010 		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
3011 		hw->fc.current_mode = ixgbe_fc_rx_pause;
3012 		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
3013 	} else {
3014 		hw->fc.current_mode = ixgbe_fc_none;
3015 		DEBUGOUT("Flow Control = NONE.\n");
3016 	}
3017 	return IXGBE_SUCCESS;
3018 }
3019 
3020 /**
3021  *  ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
3022  *  @hw: pointer to hardware structure
3023  *
3024  *  Enable flow control according on 1 gig fiber.
3025  **/
3026 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
3027 {
3028 	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
3029 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3030 
3031 	/*
3032 	 * On multispeed fiber at 1g, bail out if
3033 	 * - link is up but AN did not complete, or if
3034 	 * - link is up and AN completed but timed out
3035 	 */
3036 
3037 	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
3038 	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
3039 	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
3040 		DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
3041 		goto out;
3042 	}
3043 
3044 	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
3045 	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
3046 
3047 	ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
3048 				      pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
3049 				      IXGBE_PCS1GANA_ASM_PAUSE,
3050 				      IXGBE_PCS1GANA_SYM_PAUSE,
3051 				      IXGBE_PCS1GANA_ASM_PAUSE);
3052 
3053 out:
3054 	return ret_val;
3055 }
3056 
3057 /**
3058  *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
3059  *  @hw: pointer to hardware structure
3060  *
3061  *  Enable flow control according to IEEE clause 37.
3062  **/
3063 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
3064 {
3065 	u32 links2, anlp1_reg, autoc_reg, links;
3066 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3067 
3068 	/*
3069 	 * On backplane, bail out if
3070 	 * - backplane autoneg was not completed, or if
3071 	 * - we are 82599 and link partner is not AN enabled
3072 	 */
3073 	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
3074 	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
3075 		DEBUGOUT("Auto-Negotiation did not complete\n");
3076 		goto out;
3077 	}
3078 
3079 	if (hw->mac.type == ixgbe_mac_82599EB) {
3080 		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
3081 		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
3082 			DEBUGOUT("Link partner is not AN enabled\n");
3083 			goto out;
3084 		}
3085 	}
3086 	/*
3087 	 * Read the 10g AN autoc and LP ability registers and resolve
3088 	 * local flow control settings accordingly
3089 	 */
3090 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3091 	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
3092 
3093 	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
3094 		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
3095 		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
3096 
3097 out:
3098 	return ret_val;
3099 }
3100 
3101 /**
3102  *  ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
3103  *  @hw: pointer to hardware structure
3104  *
3105  *  Enable flow control according to IEEE clause 37.
3106  **/
3107 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
3108 {
3109 	u16 technology_ability_reg = 0;
3110 	u16 lp_technology_ability_reg = 0;
3111 
3112 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
3113 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3114 			     &technology_ability_reg);
3115 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
3116 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3117 			     &lp_technology_ability_reg);
3118 
3119 	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
3120 				  (u32)lp_technology_ability_reg,
3121 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
3122 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
3123 }
3124 
3125 /**
3126  *  ixgbe_fc_autoneg - Configure flow control
3127  *  @hw: pointer to hardware structure
3128  *
3129  *  Compares our advertised flow control capabilities to those advertised by
3130  *  our link partner, and determines the proper flow control mode to use.
3131  **/
3132 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
3133 {
3134 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3135 	ixgbe_link_speed speed;
3136 	bool link_up;
3137 
3138 	DEBUGFUNC("ixgbe_fc_autoneg");
3139 
3140 	/*
3141 	 * AN should have completed when the cable was plugged in.
3142 	 * Look for reasons to bail out.  Bail out if:
3143 	 * - FC autoneg is disabled, or if
3144 	 * - link is not up.
3145 	 */
3146 	if (hw->fc.disable_fc_autoneg) {
3147 		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
3148 			     "Flow control autoneg is disabled");
3149 		goto out;
3150 	}
3151 
3152 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
3153 	if (!link_up) {
3154 		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
3155 		goto out;
3156 	}
3157 
3158 	switch (hw->phy.media_type) {
3159 	/* Autoneg flow control on fiber adapters */
3160 	case ixgbe_media_type_fiber_fixed:
3161 	case ixgbe_media_type_fiber_qsfp:
3162 	case ixgbe_media_type_fiber:
3163 		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
3164 			ret_val = ixgbe_fc_autoneg_fiber(hw);
3165 		break;
3166 
3167 	/* Autoneg flow control on backplane adapters */
3168 	case ixgbe_media_type_backplane:
3169 		ret_val = ixgbe_fc_autoneg_backplane(hw);
3170 		break;
3171 
3172 	/* Autoneg flow control on copper adapters */
3173 	case ixgbe_media_type_copper:
3174 		if (ixgbe_device_supports_autoneg_fc(hw))
3175 			ret_val = ixgbe_fc_autoneg_copper(hw);
3176 		break;
3177 
3178 	default:
3179 		break;
3180 	}
3181 
3182 out:
3183 	if (ret_val == IXGBE_SUCCESS) {
3184 		hw->fc.fc_was_autonegged = TRUE;
3185 	} else {
3186 		hw->fc.fc_was_autonegged = FALSE;
3187 		hw->fc.current_mode = hw->fc.requested_mode;
3188 	}
3189 }
3190 
3191 /*
3192  * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
3193  * @hw: pointer to hardware structure
3194  *
3195  * System-wide timeout range is encoded in PCIe Device Control2 register.
3196  *
3197  * Add 10% to specified maximum and return the number of times to poll for
3198  * completion timeout, in units of 100 microsec.  Never return less than
3199  * 800 = 80 millisec.
3200  */
3201 static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
3202 {
3203 	s16 devctl2;
3204 	u32 pollcnt;
3205 
3206 	devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
3207 	devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
3208 
3209 	switch (devctl2) {
3210 	case IXGBE_PCIDEVCTRL2_65_130ms:
3211 		pollcnt = 1300;		/* 130 millisec */
3212 		break;
3213 	case IXGBE_PCIDEVCTRL2_260_520ms:
3214 		pollcnt = 5200;		/* 520 millisec */
3215 		break;
3216 	case IXGBE_PCIDEVCTRL2_1_2s:
3217 		pollcnt = 20000;	/* 2 sec */
3218 		break;
3219 	case IXGBE_PCIDEVCTRL2_4_8s:
3220 		pollcnt = 80000;	/* 8 sec */
3221 		break;
3222 	case IXGBE_PCIDEVCTRL2_17_34s:
3223 		pollcnt = 34000;	/* 34 sec */
3224 		break;
3225 	case IXGBE_PCIDEVCTRL2_50_100us:	/* 100 microsecs */
3226 	case IXGBE_PCIDEVCTRL2_1_2ms:		/* 2 millisecs */
3227 	case IXGBE_PCIDEVCTRL2_16_32ms:		/* 32 millisec */
3228 	case IXGBE_PCIDEVCTRL2_16_32ms_def:	/* 32 millisec default */
3229 	default:
3230 		pollcnt = 800;		/* 80 millisec minimum */
3231 		break;
3232 	}
3233 
3234 	/* add 10% to spec maximum */
3235 	return (pollcnt * 11) / 10;
3236 }
3237 
3238 /**
3239  *  ixgbe_disable_pcie_master - Disable PCI-express master access
3240  *  @hw: pointer to hardware structure
3241  *
3242  *  Disables PCI-Express master access and verifies there are no pending
3243  *  requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
3244  *  bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
3245  *  is returned signifying master requests disabled.
3246  **/
3247 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
3248 {
3249 	s32 status = IXGBE_SUCCESS;
3250 	u32 i, poll;
3251 	u16 value;
3252 
3253 	DEBUGFUNC("ixgbe_disable_pcie_master");
3254 
3255 	/* Always set this bit to ensure any future transactions are blocked */
3256 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
3257 
3258 	/* Exit if master requests are blocked */
3259 	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
3260 	    IXGBE_REMOVED(hw->hw_addr))
3261 		goto out;
3262 
3263 	/* Poll for master request bit to clear */
3264 	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
3265 		usec_delay(100);
3266 		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
3267 			goto out;
3268 	}
3269 
3270 	/*
3271 	 * Two consecutive resets are required via CTRL.RST per datasheet
3272 	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
3273 	 * of this need.  The first reset prevents new master requests from
3274 	 * being issued by our device.  We then must wait 1usec or more for any
3275 	 * remaining completions from the PCIe bus to trickle in, and then reset
3276 	 * again to clear out any effects they may have had on our device.
3277 	 */
3278 	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
3279 	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
3280 
3281 	if (hw->mac.type >= ixgbe_mac_X550)
3282 		goto out;
3283 
3284 	/*
3285 	 * Before proceeding, make sure that the PCIe block does not have
3286 	 * transactions pending.
3287 	 */
3288 	poll = ixgbe_pcie_timeout_poll(hw);
3289 	for (i = 0; i < poll; i++) {
3290 		usec_delay(100);
3291 		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
3292 		if (IXGBE_REMOVED(hw->hw_addr))
3293 			goto out;
3294 		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
3295 			goto out;
3296 	}
3297 
3298 	ERROR_REPORT1(IXGBE_ERROR_POLLING,
3299 		     "PCIe transaction pending bit also did not clear.\n");
3300 	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
3301 
3302 out:
3303 	return status;
3304 }
3305 
3306 /**
3307  *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
3308  *  @hw: pointer to hardware structure
3309  *  @mask: Mask to specify which semaphore to acquire
3310  *
3311  *  Acquires the SWFW semaphore through the GSSR register for the specified
3312  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
3313  **/
3314 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3315 {
3316 	u32 gssr = 0;
3317 	u32 swmask = mask;
3318 	u32 fwmask = mask << 5;
3319 	u32 timeout = 200;
3320 	u32 i;
3321 
3322 	DEBUGFUNC("ixgbe_acquire_swfw_sync");
3323 
3324 	for (i = 0; i < timeout; i++) {
3325 		/*
3326 		 * SW NVM semaphore bit is used for access to all
3327 		 * SW_FW_SYNC bits (not just NVM)
3328 		 */
3329 		if (ixgbe_get_eeprom_semaphore(hw))
3330 			return IXGBE_ERR_SWFW_SYNC;
3331 
3332 		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3333 		if (!(gssr & (fwmask | swmask))) {
3334 			gssr |= swmask;
3335 			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3336 			ixgbe_release_eeprom_semaphore(hw);
3337 			return IXGBE_SUCCESS;
3338 		} else {
3339 			/* Resource is currently in use by FW or SW */
3340 			ixgbe_release_eeprom_semaphore(hw);
3341 			msec_delay(5);
3342 		}
3343 	}
3344 
3345 	/* If time expired clear the bits holding the lock and retry */
3346 	if (gssr & (fwmask | swmask))
3347 		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
3348 
3349 	msec_delay(5);
3350 	return IXGBE_ERR_SWFW_SYNC;
3351 }
3352 
3353 /**
3354  *  ixgbe_release_swfw_sync - Release SWFW semaphore
3355  *  @hw: pointer to hardware structure
3356  *  @mask: Mask to specify which semaphore to release
3357  *
3358  *  Releases the SWFW semaphore through the GSSR register for the specified
3359  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
3360  **/
3361 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3362 {
3363 	u32 gssr;
3364 	u32 swmask = mask;
3365 
3366 	DEBUGFUNC("ixgbe_release_swfw_sync");
3367 
3368 	ixgbe_get_eeprom_semaphore(hw);
3369 
3370 	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3371 	gssr &= ~swmask;
3372 	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3373 
3374 	ixgbe_release_eeprom_semaphore(hw);
3375 }
3376 
3377 /**
3378  *  ixgbe_disable_sec_rx_path_generic - Stops the receive data path
3379  *  @hw: pointer to hardware structure
3380  *
3381  *  Stops the receive data path and waits for the HW to internally empty
3382  *  the Rx security block
3383  **/
3384 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
3385 {
3386 #define IXGBE_MAX_SECRX_POLL 4000
3387 
3388 	int i;
3389 	int secrxreg;
3390 
3391 	DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
3392 
3393 
3394 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3395 	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
3396 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3397 	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
3398 		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
3399 		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
3400 			break;
3401 		else
3402 			/* Use interrupt-safe sleep just in case */
3403 			usec_delay(10);
3404 	}
3405 
3406 	/* For informational purposes only */
3407 	if (i >= IXGBE_MAX_SECRX_POLL)
3408 		DEBUGOUT("Rx unit being enabled before security "
3409 			 "path fully disabled.  Continuing with init.\n");
3410 
3411 	return IXGBE_SUCCESS;
3412 }
3413 
3414 /**
3415  *  prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
3416  *  @hw: pointer to hardware structure
3417  *  @locked: bool to indicate whether the SW/FW lock was taken
3418  *  @reg_val: Value we read from AUTOC
3419  *
3420  *  The default case requires no protection so just to the register read.
3421  */
3422 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
3423 {
3424 	*locked = FALSE;
3425 	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3426 	return IXGBE_SUCCESS;
3427 }
3428 
3429 /**
3430  * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
3431  * @hw: pointer to hardware structure
3432  * @reg_val: value to write to AUTOC
3433  * @locked: bool to indicate whether the SW/FW lock was already taken by
3434  *           previous read.
3435  *
3436  * The default case requires no protection so just to the register write.
3437  */
3438 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
3439 {
3440 	UNREFERENCED_1PARAMETER(locked);
3441 
3442 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
3443 	return IXGBE_SUCCESS;
3444 }
3445 
3446 /**
3447  *  ixgbe_enable_sec_rx_path_generic - Enables the receive data path
3448  *  @hw: pointer to hardware structure
3449  *
3450  *  Enables the receive data path.
3451  **/
3452 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
3453 {
3454 	u32 secrxreg;
3455 
3456 	DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
3457 
3458 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3459 	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3460 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3461 	IXGBE_WRITE_FLUSH(hw);
3462 
3463 	return IXGBE_SUCCESS;
3464 }
3465 
3466 /**
3467  *  ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
3468  *  @hw: pointer to hardware structure
3469  *  @regval: register value to write to RXCTRL
3470  *
3471  *  Enables the Rx DMA unit
3472  **/
3473 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
3474 {
3475 	DEBUGFUNC("ixgbe_enable_rx_dma_generic");
3476 
3477 	if (regval & IXGBE_RXCTRL_RXEN)
3478 		ixgbe_enable_rx(hw);
3479 	else
3480 		ixgbe_disable_rx(hw);
3481 
3482 	return IXGBE_SUCCESS;
3483 }
3484 
3485 /**
3486  *  ixgbe_blink_led_start_generic - Blink LED based on index.
3487  *  @hw: pointer to hardware structure
3488  *  @index: led number to blink
3489  **/
3490 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
3491 {
3492 	ixgbe_link_speed speed = 0;
3493 	bool link_up = 0;
3494 	u32 autoc_reg = 0;
3495 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3496 	s32 ret_val = IXGBE_SUCCESS;
3497 	bool locked = FALSE;
3498 
3499 	DEBUGFUNC("ixgbe_blink_led_start_generic");
3500 
3501 	if (index > 3)
3502 		return IXGBE_ERR_PARAM;
3503 
3504 	/*
3505 	 * Link must be up to auto-blink the LEDs;
3506 	 * Force it if link is down.
3507 	 */
3508 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
3509 
3510 	if (!link_up) {
3511 		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3512 		if (ret_val != IXGBE_SUCCESS)
3513 			goto out;
3514 
3515 		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3516 		autoc_reg |= IXGBE_AUTOC_FLU;
3517 
3518 		ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3519 		if (ret_val != IXGBE_SUCCESS)
3520 			goto out;
3521 
3522 		IXGBE_WRITE_FLUSH(hw);
3523 		msec_delay(10);
3524 	}
3525 
3526 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
3527 	led_reg |= IXGBE_LED_BLINK(index);
3528 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3529 	IXGBE_WRITE_FLUSH(hw);
3530 
3531 out:
3532 	return ret_val;
3533 }
3534 
3535 /**
3536  *  ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
3537  *  @hw: pointer to hardware structure
3538  *  @index: led number to stop blinking
3539  **/
3540 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
3541 {
3542 	u32 autoc_reg = 0;
3543 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3544 	s32 ret_val = IXGBE_SUCCESS;
3545 	bool locked = FALSE;
3546 
3547 	DEBUGFUNC("ixgbe_blink_led_stop_generic");
3548 
3549 	if (index > 3)
3550 		return IXGBE_ERR_PARAM;
3551 
3552 	ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3553 	if (ret_val != IXGBE_SUCCESS)
3554 		goto out;
3555 
3556 	autoc_reg &= ~IXGBE_AUTOC_FLU;
3557 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3558 
3559 	ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3560 	if (ret_val != IXGBE_SUCCESS)
3561 		goto out;
3562 
3563 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
3564 	led_reg &= ~IXGBE_LED_BLINK(index);
3565 	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
3566 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3567 	IXGBE_WRITE_FLUSH(hw);
3568 
3569 out:
3570 	return ret_val;
3571 }
3572 
3573 /**
3574  *  ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3575  *  @hw: pointer to hardware structure
3576  *  @san_mac_offset: SAN MAC address offset
3577  *
3578  *  This function will read the EEPROM location for the SAN MAC address
3579  *  pointer, and returns the value at that location.  This is used in both
3580  *  get and set mac_addr routines.
3581  **/
3582 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3583 					 u16 *san_mac_offset)
3584 {
3585 	s32 ret_val;
3586 
3587 	DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3588 
3589 	/*
3590 	 * First read the EEPROM pointer to see if the MAC addresses are
3591 	 * available.
3592 	 */
3593 	ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
3594 				      san_mac_offset);
3595 	if (ret_val) {
3596 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3597 			      "eeprom at offset %d failed",
3598 			      IXGBE_SAN_MAC_ADDR_PTR);
3599 	}
3600 
3601 	return ret_val;
3602 }
3603 
3604 /**
3605  *  ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3606  *  @hw: pointer to hardware structure
3607  *  @san_mac_addr: SAN MAC address
3608  *
3609  *  Reads the SAN MAC address from the EEPROM, if it's available.  This is
3610  *  per-port, so set_lan_id() must be called before reading the addresses.
3611  *  set_lan_id() is called by identify_sfp(), but this cannot be relied
3612  *  upon for non-SFP connections, so we must call it here.
3613  **/
3614 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3615 {
3616 	u16 san_mac_data, san_mac_offset;
3617 	u8 i;
3618 	s32 ret_val;
3619 
3620 	DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3621 
3622 	/*
3623 	 * First read the EEPROM pointer to see if the MAC addresses are
3624 	 * available.  If they're not, no point in calling set_lan_id() here.
3625 	 */
3626 	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3627 	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3628 		goto san_mac_addr_out;
3629 
3630 	/* make sure we know which port we need to program */
3631 	hw->mac.ops.set_lan_id(hw);
3632 	/* apply the port offset to the address offset */
3633 	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3634 			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3635 	for (i = 0; i < 3; i++) {
3636 		ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
3637 					      &san_mac_data);
3638 		if (ret_val) {
3639 			ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3640 				      "eeprom read at offset %d failed",
3641 				      san_mac_offset);
3642 			goto san_mac_addr_out;
3643 		}
3644 		san_mac_addr[i * 2] = (u8)(san_mac_data);
3645 		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3646 		san_mac_offset++;
3647 	}
3648 	return IXGBE_SUCCESS;
3649 
3650 san_mac_addr_out:
3651 	/*
3652 	 * No addresses available in this EEPROM.  It's not an
3653 	 * error though, so just wipe the local address and return.
3654 	 */
3655 	for (i = 0; i < 6; i++)
3656 		san_mac_addr[i] = 0xFF;
3657 	return IXGBE_SUCCESS;
3658 }
3659 
3660 /**
3661  *  ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3662  *  @hw: pointer to hardware structure
3663  *  @san_mac_addr: SAN MAC address
3664  *
3665  *  Write a SAN MAC address to the EEPROM.
3666  **/
3667 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3668 {
3669 	s32 ret_val;
3670 	u16 san_mac_data, san_mac_offset;
3671 	u8 i;
3672 
3673 	DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3674 
3675 	/* Look for SAN mac address pointer.  If not defined, return */
3676 	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3677 	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3678 		return IXGBE_ERR_NO_SAN_ADDR_PTR;
3679 
3680 	/* Make sure we know which port we need to write */
3681 	hw->mac.ops.set_lan_id(hw);
3682 	/* Apply the port offset to the address offset */
3683 	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3684 			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3685 
3686 	for (i = 0; i < 3; i++) {
3687 		san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3688 		san_mac_data |= (u16)(san_mac_addr[i * 2]);
3689 		hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3690 		san_mac_offset++;
3691 	}
3692 
3693 	return IXGBE_SUCCESS;
3694 }
3695 
3696 /**
3697  *  ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3698  *  @hw: pointer to hardware structure
3699  *
3700  *  Read PCIe configuration space, and get the MSI-X vector count from
3701  *  the capabilities table.
3702  **/
3703 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3704 {
3705 	u16 msix_count = 1;
3706 	u16 max_msix_count;
3707 	u16 pcie_offset;
3708 
3709 	switch (hw->mac.type) {
3710 	case ixgbe_mac_82598EB:
3711 		pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3712 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3713 		break;
3714 	case ixgbe_mac_82599EB:
3715 	case ixgbe_mac_X540:
3716 	case ixgbe_mac_X550:
3717 	case ixgbe_mac_X550EM_x:
3718 	case ixgbe_mac_X550EM_a:
3719 		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3720 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3721 		break;
3722 	default:
3723 		return msix_count;
3724 	}
3725 
3726 	DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3727 	msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3728 	if (IXGBE_REMOVED(hw->hw_addr))
3729 		msix_count = 0;
3730 	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3731 
3732 	/* MSI-X count is zero-based in HW */
3733 	msix_count++;
3734 
3735 	if (msix_count > max_msix_count)
3736 		msix_count = max_msix_count;
3737 
3738 	return msix_count;
3739 }
3740 
3741 /**
3742  *  ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
3743  *  @hw: pointer to hardware structure
3744  *  @addr: Address to put into receive address register
3745  *  @vmdq: VMDq pool to assign
3746  *
3747  *  Puts an ethernet address into a receive address register, or
3748  *  finds the rar that it is already in; adds to the pool list
3749  **/
3750 s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
3751 {
3752 	static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
3753 	u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
3754 	u32 rar;
3755 	u32 rar_low, rar_high;
3756 	u32 addr_low, addr_high;
3757 
3758 	DEBUGFUNC("ixgbe_insert_mac_addr_generic");
3759 
3760 	/* swap bytes for HW little endian */
3761 	addr_low  = addr[0] | (addr[1] << 8)
3762 			    | (addr[2] << 16)
3763 			    | (addr[3] << 24);
3764 	addr_high = addr[4] | (addr[5] << 8);
3765 
3766 	/*
3767 	 * Either find the mac_id in rar or find the first empty space.
3768 	 * rar_highwater points to just after the highest currently used
3769 	 * rar in order to shorten the search.  It grows when we add a new
3770 	 * rar to the top.
3771 	 */
3772 	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
3773 		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
3774 
3775 		if (((IXGBE_RAH_AV & rar_high) == 0)
3776 		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
3777 			first_empty_rar = rar;
3778 		} else if ((rar_high & 0xFFFF) == addr_high) {
3779 			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
3780 			if (rar_low == addr_low)
3781 				break;    /* found it already in the rars */
3782 		}
3783 	}
3784 
3785 	if (rar < hw->mac.rar_highwater) {
3786 		/* already there so just add to the pool bits */
3787 		ixgbe_set_vmdq(hw, rar, vmdq);
3788 	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3789 		/* stick it into first empty RAR slot we found */
3790 		rar = first_empty_rar;
3791 		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3792 	} else if (rar == hw->mac.rar_highwater) {
3793 		/* add it to the top of the list and inc the highwater mark */
3794 		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3795 		hw->mac.rar_highwater++;
3796 	} else if (rar >= hw->mac.num_rar_entries) {
3797 		return IXGBE_ERR_INVALID_MAC_ADDR;
3798 	}
3799 
3800 	/*
3801 	 * If we found rar[0], make sure the default pool bit (we use pool 0)
3802 	 * remains cleared to be sure default pool packets will get delivered
3803 	 */
3804 	if (rar == 0)
3805 		ixgbe_clear_vmdq(hw, rar, 0);
3806 
3807 	return rar;
3808 }
3809 
3810 /**
3811  *  ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3812  *  @hw: pointer to hardware struct
3813  *  @rar: receive address register index to disassociate
3814  *  @vmdq: VMDq pool index to remove from the rar
3815  **/
3816 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3817 {
3818 	u32 mpsar_lo, mpsar_hi;
3819 	u32 rar_entries = hw->mac.num_rar_entries;
3820 
3821 	DEBUGFUNC("ixgbe_clear_vmdq_generic");
3822 
3823 	/* Make sure we are using a valid rar index range */
3824 	if (rar >= rar_entries) {
3825 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3826 			     "RAR index %d is out of range.\n", rar);
3827 		return IXGBE_ERR_INVALID_ARGUMENT;
3828 	}
3829 
3830 	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3831 	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3832 
3833 	if (IXGBE_REMOVED(hw->hw_addr))
3834 		goto done;
3835 
3836 	if (!mpsar_lo && !mpsar_hi)
3837 		goto done;
3838 
3839 	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3840 		if (mpsar_lo) {
3841 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3842 			mpsar_lo = 0;
3843 		}
3844 		if (mpsar_hi) {
3845 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3846 			mpsar_hi = 0;
3847 		}
3848 	} else if (vmdq < 32) {
3849 		mpsar_lo &= ~(1 << vmdq);
3850 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3851 	} else {
3852 		mpsar_hi &= ~(1 << (vmdq - 32));
3853 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3854 	}
3855 
3856 	/* was that the last pool using this rar? */
3857 	if (mpsar_lo == 0 && mpsar_hi == 0 &&
3858 	    rar != 0 && rar != hw->mac.san_mac_rar_index)
3859 		hw->mac.ops.clear_rar(hw, rar);
3860 done:
3861 	return IXGBE_SUCCESS;
3862 }
3863 
3864 /**
3865  *  ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3866  *  @hw: pointer to hardware struct
3867  *  @rar: receive address register index to associate with a VMDq index
3868  *  @vmdq: VMDq pool index
3869  **/
3870 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3871 {
3872 	u32 mpsar;
3873 	u32 rar_entries = hw->mac.num_rar_entries;
3874 
3875 	DEBUGFUNC("ixgbe_set_vmdq_generic");
3876 
3877 	/* Make sure we are using a valid rar index range */
3878 	if (rar >= rar_entries) {
3879 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3880 			     "RAR index %d is out of range.\n", rar);
3881 		return IXGBE_ERR_INVALID_ARGUMENT;
3882 	}
3883 
3884 	if (vmdq < 32) {
3885 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3886 		mpsar |= 1 << vmdq;
3887 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3888 	} else {
3889 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3890 		mpsar |= 1 << (vmdq - 32);
3891 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3892 	}
3893 	return IXGBE_SUCCESS;
3894 }
3895 
3896 /**
3897  *  This function should only be involved in the IOV mode.
3898  *  In IOV mode, Default pool is next pool after the number of
3899  *  VFs advertized and not 0.
3900  *  MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3901  *
3902  *  ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3903  *  @hw: pointer to hardware struct
3904  *  @vmdq: VMDq pool index
3905  **/
3906 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3907 {
3908 	u32 rar = hw->mac.san_mac_rar_index;
3909 
3910 	DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3911 
3912 	if (vmdq < 32) {
3913 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3914 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3915 	} else {
3916 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3917 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3918 	}
3919 
3920 	return IXGBE_SUCCESS;
3921 }
3922 
3923 /**
3924  *  ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3925  *  @hw: pointer to hardware structure
3926  **/
3927 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3928 {
3929 	int i;
3930 
3931 	DEBUGFUNC("ixgbe_init_uta_tables_generic");
3932 	DEBUGOUT(" Clearing UTA\n");
3933 
3934 	for (i = 0; i < 128; i++)
3935 		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3936 
3937 	return IXGBE_SUCCESS;
3938 }
3939 
3940 /**
3941  *  ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3942  *  @hw: pointer to hardware structure
3943  *  @vlan: VLAN id to write to VLAN filter
3944  *  @vlvf_bypass: TRUE to find vlanid only, FALSE returns first empty slot if
3945  *		  vlanid not found
3946  *
3947  *
3948  *  return the VLVF index where this VLAN id should be placed
3949  *
3950  **/
3951 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
3952 {
3953 	s32 regindex, first_empty_slot;
3954 	u32 bits;
3955 
3956 	/* short cut the special case */
3957 	if (vlan == 0)
3958 		return 0;
3959 
3960 	/* if vlvf_bypass is set we don't want to use an empty slot, we
3961 	 * will simply bypass the VLVF if there are no entries present in the
3962 	 * VLVF that contain our VLAN
3963 	 */
3964 	first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
3965 
3966 	/* add VLAN enable bit for comparison */
3967 	vlan |= IXGBE_VLVF_VIEN;
3968 
3969 	/* Search for the vlan id in the VLVF entries. Save off the first empty
3970 	 * slot found along the way.
3971 	 *
3972 	 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
3973 	 */
3974 	for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
3975 		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3976 		if (bits == vlan)
3977 			return regindex;
3978 		if (!first_empty_slot && !bits)
3979 			first_empty_slot = regindex;
3980 	}
3981 
3982 	/* If we are here then we didn't find the VLAN.  Return first empty
3983 	 * slot we found during our search, else error.
3984 	 */
3985 	if (!first_empty_slot)
3986 		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n");
3987 
3988 	return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE;
3989 }
3990 
3991 /**
3992  *  ixgbe_set_vfta_generic - Set VLAN filter table
3993  *  @hw: pointer to hardware structure
3994  *  @vlan: VLAN id to write to VLAN filter
3995  *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
3996  *  @vlan_on: boolean flag to turn on/off VLAN
3997  *  @vlvf_bypass: boolean flag indicating updating default pool is okay
3998  *
3999  *  Turn on/off specified VLAN in the VLAN filter table.
4000  **/
4001 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
4002 			   bool vlan_on, bool vlvf_bypass)
4003 {
4004 	u32 regidx, vfta_delta, vfta;
4005 	s32 ret_val;
4006 
4007 	DEBUGFUNC("ixgbe_set_vfta_generic");
4008 
4009 	if (vlan > 4095 || vind > 63)
4010 		return IXGBE_ERR_PARAM;
4011 
4012 	/*
4013 	 * this is a 2 part operation - first the VFTA, then the
4014 	 * VLVF and VLVFB if VT Mode is set
4015 	 * We don't write the VFTA until we know the VLVF part succeeded.
4016 	 */
4017 
4018 	/* Part 1
4019 	 * The VFTA is a bitstring made up of 128 32-bit registers
4020 	 * that enable the particular VLAN id, much like the MTA:
4021 	 *    bits[11-5]: which register
4022 	 *    bits[4-0]:  which bit in the register
4023 	 */
4024 	regidx = vlan / 32;
4025 	vfta_delta = (u32)1 << (vlan % 32);
4026 	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
4027 
4028 	/*
4029 	 * vfta_delta represents the difference between the current value
4030 	 * of vfta and the value we want in the register.  Since the diff
4031 	 * is an XOR mask we can just update the vfta using an XOR
4032 	 */
4033 	vfta_delta &= vlan_on ? ~vfta : vfta;
4034 	vfta ^= vfta_delta;
4035 
4036 	/* Part 2
4037 	 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
4038 	 */
4039 	ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta,
4040 					 vfta, vlvf_bypass);
4041 	if (ret_val != IXGBE_SUCCESS) {
4042 		if (vlvf_bypass)
4043 			goto vfta_update;
4044 		return ret_val;
4045 	}
4046 
4047 vfta_update:
4048 	/* Update VFTA now that we are ready for traffic */
4049 	if (vfta_delta)
4050 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
4051 
4052 	return IXGBE_SUCCESS;
4053 }
4054 
4055 /**
4056  *  ixgbe_set_vlvf_generic - Set VLAN Pool Filter
4057  *  @hw: pointer to hardware structure
4058  *  @vlan: VLAN id to write to VLAN filter
4059  *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
4060  *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
4061  *  @vfta_delta: pointer to the difference between the current value of VFTA
4062  *		 and the desired value
4063  *  @vfta: the desired value of the VFTA
4064  *  @vlvf_bypass: boolean flag indicating updating default pool is okay
4065  *
4066  *  Turn on/off specified bit in VLVF table.
4067  **/
4068 s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
4069 			   bool vlan_on, u32 *vfta_delta, u32 vfta,
4070 			   bool vlvf_bypass)
4071 {
4072 	u32 bits;
4073 	s32 vlvf_index;
4074 
4075 	DEBUGFUNC("ixgbe_set_vlvf_generic");
4076 
4077 	if (vlan > 4095 || vind > 63)
4078 		return IXGBE_ERR_PARAM;
4079 
4080 	/* If VT Mode is set
4081 	 *   Either vlan_on
4082 	 *     make sure the vlan is in VLVF
4083 	 *     set the vind bit in the matching VLVFB
4084 	 *   Or !vlan_on
4085 	 *     clear the pool bit and possibly the vind
4086 	 */
4087 	if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
4088 		return IXGBE_SUCCESS;
4089 
4090 	vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
4091 	if (vlvf_index < 0)
4092 		return vlvf_index;
4093 
4094 	bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
4095 
4096 	/* set the pool bit */
4097 	bits |= 1 << (vind % 32);
4098 	if (vlan_on)
4099 		goto vlvf_update;
4100 
4101 	/* clear the pool bit */
4102 	bits ^= 1 << (vind % 32);
4103 
4104 	if (!bits &&
4105 	    !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
4106 		/* Clear VFTA first, then disable VLVF.  Otherwise
4107 		 * we run the risk of stray packets leaking into
4108 		 * the PF via the default pool
4109 		 */
4110 		if (*vfta_delta)
4111 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta);
4112 
4113 		/* disable VLVF and clear remaining bit from pool */
4114 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
4115 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);
4116 
4117 		return IXGBE_SUCCESS;
4118 	}
4119 
4120 	/* If there are still bits set in the VLVFB registers
4121 	 * for the VLAN ID indicated we need to see if the
4122 	 * caller is requesting that we clear the VFTA entry bit.
4123 	 * If the caller has requested that we clear the VFTA
4124 	 * entry bit but there are still pools/VFs using this VLAN
4125 	 * ID entry then ignore the request.  We're not worried
4126 	 * about the case where we're turning the VFTA VLAN ID
4127 	 * entry bit on, only when requested to turn it off as
4128 	 * there may be multiple pools and/or VFs using the
4129 	 * VLAN ID entry.  In that case we cannot clear the
4130 	 * VFTA bit until all pools/VFs using that VLAN ID have also
4131 	 * been cleared.  This will be indicated by "bits" being
4132 	 * zero.
4133 	 */
4134 	*vfta_delta = 0;
4135 
4136 vlvf_update:
4137 	/* record pool change and enable VLAN ID if not already enabled */
4138 	IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
4139 	IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);
4140 
4141 	return IXGBE_SUCCESS;
4142 }
4143 
4144 /**
4145  *  ixgbe_clear_vfta_generic - Clear VLAN filter table
4146  *  @hw: pointer to hardware structure
4147  *
4148  *  Clears the VLAN filer table, and the VMDq index associated with the filter
4149  **/
4150 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
4151 {
4152 	u32 offset;
4153 
4154 	DEBUGFUNC("ixgbe_clear_vfta_generic");
4155 
4156 	for (offset = 0; offset < hw->mac.vft_size; offset++)
4157 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
4158 
4159 	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
4160 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
4161 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
4162 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
4163 	}
4164 
4165 	return IXGBE_SUCCESS;
4166 }
4167 
4168 /**
4169  *  ixgbe_toggle_txdctl_generic - Toggle VF's queues
4170  *  @hw: pointer to hardware structure
4171  *  @vf_number: VF index
4172  *
4173  *  Enable and disable each queue in VF.
4174  */
4175 s32 ixgbe_toggle_txdctl_generic(struct ixgbe_hw *hw, u32 vf_number)
4176 {
4177 	u8  queue_count, i;
4178 	u32 offset, reg;
4179 
4180 	if (vf_number > 63)
4181 		return IXGBE_ERR_PARAM;
4182 
4183 	/*
4184 	 * Determine number of queues by checking
4185 	 * number of virtual functions
4186 	 */
4187 	reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
4188 	switch (reg & IXGBE_GCR_EXT_VT_MODE_MASK) {
4189 	case IXGBE_GCR_EXT_VT_MODE_64:
4190 		queue_count = 2;
4191 		break;
4192 	case IXGBE_GCR_EXT_VT_MODE_32:
4193 		queue_count = 4;
4194 		break;
4195 	case IXGBE_GCR_EXT_VT_MODE_16:
4196 		queue_count = 8;
4197 		break;
4198 	default:
4199 		return IXGBE_ERR_CONFIG;
4200 	}
4201 
4202 	/* Toggle queues */
4203 	for (i = 0; i < queue_count; ++i) {
4204 		/* Calculate offset of current queue */
4205 		offset = queue_count * vf_number + i;
4206 
4207 		/* Enable queue */
4208 		reg = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(offset));
4209 		reg |= IXGBE_TXDCTL_ENABLE;
4210 		IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(offset), reg);
4211 		IXGBE_WRITE_FLUSH(hw);
4212 
4213 		/* Disable queue */
4214 		reg = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(offset));
4215 		reg &= ~IXGBE_TXDCTL_ENABLE;
4216 		IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(offset), reg);
4217 		IXGBE_WRITE_FLUSH(hw);
4218 	}
4219 
4220 	return IXGBE_SUCCESS;
4221 }
4222 
4223 /**
4224  *  ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
4225  *  @hw: pointer to hardware structure
4226  *
4227  *  Contains the logic to identify if we need to verify link for the
4228  *  crosstalk fix
4229  **/
4230 static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
4231 {
4232 
4233 	/* Does FW say we need the fix */
4234 	if (!hw->need_crosstalk_fix)
4235 		return FALSE;
4236 
4237 	/* Only consider SFP+ PHYs i.e. media type fiber */
4238 	switch (hw->mac.ops.get_media_type(hw)) {
4239 	case ixgbe_media_type_fiber:
4240 	case ixgbe_media_type_fiber_qsfp:
4241 		break;
4242 	default:
4243 		return FALSE;
4244 	}
4245 
4246 	return TRUE;
4247 }
4248 
4249 /**
4250  *  ixgbe_check_mac_link_generic - Determine link and speed status
4251  *  @hw: pointer to hardware structure
4252  *  @speed: pointer to link speed
4253  *  @link_up: TRUE when link is up
4254  *  @link_up_wait_to_complete: bool used to wait for link up or not
4255  *
4256  *  Reads the links register to determine if link is up and the current speed
4257  **/
4258 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4259 				 bool *link_up, bool link_up_wait_to_complete)
4260 {
4261 	u32 links_reg, links_orig;
4262 	u32 i;
4263 
4264 	DEBUGFUNC("ixgbe_check_mac_link_generic");
4265 
4266 	/* If Crosstalk fix enabled do the sanity check of making sure
4267 	 * the SFP+ cage is full.
4268 	 */
4269 	if (ixgbe_need_crosstalk_fix(hw)) {
4270 		u32 sfp_cage_full;
4271 
4272 		switch (hw->mac.type) {
4273 		case ixgbe_mac_82599EB:
4274 			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4275 					IXGBE_ESDP_SDP2;
4276 			break;
4277 		case ixgbe_mac_X550EM_x:
4278 		case ixgbe_mac_X550EM_a:
4279 			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4280 					IXGBE_ESDP_SDP0;
4281 			break;
4282 		default:
4283 			/* sanity check - No SFP+ devices here */
4284 			sfp_cage_full = FALSE;
4285 			break;
4286 		}
4287 
4288 		if (!sfp_cage_full) {
4289 			*link_up = FALSE;
4290 			*speed = IXGBE_LINK_SPEED_UNKNOWN;
4291 			return IXGBE_SUCCESS;
4292 		}
4293 	}
4294 
4295 	/* clear the old state */
4296 	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
4297 
4298 	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4299 
4300 	if (links_orig != links_reg) {
4301 		DEBUGOUT2("LINKS changed from %08X to %08X\n",
4302 			  links_orig, links_reg);
4303 	}
4304 
4305 	if (link_up_wait_to_complete) {
4306 		for (i = 0; i < hw->mac.max_link_up_time; i++) {
4307 			if (links_reg & IXGBE_LINKS_UP) {
4308 				*link_up = TRUE;
4309 				break;
4310 			} else {
4311 				*link_up = FALSE;
4312 			}
4313 			msec_delay(100);
4314 			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4315 		}
4316 	} else {
4317 		if (links_reg & IXGBE_LINKS_UP)
4318 			*link_up = TRUE;
4319 		else
4320 			*link_up = FALSE;
4321 	}
4322 
4323 	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
4324 	case IXGBE_LINKS_SPEED_10G_82599:
4325 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
4326 		if (hw->mac.type >= ixgbe_mac_X550) {
4327 			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4328 				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
4329 		}
4330 		break;
4331 	case IXGBE_LINKS_SPEED_1G_82599:
4332 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
4333 		break;
4334 	case IXGBE_LINKS_SPEED_100_82599:
4335 		*speed = IXGBE_LINK_SPEED_100_FULL;
4336 		if (hw->mac.type >= ixgbe_mac_X550) {
4337 			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4338 				*speed = IXGBE_LINK_SPEED_5GB_FULL;
4339 		}
4340 		break;
4341 	case IXGBE_LINKS_SPEED_10_X550EM_A:
4342 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
4343 		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
4344 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
4345 			*speed = IXGBE_LINK_SPEED_10_FULL;
4346 		break;
4347 	default:
4348 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
4349 	}
4350 
4351 	return IXGBE_SUCCESS;
4352 }
4353 
4354 /**
4355  *  ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
4356  *  the EEPROM
4357  *  @hw: pointer to hardware structure
4358  *  @wwnn_prefix: the alternative WWNN prefix
4359  *  @wwpn_prefix: the alternative WWPN prefix
4360  *
4361  *  This function will read the EEPROM from the alternative SAN MAC address
4362  *  block to check the support for the alternative WWNN/WWPN prefix support.
4363  **/
4364 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
4365 				 u16 *wwpn_prefix)
4366 {
4367 	u16 offset, caps;
4368 	u16 alt_san_mac_blk_offset;
4369 
4370 	DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
4371 
4372 	/* clear output first */
4373 	*wwnn_prefix = 0xFFFF;
4374 	*wwpn_prefix = 0xFFFF;
4375 
4376 	/* check if alternative SAN MAC is supported */
4377 	offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
4378 	if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
4379 		goto wwn_prefix_err;
4380 
4381 	if ((alt_san_mac_blk_offset == 0) ||
4382 	    (alt_san_mac_blk_offset == 0xFFFF))
4383 		goto wwn_prefix_out;
4384 
4385 	/* check capability in alternative san mac address block */
4386 	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
4387 	if (hw->eeprom.ops.read(hw, offset, &caps))
4388 		goto wwn_prefix_err;
4389 	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
4390 		goto wwn_prefix_out;
4391 
4392 	/* get the corresponding prefix for WWNN/WWPN */
4393 	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
4394 	if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
4395 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4396 			      "eeprom read at offset %d failed", offset);
4397 	}
4398 
4399 	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
4400 	if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
4401 		goto wwn_prefix_err;
4402 
4403 wwn_prefix_out:
4404 	return IXGBE_SUCCESS;
4405 
4406 wwn_prefix_err:
4407 	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4408 		      "eeprom read at offset %d failed", offset);
4409 	return IXGBE_SUCCESS;
4410 }
4411 
4412 /**
4413  *  ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
4414  *  @hw: pointer to hardware structure
4415  *  @bs: the fcoe boot status
4416  *
4417  *  This function will read the FCOE boot status from the iSCSI FCOE block
4418  **/
4419 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
4420 {
4421 	u16 offset, caps, flags;
4422 	s32 status;
4423 
4424 	DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
4425 
4426 	/* clear output first */
4427 	*bs = ixgbe_fcoe_bootstatus_unavailable;
4428 
4429 	/* check if FCOE IBA block is present */
4430 	offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
4431 	status = hw->eeprom.ops.read(hw, offset, &caps);
4432 	if (status != IXGBE_SUCCESS)
4433 		goto out;
4434 
4435 	if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
4436 		goto out;
4437 
4438 	/* check if iSCSI FCOE block is populated */
4439 	status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
4440 	if (status != IXGBE_SUCCESS)
4441 		goto out;
4442 
4443 	if ((offset == 0) || (offset == 0xFFFF))
4444 		goto out;
4445 
4446 	/* read fcoe flags in iSCSI FCOE block */
4447 	offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
4448 	status = hw->eeprom.ops.read(hw, offset, &flags);
4449 	if (status != IXGBE_SUCCESS)
4450 		goto out;
4451 
4452 	if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
4453 		*bs = ixgbe_fcoe_bootstatus_enabled;
4454 	else
4455 		*bs = ixgbe_fcoe_bootstatus_disabled;
4456 
4457 out:
4458 	return status;
4459 }
4460 
4461 /**
4462  *  ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
4463  *  @hw: pointer to hardware structure
4464  *  @enable: enable or disable switch for MAC anti-spoofing
4465  *  @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
4466  *
4467  **/
4468 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4469 {
4470 	int vf_target_reg = vf >> 3;
4471 	int vf_target_shift = vf % 8;
4472 	u32 pfvfspoof;
4473 
4474 	if (hw->mac.type == ixgbe_mac_82598EB)
4475 		return;
4476 
4477 	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4478 	if (enable)
4479 		pfvfspoof |= (1 << vf_target_shift);
4480 	else
4481 		pfvfspoof &= ~(1 << vf_target_shift);
4482 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4483 }
4484 
4485 /**
4486  *  ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
4487  *  @hw: pointer to hardware structure
4488  *  @enable: enable or disable switch for VLAN anti-spoofing
4489  *  @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
4490  *
4491  **/
4492 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4493 {
4494 	int vf_target_reg = vf >> 3;
4495 	int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
4496 	u32 pfvfspoof;
4497 
4498 	if (hw->mac.type == ixgbe_mac_82598EB)
4499 		return;
4500 
4501 	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4502 	if (enable)
4503 		pfvfspoof |= (1 << vf_target_shift);
4504 	else
4505 		pfvfspoof &= ~(1 << vf_target_shift);
4506 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4507 }
4508 
4509 /**
4510  *  ixgbe_get_device_caps_generic - Get additional device capabilities
4511  *  @hw: pointer to hardware structure
4512  *  @device_caps: the EEPROM word with the extra device capabilities
4513  *
4514  *  This function will read the EEPROM location for the device capabilities,
4515  *  and return the word through device_caps.
4516  **/
4517 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
4518 {
4519 	DEBUGFUNC("ixgbe_get_device_caps_generic");
4520 
4521 	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
4522 
4523 	return IXGBE_SUCCESS;
4524 }
4525 
4526 /**
4527  *  ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
4528  *  @hw: pointer to hardware structure
4529  *
4530  **/
4531 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
4532 {
4533 	u32 regval;
4534 	u32 i;
4535 
4536 	DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
4537 
4538 	/* Enable relaxed ordering */
4539 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
4540 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
4541 		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4542 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
4543 	}
4544 
4545 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
4546 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
4547 		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
4548 			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
4549 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
4550 	}
4551 
4552 }
4553 
4554 /**
4555  *  ixgbe_calculate_checksum - Calculate checksum for buffer
4556  *  @buffer: pointer to EEPROM
4557  *  @length: size of EEPROM to calculate a checksum for
4558  *  Calculates the checksum for some buffer on a specified length.  The
4559  *  checksum calculated is returned.
4560  **/
4561 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
4562 {
4563 	u32 i;
4564 	u8 sum = 0;
4565 
4566 	DEBUGFUNC("ixgbe_calculate_checksum");
4567 
4568 	if (!buffer)
4569 		return 0;
4570 
4571 	for (i = 0; i < length; i++)
4572 		sum += buffer[i];
4573 
4574 	return (u8) (0 - sum);
4575 }
4576 
4577 /**
4578  *  ixgbe_hic_unlocked - Issue command to manageability block unlocked
4579  *  @hw: pointer to the HW structure
4580  *  @buffer: command to write and where the return status will be placed
4581  *  @length: length of buffer, must be multiple of 4 bytes
4582  *  @timeout: time in ms to wait for command completion
4583  *
4584  *  Communicates with the manageability block. On success return IXGBE_SUCCESS
4585  *  else returns semaphore error when encountering an error acquiring
4586  *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4587  *
4588  *  This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
4589  *  by the caller.
4590  **/
4591 s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
4592 		       u32 timeout)
4593 {
4594 	u32 hicr, i, fwsts;
4595 	u16 dword_len;
4596 
4597 	DEBUGFUNC("ixgbe_hic_unlocked");
4598 
4599 	if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4600 		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
4601 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4602 	}
4603 
4604 	/* Set bit 9 of FWSTS clearing FW reset indication */
4605 	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
4606 	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
4607 
4608 	/* Check that the host interface is enabled. */
4609 	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4610 	if (!(hicr & IXGBE_HICR_EN)) {
4611 		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
4612 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4613 	}
4614 
4615 	/* Calculate length in DWORDs. We must be DWORD aligned */
4616 	if (length % sizeof(u32)) {
4617 		DEBUGOUT("Buffer length failure, not aligned to dword");
4618 		return IXGBE_ERR_INVALID_ARGUMENT;
4619 	}
4620 
4621 	dword_len = length >> 2;
4622 
4623 	/* The device driver writes the relevant command block
4624 	 * into the ram area.
4625 	 */
4626 	for (i = 0; i < dword_len; i++)
4627 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
4628 				      i, IXGBE_CPU_TO_LE32(buffer[i]));
4629 
4630 	/* Setting this bit tells the ARC that a new command is pending. */
4631 	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
4632 
4633 	for (i = 0; i < timeout; i++) {
4634 		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4635 		if (!(hicr & IXGBE_HICR_C))
4636 			break;
4637 		msec_delay(1);
4638 	}
4639 
4640 	/* For each command except "Apply Update" perform
4641 	 * status checks in the HICR registry.
4642 	 */
4643 	if ((buffer[0] & IXGBE_HOST_INTERFACE_MASK_CMD) ==
4644 	    IXGBE_HOST_INTERFACE_APPLY_UPDATE_CMD)
4645 		return IXGBE_SUCCESS;
4646 
4647 	/* Check command completion */
4648 	if ((timeout && i == timeout) ||
4649 	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
4650 		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
4651 			      "Command has failed with no status valid.\n");
4652 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4653 	}
4654 
4655 	return IXGBE_SUCCESS;
4656 }
4657 
4658 /**
4659  *  ixgbe_host_interface_command - Issue command to manageability block
4660  *  @hw: pointer to the HW structure
4661  *  @buffer: contains the command to write and where the return status will
4662  *   be placed
4663  *  @length: length of buffer, must be multiple of 4 bytes
4664  *  @timeout: time in ms to wait for command completion
4665  *  @return_data: read and return data from the buffer (TRUE) or not (FALSE)
4666  *   Needed because FW structures are big endian and decoding of
4667  *   these fields can be 8 bit or 16 bit based on command. Decoding
4668  *   is not easily understood without making a table of commands.
4669  *   So we will leave this up to the caller to read back the data
4670  *   in these cases.
4671  *
4672  *  Communicates with the manageability block. On success return IXGBE_SUCCESS
4673  *  else returns semaphore error when encountering an error acquiring
4674  *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4675  **/
4676 s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
4677 				 u32 length, u32 timeout, bool return_data)
4678 {
4679 	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
4680 	struct ixgbe_hic_hdr *resp = (struct ixgbe_hic_hdr *)buffer;
4681 	u16 buf_len;
4682 	s32 status;
4683 	u32 bi;
4684 	u32 dword_len;
4685 
4686 	DEBUGFUNC("ixgbe_host_interface_command");
4687 
4688 	if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4689 		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
4690 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4691 	}
4692 
4693 	/* Take management host interface semaphore */
4694 	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4695 	if (status)
4696 		return status;
4697 
4698 	status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
4699 	if (status)
4700 		goto rel_out;
4701 
4702 	if (!return_data)
4703 		goto rel_out;
4704 
4705 	/* Calculate length in DWORDs */
4706 	dword_len = hdr_size >> 2;
4707 
4708 	/* first pull in the header so we know the buffer length */
4709 	for (bi = 0; bi < dword_len; bi++) {
4710 		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4711 		IXGBE_LE32_TO_CPUS(&buffer[bi]);
4712 	}
4713 
4714 	/*
4715 	 * If there is any thing in data position pull it in
4716 	 * Read Flash command requires reading buffer length from
4717 	 * two byes instead of one byte
4718 	 */
4719 	if (resp->cmd == 0x30 || resp->cmd == 0x31) {
4720 		for (; bi < dword_len + 2; bi++) {
4721 			buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
4722 							  bi);
4723 			IXGBE_LE32_TO_CPUS(&buffer[bi]);
4724 		}
4725 		buf_len = (((u16)(resp->cmd_or_resp.ret_status) << 3)
4726 				  & 0xF00) | resp->buf_len;
4727 		hdr_size += (2 << 2);
4728 	} else {
4729 		buf_len = resp->buf_len;
4730 	}
4731 	if (!buf_len)
4732 		goto rel_out;
4733 
4734 	if (length < buf_len + hdr_size) {
4735 		DEBUGOUT("Buffer not large enough for reply message.\n");
4736 		status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4737 		goto rel_out;
4738 	}
4739 
4740 	/* Calculate length in DWORDs, add 3 for odd lengths */
4741 	dword_len = (buf_len + 3) >> 2;
4742 
4743 	/* Pull in the rest of the buffer (bi is where we left off) */
4744 	for (; bi <= dword_len; bi++) {
4745 		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4746 		IXGBE_LE32_TO_CPUS(&buffer[bi]);
4747 	}
4748 
4749 rel_out:
4750 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4751 
4752 	return status;
4753 }
4754 
4755 /**
4756  *  ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
4757  *  @hw: pointer to the HW structure
4758  *  @maj: driver version major number
4759  *  @minr: driver version minor number
4760  *  @build: driver version build number
4761  *  @sub: driver version sub build number
4762  *  @len: unused
4763  *  @driver_ver: unused
4764  *
4765  *  Sends driver version number to firmware through the manageability
4766  *  block.  On success return IXGBE_SUCCESS
4767  *  else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4768  *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4769  **/
4770 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 minr,
4771 				 u8 build, u8 sub, u16 len,
4772 				 const char *driver_ver)
4773 {
4774 	struct ixgbe_hic_drv_info fw_cmd;
4775 	int i;
4776 	s32 ret_val = IXGBE_SUCCESS;
4777 
4778 	DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
4779 	UNREFERENCED_2PARAMETER(len, driver_ver);
4780 
4781 	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4782 	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4783 	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4784 	fw_cmd.port_num = (u8)hw->bus.func;
4785 	fw_cmd.ver_maj = maj;
4786 	fw_cmd.ver_min = minr;
4787 	fw_cmd.ver_build = build;
4788 	fw_cmd.ver_sub = sub;
4789 	fw_cmd.hdr.checksum = 0;
4790 	fw_cmd.pad = 0;
4791 	fw_cmd.pad2 = 0;
4792 	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4793 				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4794 
4795 	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4796 		ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4797 						       sizeof(fw_cmd),
4798 						       IXGBE_HI_COMMAND_TIMEOUT,
4799 						       TRUE);
4800 		if (ret_val != IXGBE_SUCCESS)
4801 			continue;
4802 
4803 		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4804 		    FW_CEM_RESP_STATUS_SUCCESS)
4805 			ret_val = IXGBE_SUCCESS;
4806 		else
4807 			ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4808 
4809 		break;
4810 	}
4811 
4812 	return ret_val;
4813 }
4814 
4815 /**
4816  * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
4817  * @hw: pointer to hardware structure
4818  * @num_pb: number of packet buffers to allocate
4819  * @headroom: reserve n KB of headroom
4820  * @strategy: packet buffer allocation strategy
4821  **/
4822 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
4823 			     int strategy)
4824 {
4825 	u32 pbsize = hw->mac.rx_pb_size;
4826 	int i = 0;
4827 	u32 rxpktsize, txpktsize, txpbthresh;
4828 
4829 	/* Reserve headroom */
4830 	pbsize -= headroom;
4831 
4832 	if (!num_pb)
4833 		num_pb = 1;
4834 
4835 	/* Divide remaining packet buffer space amongst the number of packet
4836 	 * buffers requested using supplied strategy.
4837 	 */
4838 	switch (strategy) {
4839 	case PBA_STRATEGY_WEIGHTED:
4840 		/* ixgbe_dcb_pba_80_48 strategy weight first half of packet
4841 		 * buffer with 5/8 of the packet buffer space.
4842 		 */
4843 		rxpktsize = (pbsize * 5) / (num_pb * 4);
4844 		pbsize -= rxpktsize * (num_pb / 2);
4845 		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
4846 		for (; i < (num_pb / 2); i++)
4847 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4848 		/* fall through - configure remaining packet buffers */
4849 	case PBA_STRATEGY_EQUAL:
4850 		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
4851 		for (; i < num_pb; i++)
4852 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4853 		break;
4854 	default:
4855 		break;
4856 	}
4857 
4858 	/* Only support an equally distributed Tx packet buffer strategy. */
4859 	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
4860 	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
4861 	for (i = 0; i < num_pb; i++) {
4862 		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4863 		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4864 	}
4865 
4866 	/* Clear unused TCs, if any, to zero buffer size*/
4867 	for (; i < IXGBE_MAX_PB; i++) {
4868 		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4869 		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4870 		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4871 	}
4872 }
4873 
4874 /**
4875  * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
4876  * @hw: pointer to the hardware structure
4877  *
4878  * The 82599 and x540 MACs can experience issues if TX work is still pending
4879  * when a reset occurs.  This function prevents this by flushing the PCIe
4880  * buffers on the system.
4881  **/
4882 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
4883 {
4884 	u32 gcr_ext, hlreg0, i, poll;
4885 	u16 value;
4886 
4887 	/*
4888 	 * If double reset is not requested then all transactions should
4889 	 * already be clear and as such there is no work to do
4890 	 */
4891 	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
4892 		return;
4893 
4894 	/*
4895 	 * Set loopback enable to prevent any transmits from being sent
4896 	 * should the link come up.  This assumes that the RXCTRL.RXEN bit
4897 	 * has already been cleared.
4898 	 */
4899 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4900 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
4901 
4902 	/* Wait for a last completion before clearing buffers */
4903 	IXGBE_WRITE_FLUSH(hw);
4904 	msec_delay(3);
4905 
4906 	/*
4907 	 * Before proceeding, make sure that the PCIe block does not have
4908 	 * transactions pending.
4909 	 */
4910 	poll = ixgbe_pcie_timeout_poll(hw);
4911 	for (i = 0; i < poll; i++) {
4912 		usec_delay(100);
4913 		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
4914 		if (IXGBE_REMOVED(hw->hw_addr))
4915 			goto out;
4916 		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
4917 			goto out;
4918 	}
4919 
4920 out:
4921 	/* initiate cleaning flow for buffers in the PCIe transaction layer */
4922 	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
4923 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
4924 			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
4925 
4926 	/* Flush all writes and allow 20usec for all transactions to clear */
4927 	IXGBE_WRITE_FLUSH(hw);
4928 	usec_delay(20);
4929 
4930 	/* restore previous register values */
4931 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4932 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4933 }
4934 
4935 /**
4936  *  ixgbe_bypass_rw_generic - Bit bang data into by_pass FW
4937  *
4938  *  @hw: pointer to hardware structure
4939  *  @cmd: Command we send to the FW
4940  *  @status: The reply from the FW
4941  *
4942  *  Bit-bangs the cmd to the by_pass FW status points to what is returned.
4943  **/
4944 #define IXGBE_BYPASS_BB_WAIT 1
4945 s32 ixgbe_bypass_rw_generic(struct ixgbe_hw *hw, u32 cmd, u32 *status)
4946 {
4947 	int i;
4948 	u32 sck, sdi, sdo, dir_sck, dir_sdi, dir_sdo;
4949 	u32 esdp;
4950 
4951 	if (!status)
4952 		return IXGBE_ERR_PARAM;
4953 
4954 	*status = 0;
4955 
4956 	/* SDP vary by MAC type */
4957 	switch (hw->mac.type) {
4958 	case ixgbe_mac_82599EB:
4959 		sck = IXGBE_ESDP_SDP7;
4960 		sdi = IXGBE_ESDP_SDP0;
4961 		sdo = IXGBE_ESDP_SDP6;
4962 		dir_sck = IXGBE_ESDP_SDP7_DIR;
4963 		dir_sdi = IXGBE_ESDP_SDP0_DIR;
4964 		dir_sdo = IXGBE_ESDP_SDP6_DIR;
4965 		break;
4966 	case ixgbe_mac_X540:
4967 		sck = IXGBE_ESDP_SDP2;
4968 		sdi = IXGBE_ESDP_SDP0;
4969 		sdo = IXGBE_ESDP_SDP1;
4970 		dir_sck = IXGBE_ESDP_SDP2_DIR;
4971 		dir_sdi = IXGBE_ESDP_SDP0_DIR;
4972 		dir_sdo = IXGBE_ESDP_SDP1_DIR;
4973 		break;
4974 	default:
4975 		return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
4976 	}
4977 
4978 	/* Set SDP pins direction */
4979 	esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4980 	esdp |= dir_sck;	/* SCK as output */
4981 	esdp |= dir_sdi;	/* SDI as output */
4982 	esdp &= ~dir_sdo;	/* SDO as input */
4983 	esdp |= sck;
4984 	esdp |= sdi;
4985 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4986 	IXGBE_WRITE_FLUSH(hw);
4987 	msec_delay(IXGBE_BYPASS_BB_WAIT);
4988 
4989 	/* Generate start condition */
4990 	esdp &= ~sdi;
4991 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4992 	IXGBE_WRITE_FLUSH(hw);
4993 	msec_delay(IXGBE_BYPASS_BB_WAIT);
4994 
4995 	esdp &= ~sck;
4996 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4997 	IXGBE_WRITE_FLUSH(hw);
4998 	msec_delay(IXGBE_BYPASS_BB_WAIT);
4999 
5000 	/* Clock out the new control word and clock in the status */
5001 	for (i = 0; i < 32; i++) {
5002 		if ((cmd >> (31 - i)) & 0x01) {
5003 			esdp |= sdi;
5004 			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
5005 		} else {
5006 			esdp &= ~sdi;
5007 			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
5008 		}
5009 		IXGBE_WRITE_FLUSH(hw);
5010 		msec_delay(IXGBE_BYPASS_BB_WAIT);
5011 
5012 		esdp |= sck;
5013 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
5014 		IXGBE_WRITE_FLUSH(hw);
5015 		msec_delay(IXGBE_BYPASS_BB_WAIT);
5016 
5017 		esdp &= ~sck;
5018 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
5019 		IXGBE_WRITE_FLUSH(hw);
5020 		msec_delay(IXGBE_BYPASS_BB_WAIT);
5021 
5022 		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
5023 		if (esdp & sdo)
5024 			*status = (*status << 1) | 0x01;
5025 		else
5026 			*status = (*status << 1) | 0x00;
5027 		msec_delay(IXGBE_BYPASS_BB_WAIT);
5028 	}
5029 
5030 	/* stop condition */
5031 	esdp |= sck;
5032 	esdp &= ~sdi;
5033 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
5034 	IXGBE_WRITE_FLUSH(hw);
5035 	msec_delay(IXGBE_BYPASS_BB_WAIT);
5036 
5037 	esdp |= sdi;
5038 	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
5039 	IXGBE_WRITE_FLUSH(hw);
5040 
5041 	/* set the page bits to match the cmd that the status it belongs to */
5042 	*status = (*status & 0x3fffffff) | (cmd & 0xc0000000);
5043 
5044 	return IXGBE_SUCCESS;
5045 }
5046 
5047 /**
5048  * ixgbe_bypass_valid_rd_generic - Verify valid return from bit-bang.
5049  *
5050  * If we send a write we can't be sure it took until we can read back
5051  * that same register.  It can be a problem as some of the feilds may
5052  * for valid reasons change inbetween the time wrote the register and
5053  * we read it again to verify.  So this function check everything we
5054  * can check and then assumes it worked.
5055  *
5056  * @u32 in_reg - The register cmd for the bit-bang read.
5057  * @u32 out_reg - The register returned from a bit-bang read.
5058  **/
5059 bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg)
5060 {
5061 	u32 mask;
5062 
5063 	/* Page must match for all control pages */
5064 	if ((in_reg & BYPASS_PAGE_M) != (out_reg & BYPASS_PAGE_M))
5065 		return FALSE;
5066 
5067 	switch (in_reg & BYPASS_PAGE_M) {
5068 	case BYPASS_PAGE_CTL0:
5069 		/* All the following can't change since the last write
5070 		 *  - All the event actions
5071 		 *  - The timeout value
5072 		 */
5073 		mask = BYPASS_AUX_ON_M | BYPASS_MAIN_ON_M |
5074 		       BYPASS_MAIN_OFF_M | BYPASS_AUX_OFF_M |
5075 		       BYPASS_WDTIMEOUT_M |
5076 		       BYPASS_WDT_VALUE_M;
5077 		if ((out_reg & mask) != (in_reg & mask))
5078 			return FALSE;
5079 
5080 		/* 0x0 is never a valid value for bypass status */
5081 		if (!(out_reg & BYPASS_STATUS_OFF_M))
5082 			return FALSE;
5083 		break;
5084 	case BYPASS_PAGE_CTL1:
5085 		/* All the following can't change since the last write
5086 		 *  - time valid bit
5087 		 *  - time we last sent
5088 		 */
5089 		mask = BYPASS_CTL1_VALID_M | BYPASS_CTL1_TIME_M;
5090 		if ((out_reg & mask) != (in_reg & mask))
5091 			return FALSE;
5092 		break;
5093 	case BYPASS_PAGE_CTL2:
5094 		/* All we can check in this page is control number
5095 		 * which is already done above.
5096 		 */
5097 		break;
5098 	}
5099 
5100 	/* We are as sure as we can be return TRUE */
5101 	return TRUE;
5102 }
5103 
5104 /**
5105  *  ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Regiter.
5106  *
5107  *  @hw: pointer to hardware structure
5108  *  @cmd: The control word we are setting.
5109  *  @event: The event we are setting in the FW.  This also happens to
5110  *	    be the mask for the event we are setting (handy)
5111  *  @action: The action we set the event to in the FW. This is in a
5112  *	     bit field that happens to be what we want to put in
5113  *	     the event spot (also handy)
5114  **/
5115 s32 ixgbe_bypass_set_generic(struct ixgbe_hw *hw, u32 ctrl, u32 event,
5116 			     u32 action)
5117 {
5118 	u32 by_ctl = 0;
5119 	u32 cmd, verify;
5120 	u32 count = 0;
5121 
5122 	/* Get current values */
5123 	cmd = ctrl;	/* just reading only need control number */
5124 	if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
5125 		return IXGBE_ERR_INVALID_ARGUMENT;
5126 
5127 	/* Set to new action */
5128 	cmd = (by_ctl & ~event) | BYPASS_WE | action;
5129 	if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
5130 		return IXGBE_ERR_INVALID_ARGUMENT;
5131 
5132 	/* Page 0 force a FW eeprom write which is slow so verify */
5133 	if ((cmd & BYPASS_PAGE_M) == BYPASS_PAGE_CTL0) {
5134 		verify = BYPASS_PAGE_CTL0;
5135 		do {
5136 			if (count++ > 5)
5137 				return IXGBE_BYPASS_FW_WRITE_FAILURE;
5138 
5139 			if (ixgbe_bypass_rw_generic(hw, verify, &by_ctl))
5140 				return IXGBE_ERR_INVALID_ARGUMENT;
5141 		} while (!ixgbe_bypass_valid_rd_generic(cmd, by_ctl));
5142 	} else {
5143 		/* We have give the FW time for the write to stick */
5144 		msec_delay(100);
5145 	}
5146 
5147 	return IXGBE_SUCCESS;
5148 }
5149 
5150 /**
5151  *  ixgbe_bypass_rd_eep_generic - Read the bypass FW eeprom addres.
5152  *
5153  *  @hw: pointer to hardware structure
5154  *  @addr: The bypass eeprom address to read.
5155  *  @value: The 8b of data at the address above.
5156  **/
5157 s32 ixgbe_bypass_rd_eep_generic(struct ixgbe_hw *hw, u32 addr, u8 *value)
5158 {
5159 	u32 cmd;
5160 	u32 status;
5161 
5162 
5163 	/* send the request */
5164 	cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
5165 	cmd |= (addr << BYPASS_CTL2_OFFSET_SHIFT) & BYPASS_CTL2_OFFSET_M;
5166 	if (ixgbe_bypass_rw_generic(hw, cmd, &status))
5167 		return IXGBE_ERR_INVALID_ARGUMENT;
5168 
5169 	/* We have give the FW time for the write to stick */
5170 	msec_delay(100);
5171 
5172 	/* now read the results */
5173 	cmd &= ~BYPASS_WE;
5174 	if (ixgbe_bypass_rw_generic(hw, cmd, &status))
5175 		return IXGBE_ERR_INVALID_ARGUMENT;
5176 
5177 	*value = status & BYPASS_CTL2_DATA_M;
5178 
5179 	return IXGBE_SUCCESS;
5180 }
5181 
5182 /**
5183  *  ixgbe_get_orom_version - Return option ROM from EEPROM
5184  *
5185  *  @hw: pointer to hardware structure
5186  *  @nvm_ver: pointer to output structure
5187  *
5188  *  if valid option ROM version, nvm_ver->or_valid set to TRUE
5189  *  else nvm_ver->or_valid is FALSE.
5190  **/
5191 void ixgbe_get_orom_version(struct ixgbe_hw *hw,
5192 			    struct ixgbe_nvm_version *nvm_ver)
5193 {
5194 	u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;
5195 
5196 	nvm_ver->or_valid = FALSE;
5197 	/* Option Rom may or may not be present.  Start with pointer */
5198 	hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);
5199 
5200 	/* make sure offset is valid */
5201 	if ((offset == 0x0) || (offset == NVM_INVALID_PTR))
5202 		return;
5203 
5204 	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
5205 	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);
5206 
5207 	/* option rom exists and is valid */
5208 	if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
5209 	    eeprom_cfg_blkl == NVM_VER_INVALID ||
5210 	    eeprom_cfg_blkh == NVM_VER_INVALID)
5211 		return;
5212 
5213 	nvm_ver->or_valid = TRUE;
5214 	nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
5215 	nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
5216 			    (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
5217 	nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
5218 }
5219 
5220 /**
5221  *  ixgbe_get_oem_prod_version - Return OEM Product version
5222  *
5223  *  @hw: pointer to hardware structure
5224  *  @nvm_ver: pointer to output structure
5225  *
5226  *  if valid OEM product version, nvm_ver->oem_valid set to TRUE
5227  *  else nvm_ver->oem_valid is FALSE.
5228  **/
5229 void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
5230 				struct ixgbe_nvm_version *nvm_ver)
5231 {
5232 	u16 rel_num, prod_ver, mod_len, cap, offset;
5233 
5234 	nvm_ver->oem_valid = FALSE;
5235 	hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);
5236 
5237 	/* Return is offset to OEM Product Version block is invalid */
5238 	if (offset == 0x0 || offset == NVM_INVALID_PTR)
5239 		return;
5240 
5241 	/* Read product version block */
5242 	hw->eeprom.ops.read(hw, offset, &mod_len);
5243 	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);
5244 
5245 	/* Return if OEM product version block is invalid */
5246 	if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
5247 	    (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
5248 		return;
5249 
5250 	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
5251 	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);
5252 
5253 	/* Return if version is invalid */
5254 	if ((rel_num | prod_ver) == 0x0 ||
5255 	    rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
5256 		return;
5257 
5258 	nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
5259 	nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
5260 	nvm_ver->oem_release = rel_num;
5261 	nvm_ver->oem_valid = TRUE;
5262 }
5263 
5264 /**
5265  *  ixgbe_get_etk_id - Return Etrack ID from EEPROM
5266  *
5267  *  @hw: pointer to hardware structure
5268  *  @nvm_ver: pointer to output structure
5269  *
5270  *  word read errors will return 0xFFFF
5271  **/
5272 void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver)
5273 {
5274 	u16 etk_id_l, etk_id_h;
5275 
5276 	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
5277 		etk_id_l = NVM_VER_INVALID;
5278 	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
5279 		etk_id_h = NVM_VER_INVALID;
5280 
5281 	/* The word order for the version format is determined by high order
5282 	 * word bit 15.
5283 	 */
5284 	if ((etk_id_h & NVM_ETK_VALID) == 0) {
5285 		nvm_ver->etk_id = etk_id_h;
5286 		nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
5287 	} else {
5288 		nvm_ver->etk_id = etk_id_l;
5289 		nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
5290 	}
5291 }
5292 
5293 
5294 /**
5295  * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
5296  * @hw: pointer to hardware structure
5297  * @map: pointer to u8 arr for returning map
5298  *
5299  * Read the rtrup2tc HW register and resolve its content into map
5300  **/
5301 void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
5302 {
5303 	u32 reg, i;
5304 
5305 	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
5306 	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
5307 		map[i] = IXGBE_RTRUP2TC_UP_MASK &
5308 			(reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
5309 	return;
5310 }
5311 
5312 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
5313 {
5314 	u32 pfdtxgswc;
5315 	u32 rxctrl;
5316 
5317 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5318 	if (rxctrl & IXGBE_RXCTRL_RXEN) {
5319 		if (hw->mac.type != ixgbe_mac_82598EB) {
5320 			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
5321 			if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
5322 				pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
5323 				IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
5324 				hw->mac.set_lben = TRUE;
5325 			} else {
5326 				hw->mac.set_lben = FALSE;
5327 			}
5328 		}
5329 		rxctrl &= ~IXGBE_RXCTRL_RXEN;
5330 		IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
5331 	}
5332 }
5333 
5334 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
5335 {
5336 	u32 pfdtxgswc;
5337 	u32 rxctrl;
5338 
5339 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5340 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
5341 
5342 	if (hw->mac.type != ixgbe_mac_82598EB) {
5343 		if (hw->mac.set_lben) {
5344 			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
5345 			pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
5346 			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
5347 			hw->mac.set_lben = FALSE;
5348 		}
5349 	}
5350 }
5351 
5352 /**
5353  * ixgbe_mng_present - returns TRUE when management capability is present
5354  * @hw: pointer to hardware structure
5355  */
5356 bool ixgbe_mng_present(struct ixgbe_hw *hw)
5357 {
5358 	u32 fwsm;
5359 
5360 	if (hw->mac.type < ixgbe_mac_82599EB)
5361 		return FALSE;
5362 
5363 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
5364 	return !!(fwsm & IXGBE_FWSM_FW_MODE_PT);
5365 }
5366 
5367 /**
5368  * ixgbe_mng_enabled - Is the manageability engine enabled?
5369  * @hw: pointer to hardware structure
5370  *
5371  * Returns TRUE if the manageability engine is enabled.
5372  **/
5373 bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
5374 {
5375 	u32 fwsm, manc, factps;
5376 
5377 	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
5378 	if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
5379 		return FALSE;
5380 
5381 	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
5382 	if (!(manc & IXGBE_MANC_RCV_TCO_EN))
5383 		return FALSE;
5384 
5385 	if (hw->mac.type <= ixgbe_mac_X540) {
5386 		factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
5387 		if (factps & IXGBE_FACTPS_MNGCG)
5388 			return FALSE;
5389 	}
5390 
5391 	return TRUE;
5392 }
5393 
5394 /**
5395  *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
5396  *  @hw: pointer to hardware structure
5397  *  @speed: new link speed
5398  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
5399  *
5400  *  Set the link speed in the MAC and/or PHY register and restarts link.
5401  **/
5402 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
5403 					  ixgbe_link_speed speed,
5404 					  bool autoneg_wait_to_complete)
5405 {
5406 	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
5407 	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
5408 	s32 status = IXGBE_SUCCESS;
5409 	u32 speedcnt = 0;
5410 	u32 i = 0;
5411 	bool autoneg, link_up = FALSE;
5412 
5413 	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
5414 
5415 	/* Mask off requested but non-supported speeds */
5416 	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
5417 	if (status != IXGBE_SUCCESS)
5418 		return status;
5419 
5420 	speed &= link_speed;
5421 
5422 	/* Try each speed one by one, highest priority first.  We do this in
5423 	 * software because 10Gb fiber doesn't support speed autonegotiation.
5424 	 */
5425 	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
5426 		speedcnt++;
5427 		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
5428 
5429 		/* Set the module link speed */
5430 		switch (hw->phy.media_type) {
5431 		case ixgbe_media_type_fiber_fixed:
5432 		case ixgbe_media_type_fiber:
5433 			ixgbe_set_rate_select_speed(hw,
5434 						    IXGBE_LINK_SPEED_10GB_FULL);
5435 			break;
5436 		case ixgbe_media_type_fiber_qsfp:
5437 			/* QSFP module automatically detects MAC link speed */
5438 			break;
5439 		default:
5440 			DEBUGOUT("Unexpected media type.\n");
5441 			break;
5442 		}
5443 
5444 		/* Allow module to change analog characteristics (1G->10G) */
5445 		msec_delay(40);
5446 
5447 		status = ixgbe_setup_mac_link(hw,
5448 					      IXGBE_LINK_SPEED_10GB_FULL,
5449 					      autoneg_wait_to_complete);
5450 		if (status != IXGBE_SUCCESS)
5451 			return status;
5452 
5453 		/* Flap the Tx laser if it has not already been done */
5454 		ixgbe_flap_tx_laser(hw);
5455 
5456 		/* Wait for the controller to acquire link.  Per IEEE 802.3ap,
5457 		 * Section 73.10.2, we may have to wait up to 500ms if KR is
5458 		 * attempted.  82599 uses the same timing for 10g SFI.
5459 		 */
5460 		for (i = 0; i < 5; i++) {
5461 			/* Wait for the link partner to also set speed */
5462 			msec_delay(100);
5463 
5464 			/* If we have link, just jump out */
5465 			status = ixgbe_check_link(hw, &link_speed,
5466 						  &link_up, FALSE);
5467 			if (status != IXGBE_SUCCESS)
5468 				return status;
5469 
5470 			if (link_up)
5471 				goto out;
5472 		}
5473 	}
5474 
5475 	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
5476 		speedcnt++;
5477 		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
5478 			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
5479 
5480 		/* Set the module link speed */
5481 		switch (hw->phy.media_type) {
5482 		case ixgbe_media_type_fiber_fixed:
5483 		case ixgbe_media_type_fiber:
5484 			ixgbe_set_rate_select_speed(hw,
5485 						    IXGBE_LINK_SPEED_1GB_FULL);
5486 			break;
5487 		case ixgbe_media_type_fiber_qsfp:
5488 			/* QSFP module automatically detects link speed */
5489 			break;
5490 		default:
5491 			DEBUGOUT("Unexpected media type.\n");
5492 			break;
5493 		}
5494 
5495 		/* Allow module to change analog characteristics (10G->1G) */
5496 		msec_delay(40);
5497 
5498 		status = ixgbe_setup_mac_link(hw,
5499 					      IXGBE_LINK_SPEED_1GB_FULL,
5500 					      autoneg_wait_to_complete);
5501 		if (status != IXGBE_SUCCESS)
5502 			return status;
5503 
5504 		/* Flap the Tx laser if it has not already been done */
5505 		ixgbe_flap_tx_laser(hw);
5506 
5507 		/* Wait for the link partner to also set speed */
5508 		msec_delay(100);
5509 
5510 		/* If we have link, just jump out */
5511 		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
5512 		if (status != IXGBE_SUCCESS)
5513 			return status;
5514 
5515 		if (link_up)
5516 			goto out;
5517 	}
5518 
5519 	if (speed == 0) {
5520 		/* Disable the Tx laser for media none */
5521 		ixgbe_disable_tx_laser(hw);
5522 
5523 		goto out;
5524 	}
5525 
5526 	/* We didn't get link.  Configure back to the highest speed we tried,
5527 	 * (if there was more than one).  We call ourselves back with just the
5528 	 * single highest speed that the user requested.
5529 	 */
5530 	if (speedcnt > 1)
5531 		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
5532 						      highest_link_speed,
5533 						      autoneg_wait_to_complete);
5534 
5535 out:
5536 	/* Set autoneg_advertised value based on input link speed */
5537 	hw->phy.autoneg_advertised = 0;
5538 
5539 	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
5540 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
5541 
5542 	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
5543 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
5544 
5545 	return status;
5546 }
5547 
5548 /**
5549  *  ixgbe_set_soft_rate_select_speed - Set module link speed
5550  *  @hw: pointer to hardware structure
5551  *  @speed: link speed to set
5552  *
5553  *  Set module link speed via the soft rate select.
5554  */
5555 void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
5556 					ixgbe_link_speed speed)
5557 {
5558 	s32 status;
5559 	u8 rs, eeprom_data;
5560 
5561 	switch (speed) {
5562 	case IXGBE_LINK_SPEED_10GB_FULL:
5563 		/* one bit mask same as setting on */
5564 		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
5565 		break;
5566 	case IXGBE_LINK_SPEED_1GB_FULL:
5567 		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
5568 		break;
5569 	default:
5570 		DEBUGOUT("Invalid fixed module speed\n");
5571 		return;
5572 	}
5573 
5574 	/* Set RS0 */
5575 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5576 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
5577 					   &eeprom_data);
5578 	if (status) {
5579 		DEBUGOUT("Failed to read Rx Rate Select RS0\n");
5580 		goto out;
5581 	}
5582 
5583 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5584 
5585 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5586 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
5587 					    eeprom_data);
5588 	if (status) {
5589 		DEBUGOUT("Failed to write Rx Rate Select RS0\n");
5590 		goto out;
5591 	}
5592 
5593 	/* Set RS1 */
5594 	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5595 					   IXGBE_I2C_EEPROM_DEV_ADDR2,
5596 					   &eeprom_data);
5597 	if (status) {
5598 		DEBUGOUT("Failed to read Rx Rate Select RS1\n");
5599 		goto out;
5600 	}
5601 
5602 	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5603 
5604 	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5605 					    IXGBE_I2C_EEPROM_DEV_ADDR2,
5606 					    eeprom_data);
5607 	if (status) {
5608 		DEBUGOUT("Failed to write Rx Rate Select RS1\n");
5609 		goto out;
5610 	}
5611 out:
5612 	return;
5613 }
5614