xref: /openbsd-src/sys/dev/pci/ixgbe.c (revision f2da64fbbbf1b03f09f390ab01267c93dfd77c4c)
1 /*	$OpenBSD: ixgbe.c,v 1.14 2014/11/26 17:03:52 kettenis Exp $	*/
2 
3 /******************************************************************************
4 
5   Copyright (c) 2001-2013, Intel Corporation
6   All rights reserved.
7 
8   Redistribution and use in source and binary forms, with or without
9   modification, are permitted provided that the following conditions are met:
10 
11    1. Redistributions of source code must retain the above copyright notice,
12       this list of conditions and the following disclaimer.
13 
14    2. Redistributions in binary form must reproduce the above copyright
15       notice, this list of conditions and the following disclaimer in the
16       documentation and/or other materials provided with the distribution.
17 
18    3. Neither the name of the Intel Corporation nor the names of its
19       contributors may be used to endorse or promote products derived from
20       this software without specific prior written permission.
21 
22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32   POSSIBILITY OF SUCH DAMAGE.
33 
34 ******************************************************************************/
35 /* FreeBSD: src/sys/dev/ixgbe/ixgbe_common.c 251964 Jun 18 21:28:19 2013 UTC */
36 /* FreeBSD: src/sys/dev/ixgbe/ixgbe_mbx.c 230775 Jan 30 16:42:02 2012 UTC */
37 
38 #include <dev/pci/ixgbe.h>
39 
40 #ifdef __sparc64__
41 #include <dev/ofw/openfirm.h>
42 #endif
43 
44 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw,
45 				       uint16_t link_status);
46 
47 int32_t ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
48 int32_t ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
49 void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
50 int32_t ixgbe_ready_eeprom(struct ixgbe_hw *hw);
51 void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
52 void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, uint16_t data,
53 				 uint16_t count);
54 uint16_t ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, uint16_t count);
55 void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec);
56 void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec);
57 void ixgbe_release_eeprom(struct ixgbe_hw *hw);
58 
59 int32_t ixgbe_mta_vector(struct ixgbe_hw *hw, uint8_t *mc_addr);
60 int32_t ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
61 int32_t ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
62 int32_t ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
63 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
64 int32_t ixgbe_negotiate_fc(struct ixgbe_hw *hw, uint32_t adv_reg,
65 			   uint32_t lp_reg, uint32_t adv_sym, uint32_t adv_asm,
66 			   uint32_t lp_sym, uint32_t lp_asm);
67 
68 int32_t ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, uint32_t vlan);
69 
70 /* MBX */
71 int32_t ixgbe_poll_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id);
72 int32_t ixgbe_poll_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id);
73 uint32_t ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw);
74 int32_t ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, uint32_t mask,
75 			       int32_t index);
76 int32_t ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, uint16_t vf_number);
77 int32_t ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, uint16_t vf_number);
78 int32_t ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, uint16_t vf_number);
79 int32_t ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, uint16_t vf_number);
80 int32_t ixgbe_write_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
81 			   uint16_t vf_number);
82 int32_t ixgbe_read_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
83 			  uint16_t vf_number);
84 
85 
86 /**
87  *  ixgbe_init_ops_generic - Inits function ptrs
88  *  @hw: pointer to the hardware structure
89  *
90  *  Initialize the function pointers.
91  **/
int32_t ixgbe_init_ops_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	struct ixgbe_mac_info *mac = &hw->mac;
	/* Read EEC once up front to decide which EEPROM read path to use. */
	uint32_t eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	DEBUGFUNC("ixgbe_init_ops_generic");

	/* EEPROM */
	eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
	if (eec & IXGBE_EEC_PRES)
		eeprom->ops.read = &ixgbe_read_eerd_generic;
	else
		eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
	eeprom->ops.write = &ixgbe_write_eeprom_generic;
	eeprom->ops.validate_checksum =
				      &ixgbe_validate_eeprom_checksum_generic;
	eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
	eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;

	/* MAC */
	mac->ops.init_hw = &ixgbe_init_hw_generic;
	/* NULL slots below are MAC-family specific; the per-family init
	 * code (82598/82599/X540) is expected to fill them in. */
	mac->ops.reset_hw = NULL;
	mac->ops.start_hw = &ixgbe_start_hw_generic;
	mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
	mac->ops.get_media_type = NULL;
	mac->ops.get_supported_physical_layer = NULL;
	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
	mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
	mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
	mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
	mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
	mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
	mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;

	/* LEDs */
	mac->ops.led_on = &ixgbe_led_on_generic;
	mac->ops.led_off = &ixgbe_led_off_generic;
	mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
	mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;

	/* RAR, Multicast, VLAN */
	mac->ops.set_rar = &ixgbe_set_rar_generic;
	mac->ops.clear_rar = &ixgbe_clear_rar_generic;
	mac->ops.insert_mac_addr = NULL;
	mac->ops.set_vmdq = NULL;
	mac->ops.clear_vmdq = NULL;
	mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
	mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
	mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
	mac->ops.enable_mc = &ixgbe_enable_mc_generic;
	mac->ops.disable_mc = &ixgbe_disable_mc_generic;
	mac->ops.clear_vfta = NULL;
	mac->ops.set_vfta = NULL;
	mac->ops.init_uta_tables = NULL;

	/* Flow Control */
	mac->ops.fc_enable = &ixgbe_fc_enable_generic;

	/* Link: entirely media/family specific, left for the family init. */
	mac->ops.get_link_capabilities = NULL;
	mac->ops.setup_link = NULL;
	mac->ops.check_link = NULL;

	return IXGBE_SUCCESS;
}
159 
160 /**
161  * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
162  * of flow control
163  * @hw: pointer to hardware structure
164  *
165  * This function returns TRUE if the device supports flow control
166  * autonegotiation, and FALSE if it does not.
167  *
168  **/
169 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
170 {
171 	bool supported = FALSE;
172 	ixgbe_link_speed speed;
173 	bool link_up;
174 
175 	DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
176 
177 	switch (hw->phy.media_type) {
178 	case ixgbe_media_type_fiber_fixed:
179 	case ixgbe_media_type_fiber:
180 		hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
181 		/* if link is down, assume supported */
182 		if (link_up)
183 			supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
184 				TRUE : FALSE;
185 		else
186 			supported = TRUE;
187 		break;
188 	case ixgbe_media_type_backplane:
189 		supported = TRUE;
190 		break;
191 	case ixgbe_media_type_copper:
192 		/* only some copper devices support flow control autoneg */
193 		switch (hw->device_id) {
194 		case IXGBE_DEV_ID_82599_T3_LOM:
195 		case IXGBE_DEV_ID_X540T:
196 		case IXGBE_DEV_ID_X540_BYPASS:
197 			supported = TRUE;
198 			break;
199 		default:
200 			supported = FALSE;
201 		}
202 	default:
203 		break;
204 	}
205 
206 	ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
207 		      "Device %x does not support flow control autoneg",
208 		      hw->device_id);
209 	return supported;
210 }
211 
212 /**
213  *  ixgbe_setup_fc - Set up flow control
214  *  @hw: pointer to hardware structure
215  *
216  *  Called at init time to set up flow control.
217  **/
int32_t ixgbe_setup_fc(struct ixgbe_hw *hw)
{
	int32_t ret_val = IXGBE_SUCCESS;
	uint32_t reg = 0, reg_bp = 0;	/* reg: PCS1GANA, reg_bp: AUTOC (backplane) */
	uint16_t reg_cu = 0;		/* reg_cu: copper PHY autoneg advert */
	bool got_lock = FALSE;

	DEBUGFUNC("ixgbe_setup_fc");

	/*
	 * Validate the requested mode.  Strict IEEE mode does not allow
	 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
	 */
	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
			   "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/*
	 * 10gig parts do not have a word in the EEPROM to determine the
	 * default flow control setting, so we explicitly set it to full.
	 */
	if (hw->fc.requested_mode == ixgbe_fc_default)
		hw->fc.requested_mode = ixgbe_fc_full;

	/*
	 * Set up the 1G and 10G flow control advertisement registers so the
	 * HW will be able to do fc autoneg once the cable is plugged in.  If
	 * we link at 10G, the 1G advertisement is harmless and vice versa.
	 */
	switch (hw->phy.media_type) {
	case ixgbe_media_type_fiber_fixed:
	case ixgbe_media_type_fiber:
	case ixgbe_media_type_backplane:
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
		reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
		break;
	case ixgbe_media_type_copper:
		/* Read the current PHY advertisement over MDIO. */
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
		break;
	default:
		break;
	}

	/*
	 * The possible values of fc.requested_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.requested_mode) {
	case ixgbe_fc_none:
		/* Flow control completely disabled by software override. */
		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
				    IXGBE_AUTOC_ASM_PAUSE);
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 * (ASM_PAUSE set, SYM_PAUSE clear = asymmetric toward Tx.)
		 */
		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane) {
			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
			reg_cu |= IXGBE_TAF_ASM_PAUSE;
			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
		}
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE, as such we fall
		 * through to the fc_full statement.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		/* FALLTHROUGH */
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
				  IXGBE_AUTOC_ASM_PAUSE;
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
		break;
	default:
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
			     "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	if (hw->mac.type != ixgbe_mac_X540) {
		/*
		 * Enable auto-negotiation between the MAC & PHY;
		 * the MAC will advertise clause 37 flow control.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);

		/* Disable AN timeout */
		if (hw->fc.strict_ieee)
			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;

		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
	}

	/*
	 * AUTOC restart handles negotiation of 1G and 10G on backplane
	 * and copper. There is no need to set the PCS1GCTL register.
	 *
	 */
	if (hw->phy.media_type == ixgbe_media_type_backplane) {
		reg_bp |= IXGBE_AUTOC_AN_RESTART;
		/* Need the SW/FW semaphore around AUTOC writes if 82599 and
		 * LESM is on, likewise reset_pipeline requries the lock as
		 * it also writes AUTOC.
		 */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    ixgbe_verify_lesm_fw_enabled(hw)) {
			ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
			if (ret_val != IXGBE_SUCCESS) {
				ret_val = IXGBE_ERR_SWFW_SYNC;
				goto out;
			}
			got_lock = TRUE;
		}

		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
		if (hw->mac.type == ixgbe_mac_82599EB)
			ixgbe_reset_pipeline(hw);

		if (got_lock)
			hw->mac.ops.release_swfw_sync(hw,
						      IXGBE_GSSR_MAC_CSR_SM);
	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
		    (ixgbe_device_supports_autoneg_fc(hw))) {
		/* Push the updated advertisement back to the copper PHY. */
		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
	}

	DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
out:
	return ret_val;
}
382 
383 /**
384  *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
385  *  @hw: pointer to hardware structure
386  *
387  *  Starts the hardware by filling the bus info structure and media type, clears
388  *  all on chip counters, initializes receive address registers, multicast
389  *  table, VLAN filter table, calls routine to set up link and flow control
390  *  settings, and leaves transmit and receive units disabled and uninitialized
391  **/
392 int32_t ixgbe_start_hw_generic(struct ixgbe_hw *hw)
393 {
394 	int32_t ret_val;
395 	uint32_t ctrl_ext;
396 
397 	DEBUGFUNC("ixgbe_start_hw_generic");
398 
399 	/* Set the media type */
400 	hw->phy.media_type = hw->mac.ops.get_media_type(hw);
401 
402 	/* PHY ops initialization must be done in reset_hw() */
403 
404 	/* Clear the VLAN filter table */
405 	hw->mac.ops.clear_vfta(hw);
406 
407 	/* Clear statistics registers */
408 	hw->mac.ops.clear_hw_cntrs(hw);
409 
410 	/* Set No Snoop Disable */
411 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
412 	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
413 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
414 	IXGBE_WRITE_FLUSH(hw);
415 
416 	/* Setup flow control */
417 	ret_val = ixgbe_setup_fc(hw);
418 	if (ret_val != IXGBE_SUCCESS)
419 		goto out;
420 
421 	/* Clear adapter stopped flag */
422 	hw->adapter_stopped = FALSE;
423 
424 out:
425 	return ret_val;
426 }
427 
428 /**
429  *  ixgbe_start_hw_gen2 - Init sequence for common device family
430  *  @hw: pointer to hw structure
431  *
432  * Performs the init sequence common to the second generation
433  * of 10 GbE devices.
434  * Devices in the second generation:
435  *     82599
436  *     X540
437  **/
438 int32_t ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
439 {
440 	uint32_t i;
441 	uint32_t regval;
442 
443 	/* Clear the rate limiters */
444 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
445 		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
446 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
447 	}
448 	IXGBE_WRITE_FLUSH(hw);
449 
450 	/* Disable relaxed ordering */
451 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
452 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
453 		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
454 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
455 	}
456 
457 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
458 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
459 		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
460 			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
461 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
462 	}
463 
464 	return IXGBE_SUCCESS;
465 }
466 
467 /**
468  *  ixgbe_init_hw_generic - Generic hardware initialization
469  *  @hw: pointer to hardware structure
470  *
471  *  Initialize the hardware by resetting the hardware, filling the bus info
472  *  structure and media type, clears all on chip counters, initializes receive
473  *  address registers, multicast table, VLAN filter table, calls routine to set
474  *  up link and flow control settings, and leaves transmit and receive units
475  *  disabled and uninitialized
476  **/
477 int32_t ixgbe_init_hw_generic(struct ixgbe_hw *hw)
478 {
479 	int32_t status;
480 
481 	DEBUGFUNC("ixgbe_init_hw_generic");
482 
483 	/* Reset the hardware */
484 	status = hw->mac.ops.reset_hw(hw);
485 
486 	if (status == IXGBE_SUCCESS) {
487 		/* Start the HW */
488 		status = hw->mac.ops.start_hw(hw);
489 	}
490 
491 	return status;
492 }
493 
494 /**
495  *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
496  *  @hw: pointer to hardware structure
497  *
498  *  Clears all hardware statistics counters by reading them from the hardware
499  *  Statistics counters are clear on read.
500  **/
int32_t ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	uint16_t i = 0;

	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");

	/* Error counters (CRC, illegal bytes, error bytes, MAC short pkts) */
	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	/* Missed-packet counters, one per packet buffer */
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	/* Link-level flow control XON/XOFF counters; the Rx counters
	 * moved to new register offsets on 82599 and later parts. */
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* Per-priority flow control counters (8 traffic classes) */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	/* Rx size-histogram and good/broadcast/multicast packet counters */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	/* 64-bit octet counters: low half must be read before high half */
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	/* Per-queue no-buffer counters exist only on 82598 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	/* Management traffic counters */
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	/* Tx size-histogram counters */
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	/* Per-queue packet/byte counters; register layout differs per MAC */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/* X540 keeps additional error counters in the internal PHY;
	 * read (and discard) them over MDIO to clear them as well. */
	if (hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			ixgbe_identify_phy(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
	}

	return IXGBE_SUCCESS;
}
607 
608 /**
609  *  ixgbe_get_mac_addr_generic - Generic get MAC address
610  *  @hw: pointer to hardware structure
611  *  @mac_addr: Adapter MAC address
612  *
613  *  Reads the adapter's MAC address from first Receive Address Register (RAR0)
614  *  A reset of the adapter must be performed prior to calling this function
615  *  in order for the MAC address to have been loaded from the EEPROM into RAR0
616  **/
617 int32_t ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, uint8_t *mac_addr)
618 {
619 	uint32_t rar_high;
620 	uint32_t rar_low;
621 	uint16_t i;
622 
623 	DEBUGFUNC("ixgbe_get_mac_addr_generic");
624 
625 #ifdef __sparc64__
626 	struct ixgbe_osdep *os = hw->back;
627 
628 	if (OF_getprop(PCITAG_NODE(os->os_pa.pa_tag), "local-mac-address",
629 	    mac_addr, ETHER_ADDR_LEN) == ETHER_ADDR_LEN)
630 		return IXGBE_SUCCESS;
631 #endif
632 
633 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
634 	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
635 
636 	for (i = 0; i < 4; i++)
637 		mac_addr[i] = (uint8_t)(rar_low >> (i*8));
638 
639 	for (i = 0; i < 2; i++)
640 		mac_addr[i+4] = (uint8_t)(rar_high >> (i*8));
641 
642 	return IXGBE_SUCCESS;
643 }
644 
645 /**
646  *  ixgbe_set_pci_config_data_generic - Generic store PCI bus info
647  *  @hw: pointer to hardware structure
648  *  @link_status: the link status returned by the PCI config space
649  *
650  *  Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
651  **/
652 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw,
653 				       uint16_t link_status)
654 {
655 	struct ixgbe_mac_info *mac = &hw->mac;
656 
657 	hw->bus.type = ixgbe_bus_type_pci_express;
658 
659 	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
660 	case IXGBE_PCI_LINK_WIDTH_1:
661 		hw->bus.width = ixgbe_bus_width_pcie_x1;
662 		break;
663 	case IXGBE_PCI_LINK_WIDTH_2:
664 		hw->bus.width = ixgbe_bus_width_pcie_x2;
665 		break;
666 	case IXGBE_PCI_LINK_WIDTH_4:
667 		hw->bus.width = ixgbe_bus_width_pcie_x4;
668 		break;
669 	case IXGBE_PCI_LINK_WIDTH_8:
670 		hw->bus.width = ixgbe_bus_width_pcie_x8;
671 		break;
672 	default:
673 		hw->bus.width = ixgbe_bus_width_unknown;
674 		break;
675 	}
676 
677 	switch (link_status & IXGBE_PCI_LINK_SPEED) {
678 	case IXGBE_PCI_LINK_SPEED_2500:
679 		hw->bus.speed = ixgbe_bus_speed_2500;
680 		break;
681 	case IXGBE_PCI_LINK_SPEED_5000:
682 		hw->bus.speed = ixgbe_bus_speed_5000;
683 		break;
684 	case IXGBE_PCI_LINK_SPEED_8000:
685 		hw->bus.speed = ixgbe_bus_speed_8000;
686 		break;
687 	default:
688 		hw->bus.speed = ixgbe_bus_speed_unknown;
689 		break;
690 	}
691 
692 	mac->ops.set_lan_id(hw);
693 }
694 
695 /**
696  *  ixgbe_get_bus_info_generic - Generic set PCI bus info
697  *  @hw: pointer to hardware structure
698  *
699  *  Gets the PCI bus info (speed, width, type) then calls helper function to
700  *  store this data within the ixgbe_hw structure.
701  **/
702 int32_t ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
703 {
704 	uint16_t link_status;
705 
706 	DEBUGFUNC("ixgbe_get_bus_info_generic");
707 
708 	/* Get the negotiated link width and speed from PCI config space */
709 	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
710 
711 	ixgbe_set_pci_config_data_generic(hw, link_status);
712 
713 	return IXGBE_SUCCESS;
714 }
715 
716 /**
717  *  ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
718  *  @hw: pointer to the HW structure
719  *
720  *  Determines the LAN function id by reading memory-mapped registers
721  *  and swaps the port value if requested.
722  **/
723 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
724 {
725 	struct ixgbe_bus_info *bus = &hw->bus;
726 	uint32_t reg;
727 
728 	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
729 
730 	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
731 	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
732 	bus->lan_id = bus->func;
733 
734 	/* check for a port swap */
735 	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
736 	if (reg & IXGBE_FACTPS_LFS)
737 		bus->func ^= 0x1;
738 }
739 
740 /**
741  *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
742  *  @hw: pointer to hardware structure
743  *
744  *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
745  *  disables transmit and receive units. The adapter_stopped flag is used by
746  *  the shared code and drivers to determine if the adapter is in a stopped
747  *  state and should not touch the hardware.
748  **/
749 int32_t ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
750 {
751 	uint32_t reg_val;
752 	uint16_t i;
753 
754 	DEBUGFUNC("ixgbe_stop_adapter_generic");
755 
756 	/*
757 	 * Set the adapter_stopped flag so other driver functions stop touching
758 	 * the hardware
759 	 */
760 	hw->adapter_stopped = TRUE;
761 
762 	/* Disable the receive unit */
763 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
764 
765 	/* Clear interrupt mask to stop interrupts from being generated */
766 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
767 
768 	/* Clear any pending interrupts, flush previous writes */
769 	IXGBE_READ_REG(hw, IXGBE_EICR);
770 
771 	/* Disable the transmit unit.  Each queue must be disabled. */
772 	for (i = 0; i < hw->mac.max_tx_queues; i++)
773 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
774 
775 	/* Disable the receive unit by stopping each queue */
776 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
777 		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
778 		reg_val &= ~IXGBE_RXDCTL_ENABLE;
779 		reg_val |= IXGBE_RXDCTL_SWFLSH;
780 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
781 	}
782 
783 	/* flush all queues disables */
784 	IXGBE_WRITE_FLUSH(hw);
785 	msec_delay(2);
786 
787 	/*
788 	 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
789 	 * access and verify no pending requests
790 	 */
791 	return ixgbe_disable_pcie_master(hw);
792 }
793 
794 /**
795  *  ixgbe_led_on_generic - Turns on the software controllable LEDs.
796  *  @hw: pointer to hardware structure
797  *  @index: led number to turn on
798  **/
799 int32_t ixgbe_led_on_generic(struct ixgbe_hw *hw, uint32_t index)
800 {
801 	uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
802 
803 	DEBUGFUNC("ixgbe_led_on_generic");
804 
805 	/* To turn on the LED, set mode to ON. */
806 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
807 	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
808 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
809 	IXGBE_WRITE_FLUSH(hw);
810 
811 	return IXGBE_SUCCESS;
812 }
813 
814 /**
815  *  ixgbe_led_off_generic - Turns off the software controllable LEDs.
816  *  @hw: pointer to hardware structure
817  *  @index: led number to turn off
818  **/
819 int32_t ixgbe_led_off_generic(struct ixgbe_hw *hw, uint32_t index)
820 {
821 	uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
822 
823 	DEBUGFUNC("ixgbe_led_off_generic");
824 
825 	/* To turn off the LED, set mode to OFF. */
826 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
827 	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
828 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
829 	IXGBE_WRITE_FLUSH(hw);
830 
831 	return IXGBE_SUCCESS;
832 }
833 
834 /**
835  *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
836  *  @hw: pointer to hardware structure
837  *
838  *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
839  *  ixgbe_hw struct in order to set up EEPROM access.
840  **/
841 int32_t ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
842 {
843 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
844 	uint32_t eec;
845 	uint16_t eeprom_size;
846 
847 	DEBUGFUNC("ixgbe_init_eeprom_params_generic");
848 
849 	if (eeprom->type == ixgbe_eeprom_uninitialized) {
850 		eeprom->type = ixgbe_eeprom_none;
851 		/* Set default semaphore delay to 10ms which is a well
852 		 * tested value */
853 		eeprom->semaphore_delay = 10;
854 		/* Clear EEPROM page size, it will be initialized as needed */
855 		eeprom->word_page_size = 0;
856 
857 		/*
858 		 * Check for EEPROM present first.
859 		 * If not present leave as none
860 		 */
861 		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
862 		if (eec & IXGBE_EEC_PRES) {
863 			eeprom->type = ixgbe_eeprom_spi;
864 
865 			/*
866 			 * SPI EEPROM is assumed here.  This code would need to
867 			 * change if a future EEPROM is not SPI.
868 			 */
869 			eeprom_size = (uint16_t)((eec & IXGBE_EEC_SIZE) >>
870 					    IXGBE_EEC_SIZE_SHIFT);
871 			eeprom->word_size = 1 << (eeprom_size +
872 					     IXGBE_EEPROM_WORD_SIZE_SHIFT);
873 		}
874 
875 		if (eec & IXGBE_EEC_ADDR_SIZE)
876 			eeprom->address_bits = 16;
877 		else
878 			eeprom->address_bits = 8;
879 		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
880 			  "%d\n", eeprom->type, eeprom->word_size,
881 			  eeprom->address_bits);
882 	}
883 
884 	return IXGBE_SUCCESS;
885 }
886 
887 /**
888  *  ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
889  *  @hw: pointer to hardware structure
890  *  @offset: offset within the EEPROM to be written to
891  *  @data: 16 bit word to be written to the EEPROM
892  *
893  *  If ixgbe_eeprom_update_checksum is not called after this function, the
894  *  EEPROM will most likely contain an invalid checksum.
895  **/
int32_t ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t data)
{
	int32_t status;
	uint8_t write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	DEBUGFUNC("ixgbe_write_eeprom_generic");

	/* Make sure EEPROM geometry (word_size, address_bits) is known. */
	hw->eeprom.ops.init_params(hw);

	/* Reject offsets beyond the end of the part. */
	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/* Prepare the EEPROM for writing  */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		/* Wait until the part reports not-busy before proceeding. */
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		ixgbe_standby_eeprom(hw);

		/*  Send the WRITE ENABLE command (8 bit opcode )  */
		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_WREN_OPCODE_SPI,
					    IXGBE_EEPROM_OPCODE_BITS);

		ixgbe_standby_eeprom(hw);

		/*
		 * Some SPI eeproms use the 8th address bit embedded in the
		 * opcode
		 */
		if ((hw->eeprom.address_bits == 8) && (offset >= 128))
			write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

		/* Send the Write command (8-bit opcode + addr) */
		ixgbe_shift_out_eeprom_bits(hw, write_opcode,
					    IXGBE_EEPROM_OPCODE_BITS);
		/* Address is in bytes, offset is in 16-bit words: *2. */
		ixgbe_shift_out_eeprom_bits(hw, (uint16_t)(offset*2),
					    hw->eeprom.address_bits);

		/* Send the data */
		/* Byte-swap: the SPI part expects the high byte first. */
		data = (data >> 8) | (data << 8);
		ixgbe_shift_out_eeprom_bits(hw, data, 16);
		ixgbe_standby_eeprom(hw);

		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

out:
	return status;
}
954 
955 /**
956  *  ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
957  *  @hw: pointer to hardware structure
958  *  @offset: offset within the EEPROM to be read
959  *  @data: read 16 bit value from EEPROM
960  *
961  *  Reads 16 bit value from EEPROM through bit-bang method
962  **/
963 int32_t ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, uint16_t offset,
964 				       uint16_t *data)
965 {
966 	int32_t status;
967 	uint16_t word_in;
968 	uint8_t read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
969 
970 	hw->eeprom.ops.init_params(hw);
971 
972 	if (offset >= hw->eeprom.word_size) {
973 		status = IXGBE_ERR_EEPROM;
974 		goto out;
975 	}
976 
977 	/* Prepare the EEPROM for reading  */
978 	status = ixgbe_acquire_eeprom(hw);
979 
980 	if (status == IXGBE_SUCCESS) {
981 		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
982 			ixgbe_release_eeprom(hw);
983 			status = IXGBE_ERR_EEPROM;
984 		}
985 	}
986 
987 	if (status == IXGBE_SUCCESS) {
988 		ixgbe_standby_eeprom(hw);
989 
990 		/*
991 		 * Some SPI eeproms use the 8th address bit embedded in the
992 		 * opcode
993 		 */
994 		if ((hw->eeprom.address_bits == 8) && (offset >= 128))
995 			read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
996 
997 		/* Send the READ command (opcode + addr) */
998 		ixgbe_shift_out_eeprom_bits(hw, read_opcode,
999 					    IXGBE_EEPROM_OPCODE_BITS);
1000 		ixgbe_shift_out_eeprom_bits(hw, (uint16_t)(offset*2),
1001 					    hw->eeprom.address_bits);
1002 
1003 		/* Read the data. */
1004 		word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1005 		*data = (word_in >> 8) | (word_in << 8);
1006 
1007 		/* End this read operation */
1008 		ixgbe_release_eeprom(hw);
1009 	}
1010 
1011 out:
1012 	return status;
1013 }
1014 
1015 /**
1016  *  ixgbe_read_eerd_generic - Read EEPROM word using EERD
1017  *  @hw: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to read
1019  *  @data: word read from the EEPROM
1020  *
1021  *  Reads a 16 bit word from the EEPROM using the EERD register.
1022  **/
1023 int32_t ixgbe_read_eerd_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t *data)
1024 {
1025 	uint32_t eerd;
1026 	int32_t status;
1027 
1028 	hw->eeprom.ops.init_params(hw);
1029 
1030 	if (offset >= hw->eeprom.word_size) {
1031 		status = IXGBE_ERR_EEPROM;
1032 		goto out;
1033 	}
1034 
1035 	eerd = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) +
1036 	       IXGBE_EEPROM_RW_REG_START;
1037 
1038 	IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1039 	status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1040 
1041 	if (status == IXGBE_SUCCESS)
1042 		*data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1043 			 IXGBE_EEPROM_RW_REG_DATA);
1044 	else
1045 		DEBUGOUT("Eeprom read timed out\n");
1046 
1047 out:
1048 	return status;
1049 }
1050 
1051 /**
1052  *  ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1053  *  @hw: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to write
1055  *  @data: word write to the EEPROM
1056  *
1057  *  Write a 16 bit word to the EEPROM using the EEWR register.
1058  **/
1059 int32_t ixgbe_write_eewr_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t data)
1060 {
1061 	uint32_t eewr;
1062 	int32_t status;
1063 
1064 	hw->eeprom.ops.init_params(hw);
1065 
1066 	if (offset >= hw->eeprom.word_size) {
1067 		status = IXGBE_ERR_EEPROM;
1068 		goto out;
1069 	}
1070 
1071 	eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1072 	       (data << IXGBE_EEPROM_RW_REG_DATA) | IXGBE_EEPROM_RW_REG_START;
1073 
1074 	status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1075 	if (status != IXGBE_SUCCESS) {
1076 		DEBUGOUT("Eeprom write EEWR timed out\n");
1077 		goto out;
1078 	}
1079 
1080 	IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1081 
1082 	status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1083 	if (status != IXGBE_SUCCESS) {
1084 		DEBUGOUT("Eeprom write EEWR timed out\n");
1085 		goto out;
1086 	}
1087 
1088 out:
1089 	return status;
1090 }
1091 
1092 /**
1093  *  ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1094  *  @hw: pointer to hardware structure
1095  *  @ee_reg: EEPROM flag for polling
1096  *
1097  *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1098  *  read or write is done respectively.
1099  **/
1100 int32_t ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, uint32_t ee_reg)
1101 {
1102 	uint32_t i;
1103 	uint32_t reg;
1104 	int32_t status = IXGBE_ERR_EEPROM;
1105 
1106 	DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1107 
1108 	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1109 		if (ee_reg == IXGBE_NVM_POLL_READ)
1110 			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1111 		else
1112 			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1113 
1114 		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1115 			status = IXGBE_SUCCESS;
1116 			break;
1117 		}
1118 		usec_delay(5);
1119 	}
1120 
1121 	if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1122 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
1123 			     "EEPROM read/write done polling timed out");
1124 
1125 	return status;
1126 }
1127 
1128 /**
1129  *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1130  *  @hw: pointer to hardware structure
1131  *
1132  *  Prepares EEPROM for access using bit-bang method. This function should
1133  *  be called before issuing a command to the EEPROM.
1134  **/
1135 int32_t ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1136 {
1137 	int32_t status = IXGBE_SUCCESS;
1138 	uint32_t eec;
1139 	uint32_t i;
1140 
1141 	DEBUGFUNC("ixgbe_acquire_eeprom");
1142 
1143 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1144 	    != IXGBE_SUCCESS)
1145 		status = IXGBE_ERR_SWFW_SYNC;
1146 
1147 	if (status == IXGBE_SUCCESS) {
1148 		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1149 
1150 		/* Request EEPROM Access */
1151 		eec |= IXGBE_EEC_REQ;
1152 		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1153 
1154 		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1155 			eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1156 			if (eec & IXGBE_EEC_GNT)
1157 				break;
1158 			usec_delay(5);
1159 		}
1160 
1161 		/* Release if grant not acquired */
1162 		if (!(eec & IXGBE_EEC_GNT)) {
1163 			eec &= ~IXGBE_EEC_REQ;
1164 			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1165 			DEBUGOUT("Could not acquire EEPROM grant\n");
1166 
1167 			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1168 			status = IXGBE_ERR_EEPROM;
1169 		}
1170 
1171 		/* Setup EEPROM for Read/Write */
1172 		if (status == IXGBE_SUCCESS) {
1173 			/* Clear CS and SK */
1174 			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1175 			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1176 			IXGBE_WRITE_FLUSH(hw);
1177 			usec_delay(1);
1178 		}
1179 	}
1180 	return status;
1181 }
1182 
1183 /**
1184  *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
1185  *  @hw: pointer to hardware structure
1186  *
1187  *  Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1188  **/
int32_t ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	int32_t status = IXGBE_ERR_EEPROM;
	uint32_t timeout = 2000;	/* iterations, 50us apart: ~100ms per phase */
	uint32_t i;
	uint32_t swsm;

	DEBUGFUNC("ixgbe_get_eeprom_semaphore");


	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(50);
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
			 "not granted.\n");
		/*
		 * this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		usec_delay(50);
		/*
		 * one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI))
			status = IXGBE_SUCCESS;
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.  (Read back to confirm the bit stuck.)
			 */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			usec_delay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			ERROR_REPORT1(IXGBE_ERROR_POLLING,
			    "SWESMBI Software EEPROM semaphore not granted.\n");
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			     "Software semaphore SMBI between device drivers "
			     "not granted.\n");
	}

	return status;
}
1273 
1274 /**
1275  *  ixgbe_release_eeprom_semaphore - Release hardware semaphore
1276  *  @hw: pointer to hardware structure
1277  *
1278  *  This function clears hardware semaphore bits.
1279  **/
void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
{
	uint32_t swsm;

	DEBUGFUNC("ixgbe_release_eeprom_semaphore");

	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
	/* Flush so the release is visible before the caller proceeds */
	IXGBE_WRITE_FLUSH(hw);
}
1293 
1294 /**
1295  *  ixgbe_ready_eeprom - Polls for EEPROM ready
1296  *  @hw: pointer to hardware structure
1297  **/
1298 int32_t ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1299 {
1300 	int32_t status = IXGBE_SUCCESS;
1301 	uint16_t i;
1302 	uint8_t spi_stat_reg;
1303 
1304 	DEBUGFUNC("ixgbe_ready_eeprom");
1305 
1306 	/*
1307 	 * Read "Status Register" repeatedly until the LSB is cleared.  The
1308 	 * EEPROM will signal that the command has been completed by clearing
1309 	 * bit 0 of the internal status register.  If it's not cleared within
1310 	 * 5 milliseconds, then error out.
1311 	 */
1312 	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1313 		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1314 					    IXGBE_EEPROM_OPCODE_BITS);
1315 		spi_stat_reg = (uint8_t)ixgbe_shift_in_eeprom_bits(hw, 8);
1316 		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1317 			break;
1318 
1319 		usec_delay(5);
1320 		ixgbe_standby_eeprom(hw);
1321 	};
1322 
1323 	/*
1324 	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1325 	 * devices (and only 0-5mSec on 5V devices)
1326 	 */
1327 	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1328 		DEBUGOUT("SPI EEPROM Status error\n");
1329 		status = IXGBE_ERR_EEPROM;
1330 	}
1331 
1332 	return status;
1333 }
1334 
1335 /**
1336  *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1337  *  @hw: pointer to hardware structure
1338  **/
1339 void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1340 {
1341 	uint32_t eec;
1342 
1343 	DEBUGFUNC("ixgbe_standby_eeprom");
1344 
1345 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1346 
1347 	/* Toggle CS to flush commands */
1348 	eec |= IXGBE_EEC_CS;
1349 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1350 	IXGBE_WRITE_FLUSH(hw);
1351 	usec_delay(1);
1352 	eec &= ~IXGBE_EEC_CS;
1353 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1354 	IXGBE_WRITE_FLUSH(hw);
1355 	usec_delay(1);
1356 }
1357 
1358 /**
1359  *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1360  *  @hw: pointer to hardware structure
1361  *  @data: data to send to the EEPROM
1362  *  @count: number of bits to shift out
1363  **/
1364 void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, uint16_t data,
1365 					uint16_t count)
1366 {
1367 	uint32_t eec;
1368 	uint32_t mask;
1369 	uint32_t i;
1370 
1371 	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
1372 
1373 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1374 
1375 	/*
1376 	 * Mask is used to shift "count" bits of "data" out to the EEPROM
1377 	 * one bit at a time.  Determine the starting bit based on count
1378 	 */
1379 	mask = 0x01 << (count - 1);
1380 
1381 	for (i = 0; i < count; i++) {
1382 		/*
1383 		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1384 		 * "1", and then raising and then lowering the clock (the SK
1385 		 * bit controls the clock input to the EEPROM).  A "0" is
1386 		 * shifted out to the EEPROM by setting "DI" to "0" and then
1387 		 * raising and then lowering the clock.
1388 		 */
1389 		if (data & mask)
1390 			eec |= IXGBE_EEC_DI;
1391 		else
1392 			eec &= ~IXGBE_EEC_DI;
1393 
1394 		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1395 		IXGBE_WRITE_FLUSH(hw);
1396 
1397 		usec_delay(1);
1398 
1399 		ixgbe_raise_eeprom_clk(hw, &eec);
1400 		ixgbe_lower_eeprom_clk(hw, &eec);
1401 
1402 		/*
1403 		 * Shift mask to signify next bit of data to shift in to the
1404 		 * EEPROM
1405 		 */
1406 		mask = mask >> 1;
1407 	};
1408 
1409 	/* We leave the "DI" bit set to "0" when we leave this routine. */
1410 	eec &= ~IXGBE_EEC_DI;
1411 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1412 	IXGBE_WRITE_FLUSH(hw);
1413 }
1414 
1415 /**
1416  *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
1417  *  @hw: pointer to hardware structure
1418  **/
1419 uint16_t ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, uint16_t count)
1420 {
1421 	uint32_t eec;
1422 	uint32_t i;
1423 	uint16_t data = 0;
1424 
1425 	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
1426 
1427 	/*
1428 	 * In order to read a register from the EEPROM, we need to shift
1429 	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
1430 	 * the clock input to the EEPROM (setting the SK bit), and then reading
1431 	 * the value of the "DO" bit.  During this "shifting in" process the
1432 	 * "DI" bit should always be clear.
1433 	 */
1434 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1435 
1436 	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
1437 
1438 	for (i = 0; i < count; i++) {
1439 		data = data << 1;
1440 		ixgbe_raise_eeprom_clk(hw, &eec);
1441 
1442 		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1443 
1444 		eec &= ~(IXGBE_EEC_DI);
1445 		if (eec & IXGBE_EEC_DO)
1446 			data |= 1;
1447 
1448 		ixgbe_lower_eeprom_clk(hw, &eec);
1449 	}
1450 
1451 	return data;
1452 }
1453 
1454 /**
1455  *  ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
1456  *  @hw: pointer to hardware structure
1457  *  @eec: EEC register's current value
1458  **/
1459 void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec)
1460 {
1461 	DEBUGFUNC("ixgbe_raise_eeprom_clk");
1462 
1463 	/*
1464 	 * Raise the clock input to the EEPROM
1465 	 * (setting the SK bit), then delay
1466 	 */
1467 	*eec = *eec | IXGBE_EEC_SK;
1468 	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1469 	IXGBE_WRITE_FLUSH(hw);
1470 	usec_delay(1);
1471 }
1472 
1473 /**
1474  *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
1475  *  @hw: pointer to hardware structure
 *  @eec: EEC register's current value
1477  **/
1478 void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec)
1479 {
1480 	DEBUGFUNC("ixgbe_lower_eeprom_clk");
1481 
1482 	/*
1483 	 * Lower the clock input to the EEPROM (clearing the SK bit), then
1484 	 * delay
1485 	 */
1486 	*eec = *eec & ~IXGBE_EEC_SK;
1487 	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1488 	IXGBE_WRITE_FLUSH(hw);
1489 	usec_delay(1);
1490 }
1491 
1492 /**
1493  *  ixgbe_release_eeprom - Release EEPROM, release semaphores
1494  *  @hw: pointer to hardware structure
1495  **/
void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
	uint32_t eec;

	DEBUGFUNC("ixgbe_release_eeprom");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	/* Deselect the part before dropping the access request */
	eec |= IXGBE_EEC_CS;  /* Pull CS high */
	eec &= ~IXGBE_EEC_SK; /* Lower SCK */

	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);

	usec_delay(1);

	/* Stop requesting EEPROM access */
	eec &= ~IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);

	/* Return the SW/FW sync taken in ixgbe_acquire_eeprom() */
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	/* Delay before attempt to obtain semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
}
1521 
1522 /**
1523  *  ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
1524  *  @hw: pointer to hardware structure
1525  **/
uint16_t ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	uint16_t i;
	uint16_t j;
	uint16_t checksum = 0;
	uint16_t length = 0;
	uint16_t pointer = 0;
	uint16_t word = 0;

	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) {
			DEBUGOUT("EEPROM read failed\n");
			break;
		}
		checksum += word;
	}

	/* Include all data from pointers except for the fw pointer */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		/*
		 * NOTE(review): the read return values below are ignored; a
		 * failed read leaves stale contents in pointer/length/word
		 * and silently skews the sum — confirm callers tolerate this
		 * before relying on error behavior here.
		 */
		hw->eeprom.ops.read(hw, i, &pointer);

		/* Make sure the pointer seems valid */
		if (pointer != 0xFFFF && pointer != 0) {
			hw->eeprom.ops.read(hw, pointer, &length);

			if (length != 0xFFFF && length != 0) {
				/* Sum every word in the pointed-to region */
				for (j = pointer+1; j <= pointer+length; j++) {
					hw->eeprom.ops.read(hw, j, &word);
					checksum += word;
				}
			}
		}
	}

	/* Checksum is the value that makes the full sum equal IXGBE_EEPROM_SUM */
	checksum = (uint16_t)IXGBE_EEPROM_SUM - checksum;

	return checksum;
}
1567 
1568 /**
1569  *  ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
1570  *  @hw: pointer to hardware structure
1571  *  @checksum_val: calculated checksum
1572  *
1573  *  Performs checksum calculation and validates the EEPROM checksum.  If the
1574  *  caller does not need checksum_val, the value can be NULL.
1575  **/
1576 int32_t ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
1577 					       uint16_t *checksum_val)
1578 {
1579 	int32_t status;
1580 	uint16_t checksum;
1581 	uint16_t read_checksum = 0;
1582 
1583 	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
1584 
1585 	/*
1586 	 * Read the first word from the EEPROM. If this times out or fails, do
1587 	 * not continue or we could be in for a very long wait while every
1588 	 * EEPROM read fails
1589 	 */
1590 	status = hw->eeprom.ops.read(hw, 0, &checksum);
1591 
1592 	if (status == IXGBE_SUCCESS) {
1593 		checksum = hw->eeprom.ops.calc_checksum(hw);
1594 
1595 		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
1596 
1597 		/*
1598 		 * Verify read checksum from EEPROM is the same as
1599 		 * calculated checksum
1600 		 */
1601 		if (read_checksum != checksum)
1602 			status = IXGBE_ERR_EEPROM_CHECKSUM;
1603 
1604 		/* If the user cares, return the calculated checksum */
1605 		if (checksum_val)
1606 			*checksum_val = checksum;
1607 	} else {
1608 		DEBUGOUT("EEPROM read failed\n");
1609 	}
1610 
1611 	return status;
1612 }
1613 
1614 /**
1615  *  ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
1616  *  @hw: pointer to hardware structure
1617  **/
1618 int32_t ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1619 {
1620 	int32_t status;
1621 	uint16_t checksum;
1622 
1623 	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
1624 
1625 	/*
1626 	 * Read the first word from the EEPROM. If this times out or fails, do
1627 	 * not continue or we could be in for a very long wait while every
1628 	 * EEPROM read fails
1629 	 */
1630 	status = hw->eeprom.ops.read(hw, 0, &checksum);
1631 
1632 	if (status == IXGBE_SUCCESS) {
1633 		checksum = hw->eeprom.ops.calc_checksum(hw);
1634 		status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
1635 					      checksum);
1636 	} else {
1637 		DEBUGOUT("EEPROM read failed\n");
1638 	}
1639 
1640 	return status;
1641 }
1642 
1643 /**
1644  *  ixgbe_validate_mac_addr - Validate MAC address
1645  *  @mac_addr: pointer to MAC address.
1646  *
1647  *  Tests a MAC address to ensure it is a valid Individual Address
1648  **/
1649 int32_t ixgbe_validate_mac_addr(uint8_t *mac_addr)
1650 {
1651 	int32_t status = IXGBE_SUCCESS;
1652 
1653 	DEBUGFUNC("ixgbe_validate_mac_addr");
1654 
1655 	/* Make sure it is not a multicast address */
1656 	if (IXGBE_IS_MULTICAST(mac_addr)) {
1657 		DEBUGOUT("MAC address is multicast\n");
1658 		status = IXGBE_ERR_INVALID_MAC_ADDR;
1659 	/* Not a broadcast address */
1660 	} else if (IXGBE_IS_BROADCAST(mac_addr)) {
1661 		DEBUGOUT("MAC address is broadcast\n");
1662 		status = IXGBE_ERR_INVALID_MAC_ADDR;
1663 	/* Reject the zero address */
1664 	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
1665 		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
1666 		DEBUGOUT("MAC address is all zeros\n");
1667 		status = IXGBE_ERR_INVALID_MAC_ADDR;
1668 	}
1669 	return status;
1670 }
1671 
1672 /**
1673  *  ixgbe_set_rar_generic - Set Rx address register
1674  *  @hw: pointer to hardware structure
1675  *  @index: Receive address register to write
1676  *  @addr: Address to put into receive address register
1677  *  @vmdq: VMDq "set" or "pool" index
1678  *  @enable_addr: set flag that address is active
1679  *
1680  *  Puts an ethernet address into a receive address register.
1681  **/
int32_t ixgbe_set_rar_generic(struct ixgbe_hw *hw, uint32_t index, uint8_t *addr,
			      uint32_t vmdq, uint32_t enable_addr)
{
	uint32_t rar_low, rar_high;
	uint32_t rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_set_rar_generic");

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
			     "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* setup VMDq pool selection before this RAR gets enabled */
	hw->mac.ops.set_vmdq(hw, index, vmdq);

	/*
	 * HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 */
	rar_low = ((uint32_t)addr[0] |
		   ((uint32_t)addr[1] << 8) |
		   ((uint32_t)addr[2] << 16) |
		   ((uint32_t)addr[3] << 24));
	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
	rar_high |= ((uint32_t)addr[4] | ((uint32_t)addr[5] << 8));

	if (enable_addr != 0)
		rar_high |= IXGBE_RAH_AV;

	/* Write RAL first: RAH carries the AV (valid) bit, so the full
	 * address is in place before the entry can become active. */
	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	return IXGBE_SUCCESS;
}
1725 
1726 /**
1727  *  ixgbe_clear_rar_generic - Remove Rx address register
1728  *  @hw: pointer to hardware structure
1729  *  @index: Receive address register to write
1730  *
1731  *  Clears an ethernet address from a receive address register.
1732  **/
int32_t ixgbe_clear_rar_generic(struct ixgbe_hw *hw, uint32_t index)
{
	uint32_t rar_high;
	uint32_t rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_clear_rar_generic");

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
			     "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);

	/* Clearing AV via RAH disables the entry; RAL is zeroed first */
	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	/* clear VMDq pool/queue selection for this RAR */
	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);

	return IXGBE_SUCCESS;
}
1763 
1764 /**
1765  *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
1766  *  @hw: pointer to hardware structure
1767  *
1768  *  Places the MAC address in receive address register 0 and clears the rest
1769  *  of the receive address registers. Clears the multicast table. Assumes
1770  *  the receiver is in reset when the routine is called.
1771  **/
int32_t ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	uint32_t i;
	uint32_t rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_init_rx_addrs_generic");

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
	    IXGBE_ERR_INVALID_MAC_ADDR) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);
	} else {
		/* Setup the receive address. */
		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for RAR 0 */
		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
	}
	hw->addr_ctrl.overflow_promisc = 0;

	/* RAR[0] is now the only address in use (the station address) */
	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA */
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	DEBUGOUT(" Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	ixgbe_init_uta_tables(hw);

	return IXGBE_SUCCESS;
}
1831 
1832 /**
1833  *  ixgbe_add_uc_addr - Adds a secondary unicast address.
1834  *  @hw: pointer to hardware structure
1835  *  @addr: new address
1836  *
1837  *  Adds it to unused receive address register or goes into promiscuous mode.
1838  **/
1839 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, uint8_t *addr, uint32_t vmdq)
1840 {
1841 	uint32_t rar_entries = hw->mac.num_rar_entries;
1842 	uint32_t rar;
1843 
1844 	DEBUGFUNC("ixgbe_add_uc_addr");
1845 
1846 	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
1847 		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
1848 
1849 	/*
1850 	 * Place this address in the RAR if there is room,
1851 	 * else put the controller into promiscuous mode
1852 	 */
1853 	if (hw->addr_ctrl.rar_used_count < rar_entries) {
1854 		rar = hw->addr_ctrl.rar_used_count;
1855 		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
1856 		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
1857 		hw->addr_ctrl.rar_used_count++;
1858 	} else {
1859 		hw->addr_ctrl.overflow_promisc++;
1860 	}
1861 
1862 	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
1863 }
1864 
1865 /**
1866  *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
1867  *  @hw: pointer to hardware structure
1868  *  @addr_list: the list of new addresses
1869  *  @addr_count: number of addresses
1870  *  @next: iterator function to walk the address list
1871  *
1872  *  The given list replaces any existing list.  Clears the secondary addrs from
1873  *  receive address registers.  Uses unused receive address registers for the
1874  *  first secondary addresses, and falls back to promiscuous mode as needed.
1875  *
1876  *  Drivers using secondary unicast addresses must set user_set_promisc when
1877  *  manually putting the device into promiscuous mode.
1878  **/
int32_t ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, uint8_t *addr_list,
					  uint32_t addr_count, ixgbe_mc_addr_itr next)
{
	uint8_t *addr;
	uint32_t i;
	uint32_t old_promisc_setting = hw->addr_ctrl.overflow_promisc;
	uint32_t uc_addr_in_use;
	uint32_t fctrl;
	uint32_t vmdq;

	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");

	/*
	 * Clear accounting of old secondary address list,
	 * don't count RAR[0]
	 * NOTE(review): assumes rar_used_count >= 1 (RAR[0] holds the
	 * station address); the subtraction underflows otherwise — confirm
	 * all callers run after ixgbe_init_rx_addrs_generic().
	 */
	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
	hw->addr_ctrl.overflow_promisc = 0;

	/* Zero out the other receive addresses */
	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
	for (i = 0; i < uc_addr_in_use; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
	}

	/* Add the new addresses */
	for (i = 0; i < addr_count; i++) {
		DEBUGOUT(" Adding the secondary addresses:\n");
		addr = next(hw, &addr_list, &vmdq);
		ixgbe_add_uc_addr(hw, addr, vmdq);
	}

	if (hw->addr_ctrl.overflow_promisc) {
		/* enable promisc if not already in overflow or set by user */
		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Entering address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}
	} else {
		/* only disable if set by overflow, not by user */
		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Leaving address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl &= ~IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}
	}

	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}
1934 
1935 /**
1936  *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
1937  *  @hw: pointer to hardware structure
1938  *  @mc_addr: the multicast address
1939  *
1940  *  Extracts the 12 bits, from a multicast address, to determine which
1941  *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
1942  *  incoming rx multicast addresses, to determine the bit-vector to check in
1943  *  the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
1944  *  by the MO field of the MCSTCTRL. The MO field is set during initialization
1945  *  to mc_filter_type.
1946  **/
1947 int32_t ixgbe_mta_vector(struct ixgbe_hw *hw, uint8_t *mc_addr)
1948 {
1949 	uint32_t vector = 0;
1950 
1951 	DEBUGFUNC("ixgbe_mta_vector");
1952 
1953 	switch (hw->mac.mc_filter_type) {
1954 	case 0:   /* use bits [47:36] of the address */
1955 		vector = ((mc_addr[4] >> 4) | (((uint16_t)mc_addr[5]) << 4));
1956 		break;
1957 	case 1:   /* use bits [46:35] of the address */
1958 		vector = ((mc_addr[4] >> 3) | (((uint16_t)mc_addr[5]) << 5));
1959 		break;
1960 	case 2:   /* use bits [45:34] of the address */
1961 		vector = ((mc_addr[4] >> 2) | (((uint16_t)mc_addr[5]) << 6));
1962 		break;
1963 	case 3:   /* use bits [43:32] of the address */
1964 		vector = ((mc_addr[4]) | (((uint16_t)mc_addr[5]) << 8));
1965 		break;
1966 	default:  /* Invalid mc_filter_type */
1967 		DEBUGOUT("MC filter type param set incorrectly\n");
1968 		panic("incorrect multicast filter type");
1969 		break;
1970 	}
1971 
1972 	/* vector can only be 12-bits or boundary will be exceeded */
1973 	vector &= 0xFFF;
1974 	return vector;
1975 }
1976 
1977 /**
1978  *  ixgbe_set_mta - Set bit-vector in multicast table
1979  *  @hw: pointer to hardware structure
1980  *  @hash_value: Multicast address hash value
1981  *
1982  *  Sets the bit-vector in the multicast table.
1983  **/
1984 void ixgbe_set_mta(struct ixgbe_hw *hw, uint8_t *mc_addr)
1985 {
1986 	uint32_t vector;
1987 	uint32_t vector_bit;
1988 	uint32_t vector_reg;
1989 
1990 	DEBUGFUNC("ixgbe_set_mta");
1991 
1992 	hw->addr_ctrl.mta_in_use++;
1993 
1994 	vector = ixgbe_mta_vector(hw, mc_addr);
1995 	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
1996 
1997 	/*
1998 	 * The MTA is a register array of 128 32-bit registers. It is treated
1999 	 * like an array of 4096 bits.  We want to set bit
2000 	 * BitArray[vector_value]. So we figure out what register the bit is
2001 	 * in, read it, OR in the new bit, then write back the new value.  The
2002 	 * register is determined by the upper 7 bits of the vector value and
2003 	 * the bit within that register are determined by the lower 5 bits of
2004 	 * the value.
2005 	 */
2006 	vector_reg = (vector >> 5) & 0x7F;
2007 	vector_bit = vector & 0x1F;
2008 	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2009 }
2010 
2011 /**
2012  *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2013  *  @hw: pointer to hardware structure
2014  *  @mc_addr_list: the list of new multicast addresses
2015  *  @mc_addr_count: number of addresses
2016  *  @next: iterator function to walk the multicast address list
2017  *  @clear: flag, when set clears the table beforehand
2018  *
2019  *  When the clear flag is set, the given list replaces any existing list.
2020  *  Hashes the given addresses into the multicast table.
2021  **/
2022 int32_t ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, uint8_t *mc_addr_list,
2023 					  uint32_t mc_addr_count, ixgbe_mc_addr_itr next,
2024 					  bool clear)
2025 {
2026 	uint32_t i;
2027 	uint32_t vmdq;
2028 
2029 	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2030 
2031 	/*
2032 	 * Set the new number of MC addresses that we are being requested to
2033 	 * use.
2034 	 */
2035 	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2036 	hw->addr_ctrl.mta_in_use = 0;
2037 
2038 	/* Clear mta_shadow */
2039 	if (clear) {
2040 		DEBUGOUT(" Clearing MTA\n");
2041 		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2042 	}
2043 
2044 	/* Update mta_shadow */
2045 	for (i = 0; i < mc_addr_count; i++) {
2046 		DEBUGOUT(" Adding the multicast addresses:\n");
2047 		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
2048 	}
2049 
2050 	/* Enable mta */
2051 	for (i = 0; i < hw->mac.mcft_size; i++)
2052 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2053 				      hw->mac.mta_shadow[i]);
2054 
2055 	if (hw->addr_ctrl.mta_in_use > 0)
2056 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2057 				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2058 
2059 	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2060 	return IXGBE_SUCCESS;
2061 }
2062 
2063 /**
2064  *  ixgbe_enable_mc_generic - Enable multicast address in RAR
2065  *  @hw: pointer to hardware structure
2066  *
2067  *  Enables multicast address in RAR and the use of the multicast hash table.
2068  **/
2069 int32_t ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2070 {
2071 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2072 
2073 	DEBUGFUNC("ixgbe_enable_mc_generic");
2074 
2075 	if (a->mta_in_use > 0)
2076 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2077 				hw->mac.mc_filter_type);
2078 
2079 	return IXGBE_SUCCESS;
2080 }
2081 
2082 /**
2083  *  ixgbe_disable_mc_generic - Disable multicast address in RAR
2084  *  @hw: pointer to hardware structure
2085  *
2086  *  Disables multicast address in RAR and the use of the multicast hash table.
2087  **/
2088 int32_t ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2089 {
2090 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2091 
2092 	DEBUGFUNC("ixgbe_disable_mc_generic");
2093 
2094 	if (a->mta_in_use > 0)
2095 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2096 
2097 	return IXGBE_SUCCESS;
2098 }
2099 
2100 /**
2101  *  ixgbe_fc_enable_generic - Enable flow control
2102  *  @hw: pointer to hardware structure
2103  *
2104  *  Enable flow control according to the current settings.
2105  **/
2106 int32_t ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2107 {
2108 	int32_t ret_val = IXGBE_SUCCESS;
2109 	uint32_t mflcn_reg, fccfg_reg;
2110 	uint32_t reg;
2111 	uint32_t fcrtl, fcrth;
2112 	int i;
2113 
2114 	DEBUGFUNC("ixgbe_fc_enable_generic");
2115 
2116 	/* Validate the water mark configuration */
2117 	if (!hw->fc.pause_time) {
2118 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2119 		goto out;
2120 	}
2121 
2122 	/* Low water mark of zero causes XOFF floods */
2123 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2124 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2125 		    hw->fc.high_water[i]) {
2126 			if (!hw->fc.low_water[i] ||
2127 			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2128 				DEBUGOUT("Invalid water mark configuration\n");
2129 				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2130 				goto out;
2131 			}
2132 		}
2133 	}
2134 
2135 	/* Negotiate the fc mode to use */
2136 	ixgbe_fc_autoneg(hw);
2137 
2138 	/* Disable any previous flow control settings */
2139 	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2140 	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
2141 
2142 	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2143 	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2144 
2145 	/*
2146 	 * The possible values of fc.current_mode are:
2147 	 * 0: Flow control is completely disabled
2148 	 * 1: Rx flow control is enabled (we can receive pause frames,
2149 	 *    but not send pause frames).
2150 	 * 2: Tx flow control is enabled (we can send pause frames but
2151 	 *    we do not support receiving pause frames).
2152 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2153 	 * other: Invalid.
2154 	 */
2155 	switch (hw->fc.current_mode) {
2156 	case ixgbe_fc_none:
2157 		/*
2158 		 * Flow control is disabled by software override or autoneg.
2159 		 * The code below will actually disable it in the HW.
2160 		 */
2161 		break;
2162 	case ixgbe_fc_rx_pause:
2163 		/*
2164 		 * Rx Flow control is enabled and Tx Flow control is
2165 		 * disabled by software override. Since there really
2166 		 * isn't a way to advertise that we are capable of RX
2167 		 * Pause ONLY, we will advertise that we support both
2168 		 * symmetric and asymmetric Rx PAUSE.  Later, we will
2169 		 * disable the adapter's ability to send PAUSE frames.
2170 		 */
2171 		mflcn_reg |= IXGBE_MFLCN_RFCE;
2172 		break;
2173 	case ixgbe_fc_tx_pause:
2174 		/*
2175 		 * Tx Flow control is enabled, and Rx Flow control is
2176 		 * disabled by software override.
2177 		 */
2178 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2179 		break;
2180 	case ixgbe_fc_full:
2181 		/* Flow control (both Rx and Tx) is enabled by SW override. */
2182 		mflcn_reg |= IXGBE_MFLCN_RFCE;
2183 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2184 		break;
2185 	default:
2186 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
2187 			     "Flow control param set incorrectly\n");
2188 		ret_val = IXGBE_ERR_CONFIG;
2189 		goto out;
2190 		break;
2191 	}
2192 
2193 	/* Set 802.3x based flow control settings. */
2194 	mflcn_reg |= IXGBE_MFLCN_DPF;
2195 	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2196 	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2197 
2198 
2199 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
2200 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2201 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2202 		    hw->fc.high_water[i]) {
2203 			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2204 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2205 			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2206 		} else {
2207 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2208 			/*
2209 			 * In order to prevent Tx hangs when the internal Tx
2210 			 * switch is enabled we must set the high water mark
2211 			 * to the maximum FCRTH value.  This allows the Tx
2212 			 * switch to function even under heavy Rx workloads.
2213 			 */
2214 			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
2215 		}
2216 
2217 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2218 	}
2219 
2220 	/* Configure pause time (2 TCs per register) */
2221 	reg = hw->fc.pause_time * 0x00010001;
2222 	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2223 		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2224 
2225 	/* Configure flow control refresh threshold value */
2226 	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2227 
2228 out:
2229 	return ret_val;
2230 }
2231 
2232 /**
2233  *  ixgbe_negotiate_fc - Negotiate flow control
2234  *  @hw: pointer to hardware structure
2235  *  @adv_reg: flow control advertised settings
2236  *  @lp_reg: link partner's flow control settings
2237  *  @adv_sym: symmetric pause bit in advertisement
2238  *  @adv_asm: asymmetric pause bit in advertisement
2239  *  @lp_sym: symmetric pause bit in link partner advertisement
2240  *  @lp_asm: asymmetric pause bit in link partner advertisement
2241  *
2242  *  Find the intersection between advertised settings and link partner's
2243  *  advertised settings
2244  **/
2245 int32_t ixgbe_negotiate_fc(struct ixgbe_hw *hw, uint32_t adv_reg,
2246 			   uint32_t lp_reg, uint32_t adv_sym,
2247 			   uint32_t adv_asm, uint32_t lp_sym,
2248 			   uint32_t lp_asm)
2249 {
2250 	if ((!(adv_reg)) ||  (!(lp_reg))) {
2251 		ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2252 			     "Local or link partner's advertised flow control "
2253 			     "settings are NULL. Local: %x, link partner: %x\n",
2254 			     adv_reg, lp_reg);
2255 		return IXGBE_ERR_FC_NOT_NEGOTIATED;
2256 	}
2257 
2258 	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2259 		/*
2260 		 * Now we need to check if the user selected Rx ONLY
2261 		 * of pause frames.  In this case, we had to advertise
2262 		 * FULL flow control because we could not advertise RX
2263 		 * ONLY. Hence, we must now check to see if we need to
2264 		 * turn OFF the TRANSMISSION of PAUSE frames.
2265 		 */
2266 		if (hw->fc.requested_mode == ixgbe_fc_full) {
2267 			hw->fc.current_mode = ixgbe_fc_full;
2268 			DEBUGOUT("Flow Control = FULL.\n");
2269 		} else {
2270 			hw->fc.current_mode = ixgbe_fc_rx_pause;
2271 			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2272 		}
2273 	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2274 		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2275 		hw->fc.current_mode = ixgbe_fc_tx_pause;
2276 		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2277 	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2278 		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2279 		hw->fc.current_mode = ixgbe_fc_rx_pause;
2280 		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2281 	} else {
2282 		hw->fc.current_mode = ixgbe_fc_none;
2283 		DEBUGOUT("Flow Control = NONE.\n");
2284 	}
2285 	return IXGBE_SUCCESS;
2286 }
2287 
2288 /**
2289  *  ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2290  *  @hw: pointer to hardware structure
2291  *
2292  *  Enable flow control according on 1 gig fiber.
2293  **/
2294 int32_t ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2295 {
2296 	uint32_t pcs_anadv_reg, pcs_lpab_reg, linkstat;
2297 	int32_t ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2298 
2299 	/*
2300 	 * On multispeed fiber at 1g, bail out if
2301 	 * - link is up but AN did not complete, or if
2302 	 * - link is up and AN completed but timed out
2303 	 */
2304 
2305 	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2306 	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2307 	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
2308 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
2309 			     "Auto-Negotiation did not complete or timed out");
2310 		goto out;
2311 	}
2312 
2313 	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2314 	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2315 
2316 	ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2317 				      pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2318 				      IXGBE_PCS1GANA_ASM_PAUSE,
2319 				      IXGBE_PCS1GANA_SYM_PAUSE,
2320 				      IXGBE_PCS1GANA_ASM_PAUSE);
2321 
2322 out:
2323 	return ret_val;
2324 }
2325 
2326 /**
2327  *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2328  *  @hw: pointer to hardware structure
2329  *
2330  *  Enable flow control according to IEEE clause 37.
2331  **/
2332 int32_t ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2333 {
2334 	uint32_t links2, anlp1_reg, autoc_reg, links;
2335 	int32_t ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2336 
2337 	/*
2338 	 * On backplane, bail out if
2339 	 * - backplane autoneg was not completed, or if
2340 	 * - we are 82599 and link partner is not AN enabled
2341 	 */
2342 	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2343 	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
2344 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
2345 			     "Auto-Negotiation did not complete");
2346 		goto out;
2347 	}
2348 
2349 	if (hw->mac.type == ixgbe_mac_82599EB) {
2350 		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2351 		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
2352 			ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
2353 				     "Link partner is not AN enabled");
2354 			goto out;
2355 		}
2356 	}
2357 	/*
2358 	 * Read the 10g AN autoc and LP ability registers and resolve
2359 	 * local flow control settings accordingly
2360 	 */
2361 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2362 	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2363 
2364 	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2365 		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2366 		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2367 
2368 out:
2369 	return ret_val;
2370 }
2371 
2372 /**
2373  *  ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2374  *  @hw: pointer to hardware structure
2375  *
2376  *  Enable flow control according to IEEE clause 37.
2377  **/
2378 int32_t ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2379 {
2380 	uint16_t technology_ability_reg = 0;
2381 	uint16_t lp_technology_ability_reg = 0;
2382 
2383 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2384 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2385 			     &technology_ability_reg);
2386 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
2387 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2388 			     &lp_technology_ability_reg);
2389 
2390 	return ixgbe_negotiate_fc(hw, (uint32_t)technology_ability_reg,
2391 				  (uint32_t)lp_technology_ability_reg,
2392 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2393 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2394 }
2395 
2396 /**
2397  *  ixgbe_fc_autoneg - Configure flow control
2398  *  @hw: pointer to hardware structure
2399  *
2400  *  Compares our advertised flow control capabilities to those advertised by
2401  *  our link partner, and determines the proper flow control mode to use.
2402  **/
2403 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2404 {
2405 	int32_t ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2406 	ixgbe_link_speed speed;
2407 	bool link_up;
2408 
2409 	DEBUGFUNC("ixgbe_fc_autoneg");
2410 
2411 	/*
2412 	 * AN should have completed when the cable was plugged in.
2413 	 * Look for reasons to bail out.  Bail out if:
2414 	 * - FC autoneg is disabled, or if
2415 	 * - link is not up.
2416 	 */
2417 	if (hw->fc.disable_fc_autoneg) {
2418 		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
2419 			     "Flow control autoneg is disabled");
2420 		goto out;
2421 	}
2422 
2423 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
2424 	if (!link_up) {
2425 		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
2426 		goto out;
2427 	}
2428 
2429 	switch (hw->phy.media_type) {
2430 	/* Autoneg flow control on fiber adapters */
2431 	case ixgbe_media_type_fiber_fixed:
2432 	case ixgbe_media_type_fiber:
2433 		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2434 			ret_val = ixgbe_fc_autoneg_fiber(hw);
2435 		break;
2436 
2437 	/* Autoneg flow control on backplane adapters */
2438 	case ixgbe_media_type_backplane:
2439 		ret_val = ixgbe_fc_autoneg_backplane(hw);
2440 		break;
2441 
2442 	/* Autoneg flow control on copper adapters */
2443 	case ixgbe_media_type_copper:
2444 		if (ixgbe_device_supports_autoneg_fc(hw))
2445 			ret_val = ixgbe_fc_autoneg_copper(hw);
2446 		break;
2447 
2448 	default:
2449 		break;
2450 	}
2451 
2452 out:
2453 	if (ret_val == IXGBE_SUCCESS) {
2454 		hw->fc.fc_was_autonegged = TRUE;
2455 		hw->fc.current_mode = hw->fc.requested_mode;
2456 	} else {
2457 		hw->fc.fc_was_autonegged = FALSE;
2458 	}
2459 }
2460 
2461 /*
2462  * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
2463  * @hw: pointer to hardware structure
2464  *
2465  * System-wide timeout range is encoded in PCIe Device Control2 register.
2466  *
2467  * Add 10% to specified maximum and return the number of times to poll for
2468  * completion timeout, in units of 100 microsec.  Never return less than
2469  * 800 = 80 millisec.
2470  */
2471 static uint32_t ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
2472 {
2473 	int16_t devctl2;
2474 	uint32_t pollcnt;
2475 
2476 	devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
2477 	devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
2478 
2479 	switch (devctl2) {
2480 	case IXGBE_PCIDEVCTRL2_65_130ms:
2481 		pollcnt = 1300;		/* 130 millisec */
2482 		break;
2483 	case IXGBE_PCIDEVCTRL2_260_520ms:
2484 		pollcnt = 5200;		/* 520 millisec */
2485 		break;
2486 	case IXGBE_PCIDEVCTRL2_1_2s:
2487 		pollcnt = 20000;	/* 2 sec */
2488 		break;
2489 	case IXGBE_PCIDEVCTRL2_4_8s:
2490 		pollcnt = 80000;	/* 8 sec */
2491 		break;
2492 	case IXGBE_PCIDEVCTRL2_17_34s:
2493 		pollcnt = 34000;	/* 34 sec */
2494 		break;
2495 	case IXGBE_PCIDEVCTRL2_50_100us:	/* 100 microsecs */
2496 	case IXGBE_PCIDEVCTRL2_1_2ms:		/* 2 millisecs */
2497 	case IXGBE_PCIDEVCTRL2_16_32ms:		/* 32 millisec */
2498 	case IXGBE_PCIDEVCTRL2_16_32ms_def:	/* 32 millisec default */
2499 	default:
2500 		pollcnt = 800;		/* 80 millisec minimum */
2501 		break;
2502 	}
2503 
2504 	/* add 10% to spec maximum */
2505 	return (pollcnt * 11) / 10;
2506 }
2507 
2508 /**
2509  *  ixgbe_disable_pcie_master - Disable PCI-express master access
2510  *  @hw: pointer to hardware structure
2511  *
2512  *  Disables PCI-Express master access and verifies there are no pending
2513  *  requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
2514  *  bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
2515  *  is returned signifying master requests disabled.
2516  **/
2517 int32_t ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2518 {
2519 	int32_t status = IXGBE_SUCCESS;
2520 	uint32_t i, poll;
2521 
2522 	DEBUGFUNC("ixgbe_disable_pcie_master");
2523 
2524 	/* Always set this bit to ensure any future transactions are blocked */
2525 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
2526 
2527 	/* Exit if master requests are blocked */
2528 	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2529 		goto out;
2530 
2531 	/* Poll for master request bit to clear */
2532 	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2533 		usec_delay(100);
2534 		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2535 			goto out;
2536 	}
2537 
2538 	/*
2539 	 * Two consecutive resets are required via CTRL.RST per datasheet
2540 	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
2541 	 * of this need.  The first reset prevents new master requests from
2542 	 * being issued by our device.  We then must wait 1usec or more for any
2543 	 * remaining completions from the PCIe bus to trickle in, and then reset
2544 	 * again to clear out any effects they may have had on our device.
2545 	 */
2546 	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
2547 	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2548 
2549 	/*
2550 	 * Before proceeding, make sure that the PCIe block does not have
2551 	 * transactions pending.
2552 	 */
2553 	poll = ixgbe_pcie_timeout_poll(hw);
2554 	for (i = 0; i < poll; i++) {
2555 		usec_delay(100);
2556 		if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
2557 		    IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
2558 			goto out;
2559 	}
2560 
2561 	ERROR_REPORT1(IXGBE_ERROR_POLLING,
2562 		     "PCIe transaction pending bit also did not clear.\n");
2563 	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
2564 
2565 out:
2566 	return status;
2567 }
2568 
2569 /**
2570  *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
2571  *  @hw: pointer to hardware structure
2572  *  @mask: Mask to specify which semaphore to acquire
2573  *
2574  *  Acquires the SWFW semaphore through the GSSR register for the specified
2575  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
2576  **/
2577 int32_t ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, uint16_t mask)
2578 {
2579 	uint32_t gssr = 0;
2580 	uint32_t swmask = mask;
2581 	uint32_t fwmask = mask << 5;
2582 	uint32_t timeout = 200;
2583 	uint32_t i;
2584 
2585 	DEBUGFUNC("ixgbe_acquire_swfw_sync");
2586 
2587 	for (i = 0; i < timeout; i++) {
2588 		/*
2589 		 * SW NVM semaphore bit is used for access to all
2590 		 * SW_FW_SYNC bits (not just NVM)
2591 		 */
2592 		if (ixgbe_get_eeprom_semaphore(hw))
2593 			return IXGBE_ERR_SWFW_SYNC;
2594 
2595 		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2596 		if (!(gssr & (fwmask | swmask))) {
2597 			gssr |= swmask;
2598 			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2599 			ixgbe_release_eeprom_semaphore(hw);
2600 			return IXGBE_SUCCESS;
2601 		} else {
2602 			/* Resource is currently in use by FW or SW */
2603 			ixgbe_release_eeprom_semaphore(hw);
2604 			msec_delay(5);
2605 		}
2606 	}
2607 
2608 	/* If time expired clear the bits holding the lock and retry */
2609 	if (gssr & (fwmask | swmask))
2610 		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
2611 
2612 	msec_delay(5);
2613 	return IXGBE_ERR_SWFW_SYNC;
2614 }
2615 
2616 /**
2617  *  ixgbe_release_swfw_sync - Release SWFW semaphore
2618  *  @hw: pointer to hardware structure
2619  *  @mask: Mask to specify which semaphore to release
2620  *
2621  *  Releases the SWFW semaphore through the GSSR register for the specified
2622  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
2623  **/
2624 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, uint16_t mask)
2625 {
2626 	uint32_t gssr;
2627 	uint32_t swmask = mask;
2628 
2629 	DEBUGFUNC("ixgbe_release_swfw_sync");
2630 
2631 	ixgbe_get_eeprom_semaphore(hw);
2632 
2633 	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2634 	gssr &= ~swmask;
2635 	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2636 
2637 	ixgbe_release_eeprom_semaphore(hw);
2638 }
2639 
2640 /**
2641  *  ixgbe_disable_sec_rx_path_generic - Stops the receive data path
2642  *  @hw: pointer to hardware structure
2643  *
2644  *  Stops the receive data path and waits for the HW to internally empty
2645  *  the Rx security block
2646  **/
2647 int32_t ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
2648 {
2649 #define IXGBE_MAX_SECRX_POLL 40
2650 
2651 	int i;
2652 	int secrxreg;
2653 
2654 	DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
2655 
2656 
2657 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2658 	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
2659 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2660 	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
2661 		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
2662 		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2663 			break;
2664 		else
2665 			/* Use interrupt-safe sleep just in case */
2666 			usec_delay(1000);
2667 	}
2668 
2669 	/* For informational purposes only */
2670 	if (i >= IXGBE_MAX_SECRX_POLL)
2671 		DEBUGOUT("Rx unit being enabled before security "
2672 			 "path fully disabled.  Continuing with init.\n");
2673 
2674 	return IXGBE_SUCCESS;
2675 }
2676 
2677 /**
2678  *  ixgbe_enable_sec_rx_path_generic - Enables the receive data path
2679  *  @hw: pointer to hardware structure
2680  *
2681  *  Enables the receive data path.
2682  **/
2683 int32_t ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
2684 {
2685 	int secrxreg;
2686 
2687 	DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
2688 
2689 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2690 	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
2691 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2692 	IXGBE_WRITE_FLUSH(hw);
2693 
2694 	return IXGBE_SUCCESS;
2695 }
2696 
2697 /**
2698  *  ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
2699  *  @hw: pointer to hardware structure
2700  *  @regval: register value to write to RXCTRL
2701  *
2702  *  Enables the Rx DMA unit
2703  **/
2704 int32_t ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, uint32_t regval)
2705 {
2706 	DEBUGFUNC("ixgbe_enable_rx_dma_generic");
2707 
2708 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2709 
2710 	return IXGBE_SUCCESS;
2711 }
2712 
2713 /**
2714  *  ixgbe_blink_led_start_generic - Blink LED based on index.
2715  *  @hw: pointer to hardware structure
2716  *  @index: led number to blink
2717  **/
2718 int32_t ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, uint32_t index)
2719 {
2720 	ixgbe_link_speed speed = 0;
2721 	bool link_up = 0;
2722 	uint32_t autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2723 	uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2724 	int32_t ret_val = IXGBE_SUCCESS;
2725 
2726 	DEBUGFUNC("ixgbe_blink_led_start_generic");
2727 
2728 	/*
2729 	 * Link must be up to auto-blink the LEDs;
2730 	 * Force it if link is down.
2731 	 */
2732 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
2733 
2734 	if (!link_up) {
2735 		/* Need the SW/FW semaphore around AUTOC writes if 82599 and
2736 		 * LESM is on.
2737 		 */
2738 		bool got_lock = FALSE;
2739 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
2740 		    ixgbe_verify_lesm_fw_enabled(hw)) {
2741 			ret_val = hw->mac.ops.acquire_swfw_sync(hw,
2742 							IXGBE_GSSR_MAC_CSR_SM);
2743 			if (ret_val != IXGBE_SUCCESS) {
2744 				ret_val = IXGBE_ERR_SWFW_SYNC;
2745 				goto out;
2746 			}
2747 			got_lock = TRUE;
2748 		}
2749 
2750 		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2751 		autoc_reg |= IXGBE_AUTOC_FLU;
2752 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2753 		IXGBE_WRITE_FLUSH(hw);
2754 
2755 		if (got_lock)
2756 			hw->mac.ops.release_swfw_sync(hw,
2757 						      IXGBE_GSSR_MAC_CSR_SM);
2758 		msec_delay(10);
2759 	}
2760 
2761 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
2762 	led_reg |= IXGBE_LED_BLINK(index);
2763 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2764 	IXGBE_WRITE_FLUSH(hw);
2765 
2766 out:
2767 	return ret_val;
2768 }
2769 
2770 /**
2771  *  ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
2772  *  @hw: pointer to hardware structure
2773  *  @index: led number to stop blinking
2774  **/
2775 int32_t ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, uint32_t index)
2776 {
2777 	uint32_t autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2778 	uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2779 	int32_t ret_val = IXGBE_SUCCESS;
2780 	bool got_lock = FALSE;
2781 
2782 	DEBUGFUNC("ixgbe_blink_led_stop_generic");
2783 	/* Need the SW/FW semaphore around AUTOC writes if 82599 and
2784 	 * LESM is on.
2785 	 */
2786 	if ((hw->mac.type == ixgbe_mac_82599EB) &&
2787 	    ixgbe_verify_lesm_fw_enabled(hw)) {
2788 		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
2789 						IXGBE_GSSR_MAC_CSR_SM);
2790 		if (ret_val != IXGBE_SUCCESS) {
2791 			ret_val = IXGBE_ERR_SWFW_SYNC;
2792 			goto out;
2793 		}
2794 		got_lock = TRUE;
2795 	}
2796 
2797 	autoc_reg &= ~IXGBE_AUTOC_FLU;
2798 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2799 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2800 
2801 	if (hw->mac.type == ixgbe_mac_82599EB)
2802 		ixgbe_reset_pipeline(hw);
2803 
2804 	if (got_lock)
2805 		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
2806 
2807 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
2808 	led_reg &= ~IXGBE_LED_BLINK(index);
2809 	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
2810 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2811 	IXGBE_WRITE_FLUSH(hw);
2812 
2813 out:
2814 	return ret_val;
2815 }
2816 
2817 /**
2818  *  ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
2819  *  @hw: pointer to hardware structure
2820  *
2821  *  Read PCIe configuration space, and get the MSI-X vector count from
2822  *  the capabilities table.
2823  **/
2824 uint16_t ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
2825 {
2826 	uint16_t msix_count = 1;
2827 	uint16_t max_msix_count;
2828 	uint16_t pcie_offset;
2829 
2830 	switch (hw->mac.type) {
2831 	case ixgbe_mac_82598EB:
2832 		pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
2833 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
2834 		break;
2835 	case ixgbe_mac_82599EB:
2836 	case ixgbe_mac_X540:
2837 		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
2838 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
2839 		break;
2840 	default:
2841 		return msix_count;
2842 	}
2843 
2844 	DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
2845 	msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
2846 	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
2847 
2848 	/* MSI-X count is zero-based in HW */
2849 	msix_count++;
2850 
2851 	if (msix_count > max_msix_count)
2852 		msix_count = max_msix_count;
2853 
2854 	return msix_count;
2855 }
2856 
2857 /**
2858  *  ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
2859  *  @hw: pointer to hardware structure
2860  *  @addr: Address to put into receive address register
2861  *  @vmdq: VMDq pool to assign
2862  *
2863  *  Puts an ethernet address into a receive address register, or
2864  *  finds the rar that it is aleady in; adds to the pool list
2865  **/
2866 int32_t ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, uint8_t *addr, uint32_t vmdq)
2867 {
2868 	static const uint32_t NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
2869 	uint32_t first_empty_rar = NO_EMPTY_RAR_FOUND;
2870 	uint32_t rar;
2871 	uint32_t rar_low, rar_high;
2872 	uint32_t addr_low, addr_high;
2873 
2874 	DEBUGFUNC("ixgbe_insert_mac_addr_generic");
2875 
2876 	/* swap bytes for HW little endian */
2877 	addr_low  = addr[0] | (addr[1] << 8)
2878 			    | (addr[2] << 16)
2879 			    | (addr[3] << 24);
2880 	addr_high = addr[4] | (addr[5] << 8);
2881 
2882 	/*
2883 	 * Either find the mac_id in rar or find the first empty space.
2884 	 * rar_highwater points to just after the highest currently used
2885 	 * rar in order to shorten the search.  It grows when we add a new
2886 	 * rar to the top.
2887 	 */
2888 	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
2889 		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
2890 
2891 		if (((IXGBE_RAH_AV & rar_high) == 0)
2892 		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
2893 			first_empty_rar = rar;
2894 		} else if ((rar_high & 0xFFFF) == addr_high) {
2895 			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
2896 			if (rar_low == addr_low)
2897 				break;    /* found it already in the rars */
2898 		}
2899 	}
2900 
2901 	if (rar < hw->mac.rar_highwater) {
2902 		/* already there so just add to the pool bits */
2903 		ixgbe_set_vmdq(hw, rar, vmdq);
2904 	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
2905 		/* stick it into first empty RAR slot we found */
2906 		rar = first_empty_rar;
2907 		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2908 	} else if (rar == hw->mac.rar_highwater) {
2909 		/* add it to the top of the list and inc the highwater mark */
2910 		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2911 		hw->mac.rar_highwater++;
2912 	} else if (rar >= hw->mac.num_rar_entries) {
2913 		return IXGBE_ERR_INVALID_MAC_ADDR;
2914 	}
2915 
2916 	/*
2917 	 * If we found rar[0], make sure the default pool bit (we use pool 0)
2918 	 * remains cleared to be sure default pool packets will get delivered
2919 	 */
2920 	if (rar == 0)
2921 		ixgbe_clear_vmdq(hw, rar, 0);
2922 
2923 	return rar;
2924 }
2925 
2926 /**
2927  *  ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
2928  *  @hw: pointer to hardware struct
2929  *  @rar: receive address register index to disassociate
2930  *  @vmdq: VMDq pool index to remove from the rar
2931  **/
int32_t ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
{
	uint32_t mpsar_lo, mpsar_hi;
	uint32_t rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_clear_vmdq_generic");

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
			     "RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* MPSAR_LO tracks pools 0-31, MPSAR_HI tracks pools 32-63 */
	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));

	/* no pool bits set: nothing to disassociate */
	if (!mpsar_lo && !mpsar_hi)
		goto done;

	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
		/* wipe both halves, skipping writes already at zero */
		if (mpsar_lo) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
			mpsar_lo = 0;
		}
		if (mpsar_hi) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
			mpsar_hi = 0;
		}
	} else if (vmdq < 32) {
		mpsar_lo &= ~(1 << vmdq);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
	} else {
		mpsar_hi &= ~(1 << (vmdq - 32));
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
	}

	/* was that the last pool using this rar? */
	/* rar 0 is deliberately never cleared — presumably it holds the
	 * default MAC address (the insert path also special-cases rar 0) */
	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
		hw->mac.ops.clear_rar(hw, rar);
done:
	return IXGBE_SUCCESS;
}
2975 
2976 /**
2977  *  ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
2978  *  @hw: pointer to hardware struct
2979  *  @rar: receive address register index to associate with a VMDq index
2980  *  @vmdq: VMDq pool index
2981  **/
2982 int32_t ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
2983 {
2984 	uint32_t mpsar;
2985 	uint32_t rar_entries = hw->mac.num_rar_entries;
2986 
2987 	DEBUGFUNC("ixgbe_set_vmdq_generic");
2988 
2989 	/* Make sure we are using a valid rar index range */
2990 	if (rar >= rar_entries) {
2991 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2992 			     "RAR index %d is out of range.\n", rar);
2993 		return IXGBE_ERR_INVALID_ARGUMENT;
2994 	}
2995 
2996 	if (vmdq < 32) {
2997 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2998 		mpsar |= 1 << vmdq;
2999 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3000 	} else {
3001 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3002 		mpsar |= 1 << (vmdq - 32);
3003 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3004 	}
3005 	return IXGBE_SUCCESS;
3006 }
3007 
3008 /**
3009  *  ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3010  *  @hw: pointer to hardware structure
3011  **/
3012 int32_t ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3013 {
3014 	int i;
3015 
3016 	DEBUGFUNC("ixgbe_init_uta_tables_generic");
3017 	DEBUGOUT(" Clearing UTA\n");
3018 
3019 	for (i = 0; i < 128; i++)
3020 		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3021 
3022 	return IXGBE_SUCCESS;
3023 }
3024 
3025 /**
3026  *  ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3027  *  @hw: pointer to hardware structure
3028  *  @vlan: VLAN id to write to VLAN filter
3029  *
3030  *  return the VLVF index where this VLAN id should be placed
3031  *
3032  **/
int32_t ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, uint32_t vlan)
{
	uint32_t bits = 0;
	uint32_t first_empty_slot = 0;
	int32_t regindex;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/*
	 * Search for the vlan id in the VLVF entries. Save off the first empty
	 * slot found along the way.  The scan starts at 1 because
	 * first_empty_slot == 0 doubles as the "no empty slot" sentinel.
	 */
	for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
		/* an all-zero entry is unused */
		if (!bits && !(first_empty_slot))
			first_empty_slot = regindex;
		else if ((bits & 0x0FFF) == vlan)   /* low 12 bits = VLAN id */
			break;
	}

	/*
	 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
	 * in the VLVF. Else use the first empty VLVF register for this
	 * vlan id.
	 */
	if (regindex >= IXGBE_VLVF_ENTRIES) {
		if (first_empty_slot)
			regindex = first_empty_slot;
		else {
			ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
				     "No space in VLVF.\n");
			/* negative error code tells callers no slot exists */
			regindex = IXGBE_ERR_NO_SPACE;
		}
	}

	return regindex;
}
3072 
3073 /**
3074  *  ixgbe_set_vfta_generic - Set VLAN filter table
3075  *  @hw: pointer to hardware structure
3076  *  @vlan: VLAN id to write to VLAN filter
3077  *  @vind: VMDq output index that maps queue to VLAN id in VFVFB
3078  *  @vlan_on: boolean flag to turn on/off VLAN in VFVF
3079  *
3080  *  Turn on/off specified VLAN in the VLAN filter table.
3081  **/
int32_t ixgbe_set_vfta_generic(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind,
			       bool vlan_on)
{
	int32_t regindex;
	uint32_t bitindex;
	uint32_t vfta;
	uint32_t targetbit;
	int32_t ret_val = IXGBE_SUCCESS;
	bool vfta_changed = FALSE;

	DEBUGFUNC("ixgbe_set_vfta_generic");

	/* VLAN ids are 12 bits wide */
	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/*
	 * this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if VT Mode is set
	 * We don't write the VFTA until we know the VLVF part succeeded.
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit in the register
	 */
	regindex = (vlan >> 5) & 0x7F;
	bitindex = vlan & 0x1F;
	targetbit = (1 << bitindex);
	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));

	/* compute the new VFTA word locally; the register write is deferred */
	if (vlan_on) {
		if (!(vfta & targetbit)) {
			vfta |= targetbit;
			vfta_changed = TRUE;
		}
	} else {
		if ((vfta & targetbit)) {
			vfta &= ~targetbit;
			vfta_changed = TRUE;
		}
	}

	/* Part 2
	 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
	 */
	ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
					 &vfta_changed);
	if (ret_val != IXGBE_SUCCESS)
		return ret_val;

	/* set_vlvf may veto the change (other pools still use this VLAN) */
	if (vfta_changed)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);

	return IXGBE_SUCCESS;
}
3139 
3140 /**
3141  *  ixgbe_set_vlvf_generic - Set VLAN Pool Filter
3142  *  @hw: pointer to hardware structure
3143  *  @vlan: VLAN id to write to VLAN filter
3144  *  @vind: VMDq output index that maps queue to VLAN id in VFVFB
3145  *  @vlan_on: boolean flag to turn on/off VLAN in VFVF
3146  *  @vfta_changed: pointer to boolean flag which indicates whether VFTA
3147  *                 should be changed
3148  *
3149  *  Turn on/off specified bit in VLVF table.
3150  **/
int32_t ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind,
			       bool vlan_on, bool *vfta_changed)
{
	uint32_t vt;

	DEBUGFUNC("ixgbe_set_vlvf_generic");

	/* VLAN ids are 12 bits wide */
	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/* If VT Mode is set
	 *   Either vlan_on
	 *     make sure the vlan is in VLVF
	 *     set the vind bit in the matching VLVFB
	 *   Or !vlan_on
	 *     clear the pool bit and possibly the vind
	 */
	vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	if (vt & IXGBE_VT_CTL_VT_ENABLE) {
		int32_t vlvf_index;
		uint32_t bits;

		/* negative result propagates e.g. IXGBE_ERR_NO_SPACE */
		vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
		if (vlvf_index < 0)
			return vlvf_index;

		/* each VLVF entry owns two VLVFB dwords: [2i] for pools
		 * 0-31 and [2i+1] for pools 32-63 */
		if (vlan_on) {
			/* set the pool bit */
			if (vind < 32) {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2));
				bits |= (1 << vind);
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2),
						bits);
			} else {
				bits = IXGBE_READ_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1));
				bits |= (1 << (vind - 32));
				IXGBE_WRITE_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1),
					bits);
			}
		} else {
			/* clear the pool bit */
			if (vind < 32) {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2));
				bits &= ~(1 << vind);
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2),
						bits);
				/* OR in the other dword so "bits" reflects
				 * all 64 pools for the check below */
				bits |= IXGBE_READ_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1));
			} else {
				bits = IXGBE_READ_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1));
				bits &= ~(1 << (vind - 32));
				IXGBE_WRITE_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1),
					bits);
				bits |= IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2));
			}
		}

		/*
		 * If there are still bits set in the VLVFB registers
		 * for the VLAN ID indicated we need to see if the
		 * caller is requesting that we clear the VFTA entry bit.
		 * If the caller has requested that we clear the VFTA
		 * entry bit but there are still pools/VFs using this VLAN
		 * ID entry then ignore the request.  We're not worried
		 * about the case where we're turning the VFTA VLAN ID
		 * entry bit on, only when requested to turn it off as
		 * there may be multiple pools and/or VFs using the
		 * VLAN ID entry.  In that case we cannot clear the
		 * VFTA bit until all pools/VFs using that VLAN ID have also
		 * been cleared.  This will be indicated by "bits" being
		 * zero.
		 */
		if (bits) {
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
					(IXGBE_VLVF_VIEN | vlan));
			if ((!vlan_on) && (vfta_changed != NULL)) {
				/* someone wants to clear the vfta entry
				 * but some pools/VFs are still using it.
				 * Ignore it. */
				*vfta_changed = FALSE;
			}
		} else
			/* last user gone: release the VLVF entry entirely */
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
	}

	return IXGBE_SUCCESS;
}
3247 
3248 /**
3249  *  ixgbe_clear_vfta_generic - Clear VLAN filter table
3250  *  @hw: pointer to hardware structure
3251  *
 *  Clears the VLAN filter table, and the VMDq index associated with the filter
3253  **/
3254 int32_t ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3255 {
3256 	uint32_t offset;
3257 
3258 	DEBUGFUNC("ixgbe_clear_vfta_generic");
3259 
3260 	for (offset = 0; offset < hw->mac.vft_size; offset++)
3261 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3262 
3263 	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3264 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3265 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
3266 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
3267 	}
3268 
3269 	return IXGBE_SUCCESS;
3270 }
3271 
3272 /**
3273  *  ixgbe_check_mac_link_generic - Determine link and speed status
3274  *  @hw: pointer to hardware structure
3275  *  @speed: pointer to link speed
3276  *  @link_up: TRUE when link is up
3277  *  @link_up_wait_to_complete: bool used to wait for link up or not
3278  *
3279  *  Reads the links register to determine if link is up and the current speed
3280  **/
int32_t ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
				     bool *link_up, bool link_up_wait_to_complete)
{
	uint32_t links_reg, links_orig;
	uint32_t i;

	DEBUGFUNC("ixgbe_check_mac_link_generic");

	/* clear the old state */
	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);

	/* second read gives the current state */
	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);

	if (links_orig != links_reg) {
		DEBUGOUT2("LINKS changed from %08X to %08X\n",
			  links_orig, links_reg);
	}

	if (link_up_wait_to_complete) {
		/* poll up to IXGBE_LINK_UP_TIME times, 100 ms apart */
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}

	/* decode the speed field from the last LINKS value read */
	if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
	    IXGBE_LINKS_SPEED_10G_82599)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
		 IXGBE_LINKS_SPEED_1G_82599)
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
	else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
		 IXGBE_LINKS_SPEED_100_82599)
		*speed = IXGBE_LINK_SPEED_100_FULL;
	else
		*speed = IXGBE_LINK_SPEED_UNKNOWN;

	return IXGBE_SUCCESS;
}
3331 
3332 /**
3333  *  ixgbe_get_device_caps_generic - Get additional device capabilities
3334  *  @hw: pointer to hardware structure
3335  *  @device_caps: the EEPROM word with the extra device capabilities
3336  *
3337  *  This function will read the EEPROM location for the device capabilities,
3338  *  and return the word through device_caps.
3339  **/
int32_t ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, uint16_t *device_caps)
{
	DEBUGFUNC("ixgbe_get_device_caps_generic");

	/*
	 * Read the capabilities word from its fixed EEPROM offset.
	 * NOTE(review): the read status is ignored — if the EEPROM read
	 * fails, *device_caps may be left unset while IXGBE_SUCCESS is
	 * still returned; confirm callers tolerate this.
	 */
	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);

	return IXGBE_SUCCESS;
}
3348 
3349 /**
3350  *  ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
3351  *  @hw: pointer to hardware structure
3352  *
3353  **/
3354 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
3355 {
3356 	uint32_t regval;
3357 	uint32_t i;
3358 
3359 	DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
3360 
3361 	/* Enable relaxed ordering */
3362 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
3363 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
3364 		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
3365 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
3366 	}
3367 
3368 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
3369 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
3370 		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
3371 			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
3372 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
3373 	}
3374 
3375 }
3376 
3377 /**
3378  * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
3379  * @hw: pointer to the hardware structure
3380  *
3381  * The 82599 and x540 MACs can experience issues if TX work is still pending
3382  * when a reset occurs.  This function prevents this by flushing the PCIe
3383  * buffers on the system.
3384  **/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
	uint32_t gcr_ext, hlreg0;

	/*
	 * If double reset is not requested then all transactions should
	 * already be clear and as such there is no work to do
	 */
	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
		return;

	/*
	 * Set loopback enable to prevent any transmits from being sent
	 * should the link come up.  This assumes that the RXCTRL.RXEN bit
	 * has already been cleared.
	 */
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);

	/* initiate cleaning flow for buffers in the PCIe transaction layer */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);

	/* Flush all writes and allow 20usec for all transactions to clear */
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(20);

	/* restore previous register values */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
}
3417 
3418 /* MAC Operations */
3419 
3420 /**
3421  *  ixgbe_init_shared_code - Initialize the shared code
3422  *  @hw: pointer to hardware structure
3423  *
3424  *  This will assign function pointers and assign the MAC type and PHY code.
3425  *  Does not touch the hardware. This function must be called prior to any
3426  *  other function in the shared code. The ixgbe_hw structure should be
3427  *  memset to 0 prior to calling this function.  The following fields in
3428  *  hw structure should be filled in prior to calling this function:
3429  *  hw_addr, back, device_id, vendor_id, subsystem_device_id,
3430  *  subsystem_vendor_id, and revision_id
3431  **/
3432 int32_t ixgbe_init_shared_code(struct ixgbe_hw *hw)
3433 {
3434 	int32_t status;
3435 
3436 	DEBUGFUNC("ixgbe_init_shared_code");
3437 
3438 	switch (hw->mac.type) {
3439 	case ixgbe_mac_82598EB:
3440 		status = ixgbe_init_ops_82598(hw);
3441 		break;
3442 	case ixgbe_mac_82599EB:
3443 		status = ixgbe_init_ops_82599(hw);
3444 		break;
3445 	case ixgbe_mac_X540:
3446 		status = ixgbe_init_ops_X540(hw);
3447 		break;
3448 	default:
3449 		status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
3450 		break;
3451 	}
3452 
3453 	return status;
3454 }
3455 
3456 /**
3457  *  ixgbe_init_hw - Initialize the hardware
3458  *  @hw: pointer to hardware structure
3459  *
3460  *  Initialize the hardware by resetting and then starting the hardware
3461  **/
3462 int32_t ixgbe_init_hw(struct ixgbe_hw *hw)
3463 {
3464 	if (hw->mac.ops.init_hw)
3465 		return hw->mac.ops.init_hw(hw);
3466 	else
3467 		return IXGBE_NOT_IMPLEMENTED;
3468 }
3469 
3470 /**
3471  *  ixgbe_get_media_type - Get media type
3472  *  @hw: pointer to hardware structure
3473  *
3474  *  Returns the media type (fiber, copper, backplane)
3475  **/
3476 enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw)
3477 {
3478 	if (hw->mac.ops.get_media_type)
3479 		return hw->mac.ops.get_media_type(hw);
3480 	else
3481 		return ixgbe_media_type_unknown;
3482 }
3483 
3484 /**
3485  *  ixgbe_identify_phy - Get PHY type
3486  *  @hw: pointer to hardware structure
3487  *
3488  *  Determines the physical layer module found on the current adapter.
3489  **/
3490 int32_t ixgbe_identify_phy(struct ixgbe_hw *hw)
3491 {
3492 	int32_t status = IXGBE_SUCCESS;
3493 
3494 	if (hw->phy.type == ixgbe_phy_unknown) {
3495 		if (hw->phy.ops.identify)
3496 			status = hw->phy.ops.identify(hw);
3497 		else
3498 			status = IXGBE_NOT_IMPLEMENTED;
3499 	}
3500 
3501 	return status;
3502 }
3503 
3504 /**
3505  *  ixgbe_check_link - Get link and speed status
3506  *  @hw: pointer to hardware structure
3507  *
3508  *  Reads the links register to determine if link is up and the current speed
3509  **/
3510 int32_t ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3511 			 bool *link_up, bool link_up_wait_to_complete)
3512 {
3513 	if (hw->mac.ops.check_link)
3514 		return hw->mac.ops.check_link(hw, speed, link_up,
3515 					      link_up_wait_to_complete);
3516 	else
3517 		return IXGBE_NOT_IMPLEMENTED;
3518 }
3519 
3520 /**
3521  *  ixgbe_flap_tx_laser - flap Tx laser to start autotry process
3522  *  @hw: pointer to hardware structure
3523  *
3524  *  When the driver changes the link speeds that it can support then
3525  *  flap the tx laser to alert the link partner to start autotry
3526  *  process on its end.
3527  **/
3528 void ixgbe_flap_tx_laser(struct ixgbe_hw *hw)
3529 {
3530 	if (hw->mac.ops.flap_tx_laser)
3531 		hw->mac.ops.flap_tx_laser(hw);
3532 }
3533 
3534 /**
3535  *  ixgbe_set_rar - Set Rx address register
3536  *  @hw: pointer to hardware structure
3537  *  @index: Receive address register to write
3538  *  @addr: Address to put into receive address register
3539  *  @vmdq: VMDq "set"
3540  *  @enable_addr: set flag that address is active
3541  *
3542  *  Puts an ethernet address into a receive address register.
3543  **/
3544 int32_t ixgbe_set_rar(struct ixgbe_hw *hw, uint32_t index, uint8_t *addr,
3545 		      uint32_t vmdq, uint32_t enable_addr)
3546 {
3547 	if (hw->mac.ops.set_rar)
3548 		return hw->mac.ops.set_rar(hw, index, addr, vmdq, enable_addr);
3549 	else
3550 		return IXGBE_NOT_IMPLEMENTED;
3551 }
3552 
3553 /**
3554  *  ixgbe_set_vmdq - Associate a VMDq index with a receive address
3555  *  @hw: pointer to hardware structure
3556  *  @rar: receive address register index to associate with VMDq index
3557  *  @vmdq: VMDq set or pool index
3558  **/
3559 int32_t ixgbe_set_vmdq(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
3560 {
3561 	if (hw->mac.ops.set_vmdq)
3562 		return hw->mac.ops.set_vmdq(hw, rar, vmdq);
3563 	else
3564 		return IXGBE_NOT_IMPLEMENTED;
3565 }
3566 
3567 /**
3568  *  ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address
3569  *  @hw: pointer to hardware structure
3570  *  @rar: receive address register index to disassociate with VMDq index
3571  *  @vmdq: VMDq set or pool index
3572  **/
3573 int32_t ixgbe_clear_vmdq(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
3574 {
3575 	if (hw->mac.ops.clear_vmdq)
3576 		return hw->mac.ops.clear_vmdq(hw, rar, vmdq);
3577 	else
3578 		return IXGBE_NOT_IMPLEMENTED;
3579 }
3580 
3581 /**
3582  *  ixgbe_init_uta_tables - Initializes Unicast Table Arrays.
3583  *  @hw: pointer to hardware structure
3584  *
3585  *  Initializes the Unicast Table Arrays to zero on device load.  This
3586  *  is part of the Rx init addr execution path.
3587  **/
3588 int32_t ixgbe_init_uta_tables(struct ixgbe_hw *hw)
3589 {
3590 	if (hw->mac.ops.init_uta_tables)
3591 		return hw->mac.ops.init_uta_tables(hw);
3592 	else
3593 		return IXGBE_NOT_IMPLEMENTED;
3594 }
3595 
/* Dispatch wrapper for the MAC-specific LESM firmware check.
 * NOTE(review): when the op is not implemented this returns
 * IXGBE_NOT_IMPLEMENTED — a non-zero error code that a bool caller will
 * see as TRUE ("LESM enabled").  Confirm that is the intended default
 * for MACs without this op before changing it. */
bool ixgbe_verify_lesm_fw_enabled(struct ixgbe_hw *hw)
{
	if (hw->mac.ops.verify_lesm_fw_enabled)
		return hw->mac.ops.verify_lesm_fw_enabled(hw);
	else
		return IXGBE_NOT_IMPLEMENTED;
}
3603 
3604 int32_t ixgbe_reset_pipeline(struct ixgbe_hw *hw)
3605 {
3606 	if (hw->mac.ops.reset_pipeline)
3607 		return hw->mac.ops.reset_pipeline(hw);
3608 	else
3609 		return IXGBE_NOT_IMPLEMENTED;
3610 }
3611 
3612 /*
3613  * MBX: Mailbox handling
3614  */
3615 
3616 /**
3617  *  ixgbe_read_mbx - Reads a message from the mailbox
3618  *  @hw: pointer to the HW structure
3619  *  @msg: The message buffer
3620  *  @size: Length of buffer
3621  *  @mbx_id: id of mailbox to read
3622  *
 *  returns SUCCESS if it successfully read message from buffer
3624  **/
3625 int32_t ixgbe_read_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
3626 {
3627 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3628 	int32_t ret_val = IXGBE_ERR_MBX;
3629 
3630 	DEBUGFUNC("ixgbe_read_mbx");
3631 
3632 	/* limit read to size of mailbox */
3633 	if (size > mbx->size)
3634 		size = mbx->size;
3635 
3636 	if (mbx->ops.read)
3637 		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
3638 
3639 	return ret_val;
3640 }
3641 
3642 /**
3643  *  ixgbe_write_mbx - Write a message to the mailbox
3644  *  @hw: pointer to the HW structure
3645  *  @msg: The message buffer
3646  *  @size: Length of buffer
3647  *  @mbx_id: id of mailbox to write
3648  *
3649  *  returns SUCCESS if it successfully copied message into the buffer
3650  **/
3651 int32_t ixgbe_write_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
3652 {
3653 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3654 	int32_t ret_val = IXGBE_SUCCESS;
3655 
3656 	DEBUGFUNC("ixgbe_write_mbx");
3657 
3658 	if (size > mbx->size)
3659 		ret_val = IXGBE_ERR_MBX;
3660 
3661 	else if (mbx->ops.write)
3662 		ret_val = mbx->ops.write(hw, msg, size, mbx_id);
3663 
3664 	return ret_val;
3665 }
3666 
3667 /**
3668  *  ixgbe_check_for_msg - checks to see if someone sent us mail
3669  *  @hw: pointer to the HW structure
3670  *  @mbx_id: id of mailbox to check
3671  *
3672  *  returns SUCCESS if the Status bit was found or else ERR_MBX
3673  **/
3674 int32_t ixgbe_check_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id)
3675 {
3676 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3677 	int32_t ret_val = IXGBE_ERR_MBX;
3678 
3679 	DEBUGFUNC("ixgbe_check_for_msg");
3680 
3681 	if (mbx->ops.check_for_msg)
3682 		ret_val = mbx->ops.check_for_msg(hw, mbx_id);
3683 
3684 	return ret_val;
3685 }
3686 
3687 /**
3688  *  ixgbe_check_for_ack - checks to see if someone sent us ACK
3689  *  @hw: pointer to the HW structure
3690  *  @mbx_id: id of mailbox to check
3691  *
3692  *  returns SUCCESS if the Status bit was found or else ERR_MBX
3693  **/
3694 int32_t ixgbe_check_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id)
3695 {
3696 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3697 	int32_t ret_val = IXGBE_ERR_MBX;
3698 
3699 	DEBUGFUNC("ixgbe_check_for_ack");
3700 
3701 	if (mbx->ops.check_for_ack)
3702 		ret_val = mbx->ops.check_for_ack(hw, mbx_id);
3703 
3704 	return ret_val;
3705 }
3706 
3707 /**
3708  *  ixgbe_check_for_rst - checks to see if other side has reset
3709  *  @hw: pointer to the HW structure
3710  *  @mbx_id: id of mailbox to check
3711  *
3712  *  returns SUCCESS if the Status bit was found or else ERR_MBX
3713  **/
3714 int32_t ixgbe_check_for_rst(struct ixgbe_hw *hw, uint16_t mbx_id)
3715 {
3716 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3717 	int32_t ret_val = IXGBE_ERR_MBX;
3718 
3719 	DEBUGFUNC("ixgbe_check_for_rst");
3720 
3721 	if (mbx->ops.check_for_rst)
3722 		ret_val = mbx->ops.check_for_rst(hw, mbx_id);
3723 
3724 	return ret_val;
3725 }
3726 
3727 /**
3728  *  ixgbe_poll_for_msg - Wait for message notification
3729  *  @hw: pointer to the HW structure
3730  *  @mbx_id: id of mailbox to write
3731  *
3732  *  returns SUCCESS if it successfully received a message notification
3733  **/
3734 int32_t ixgbe_poll_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id)
3735 {
3736 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3737 	int countdown = mbx->timeout;
3738 
3739 	DEBUGFUNC("ixgbe_poll_for_msg");
3740 
3741 	if (!countdown || !mbx->ops.check_for_msg)
3742 		goto out;
3743 
3744 	while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
3745 		countdown--;
3746 		if (!countdown)
3747 			break;
3748 		usec_delay(mbx->usec_delay);
3749 	}
3750 
3751 out:
3752 	return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
3753 }
3754 
3755 /**
3756  *  ixgbe_poll_for_ack - Wait for message acknowledgement
3757  *  @hw: pointer to the HW structure
3758  *  @mbx_id: id of mailbox to write
3759  *
3760  *  returns SUCCESS if it successfully received a message acknowledgement
3761  **/
3762 int32_t ixgbe_poll_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id)
3763 {
3764 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3765 	int countdown = mbx->timeout;
3766 
3767 	DEBUGFUNC("ixgbe_poll_for_ack");
3768 
3769 	if (!countdown || !mbx->ops.check_for_ack)
3770 		goto out;
3771 
3772 	while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
3773 		countdown--;
3774 		if (!countdown)
3775 			break;
3776 		usec_delay(mbx->usec_delay);
3777 	}
3778 
3779 out:
3780 	return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
3781 }
3782 
3783 /**
3784  *  ixgbe_read_posted_mbx - Wait for message notification and receive message
3785  *  @hw: pointer to the HW structure
3786  *  @msg: The message buffer
3787  *  @size: Length of buffer
3788  *  @mbx_id: id of mailbox to write
3789  *
3790  *  returns SUCCESS if it successfully received a message notification and
3791  *  copied it into the receive buffer.
3792  **/
3793 int32_t ixgbe_read_posted_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
3794 {
3795 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3796 	int32_t ret_val = IXGBE_ERR_MBX;
3797 
3798 	DEBUGFUNC("ixgbe_read_posted_mbx");
3799 
3800 	if (!mbx->ops.read)
3801 		goto out;
3802 
3803 	ret_val = ixgbe_poll_for_msg(hw, mbx_id);
3804 
3805 	/* if ack received read message, otherwise we timed out */
3806 	if (!ret_val)
3807 		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
3808 out:
3809 	return ret_val;
3810 }
3811 
3812 /**
3813  *  ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
3814  *  @hw: pointer to the HW structure
3815  *  @msg: The message buffer
3816  *  @size: Length of buffer
3817  *  @mbx_id: id of mailbox to write
3818  *
3819  *  returns SUCCESS if it successfully copied message into the buffer and
3820  *  received an ack to that message within delay * timeout period
3821  **/
3822 int32_t ixgbe_write_posted_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
3823 			   uint16_t mbx_id)
3824 {
3825 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3826 	int32_t ret_val = IXGBE_ERR_MBX;
3827 
3828 	DEBUGFUNC("ixgbe_write_posted_mbx");
3829 
3830 	/* exit if either we can't write or there isn't a defined timeout */
3831 	if (!mbx->ops.write || !mbx->timeout)
3832 		goto out;
3833 
3834 	/* send msg */
3835 	ret_val = mbx->ops.write(hw, msg, size, mbx_id);
3836 
3837 	/* if msg sent wait until we receive an ack */
3838 	if (!ret_val)
3839 		ret_val = ixgbe_poll_for_ack(hw, mbx_id);
3840 out:
3841 	return ret_val;
3842 }
3843 
3844 /**
3845  *  ixgbe_init_mbx_ops_generic - Initialize MB function pointers
3846  *  @hw: pointer to the HW structure
3847  *
 *  Sets up the mailbox read and write message function pointers
3849  **/
3850 void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
3851 {
3852 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3853 
3854 	mbx->ops.read_posted = ixgbe_read_posted_mbx;
3855 	mbx->ops.write_posted = ixgbe_write_posted_mbx;
3856 }
3857 
3858 /**
3859  *  ixgbe_read_v2p_mailbox - read v2p mailbox
3860  *  @hw: pointer to the HW structure
3861  *
3862  *  This function is used to read the v2p mailbox without losing the read to
3863  *  clear status bits.
3864  **/
uint32_t ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw)
{
	/* the VFMAILBOX status bits are read-to-clear in hardware */
	uint32_t v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);

	/* merge in R2C bits remembered from earlier reads... */
	v2p_mailbox |= hw->mbx.v2p_mailbox;
	/* ...and cache the ones this read just cleared so they survive */
	hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;

	return v2p_mailbox;
}
3874 
3875 int32_t ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, uint32_t mask, int32_t index)
3876 {
3877 	uint32_t mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
3878 	int32_t ret_val = IXGBE_ERR_MBX;
3879 
3880 	if (mbvficr & mask) {
3881 		ret_val = IXGBE_SUCCESS;
3882 		IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
3883 	}
3884 
3885 	return ret_val;
3886 }
3887 
3888 /**
3889  *  ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
3890  *  @hw: pointer to the HW structure
3891  *  @vf_number: the VF index
3892  *
3893  *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
3894  **/
3895 int32_t ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, uint16_t vf_number)
3896 {
3897 	int32_t ret_val = IXGBE_ERR_MBX;
3898 	int32_t index = IXGBE_MBVFICR_INDEX(vf_number);
3899 	uint32_t vf_bit = vf_number % 16;
3900 
3901 	DEBUGFUNC("ixgbe_check_for_msg_pf");
3902 
3903 	if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
3904 				    index)) {
3905 		ret_val = IXGBE_SUCCESS;
3906 		hw->mbx.stats.reqs++;
3907 	}
3908 
3909 	return ret_val;
3910 }
3911 
3912 /**
3913  *  ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
3914  *  @hw: pointer to the HW structure
3915  *  @vf_number: the VF index
3916  *
3917  *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
3918  **/
3919 int32_t ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, uint16_t vf_number)
3920 {
3921 	int32_t ret_val = IXGBE_ERR_MBX;
3922 	int32_t index = IXGBE_MBVFICR_INDEX(vf_number);
3923 	uint32_t vf_bit = vf_number % 16;
3924 
3925 	DEBUGFUNC("ixgbe_check_for_ack_pf");
3926 
3927 	if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
3928 				    index)) {
3929 		ret_val = IXGBE_SUCCESS;
3930 		hw->mbx.stats.acks++;
3931 	}
3932 
3933 	return ret_val;
3934 }
3935 
/**
 *  ixgbe_check_for_rst_pf - checks to see if the VF has reset
 *  @hw: pointer to the HW structure
 *  @vf_number: the VF index
 *
 *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
 **/
int32_t ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, uint16_t vf_number)
{
	/* Reset status is spread over two 32-bit registers. */
	uint32_t reg_offset = (vf_number < 32) ? 0 : 1;
	uint32_t vf_shift = vf_number % 32;
	uint32_t vflre = 0;
	int32_t ret_val = IXGBE_ERR_MBX;

	DEBUGFUNC("ixgbe_check_for_rst_pf");

	/* The status register differs by MAC: 82599 uses VFLRE, X540 uses
	 * VFLREC.  Other MACs leave vflre at 0, so no reset is reported. */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
		break;
	case ixgbe_mac_X540:
		vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
		break;
	default:
		break;
	}

	/* Clearing goes through VFLREC on both MAC types (note the
	 * asymmetry with the 82599 read above; matches the Intel shared
	 * code this file derives from). */
	if (vflre & (1 << vf_shift)) {
		ret_val = IXGBE_SUCCESS;
		IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
		hw->mbx.stats.rsts++;
	}

	return ret_val;
}
3971 
3972 /**
3973  *  ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
3974  *  @hw: pointer to the HW structure
3975  *  @vf_number: the VF index
3976  *
3977  *  return SUCCESS if we obtained the mailbox lock
3978  **/
3979 int32_t ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, uint16_t vf_number)
3980 {
3981 	int32_t ret_val = IXGBE_ERR_MBX;
3982 	uint32_t p2v_mailbox;
3983 
3984 	DEBUGFUNC("ixgbe_obtain_mbx_lock_pf");
3985 
3986 	/* Take ownership of the buffer */
3987 	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
3988 
3989 	/* reserve mailbox for vf use */
3990 	p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
3991 	if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
3992 		ret_val = IXGBE_SUCCESS;
3993 
3994 	return ret_val;
3995 }
3996 
3997 /**
3998  *  ixgbe_write_mbx_pf - Places a message in the mailbox
3999  *  @hw: pointer to the HW structure
4000  *  @msg: The message buffer
4001  *  @size: Length of buffer
4002  *  @vf_number: the VF index
4003  *
4004  *  returns SUCCESS if it successfully copied message into the buffer
4005  **/
4006 int32_t ixgbe_write_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
4007 			   uint16_t vf_number)
4008 {
4009 	int32_t ret_val;
4010 	uint16_t i;
4011 
4012 	DEBUGFUNC("ixgbe_write_mbx_pf");
4013 
4014 	/* lock the mailbox to prevent pf/vf race condition */
4015 	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
4016 	if (ret_val)
4017 		goto out_no_write;
4018 
4019 	/* flush msg and acks as we are overwriting the message buffer */
4020 	ixgbe_check_for_msg_pf(hw, vf_number);
4021 	ixgbe_check_for_ack_pf(hw, vf_number);
4022 
4023 	/* copy the caller specified message to the mailbox memory buffer */
4024 	for (i = 0; i < size; i++)
4025 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
4026 
4027 	/* Interrupt VF to tell it a message has been sent and release buffer*/
4028 	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
4029 
4030 	/* update stats */
4031 	hw->mbx.stats.msgs_tx++;
4032 
4033 out_no_write:
4034 	return ret_val;
4035 
4036 }
4037 
4038 /**
4039  *  ixgbe_read_mbx_pf - Read a message from the mailbox
4040  *  @hw: pointer to the HW structure
4041  *  @msg: The message buffer
4042  *  @size: Length of buffer
4043  *  @vf_number: the VF index
4044  *
4045  *  This function copies a message from the mailbox buffer to the caller's
4046  *  memory buffer.  The presumption is that the caller knows that there was
4047  *  a message due to a VF request so no polling for message is needed.
4048  **/
4049 int32_t ixgbe_read_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
4050 			  uint16_t vf_number)
4051 {
4052 	int32_t ret_val;
4053 	uint16_t i;
4054 
4055 	DEBUGFUNC("ixgbe_read_mbx_pf");
4056 
4057 	/* lock the mailbox to prevent pf/vf race condition */
4058 	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
4059 	if (ret_val)
4060 		goto out_no_read;
4061 
4062 	/* copy the message to the mailbox memory buffer */
4063 	for (i = 0; i < size; i++)
4064 		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
4065 
4066 	/* Acknowledge the message and release buffer */
4067 	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
4068 
4069 	/* update stats */
4070 	hw->mbx.stats.msgs_rx++;
4071 
4072 out_no_read:
4073 	return ret_val;
4074 }
4075 
4076 /**
4077  *  ixgbe_init_mbx_params_pf - set initial values for pf mailbox
4078  *  @hw: pointer to the HW structure
4079  *
4080  *  Initializes the hw->mbx struct to correct values for pf mailbox
4081  */
4082 void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
4083 {
4084 	struct ixgbe_mbx_info *mbx = &hw->mbx;
4085 
4086 	if (hw->mac.type != ixgbe_mac_82599EB &&
4087 	    hw->mac.type != ixgbe_mac_X540)
4088 		return;
4089 
4090 	mbx->timeout = 0;
4091 	mbx->usec_delay = 0;
4092 
4093 	mbx->size = IXGBE_VFMAILBOX_SIZE;
4094 
4095 	mbx->ops.read = ixgbe_read_mbx_pf;
4096 	mbx->ops.write = ixgbe_write_mbx_pf;
4097 	mbx->ops.read_posted = ixgbe_read_posted_mbx;
4098 	mbx->ops.write_posted = ixgbe_write_posted_mbx;
4099 	mbx->ops.check_for_msg = ixgbe_check_for_msg_pf;
4100 	mbx->ops.check_for_ack = ixgbe_check_for_ack_pf;
4101 	mbx->ops.check_for_rst = ixgbe_check_for_rst_pf;
4102 
4103 	mbx->stats.msgs_tx = 0;
4104 	mbx->stats.msgs_rx = 0;
4105 	mbx->stats.reqs = 0;
4106 	mbx->stats.acks = 0;
4107 	mbx->stats.rsts = 0;
4108 }
4109