xref: /openbsd-src/sys/dev/pci/ixgbe.c (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
1 /*	$OpenBSD: ixgbe.c,v 1.13 2013/08/05 19:58:05 mikeb Exp $	*/
2 
3 /******************************************************************************
4 
5   Copyright (c) 2001-2013, Intel Corporation
6   All rights reserved.
7 
8   Redistribution and use in source and binary forms, with or without
9   modification, are permitted provided that the following conditions are met:
10 
11    1. Redistributions of source code must retain the above copyright notice,
12       this list of conditions and the following disclaimer.
13 
14    2. Redistributions in binary form must reproduce the above copyright
15       notice, this list of conditions and the following disclaimer in the
16       documentation and/or other materials provided with the distribution.
17 
18    3. Neither the name of the Intel Corporation nor the names of its
19       contributors may be used to endorse or promote products derived from
20       this software without specific prior written permission.
21 
22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32   POSSIBILITY OF SUCH DAMAGE.
33 
34 ******************************************************************************/
35 /* FreeBSD: src/sys/dev/ixgbe/ixgbe_common.c 251964 Jun 18 21:28:19 2013 UTC */
36 /* FreeBSD: src/sys/dev/ixgbe/ixgbe_mbx.c 230775 Jan 30 16:42:02 2012 UTC */
37 
38 #include <dev/pci/ixgbe.h>
39 
40 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw,
41 				       uint16_t link_status);
42 
43 int32_t ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
44 int32_t ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
45 void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
46 int32_t ixgbe_ready_eeprom(struct ixgbe_hw *hw);
47 void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
48 void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, uint16_t data,
49 				 uint16_t count);
50 uint16_t ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, uint16_t count);
51 void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec);
52 void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec);
53 void ixgbe_release_eeprom(struct ixgbe_hw *hw);
54 
55 int32_t ixgbe_mta_vector(struct ixgbe_hw *hw, uint8_t *mc_addr);
56 int32_t ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
57 int32_t ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
58 int32_t ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
59 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
60 int32_t ixgbe_negotiate_fc(struct ixgbe_hw *hw, uint32_t adv_reg,
61 			   uint32_t lp_reg, uint32_t adv_sym, uint32_t adv_asm,
62 			   uint32_t lp_sym, uint32_t lp_asm);
63 
64 int32_t ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, uint32_t vlan);
65 
66 /* MBX */
67 int32_t ixgbe_poll_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id);
68 int32_t ixgbe_poll_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id);
69 uint32_t ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw);
70 int32_t ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, uint32_t mask,
71 			       int32_t index);
72 int32_t ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, uint16_t vf_number);
73 int32_t ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, uint16_t vf_number);
74 int32_t ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, uint16_t vf_number);
75 int32_t ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, uint16_t vf_number);
76 int32_t ixgbe_write_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
77 			   uint16_t vf_number);
78 int32_t ixgbe_read_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
79 			  uint16_t vf_number);
80 
81 
82 /**
83  *  ixgbe_init_ops_generic - Inits function ptrs
84  *  @hw: pointer to the hardware structure
85  *
86  *  Initialize the function pointers.
87  **/
88 int32_t ixgbe_init_ops_generic(struct ixgbe_hw *hw)
89 {
90 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
91 	struct ixgbe_mac_info *mac = &hw->mac;
92 	uint32_t eec = IXGBE_READ_REG(hw, IXGBE_EEC);
93 
94 	DEBUGFUNC("ixgbe_init_ops_generic");
95 
96 	/* EEPROM */
97 	eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
98 	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
99 	if (eec & IXGBE_EEC_PRES)
100 		eeprom->ops.read = &ixgbe_read_eerd_generic;
101 	else
102 		eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
103 	eeprom->ops.write = &ixgbe_write_eeprom_generic;
104 	eeprom->ops.validate_checksum =
105 				      &ixgbe_validate_eeprom_checksum_generic;
106 	eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
107 	eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
108 
109 	/* MAC */
110 	mac->ops.init_hw = &ixgbe_init_hw_generic;
111 	mac->ops.reset_hw = NULL;
112 	mac->ops.start_hw = &ixgbe_start_hw_generic;
113 	mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
114 	mac->ops.get_media_type = NULL;
115 	mac->ops.get_supported_physical_layer = NULL;
116 	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
117 	mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
118 	mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
119 	mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
120 	mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
121 	mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
122 	mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
123 
124 	/* LEDs */
125 	mac->ops.led_on = &ixgbe_led_on_generic;
126 	mac->ops.led_off = &ixgbe_led_off_generic;
127 	mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
128 	mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
129 
130 	/* RAR, Multicast, VLAN */
131 	mac->ops.set_rar = &ixgbe_set_rar_generic;
132 	mac->ops.clear_rar = &ixgbe_clear_rar_generic;
133 	mac->ops.insert_mac_addr = NULL;
134 	mac->ops.set_vmdq = NULL;
135 	mac->ops.clear_vmdq = NULL;
136 	mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
137 	mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
138 	mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
139 	mac->ops.enable_mc = &ixgbe_enable_mc_generic;
140 	mac->ops.disable_mc = &ixgbe_disable_mc_generic;
141 	mac->ops.clear_vfta = NULL;
142 	mac->ops.set_vfta = NULL;
143 	mac->ops.init_uta_tables = NULL;
144 
145 	/* Flow Control */
146 	mac->ops.fc_enable = &ixgbe_fc_enable_generic;
147 
148 	/* Link */
149 	mac->ops.get_link_capabilities = NULL;
150 	mac->ops.setup_link = NULL;
151 	mac->ops.check_link = NULL;
152 
153 	return IXGBE_SUCCESS;
154 }
155 
156 /**
157  * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
158  * of flow control
159  * @hw: pointer to hardware structure
160  *
161  * This function returns TRUE if the device supports flow control
162  * autonegotiation, and FALSE if it does not.
163  *
164  **/
165 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
166 {
167 	bool supported = FALSE;
168 	ixgbe_link_speed speed;
169 	bool link_up;
170 
171 	DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
172 
173 	switch (hw->phy.media_type) {
174 	case ixgbe_media_type_fiber_fixed:
175 	case ixgbe_media_type_fiber:
176 		hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
177 		/* if link is down, assume supported */
178 		if (link_up)
179 			supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
180 				TRUE : FALSE;
181 		else
182 			supported = TRUE;
183 		break;
184 	case ixgbe_media_type_backplane:
185 		supported = TRUE;
186 		break;
187 	case ixgbe_media_type_copper:
188 		/* only some copper devices support flow control autoneg */
189 		switch (hw->device_id) {
190 		case IXGBE_DEV_ID_82599_T3_LOM:
191 		case IXGBE_DEV_ID_X540T:
192 		case IXGBE_DEV_ID_X540_BYPASS:
193 			supported = TRUE;
194 			break;
195 		default:
196 			supported = FALSE;
197 		}
198 	default:
199 		break;
200 	}
201 
202 	ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
203 		      "Device %x does not support flow control autoneg",
204 		      hw->device_id);
205 	return supported;
206 }
207 
208 /**
209  *  ixgbe_setup_fc - Set up flow control
210  *  @hw: pointer to hardware structure
211  *
212  *  Called at init time to set up flow control.
213  **/
214 int32_t ixgbe_setup_fc(struct ixgbe_hw *hw)
215 {
216 	int32_t ret_val = IXGBE_SUCCESS;
217 	uint32_t reg = 0, reg_bp = 0;
218 	uint16_t reg_cu = 0;
219 	bool got_lock = FALSE;
220 
221 	DEBUGFUNC("ixgbe_setup_fc");
222 
223 	/*
224 	 * Validate the requested mode.  Strict IEEE mode does not allow
225 	 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
226 	 */
227 	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
228 		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
229 			   "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
230 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
231 		goto out;
232 	}
233 
234 	/*
235 	 * 10gig parts do not have a word in the EEPROM to determine the
236 	 * default flow control setting, so we explicitly set it to full.
237 	 */
238 	if (hw->fc.requested_mode == ixgbe_fc_default)
239 		hw->fc.requested_mode = ixgbe_fc_full;
240 
241 	/*
242 	 * Set up the 1G and 10G flow control advertisement registers so the
243 	 * HW will be able to do fc autoneg once the cable is plugged in.  If
244 	 * we link at 10G, the 1G advertisement is harmless and vice versa.
245 	 */
246 	switch (hw->phy.media_type) {
247 	case ixgbe_media_type_fiber_fixed:
248 	case ixgbe_media_type_fiber:
249 	case ixgbe_media_type_backplane:
250 		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
251 		reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
252 		break;
253 	case ixgbe_media_type_copper:
254 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
255 				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
256 		break;
257 	default:
258 		break;
259 	}
260 
261 	/*
262 	 * The possible values of fc.requested_mode are:
263 	 * 0: Flow control is completely disabled
264 	 * 1: Rx flow control is enabled (we can receive pause frames,
265 	 *    but not send pause frames).
266 	 * 2: Tx flow control is enabled (we can send pause frames but
267 	 *    we do not support receiving pause frames).
268 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
269 	 * other: Invalid.
270 	 */
271 	switch (hw->fc.requested_mode) {
272 	case ixgbe_fc_none:
273 		/* Flow control completely disabled by software override. */
274 		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
275 		if (hw->phy.media_type == ixgbe_media_type_backplane)
276 			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
277 				    IXGBE_AUTOC_ASM_PAUSE);
278 		else if (hw->phy.media_type == ixgbe_media_type_copper)
279 			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
280 		break;
281 	case ixgbe_fc_tx_pause:
282 		/*
283 		 * Tx Flow control is enabled, and Rx Flow control is
284 		 * disabled by software override.
285 		 */
286 		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
287 		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
288 		if (hw->phy.media_type == ixgbe_media_type_backplane) {
289 			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
290 			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
291 		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
292 			reg_cu |= IXGBE_TAF_ASM_PAUSE;
293 			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
294 		}
295 		break;
296 	case ixgbe_fc_rx_pause:
297 		/*
298 		 * Rx Flow control is enabled and Tx Flow control is
299 		 * disabled by software override. Since there really
300 		 * isn't a way to advertise that we are capable of RX
301 		 * Pause ONLY, we will advertise that we support both
302 		 * symmetric and asymmetric Rx PAUSE, as such we fall
303 		 * through to the fc_full statement.  Later, we will
304 		 * disable the adapter's ability to send PAUSE frames.
305 		 */
306 	case ixgbe_fc_full:
307 		/* Flow control (both Rx and Tx) is enabled by SW override. */
308 		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
309 		if (hw->phy.media_type == ixgbe_media_type_backplane)
310 			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
311 				  IXGBE_AUTOC_ASM_PAUSE;
312 		else if (hw->phy.media_type == ixgbe_media_type_copper)
313 			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
314 		break;
315 	default:
316 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
317 			     "Flow control param set incorrectly\n");
318 		ret_val = IXGBE_ERR_CONFIG;
319 		goto out;
320 		break;
321 	}
322 
323 	if (hw->mac.type != ixgbe_mac_X540) {
324 		/*
325 		 * Enable auto-negotiation between the MAC & PHY;
326 		 * the MAC will advertise clause 37 flow control.
327 		 */
328 		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
329 		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
330 
331 		/* Disable AN timeout */
332 		if (hw->fc.strict_ieee)
333 			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
334 
335 		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
336 		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
337 	}
338 
339 	/*
340 	 * AUTOC restart handles negotiation of 1G and 10G on backplane
341 	 * and copper. There is no need to set the PCS1GCTL register.
342 	 *
343 	 */
344 	if (hw->phy.media_type == ixgbe_media_type_backplane) {
345 		reg_bp |= IXGBE_AUTOC_AN_RESTART;
346 		/* Need the SW/FW semaphore around AUTOC writes if 82599 and
347 		 * LESM is on, likewise reset_pipeline requries the lock as
348 		 * it also writes AUTOC.
349 		 */
350 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
351 		    ixgbe_verify_lesm_fw_enabled(hw)) {
352 			ret_val = hw->mac.ops.acquire_swfw_sync(hw,
353 							IXGBE_GSSR_MAC_CSR_SM);
354 			if (ret_val != IXGBE_SUCCESS) {
355 				ret_val = IXGBE_ERR_SWFW_SYNC;
356 				goto out;
357 			}
358 			got_lock = TRUE;
359 		}
360 
361 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
362 		if (hw->mac.type == ixgbe_mac_82599EB)
363 			ixgbe_reset_pipeline(hw);
364 
365 		if (got_lock)
366 			hw->mac.ops.release_swfw_sync(hw,
367 						      IXGBE_GSSR_MAC_CSR_SM);
368 	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
369 		    (ixgbe_device_supports_autoneg_fc(hw))) {
370 		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
371 				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
372 	}
373 
374 	DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
375 out:
376 	return ret_val;
377 }
378 
379 /**
380  *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
381  *  @hw: pointer to hardware structure
382  *
383  *  Starts the hardware by filling the bus info structure and media type, clears
384  *  all on chip counters, initializes receive address registers, multicast
385  *  table, VLAN filter table, calls routine to set up link and flow control
386  *  settings, and leaves transmit and receive units disabled and uninitialized
387  **/
388 int32_t ixgbe_start_hw_generic(struct ixgbe_hw *hw)
389 {
390 	int32_t ret_val;
391 	uint32_t ctrl_ext;
392 
393 	DEBUGFUNC("ixgbe_start_hw_generic");
394 
395 	/* Set the media type */
396 	hw->phy.media_type = hw->mac.ops.get_media_type(hw);
397 
398 	/* PHY ops initialization must be done in reset_hw() */
399 
400 	/* Clear the VLAN filter table */
401 	hw->mac.ops.clear_vfta(hw);
402 
403 	/* Clear statistics registers */
404 	hw->mac.ops.clear_hw_cntrs(hw);
405 
406 	/* Set No Snoop Disable */
407 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
408 	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
409 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
410 	IXGBE_WRITE_FLUSH(hw);
411 
412 	/* Setup flow control */
413 	ret_val = ixgbe_setup_fc(hw);
414 	if (ret_val != IXGBE_SUCCESS)
415 		goto out;
416 
417 	/* Clear adapter stopped flag */
418 	hw->adapter_stopped = FALSE;
419 
420 out:
421 	return ret_val;
422 }
423 
424 /**
425  *  ixgbe_start_hw_gen2 - Init sequence for common device family
426  *  @hw: pointer to hw structure
427  *
428  * Performs the init sequence common to the second generation
429  * of 10 GbE devices.
430  * Devices in the second generation:
431  *     82599
432  *     X540
433  **/
434 int32_t ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
435 {
436 	uint32_t i;
437 	uint32_t regval;
438 
439 	/* Clear the rate limiters */
440 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
441 		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
442 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
443 	}
444 	IXGBE_WRITE_FLUSH(hw);
445 
446 	/* Disable relaxed ordering */
447 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
448 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
449 		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
450 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
451 	}
452 
453 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
454 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
455 		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
456 			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
457 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
458 	}
459 
460 	return IXGBE_SUCCESS;
461 }
462 
463 /**
464  *  ixgbe_init_hw_generic - Generic hardware initialization
465  *  @hw: pointer to hardware structure
466  *
467  *  Initialize the hardware by resetting the hardware, filling the bus info
468  *  structure and media type, clears all on chip counters, initializes receive
469  *  address registers, multicast table, VLAN filter table, calls routine to set
470  *  up link and flow control settings, and leaves transmit and receive units
471  *  disabled and uninitialized
472  **/
473 int32_t ixgbe_init_hw_generic(struct ixgbe_hw *hw)
474 {
475 	int32_t status;
476 
477 	DEBUGFUNC("ixgbe_init_hw_generic");
478 
479 	/* Reset the hardware */
480 	status = hw->mac.ops.reset_hw(hw);
481 
482 	if (status == IXGBE_SUCCESS) {
483 		/* Start the HW */
484 		status = hw->mac.ops.start_hw(hw);
485 	}
486 
487 	return status;
488 }
489 
490 /**
491  *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
492  *  @hw: pointer to hardware structure
493  *
494  *  Clears all hardware statistics counters by reading them from the hardware
495  *  Statistics counters are clear on read.
496  **/
497 int32_t ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
498 {
499 	uint16_t i = 0;
500 
501 	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
502 
503 	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
504 	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
505 	IXGBE_READ_REG(hw, IXGBE_ERRBC);
506 	IXGBE_READ_REG(hw, IXGBE_MSPDC);
507 	for (i = 0; i < 8; i++)
508 		IXGBE_READ_REG(hw, IXGBE_MPC(i));
509 
510 	IXGBE_READ_REG(hw, IXGBE_MLFC);
511 	IXGBE_READ_REG(hw, IXGBE_MRFC);
512 	IXGBE_READ_REG(hw, IXGBE_RLEC);
513 	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
514 	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
515 	if (hw->mac.type >= ixgbe_mac_82599EB) {
516 		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
517 		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
518 	} else {
519 		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
520 		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
521 	}
522 
523 	for (i = 0; i < 8; i++) {
524 		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
525 		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
526 		if (hw->mac.type >= ixgbe_mac_82599EB) {
527 			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
528 			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
529 		} else {
530 			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
531 			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
532 		}
533 	}
534 	if (hw->mac.type >= ixgbe_mac_82599EB)
535 		for (i = 0; i < 8; i++)
536 			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
537 	IXGBE_READ_REG(hw, IXGBE_PRC64);
538 	IXGBE_READ_REG(hw, IXGBE_PRC127);
539 	IXGBE_READ_REG(hw, IXGBE_PRC255);
540 	IXGBE_READ_REG(hw, IXGBE_PRC511);
541 	IXGBE_READ_REG(hw, IXGBE_PRC1023);
542 	IXGBE_READ_REG(hw, IXGBE_PRC1522);
543 	IXGBE_READ_REG(hw, IXGBE_GPRC);
544 	IXGBE_READ_REG(hw, IXGBE_BPRC);
545 	IXGBE_READ_REG(hw, IXGBE_MPRC);
546 	IXGBE_READ_REG(hw, IXGBE_GPTC);
547 	IXGBE_READ_REG(hw, IXGBE_GORCL);
548 	IXGBE_READ_REG(hw, IXGBE_GORCH);
549 	IXGBE_READ_REG(hw, IXGBE_GOTCL);
550 	IXGBE_READ_REG(hw, IXGBE_GOTCH);
551 	if (hw->mac.type == ixgbe_mac_82598EB)
552 		for (i = 0; i < 8; i++)
553 			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
554 	IXGBE_READ_REG(hw, IXGBE_RUC);
555 	IXGBE_READ_REG(hw, IXGBE_RFC);
556 	IXGBE_READ_REG(hw, IXGBE_ROC);
557 	IXGBE_READ_REG(hw, IXGBE_RJC);
558 	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
559 	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
560 	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
561 	IXGBE_READ_REG(hw, IXGBE_TORL);
562 	IXGBE_READ_REG(hw, IXGBE_TORH);
563 	IXGBE_READ_REG(hw, IXGBE_TPR);
564 	IXGBE_READ_REG(hw, IXGBE_TPT);
565 	IXGBE_READ_REG(hw, IXGBE_PTC64);
566 	IXGBE_READ_REG(hw, IXGBE_PTC127);
567 	IXGBE_READ_REG(hw, IXGBE_PTC255);
568 	IXGBE_READ_REG(hw, IXGBE_PTC511);
569 	IXGBE_READ_REG(hw, IXGBE_PTC1023);
570 	IXGBE_READ_REG(hw, IXGBE_PTC1522);
571 	IXGBE_READ_REG(hw, IXGBE_MPTC);
572 	IXGBE_READ_REG(hw, IXGBE_BPTC);
573 	for (i = 0; i < 16; i++) {
574 		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
575 		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
576 		if (hw->mac.type >= ixgbe_mac_82599EB) {
577 			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
578 			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
579 			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
580 			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
581 			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
582 		} else {
583 			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
584 			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
585 		}
586 	}
587 
588 	if (hw->mac.type == ixgbe_mac_X540) {
589 		if (hw->phy.id == 0)
590 			ixgbe_identify_phy(hw);
591 		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
592 				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
593 		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
594 				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
595 		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
596 				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
597 		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
598 				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
599 	}
600 
601 	return IXGBE_SUCCESS;
602 }
603 
604 /**
605  *  ixgbe_get_mac_addr_generic - Generic get MAC address
606  *  @hw: pointer to hardware structure
607  *  @mac_addr: Adapter MAC address
608  *
609  *  Reads the adapter's MAC address from first Receive Address Register (RAR0)
610  *  A reset of the adapter must be performed prior to calling this function
611  *  in order for the MAC address to have been loaded from the EEPROM into RAR0
612  **/
613 int32_t ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, uint8_t *mac_addr)
614 {
615 	uint32_t rar_high;
616 	uint32_t rar_low;
617 	uint16_t i;
618 
619 	DEBUGFUNC("ixgbe_get_mac_addr_generic");
620 
621 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
622 	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
623 
624 	for (i = 0; i < 4; i++)
625 		mac_addr[i] = (uint8_t)(rar_low >> (i*8));
626 
627 	for (i = 0; i < 2; i++)
628 		mac_addr[i+4] = (uint8_t)(rar_high >> (i*8));
629 
630 	return IXGBE_SUCCESS;
631 }
632 
633 /**
634  *  ixgbe_set_pci_config_data_generic - Generic store PCI bus info
635  *  @hw: pointer to hardware structure
636  *  @link_status: the link status returned by the PCI config space
637  *
638  *  Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
639  **/
640 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw,
641 				       uint16_t link_status)
642 {
643 	struct ixgbe_mac_info *mac = &hw->mac;
644 
645 	hw->bus.type = ixgbe_bus_type_pci_express;
646 
647 	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
648 	case IXGBE_PCI_LINK_WIDTH_1:
649 		hw->bus.width = ixgbe_bus_width_pcie_x1;
650 		break;
651 	case IXGBE_PCI_LINK_WIDTH_2:
652 		hw->bus.width = ixgbe_bus_width_pcie_x2;
653 		break;
654 	case IXGBE_PCI_LINK_WIDTH_4:
655 		hw->bus.width = ixgbe_bus_width_pcie_x4;
656 		break;
657 	case IXGBE_PCI_LINK_WIDTH_8:
658 		hw->bus.width = ixgbe_bus_width_pcie_x8;
659 		break;
660 	default:
661 		hw->bus.width = ixgbe_bus_width_unknown;
662 		break;
663 	}
664 
665 	switch (link_status & IXGBE_PCI_LINK_SPEED) {
666 	case IXGBE_PCI_LINK_SPEED_2500:
667 		hw->bus.speed = ixgbe_bus_speed_2500;
668 		break;
669 	case IXGBE_PCI_LINK_SPEED_5000:
670 		hw->bus.speed = ixgbe_bus_speed_5000;
671 		break;
672 	case IXGBE_PCI_LINK_SPEED_8000:
673 		hw->bus.speed = ixgbe_bus_speed_8000;
674 		break;
675 	default:
676 		hw->bus.speed = ixgbe_bus_speed_unknown;
677 		break;
678 	}
679 
680 	mac->ops.set_lan_id(hw);
681 }
682 
683 /**
684  *  ixgbe_get_bus_info_generic - Generic set PCI bus info
685  *  @hw: pointer to hardware structure
686  *
687  *  Gets the PCI bus info (speed, width, type) then calls helper function to
688  *  store this data within the ixgbe_hw structure.
689  **/
690 int32_t ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
691 {
692 	uint16_t link_status;
693 
694 	DEBUGFUNC("ixgbe_get_bus_info_generic");
695 
696 	/* Get the negotiated link width and speed from PCI config space */
697 	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
698 
699 	ixgbe_set_pci_config_data_generic(hw, link_status);
700 
701 	return IXGBE_SUCCESS;
702 }
703 
704 /**
705  *  ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
706  *  @hw: pointer to the HW structure
707  *
708  *  Determines the LAN function id by reading memory-mapped registers
709  *  and swaps the port value if requested.
710  **/
711 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
712 {
713 	struct ixgbe_bus_info *bus = &hw->bus;
714 	uint32_t reg;
715 
716 	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
717 
718 	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
719 	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
720 	bus->lan_id = bus->func;
721 
722 	/* check for a port swap */
723 	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
724 	if (reg & IXGBE_FACTPS_LFS)
725 		bus->func ^= 0x1;
726 }
727 
728 /**
729  *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
730  *  @hw: pointer to hardware structure
731  *
732  *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
733  *  disables transmit and receive units. The adapter_stopped flag is used by
734  *  the shared code and drivers to determine if the adapter is in a stopped
735  *  state and should not touch the hardware.
736  **/
737 int32_t ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
738 {
739 	uint32_t reg_val;
740 	uint16_t i;
741 
742 	DEBUGFUNC("ixgbe_stop_adapter_generic");
743 
744 	/*
745 	 * Set the adapter_stopped flag so other driver functions stop touching
746 	 * the hardware
747 	 */
748 	hw->adapter_stopped = TRUE;
749 
750 	/* Disable the receive unit */
751 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
752 
753 	/* Clear interrupt mask to stop interrupts from being generated */
754 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
755 
756 	/* Clear any pending interrupts, flush previous writes */
757 	IXGBE_READ_REG(hw, IXGBE_EICR);
758 
759 	/* Disable the transmit unit.  Each queue must be disabled. */
760 	for (i = 0; i < hw->mac.max_tx_queues; i++)
761 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
762 
763 	/* Disable the receive unit by stopping each queue */
764 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
765 		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
766 		reg_val &= ~IXGBE_RXDCTL_ENABLE;
767 		reg_val |= IXGBE_RXDCTL_SWFLSH;
768 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
769 	}
770 
771 	/* flush all queues disables */
772 	IXGBE_WRITE_FLUSH(hw);
773 	msec_delay(2);
774 
775 	/*
776 	 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
777 	 * access and verify no pending requests
778 	 */
779 	return ixgbe_disable_pcie_master(hw);
780 }
781 
782 /**
783  *  ixgbe_led_on_generic - Turns on the software controllable LEDs.
784  *  @hw: pointer to hardware structure
785  *  @index: led number to turn on
786  **/
787 int32_t ixgbe_led_on_generic(struct ixgbe_hw *hw, uint32_t index)
788 {
789 	uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
790 
791 	DEBUGFUNC("ixgbe_led_on_generic");
792 
793 	/* To turn on the LED, set mode to ON. */
794 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
795 	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
796 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
797 	IXGBE_WRITE_FLUSH(hw);
798 
799 	return IXGBE_SUCCESS;
800 }
801 
802 /**
803  *  ixgbe_led_off_generic - Turns off the software controllable LEDs.
804  *  @hw: pointer to hardware structure
805  *  @index: led number to turn off
806  **/
807 int32_t ixgbe_led_off_generic(struct ixgbe_hw *hw, uint32_t index)
808 {
809 	uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
810 
811 	DEBUGFUNC("ixgbe_led_off_generic");
812 
813 	/* To turn off the LED, set mode to OFF. */
814 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
815 	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
816 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
817 	IXGBE_WRITE_FLUSH(hw);
818 
819 	return IXGBE_SUCCESS;
820 }
821 
822 /**
823  *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
824  *  @hw: pointer to hardware structure
825  *
826  *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
827  *  ixgbe_hw struct in order to set up EEPROM access.
828  **/
829 int32_t ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
830 {
831 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
832 	uint32_t eec;
833 	uint16_t eeprom_size;
834 
835 	DEBUGFUNC("ixgbe_init_eeprom_params_generic");
836 
837 	if (eeprom->type == ixgbe_eeprom_uninitialized) {
838 		eeprom->type = ixgbe_eeprom_none;
839 		/* Set default semaphore delay to 10ms which is a well
840 		 * tested value */
841 		eeprom->semaphore_delay = 10;
842 		/* Clear EEPROM page size, it will be initialized as needed */
843 		eeprom->word_page_size = 0;
844 
845 		/*
846 		 * Check for EEPROM present first.
847 		 * If not present leave as none
848 		 */
849 		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
850 		if (eec & IXGBE_EEC_PRES) {
851 			eeprom->type = ixgbe_eeprom_spi;
852 
853 			/*
854 			 * SPI EEPROM is assumed here.  This code would need to
855 			 * change if a future EEPROM is not SPI.
856 			 */
857 			eeprom_size = (uint16_t)((eec & IXGBE_EEC_SIZE) >>
858 					    IXGBE_EEC_SIZE_SHIFT);
859 			eeprom->word_size = 1 << (eeprom_size +
860 					     IXGBE_EEPROM_WORD_SIZE_SHIFT);
861 		}
862 
863 		if (eec & IXGBE_EEC_ADDR_SIZE)
864 			eeprom->address_bits = 16;
865 		else
866 			eeprom->address_bits = 8;
867 		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
868 			  "%d\n", eeprom->type, eeprom->word_size,
869 			  eeprom->address_bits);
870 	}
871 
872 	return IXGBE_SUCCESS;
873 }
874 
875 /**
876  *  ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
877  *  @hw: pointer to hardware structure
878  *  @offset: offset within the EEPROM to be written to
879  *  @data: 16 bit word to be written to the EEPROM
880  *
881  *  If ixgbe_eeprom_update_checksum is not called after this function, the
882  *  EEPROM will most likely contain an invalid checksum.
883  **/
884 int32_t ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t data)
885 {
886 	int32_t status;
887 	uint8_t write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
888 
889 	DEBUGFUNC("ixgbe_write_eeprom_generic");
890 
891 	hw->eeprom.ops.init_params(hw);
892 
893 	if (offset >= hw->eeprom.word_size) {
894 		status = IXGBE_ERR_EEPROM;
895 		goto out;
896 	}
897 
898 	/* Prepare the EEPROM for writing  */
899 	status = ixgbe_acquire_eeprom(hw);
900 
901 	if (status == IXGBE_SUCCESS) {
902 		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
903 			ixgbe_release_eeprom(hw);
904 			status = IXGBE_ERR_EEPROM;
905 		}
906 	}
907 
908 	if (status == IXGBE_SUCCESS) {
909 		ixgbe_standby_eeprom(hw);
910 
911 		/*  Send the WRITE ENABLE command (8 bit opcode )  */
912 		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_WREN_OPCODE_SPI,
913 					    IXGBE_EEPROM_OPCODE_BITS);
914 
915 		ixgbe_standby_eeprom(hw);
916 
917 		/*
918 		 * Some SPI eeproms use the 8th address bit embedded in the
919 		 * opcode
920 		 */
921 		if ((hw->eeprom.address_bits == 8) && (offset >= 128))
922 			write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
923 
924 		/* Send the Write command (8-bit opcode + addr) */
925 		ixgbe_shift_out_eeprom_bits(hw, write_opcode,
926 					    IXGBE_EEPROM_OPCODE_BITS);
927 		ixgbe_shift_out_eeprom_bits(hw, (uint16_t)(offset*2),
928 					    hw->eeprom.address_bits);
929 
930 		/* Send the data */
931 		data = (data >> 8) | (data << 8);
932 		ixgbe_shift_out_eeprom_bits(hw, data, 16);
933 		ixgbe_standby_eeprom(hw);
934 
935 		/* Done with writing - release the EEPROM */
936 		ixgbe_release_eeprom(hw);
937 	}
938 
939 out:
940 	return status;
941 }
942 
943 /**
944  *  ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
945  *  @hw: pointer to hardware structure
946  *  @offset: offset within the EEPROM to be read
947  *  @data: read 16 bit value from EEPROM
948  *
949  *  Reads 16 bit value from EEPROM through bit-bang method
950  **/
951 int32_t ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, uint16_t offset,
952 				       uint16_t *data)
953 {
954 	int32_t status;
955 	uint16_t word_in;
956 	uint8_t read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
957 
958 	hw->eeprom.ops.init_params(hw);
959 
960 	if (offset >= hw->eeprom.word_size) {
961 		status = IXGBE_ERR_EEPROM;
962 		goto out;
963 	}
964 
965 	/* Prepare the EEPROM for reading  */
966 	status = ixgbe_acquire_eeprom(hw);
967 
968 	if (status == IXGBE_SUCCESS) {
969 		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
970 			ixgbe_release_eeprom(hw);
971 			status = IXGBE_ERR_EEPROM;
972 		}
973 	}
974 
975 	if (status == IXGBE_SUCCESS) {
976 		ixgbe_standby_eeprom(hw);
977 
978 		/*
979 		 * Some SPI eeproms use the 8th address bit embedded in the
980 		 * opcode
981 		 */
982 		if ((hw->eeprom.address_bits == 8) && (offset >= 128))
983 			read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
984 
985 		/* Send the READ command (opcode + addr) */
986 		ixgbe_shift_out_eeprom_bits(hw, read_opcode,
987 					    IXGBE_EEPROM_OPCODE_BITS);
988 		ixgbe_shift_out_eeprom_bits(hw, (uint16_t)(offset*2),
989 					    hw->eeprom.address_bits);
990 
991 		/* Read the data. */
992 		word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
993 		*data = (word_in >> 8) | (word_in << 8);
994 
995 		/* End this read operation */
996 		ixgbe_release_eeprom(hw);
997 	}
998 
999 out:
1000 	return status;
1001 }
1002 
1003 /**
1004  *  ixgbe_read_eerd_generic - Read EEPROM word using EERD
1005  *  @hw: pointer to hardware structure
1006  *  @offset: offset of  word in the EEPROM to read
1007  *  @data: word read from the EEPROM
1008  *
1009  *  Reads a 16 bit word from the EEPROM using the EERD register.
1010  **/
1011 int32_t ixgbe_read_eerd_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t *data)
1012 {
1013 	uint32_t eerd;
1014 	int32_t status;
1015 
1016 	hw->eeprom.ops.init_params(hw);
1017 
1018 	if (offset >= hw->eeprom.word_size) {
1019 		status = IXGBE_ERR_EEPROM;
1020 		goto out;
1021 	}
1022 
1023 	eerd = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) +
1024 	       IXGBE_EEPROM_RW_REG_START;
1025 
1026 	IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1027 	status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1028 
1029 	if (status == IXGBE_SUCCESS)
1030 		*data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1031 			 IXGBE_EEPROM_RW_REG_DATA);
1032 	else
1033 		DEBUGOUT("Eeprom read timed out\n");
1034 
1035 out:
1036 	return status;
1037 }
1038 
1039 /**
1040  *  ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1041  *  @hw: pointer to hardware structure
1042  *  @offset: offset of  word in the EEPROM to write
1043  *  @data: word write to the EEPROM
1044  *
1045  *  Write a 16 bit word to the EEPROM using the EEWR register.
1046  **/
1047 int32_t ixgbe_write_eewr_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t data)
1048 {
1049 	uint32_t eewr;
1050 	int32_t status;
1051 
1052 	hw->eeprom.ops.init_params(hw);
1053 
1054 	if (offset >= hw->eeprom.word_size) {
1055 		status = IXGBE_ERR_EEPROM;
1056 		goto out;
1057 	}
1058 
1059 	eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1060 	       (data << IXGBE_EEPROM_RW_REG_DATA) | IXGBE_EEPROM_RW_REG_START;
1061 
1062 	status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1063 	if (status != IXGBE_SUCCESS) {
1064 		DEBUGOUT("Eeprom write EEWR timed out\n");
1065 		goto out;
1066 	}
1067 
1068 	IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1069 
1070 	status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1071 	if (status != IXGBE_SUCCESS) {
1072 		DEBUGOUT("Eeprom write EEWR timed out\n");
1073 		goto out;
1074 	}
1075 
1076 out:
1077 	return status;
1078 }
1079 
1080 /**
1081  *  ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1082  *  @hw: pointer to hardware structure
1083  *  @ee_reg: EEPROM flag for polling
1084  *
1085  *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1086  *  read or write is done respectively.
1087  **/
1088 int32_t ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, uint32_t ee_reg)
1089 {
1090 	uint32_t i;
1091 	uint32_t reg;
1092 	int32_t status = IXGBE_ERR_EEPROM;
1093 
1094 	DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1095 
1096 	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1097 		if (ee_reg == IXGBE_NVM_POLL_READ)
1098 			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1099 		else
1100 			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1101 
1102 		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1103 			status = IXGBE_SUCCESS;
1104 			break;
1105 		}
1106 		usec_delay(5);
1107 	}
1108 
1109 	if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1110 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
1111 			     "EEPROM read/write done polling timed out");
1112 
1113 	return status;
1114 }
1115 
1116 /**
1117  *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1118  *  @hw: pointer to hardware structure
1119  *
1120  *  Prepares EEPROM for access using bit-bang method. This function should
1121  *  be called before issuing a command to the EEPROM.
1122  **/
1123 int32_t ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1124 {
1125 	int32_t status = IXGBE_SUCCESS;
1126 	uint32_t eec;
1127 	uint32_t i;
1128 
1129 	DEBUGFUNC("ixgbe_acquire_eeprom");
1130 
1131 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1132 	    != IXGBE_SUCCESS)
1133 		status = IXGBE_ERR_SWFW_SYNC;
1134 
1135 	if (status == IXGBE_SUCCESS) {
1136 		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1137 
1138 		/* Request EEPROM Access */
1139 		eec |= IXGBE_EEC_REQ;
1140 		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1141 
1142 		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1143 			eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1144 			if (eec & IXGBE_EEC_GNT)
1145 				break;
1146 			usec_delay(5);
1147 		}
1148 
1149 		/* Release if grant not acquired */
1150 		if (!(eec & IXGBE_EEC_GNT)) {
1151 			eec &= ~IXGBE_EEC_REQ;
1152 			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1153 			DEBUGOUT("Could not acquire EEPROM grant\n");
1154 
1155 			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1156 			status = IXGBE_ERR_EEPROM;
1157 		}
1158 
1159 		/* Setup EEPROM for Read/Write */
1160 		if (status == IXGBE_SUCCESS) {
1161 			/* Clear CS and SK */
1162 			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1163 			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1164 			IXGBE_WRITE_FLUSH(hw);
1165 			usec_delay(1);
1166 		}
1167 	}
1168 	return status;
1169 }
1170 
1171 /**
1172  *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
1173  *  @hw: pointer to hardware structure
1174  *
1175  *  Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1176  **/
1177 int32_t ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1178 {
1179 	int32_t status = IXGBE_ERR_EEPROM;
1180 	uint32_t timeout = 2000;
1181 	uint32_t i;
1182 	uint32_t swsm;
1183 
1184 	DEBUGFUNC("ixgbe_get_eeprom_semaphore");
1185 
1186 
1187 	/* Get SMBI software semaphore between device drivers first */
1188 	for (i = 0; i < timeout; i++) {
1189 		/*
1190 		 * If the SMBI bit is 0 when we read it, then the bit will be
1191 		 * set and we have the semaphore
1192 		 */
1193 		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1194 		if (!(swsm & IXGBE_SWSM_SMBI)) {
1195 			status = IXGBE_SUCCESS;
1196 			break;
1197 		}
1198 		usec_delay(50);
1199 	}
1200 
1201 	if (i == timeout) {
1202 		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
1203 			 "not granted.\n");
1204 		/*
1205 		 * this release is particularly important because our attempts
1206 		 * above to get the semaphore may have succeeded, and if there
1207 		 * was a timeout, we should unconditionally clear the semaphore
1208 		 * bits to free the driver to make progress
1209 		 */
1210 		ixgbe_release_eeprom_semaphore(hw);
1211 
1212 		usec_delay(50);
1213 		/*
1214 		 * one last try
1215 		 * If the SMBI bit is 0 when we read it, then the bit will be
1216 		 * set and we have the semaphore
1217 		 */
1218 		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1219 		if (!(swsm & IXGBE_SWSM_SMBI))
1220 			status = IXGBE_SUCCESS;
1221 	}
1222 
1223 	/* Now get the semaphore between SW/FW through the SWESMBI bit */
1224 	if (status == IXGBE_SUCCESS) {
1225 		for (i = 0; i < timeout; i++) {
1226 			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1227 
1228 			/* Set the SW EEPROM semaphore bit to request access */
1229 			swsm |= IXGBE_SWSM_SWESMBI;
1230 			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1231 
1232 			/*
1233 			 * If we set the bit successfully then we got the
1234 			 * semaphore.
1235 			 */
1236 			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1237 			if (swsm & IXGBE_SWSM_SWESMBI)
1238 				break;
1239 
1240 			usec_delay(50);
1241 		}
1242 
1243 		/*
1244 		 * Release semaphores and return error if SW EEPROM semaphore
1245 		 * was not granted because we don't have access to the EEPROM
1246 		 */
1247 		if (i >= timeout) {
1248 			ERROR_REPORT1(IXGBE_ERROR_POLLING,
1249 			    "SWESMBI Software EEPROM semaphore not granted.\n");
1250 			ixgbe_release_eeprom_semaphore(hw);
1251 			status = IXGBE_ERR_EEPROM;
1252 		}
1253 	} else {
1254 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
1255 			     "Software semaphore SMBI between device drivers "
1256 			     "not granted.\n");
1257 	}
1258 
1259 	return status;
1260 }
1261 
1262 /**
1263  *  ixgbe_release_eeprom_semaphore - Release hardware semaphore
1264  *  @hw: pointer to hardware structure
1265  *
1266  *  This function clears hardware semaphore bits.
1267  **/
1268 void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1269 {
1270 	uint32_t swsm;
1271 
1272 	DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1273 
1274 	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1275 
1276 	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1277 	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1278 	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1279 	IXGBE_WRITE_FLUSH(hw);
1280 }
1281 
1282 /**
1283  *  ixgbe_ready_eeprom - Polls for EEPROM ready
1284  *  @hw: pointer to hardware structure
1285  **/
1286 int32_t ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1287 {
1288 	int32_t status = IXGBE_SUCCESS;
1289 	uint16_t i;
1290 	uint8_t spi_stat_reg;
1291 
1292 	DEBUGFUNC("ixgbe_ready_eeprom");
1293 
1294 	/*
1295 	 * Read "Status Register" repeatedly until the LSB is cleared.  The
1296 	 * EEPROM will signal that the command has been completed by clearing
1297 	 * bit 0 of the internal status register.  If it's not cleared within
1298 	 * 5 milliseconds, then error out.
1299 	 */
1300 	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1301 		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1302 					    IXGBE_EEPROM_OPCODE_BITS);
1303 		spi_stat_reg = (uint8_t)ixgbe_shift_in_eeprom_bits(hw, 8);
1304 		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1305 			break;
1306 
1307 		usec_delay(5);
1308 		ixgbe_standby_eeprom(hw);
1309 	};
1310 
1311 	/*
1312 	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1313 	 * devices (and only 0-5mSec on 5V devices)
1314 	 */
1315 	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1316 		DEBUGOUT("SPI EEPROM Status error\n");
1317 		status = IXGBE_ERR_EEPROM;
1318 	}
1319 
1320 	return status;
1321 }
1322 
1323 /**
1324  *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1325  *  @hw: pointer to hardware structure
1326  **/
1327 void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1328 {
1329 	uint32_t eec;
1330 
1331 	DEBUGFUNC("ixgbe_standby_eeprom");
1332 
1333 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1334 
1335 	/* Toggle CS to flush commands */
1336 	eec |= IXGBE_EEC_CS;
1337 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1338 	IXGBE_WRITE_FLUSH(hw);
1339 	usec_delay(1);
1340 	eec &= ~IXGBE_EEC_CS;
1341 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1342 	IXGBE_WRITE_FLUSH(hw);
1343 	usec_delay(1);
1344 }
1345 
1346 /**
1347  *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1348  *  @hw: pointer to hardware structure
1349  *  @data: data to send to the EEPROM
1350  *  @count: number of bits to shift out
1351  **/
1352 void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, uint16_t data,
1353 					uint16_t count)
1354 {
1355 	uint32_t eec;
1356 	uint32_t mask;
1357 	uint32_t i;
1358 
1359 	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
1360 
1361 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1362 
1363 	/*
1364 	 * Mask is used to shift "count" bits of "data" out to the EEPROM
1365 	 * one bit at a time.  Determine the starting bit based on count
1366 	 */
1367 	mask = 0x01 << (count - 1);
1368 
1369 	for (i = 0; i < count; i++) {
1370 		/*
1371 		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1372 		 * "1", and then raising and then lowering the clock (the SK
1373 		 * bit controls the clock input to the EEPROM).  A "0" is
1374 		 * shifted out to the EEPROM by setting "DI" to "0" and then
1375 		 * raising and then lowering the clock.
1376 		 */
1377 		if (data & mask)
1378 			eec |= IXGBE_EEC_DI;
1379 		else
1380 			eec &= ~IXGBE_EEC_DI;
1381 
1382 		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1383 		IXGBE_WRITE_FLUSH(hw);
1384 
1385 		usec_delay(1);
1386 
1387 		ixgbe_raise_eeprom_clk(hw, &eec);
1388 		ixgbe_lower_eeprom_clk(hw, &eec);
1389 
1390 		/*
1391 		 * Shift mask to signify next bit of data to shift in to the
1392 		 * EEPROM
1393 		 */
1394 		mask = mask >> 1;
1395 	};
1396 
1397 	/* We leave the "DI" bit set to "0" when we leave this routine. */
1398 	eec &= ~IXGBE_EEC_DI;
1399 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1400 	IXGBE_WRITE_FLUSH(hw);
1401 }
1402 
1403 /**
1404  *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
1405  *  @hw: pointer to hardware structure
1406  **/
1407 uint16_t ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, uint16_t count)
1408 {
1409 	uint32_t eec;
1410 	uint32_t i;
1411 	uint16_t data = 0;
1412 
1413 	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
1414 
1415 	/*
1416 	 * In order to read a register from the EEPROM, we need to shift
1417 	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
1418 	 * the clock input to the EEPROM (setting the SK bit), and then reading
1419 	 * the value of the "DO" bit.  During this "shifting in" process the
1420 	 * "DI" bit should always be clear.
1421 	 */
1422 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1423 
1424 	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
1425 
1426 	for (i = 0; i < count; i++) {
1427 		data = data << 1;
1428 		ixgbe_raise_eeprom_clk(hw, &eec);
1429 
1430 		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1431 
1432 		eec &= ~(IXGBE_EEC_DI);
1433 		if (eec & IXGBE_EEC_DO)
1434 			data |= 1;
1435 
1436 		ixgbe_lower_eeprom_clk(hw, &eec);
1437 	}
1438 
1439 	return data;
1440 }
1441 
1442 /**
1443  *  ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
1444  *  @hw: pointer to hardware structure
1445  *  @eec: EEC register's current value
1446  **/
1447 void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec)
1448 {
1449 	DEBUGFUNC("ixgbe_raise_eeprom_clk");
1450 
1451 	/*
1452 	 * Raise the clock input to the EEPROM
1453 	 * (setting the SK bit), then delay
1454 	 */
1455 	*eec = *eec | IXGBE_EEC_SK;
1456 	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1457 	IXGBE_WRITE_FLUSH(hw);
1458 	usec_delay(1);
1459 }
1460 
1461 /**
1462  *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
1463  *  @hw: pointer to hardware structure
1464  *  @eecd: EECD's current value
1465  **/
1466 void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec)
1467 {
1468 	DEBUGFUNC("ixgbe_lower_eeprom_clk");
1469 
1470 	/*
1471 	 * Lower the clock input to the EEPROM (clearing the SK bit), then
1472 	 * delay
1473 	 */
1474 	*eec = *eec & ~IXGBE_EEC_SK;
1475 	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1476 	IXGBE_WRITE_FLUSH(hw);
1477 	usec_delay(1);
1478 }
1479 
1480 /**
1481  *  ixgbe_release_eeprom - Release EEPROM, release semaphores
1482  *  @hw: pointer to hardware structure
1483  **/
1484 void ixgbe_release_eeprom(struct ixgbe_hw *hw)
1485 {
1486 	uint32_t eec;
1487 
1488 	DEBUGFUNC("ixgbe_release_eeprom");
1489 
1490 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1491 
1492 	eec |= IXGBE_EEC_CS;  /* Pull CS high */
1493 	eec &= ~IXGBE_EEC_SK; /* Lower SCK */
1494 
1495 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1496 	IXGBE_WRITE_FLUSH(hw);
1497 
1498 	usec_delay(1);
1499 
1500 	/* Stop requesting EEPROM access */
1501 	eec &= ~IXGBE_EEC_REQ;
1502 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1503 
1504 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1505 
1506 	/* Delay before attempt to obtain semaphore again to allow FW access */
1507 	msec_delay(hw->eeprom.semaphore_delay);
1508 }
1509 
1510 /**
1511  *  ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
1512  *  @hw: pointer to hardware structure
1513  **/
1514 uint16_t ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
1515 {
1516 	uint16_t i;
1517 	uint16_t j;
1518 	uint16_t checksum = 0;
1519 	uint16_t length = 0;
1520 	uint16_t pointer = 0;
1521 	uint16_t word = 0;
1522 
1523 	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
1524 
1525 	/* Include 0x0-0x3F in the checksum */
1526 	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
1527 		if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) {
1528 			DEBUGOUT("EEPROM read failed\n");
1529 			break;
1530 		}
1531 		checksum += word;
1532 	}
1533 
1534 	/* Include all data from pointers except for the fw pointer */
1535 	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
1536 		hw->eeprom.ops.read(hw, i, &pointer);
1537 
1538 		/* Make sure the pointer seems valid */
1539 		if (pointer != 0xFFFF && pointer != 0) {
1540 			hw->eeprom.ops.read(hw, pointer, &length);
1541 
1542 			if (length != 0xFFFF && length != 0) {
1543 				for (j = pointer+1; j <= pointer+length; j++) {
1544 					hw->eeprom.ops.read(hw, j, &word);
1545 					checksum += word;
1546 				}
1547 			}
1548 		}
1549 	}
1550 
1551 	checksum = (uint16_t)IXGBE_EEPROM_SUM - checksum;
1552 
1553 	return checksum;
1554 }
1555 
1556 /**
1557  *  ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
1558  *  @hw: pointer to hardware structure
1559  *  @checksum_val: calculated checksum
1560  *
1561  *  Performs checksum calculation and validates the EEPROM checksum.  If the
1562  *  caller does not need checksum_val, the value can be NULL.
1563  **/
1564 int32_t ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
1565 					       uint16_t *checksum_val)
1566 {
1567 	int32_t status;
1568 	uint16_t checksum;
1569 	uint16_t read_checksum = 0;
1570 
1571 	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
1572 
1573 	/*
1574 	 * Read the first word from the EEPROM. If this times out or fails, do
1575 	 * not continue or we could be in for a very long wait while every
1576 	 * EEPROM read fails
1577 	 */
1578 	status = hw->eeprom.ops.read(hw, 0, &checksum);
1579 
1580 	if (status == IXGBE_SUCCESS) {
1581 		checksum = hw->eeprom.ops.calc_checksum(hw);
1582 
1583 		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
1584 
1585 		/*
1586 		 * Verify read checksum from EEPROM is the same as
1587 		 * calculated checksum
1588 		 */
1589 		if (read_checksum != checksum)
1590 			status = IXGBE_ERR_EEPROM_CHECKSUM;
1591 
1592 		/* If the user cares, return the calculated checksum */
1593 		if (checksum_val)
1594 			*checksum_val = checksum;
1595 	} else {
1596 		DEBUGOUT("EEPROM read failed\n");
1597 	}
1598 
1599 	return status;
1600 }
1601 
1602 /**
1603  *  ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
1604  *  @hw: pointer to hardware structure
1605  **/
1606 int32_t ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1607 {
1608 	int32_t status;
1609 	uint16_t checksum;
1610 
1611 	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
1612 
1613 	/*
1614 	 * Read the first word from the EEPROM. If this times out or fails, do
1615 	 * not continue or we could be in for a very long wait while every
1616 	 * EEPROM read fails
1617 	 */
1618 	status = hw->eeprom.ops.read(hw, 0, &checksum);
1619 
1620 	if (status == IXGBE_SUCCESS) {
1621 		checksum = hw->eeprom.ops.calc_checksum(hw);
1622 		status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
1623 					      checksum);
1624 	} else {
1625 		DEBUGOUT("EEPROM read failed\n");
1626 	}
1627 
1628 	return status;
1629 }
1630 
1631 /**
1632  *  ixgbe_validate_mac_addr - Validate MAC address
1633  *  @mac_addr: pointer to MAC address.
1634  *
1635  *  Tests a MAC address to ensure it is a valid Individual Address
1636  **/
1637 int32_t ixgbe_validate_mac_addr(uint8_t *mac_addr)
1638 {
1639 	int32_t status = IXGBE_SUCCESS;
1640 
1641 	DEBUGFUNC("ixgbe_validate_mac_addr");
1642 
1643 	/* Make sure it is not a multicast address */
1644 	if (IXGBE_IS_MULTICAST(mac_addr)) {
1645 		DEBUGOUT("MAC address is multicast\n");
1646 		status = IXGBE_ERR_INVALID_MAC_ADDR;
1647 	/* Not a broadcast address */
1648 	} else if (IXGBE_IS_BROADCAST(mac_addr)) {
1649 		DEBUGOUT("MAC address is broadcast\n");
1650 		status = IXGBE_ERR_INVALID_MAC_ADDR;
1651 	/* Reject the zero address */
1652 	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
1653 		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
1654 		DEBUGOUT("MAC address is all zeros\n");
1655 		status = IXGBE_ERR_INVALID_MAC_ADDR;
1656 	}
1657 	return status;
1658 }
1659 
1660 /**
1661  *  ixgbe_set_rar_generic - Set Rx address register
1662  *  @hw: pointer to hardware structure
1663  *  @index: Receive address register to write
1664  *  @addr: Address to put into receive address register
1665  *  @vmdq: VMDq "set" or "pool" index
1666  *  @enable_addr: set flag that address is active
1667  *
1668  *  Puts an ethernet address into a receive address register.
1669  **/
1670 int32_t ixgbe_set_rar_generic(struct ixgbe_hw *hw, uint32_t index, uint8_t *addr,
1671 			      uint32_t vmdq, uint32_t enable_addr)
1672 {
1673 	uint32_t rar_low, rar_high;
1674 	uint32_t rar_entries = hw->mac.num_rar_entries;
1675 
1676 	DEBUGFUNC("ixgbe_set_rar_generic");
1677 
1678 	/* Make sure we are using a valid rar index range */
1679 	if (index >= rar_entries) {
1680 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
1681 			     "RAR index %d is out of range.\n", index);
1682 		return IXGBE_ERR_INVALID_ARGUMENT;
1683 	}
1684 
1685 	/* setup VMDq pool selection before this RAR gets enabled */
1686 	hw->mac.ops.set_vmdq(hw, index, vmdq);
1687 
1688 	/*
1689 	 * HW expects these in little endian so we reverse the byte
1690 	 * order from network order (big endian) to little endian
1691 	 */
1692 	rar_low = ((uint32_t)addr[0] |
1693 		   ((uint32_t)addr[1] << 8) |
1694 		   ((uint32_t)addr[2] << 16) |
1695 		   ((uint32_t)addr[3] << 24));
1696 	/*
1697 	 * Some parts put the VMDq setting in the extra RAH bits,
1698 	 * so save everything except the lower 16 bits that hold part
1699 	 * of the address and the address valid bit.
1700 	 */
1701 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1702 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1703 	rar_high |= ((uint32_t)addr[4] | ((uint32_t)addr[5] << 8));
1704 
1705 	if (enable_addr != 0)
1706 		rar_high |= IXGBE_RAH_AV;
1707 
1708 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
1709 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1710 
1711 	return IXGBE_SUCCESS;
1712 }
1713 
1714 /**
1715  *  ixgbe_clear_rar_generic - Remove Rx address register
1716  *  @hw: pointer to hardware structure
1717  *  @index: Receive address register to write
1718  *
1719  *  Clears an ethernet address from a receive address register.
1720  **/
1721 int32_t ixgbe_clear_rar_generic(struct ixgbe_hw *hw, uint32_t index)
1722 {
1723 	uint32_t rar_high;
1724 	uint32_t rar_entries = hw->mac.num_rar_entries;
1725 
1726 	DEBUGFUNC("ixgbe_clear_rar_generic");
1727 
1728 	/* Make sure we are using a valid rar index range */
1729 	if (index >= rar_entries) {
1730 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
1731 			     "RAR index %d is out of range.\n", index);
1732 		return IXGBE_ERR_INVALID_ARGUMENT;
1733 	}
1734 
1735 	/*
1736 	 * Some parts put the VMDq setting in the extra RAH bits,
1737 	 * so save everything except the lower 16 bits that hold part
1738 	 * of the address and the address valid bit.
1739 	 */
1740 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1741 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1742 
1743 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1744 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1745 
1746 	/* clear VMDq pool/queue selection for this RAR */
1747 	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
1748 
1749 	return IXGBE_SUCCESS;
1750 }
1751 
1752 /**
1753  *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
1754  *  @hw: pointer to hardware structure
1755  *
1756  *  Places the MAC address in receive address register 0 and clears the rest
1757  *  of the receive address registers. Clears the multicast table. Assumes
1758  *  the receiver is in reset when the routine is called.
1759  **/
1760 int32_t ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1761 {
1762 	uint32_t i;
1763 	uint32_t rar_entries = hw->mac.num_rar_entries;
1764 
1765 	DEBUGFUNC("ixgbe_init_rx_addrs_generic");
1766 
1767 	/*
1768 	 * If the current mac address is valid, assume it is a software override
1769 	 * to the permanent address.
1770 	 * Otherwise, use the permanent address from the eeprom.
1771 	 */
1772 	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
1773 	    IXGBE_ERR_INVALID_MAC_ADDR) {
1774 		/* Get the MAC address from the RAR0 for later reference */
1775 		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
1776 
1777 		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
1778 			  hw->mac.addr[0], hw->mac.addr[1],
1779 			  hw->mac.addr[2]);
1780 		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
1781 			  hw->mac.addr[4], hw->mac.addr[5]);
1782 	} else {
1783 		/* Setup the receive address. */
1784 		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
1785 		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
1786 			  hw->mac.addr[0], hw->mac.addr[1],
1787 			  hw->mac.addr[2]);
1788 		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
1789 			  hw->mac.addr[4], hw->mac.addr[5]);
1790 
1791 		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1792 
1793 		/* clear VMDq pool/queue selection for RAR 0 */
1794 		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
1795 	}
1796 	hw->addr_ctrl.overflow_promisc = 0;
1797 
1798 	hw->addr_ctrl.rar_used_count = 1;
1799 
1800 	/* Zero out the other receive addresses. */
1801 	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
1802 	for (i = 1; i < rar_entries; i++) {
1803 		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
1804 		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
1805 	}
1806 
1807 	/* Clear the MTA */
1808 	hw->addr_ctrl.mta_in_use = 0;
1809 	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1810 
1811 	DEBUGOUT(" Clearing MTA\n");
1812 	for (i = 0; i < hw->mac.mcft_size; i++)
1813 		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
1814 
1815 	ixgbe_init_uta_tables(hw);
1816 
1817 	return IXGBE_SUCCESS;
1818 }
1819 
1820 /**
1821  *  ixgbe_add_uc_addr - Adds a secondary unicast address.
1822  *  @hw: pointer to hardware structure
1823  *  @addr: new address
1824  *
1825  *  Adds it to unused receive address register or goes into promiscuous mode.
1826  **/
1827 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, uint8_t *addr, uint32_t vmdq)
1828 {
1829 	uint32_t rar_entries = hw->mac.num_rar_entries;
1830 	uint32_t rar;
1831 
1832 	DEBUGFUNC("ixgbe_add_uc_addr");
1833 
1834 	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
1835 		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
1836 
1837 	/*
1838 	 * Place this address in the RAR if there is room,
1839 	 * else put the controller into promiscuous mode
1840 	 */
1841 	if (hw->addr_ctrl.rar_used_count < rar_entries) {
1842 		rar = hw->addr_ctrl.rar_used_count;
1843 		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
1844 		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
1845 		hw->addr_ctrl.rar_used_count++;
1846 	} else {
1847 		hw->addr_ctrl.overflow_promisc++;
1848 	}
1849 
1850 	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
1851 }
1852 
1853 /**
1854  *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
1855  *  @hw: pointer to hardware structure
1856  *  @addr_list: the list of new addresses
1857  *  @addr_count: number of addresses
1858  *  @next: iterator function to walk the address list
1859  *
1860  *  The given list replaces any existing list.  Clears the secondary addrs from
1861  *  receive address registers.  Uses unused receive address registers for the
1862  *  first secondary addresses, and falls back to promiscuous mode as needed.
1863  *
1864  *  Drivers using secondary unicast addresses must set user_set_promisc when
1865  *  manually putting the device into promiscuous mode.
1866  **/
1867 int32_t ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, uint8_t *addr_list,
1868 					  uint32_t addr_count, ixgbe_mc_addr_itr next)
1869 {
1870 	uint8_t *addr;
1871 	uint32_t i;
1872 	uint32_t old_promisc_setting = hw->addr_ctrl.overflow_promisc;
1873 	uint32_t uc_addr_in_use;
1874 	uint32_t fctrl;
1875 	uint32_t vmdq;
1876 
1877 	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
1878 
1879 	/*
1880 	 * Clear accounting of old secondary address list,
1881 	 * don't count RAR[0]
1882 	 */
1883 	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
1884 	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
1885 	hw->addr_ctrl.overflow_promisc = 0;
1886 
1887 	/* Zero out the other receive addresses */
1888 	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
1889 	for (i = 0; i < uc_addr_in_use; i++) {
1890 		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
1891 		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
1892 	}
1893 
1894 	/* Add the new addresses */
1895 	for (i = 0; i < addr_count; i++) {
1896 		DEBUGOUT(" Adding the secondary addresses:\n");
1897 		addr = next(hw, &addr_list, &vmdq);
1898 		ixgbe_add_uc_addr(hw, addr, vmdq);
1899 	}
1900 
1901 	if (hw->addr_ctrl.overflow_promisc) {
1902 		/* enable promisc if not already in overflow or set by user */
1903 		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
1904 			DEBUGOUT(" Entering address overflow promisc mode\n");
1905 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1906 			fctrl |= IXGBE_FCTRL_UPE;
1907 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1908 		}
1909 	} else {
1910 		/* only disable if set by overflow, not by user */
1911 		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
1912 			DEBUGOUT(" Leaving address overflow promisc mode\n");
1913 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1914 			fctrl &= ~IXGBE_FCTRL_UPE;
1915 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1916 		}
1917 	}
1918 
1919 	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
1920 	return IXGBE_SUCCESS;
1921 }
1922 
1923 /**
1924  *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
1925  *  @hw: pointer to hardware structure
1926  *  @mc_addr: the multicast address
1927  *
1928  *  Extracts the 12 bits, from a multicast address, to determine which
1929  *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
1930  *  incoming rx multicast addresses, to determine the bit-vector to check in
1931  *  the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
1932  *  by the MO field of the MCSTCTRL. The MO field is set during initialization
1933  *  to mc_filter_type.
1934  **/
1935 int32_t ixgbe_mta_vector(struct ixgbe_hw *hw, uint8_t *mc_addr)
1936 {
1937 	uint32_t vector = 0;
1938 
1939 	DEBUGFUNC("ixgbe_mta_vector");
1940 
1941 	switch (hw->mac.mc_filter_type) {
1942 	case 0:   /* use bits [47:36] of the address */
1943 		vector = ((mc_addr[4] >> 4) | (((uint16_t)mc_addr[5]) << 4));
1944 		break;
1945 	case 1:   /* use bits [46:35] of the address */
1946 		vector = ((mc_addr[4] >> 3) | (((uint16_t)mc_addr[5]) << 5));
1947 		break;
1948 	case 2:   /* use bits [45:34] of the address */
1949 		vector = ((mc_addr[4] >> 2) | (((uint16_t)mc_addr[5]) << 6));
1950 		break;
1951 	case 3:   /* use bits [43:32] of the address */
1952 		vector = ((mc_addr[4]) | (((uint16_t)mc_addr[5]) << 8));
1953 		break;
1954 	default:  /* Invalid mc_filter_type */
1955 		DEBUGOUT("MC filter type param set incorrectly\n");
1956 		panic("incorrect multicast filter type");
1957 		break;
1958 	}
1959 
1960 	/* vector can only be 12-bits or boundary will be exceeded */
1961 	vector &= 0xFFF;
1962 	return vector;
1963 }
1964 
1965 /**
1966  *  ixgbe_set_mta - Set bit-vector in multicast table
1967  *  @hw: pointer to hardware structure
1968  *  @hash_value: Multicast address hash value
1969  *
1970  *  Sets the bit-vector in the multicast table.
1971  **/
1972 void ixgbe_set_mta(struct ixgbe_hw *hw, uint8_t *mc_addr)
1973 {
1974 	uint32_t vector;
1975 	uint32_t vector_bit;
1976 	uint32_t vector_reg;
1977 
1978 	DEBUGFUNC("ixgbe_set_mta");
1979 
1980 	hw->addr_ctrl.mta_in_use++;
1981 
1982 	vector = ixgbe_mta_vector(hw, mc_addr);
1983 	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
1984 
1985 	/*
1986 	 * The MTA is a register array of 128 32-bit registers. It is treated
1987 	 * like an array of 4096 bits.  We want to set bit
1988 	 * BitArray[vector_value]. So we figure out what register the bit is
1989 	 * in, read it, OR in the new bit, then write back the new value.  The
1990 	 * register is determined by the upper 7 bits of the vector value and
1991 	 * the bit within that register are determined by the lower 5 bits of
1992 	 * the value.
1993 	 */
1994 	vector_reg = (vector >> 5) & 0x7F;
1995 	vector_bit = vector & 0x1F;
1996 	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
1997 }
1998 
1999 /**
2000  *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2001  *  @hw: pointer to hardware structure
2002  *  @mc_addr_list: the list of new multicast addresses
2003  *  @mc_addr_count: number of addresses
2004  *  @next: iterator function to walk the multicast address list
2005  *  @clear: flag, when set clears the table beforehand
2006  *
2007  *  When the clear flag is set, the given list replaces any existing list.
2008  *  Hashes the given addresses into the multicast table.
2009  **/
2010 int32_t ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, uint8_t *mc_addr_list,
2011 					  uint32_t mc_addr_count, ixgbe_mc_addr_itr next,
2012 					  bool clear)
2013 {
2014 	uint32_t i;
2015 	uint32_t vmdq;
2016 
2017 	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2018 
2019 	/*
2020 	 * Set the new number of MC addresses that we are being requested to
2021 	 * use.
2022 	 */
2023 	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2024 	hw->addr_ctrl.mta_in_use = 0;
2025 
2026 	/* Clear mta_shadow */
2027 	if (clear) {
2028 		DEBUGOUT(" Clearing MTA\n");
2029 		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2030 	}
2031 
2032 	/* Update mta_shadow */
2033 	for (i = 0; i < mc_addr_count; i++) {
2034 		DEBUGOUT(" Adding the multicast addresses:\n");
2035 		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
2036 	}
2037 
2038 	/* Enable mta */
2039 	for (i = 0; i < hw->mac.mcft_size; i++)
2040 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2041 				      hw->mac.mta_shadow[i]);
2042 
2043 	if (hw->addr_ctrl.mta_in_use > 0)
2044 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2045 				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2046 
2047 	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2048 	return IXGBE_SUCCESS;
2049 }
2050 
2051 /**
2052  *  ixgbe_enable_mc_generic - Enable multicast address in RAR
2053  *  @hw: pointer to hardware structure
2054  *
2055  *  Enables multicast address in RAR and the use of the multicast hash table.
2056  **/
2057 int32_t ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2058 {
2059 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2060 
2061 	DEBUGFUNC("ixgbe_enable_mc_generic");
2062 
2063 	if (a->mta_in_use > 0)
2064 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2065 				hw->mac.mc_filter_type);
2066 
2067 	return IXGBE_SUCCESS;
2068 }
2069 
2070 /**
2071  *  ixgbe_disable_mc_generic - Disable multicast address in RAR
2072  *  @hw: pointer to hardware structure
2073  *
2074  *  Disables multicast address in RAR and the use of the multicast hash table.
2075  **/
2076 int32_t ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2077 {
2078 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2079 
2080 	DEBUGFUNC("ixgbe_disable_mc_generic");
2081 
2082 	if (a->mta_in_use > 0)
2083 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2084 
2085 	return IXGBE_SUCCESS;
2086 }
2087 
2088 /**
2089  *  ixgbe_fc_enable_generic - Enable flow control
2090  *  @hw: pointer to hardware structure
2091  *
2092  *  Enable flow control according to the current settings.
2093  **/
2094 int32_t ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2095 {
2096 	int32_t ret_val = IXGBE_SUCCESS;
2097 	uint32_t mflcn_reg, fccfg_reg;
2098 	uint32_t reg;
2099 	uint32_t fcrtl, fcrth;
2100 	int i;
2101 
2102 	DEBUGFUNC("ixgbe_fc_enable_generic");
2103 
2104 	/* Validate the water mark configuration */
2105 	if (!hw->fc.pause_time) {
2106 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2107 		goto out;
2108 	}
2109 
2110 	/* Low water mark of zero causes XOFF floods */
2111 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2112 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2113 		    hw->fc.high_water[i]) {
2114 			if (!hw->fc.low_water[i] ||
2115 			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2116 				DEBUGOUT("Invalid water mark configuration\n");
2117 				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2118 				goto out;
2119 			}
2120 		}
2121 	}
2122 
2123 	/* Negotiate the fc mode to use */
2124 	ixgbe_fc_autoneg(hw);
2125 
2126 	/* Disable any previous flow control settings */
2127 	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2128 	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
2129 
2130 	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2131 	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2132 
2133 	/*
2134 	 * The possible values of fc.current_mode are:
2135 	 * 0: Flow control is completely disabled
2136 	 * 1: Rx flow control is enabled (we can receive pause frames,
2137 	 *    but not send pause frames).
2138 	 * 2: Tx flow control is enabled (we can send pause frames but
2139 	 *    we do not support receiving pause frames).
2140 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2141 	 * other: Invalid.
2142 	 */
2143 	switch (hw->fc.current_mode) {
2144 	case ixgbe_fc_none:
2145 		/*
2146 		 * Flow control is disabled by software override or autoneg.
2147 		 * The code below will actually disable it in the HW.
2148 		 */
2149 		break;
2150 	case ixgbe_fc_rx_pause:
2151 		/*
2152 		 * Rx Flow control is enabled and Tx Flow control is
2153 		 * disabled by software override. Since there really
2154 		 * isn't a way to advertise that we are capable of RX
2155 		 * Pause ONLY, we will advertise that we support both
2156 		 * symmetric and asymmetric Rx PAUSE.  Later, we will
2157 		 * disable the adapter's ability to send PAUSE frames.
2158 		 */
2159 		mflcn_reg |= IXGBE_MFLCN_RFCE;
2160 		break;
2161 	case ixgbe_fc_tx_pause:
2162 		/*
2163 		 * Tx Flow control is enabled, and Rx Flow control is
2164 		 * disabled by software override.
2165 		 */
2166 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2167 		break;
2168 	case ixgbe_fc_full:
2169 		/* Flow control (both Rx and Tx) is enabled by SW override. */
2170 		mflcn_reg |= IXGBE_MFLCN_RFCE;
2171 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2172 		break;
2173 	default:
2174 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
2175 			     "Flow control param set incorrectly\n");
2176 		ret_val = IXGBE_ERR_CONFIG;
2177 		goto out;
2178 		break;
2179 	}
2180 
2181 	/* Set 802.3x based flow control settings. */
2182 	mflcn_reg |= IXGBE_MFLCN_DPF;
2183 	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2184 	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2185 
2186 
2187 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
2188 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2189 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2190 		    hw->fc.high_water[i]) {
2191 			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2192 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2193 			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2194 		} else {
2195 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2196 			/*
2197 			 * In order to prevent Tx hangs when the internal Tx
2198 			 * switch is enabled we must set the high water mark
2199 			 * to the maximum FCRTH value.  This allows the Tx
2200 			 * switch to function even under heavy Rx workloads.
2201 			 */
2202 			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
2203 		}
2204 
2205 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2206 	}
2207 
2208 	/* Configure pause time (2 TCs per register) */
2209 	reg = hw->fc.pause_time * 0x00010001;
2210 	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2211 		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2212 
2213 	/* Configure flow control refresh threshold value */
2214 	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2215 
2216 out:
2217 	return ret_val;
2218 }
2219 
2220 /**
2221  *  ixgbe_negotiate_fc - Negotiate flow control
2222  *  @hw: pointer to hardware structure
2223  *  @adv_reg: flow control advertised settings
2224  *  @lp_reg: link partner's flow control settings
2225  *  @adv_sym: symmetric pause bit in advertisement
2226  *  @adv_asm: asymmetric pause bit in advertisement
2227  *  @lp_sym: symmetric pause bit in link partner advertisement
2228  *  @lp_asm: asymmetric pause bit in link partner advertisement
2229  *
2230  *  Find the intersection between advertised settings and link partner's
2231  *  advertised settings
2232  **/
2233 int32_t ixgbe_negotiate_fc(struct ixgbe_hw *hw, uint32_t adv_reg,
2234 			   uint32_t lp_reg, uint32_t adv_sym,
2235 			   uint32_t adv_asm, uint32_t lp_sym,
2236 			   uint32_t lp_asm)
2237 {
2238 	if ((!(adv_reg)) ||  (!(lp_reg))) {
2239 		ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2240 			     "Local or link partner's advertised flow control "
2241 			     "settings are NULL. Local: %x, link partner: %x\n",
2242 			     adv_reg, lp_reg);
2243 		return IXGBE_ERR_FC_NOT_NEGOTIATED;
2244 	}
2245 
2246 	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2247 		/*
2248 		 * Now we need to check if the user selected Rx ONLY
2249 		 * of pause frames.  In this case, we had to advertise
2250 		 * FULL flow control because we could not advertise RX
2251 		 * ONLY. Hence, we must now check to see if we need to
2252 		 * turn OFF the TRANSMISSION of PAUSE frames.
2253 		 */
2254 		if (hw->fc.requested_mode == ixgbe_fc_full) {
2255 			hw->fc.current_mode = ixgbe_fc_full;
2256 			DEBUGOUT("Flow Control = FULL.\n");
2257 		} else {
2258 			hw->fc.current_mode = ixgbe_fc_rx_pause;
2259 			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2260 		}
2261 	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2262 		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2263 		hw->fc.current_mode = ixgbe_fc_tx_pause;
2264 		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2265 	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2266 		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2267 		hw->fc.current_mode = ixgbe_fc_rx_pause;
2268 		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2269 	} else {
2270 		hw->fc.current_mode = ixgbe_fc_none;
2271 		DEBUGOUT("Flow Control = NONE.\n");
2272 	}
2273 	return IXGBE_SUCCESS;
2274 }
2275 
2276 /**
2277  *  ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2278  *  @hw: pointer to hardware structure
2279  *
2280  *  Enable flow control according on 1 gig fiber.
2281  **/
2282 int32_t ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2283 {
2284 	uint32_t pcs_anadv_reg, pcs_lpab_reg, linkstat;
2285 	int32_t ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2286 
2287 	/*
2288 	 * On multispeed fiber at 1g, bail out if
2289 	 * - link is up but AN did not complete, or if
2290 	 * - link is up and AN completed but timed out
2291 	 */
2292 
2293 	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2294 	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2295 	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
2296 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
2297 			     "Auto-Negotiation did not complete or timed out");
2298 		goto out;
2299 	}
2300 
2301 	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2302 	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2303 
2304 	ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2305 				      pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2306 				      IXGBE_PCS1GANA_ASM_PAUSE,
2307 				      IXGBE_PCS1GANA_SYM_PAUSE,
2308 				      IXGBE_PCS1GANA_ASM_PAUSE);
2309 
2310 out:
2311 	return ret_val;
2312 }
2313 
2314 /**
2315  *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2316  *  @hw: pointer to hardware structure
2317  *
2318  *  Enable flow control according to IEEE clause 37.
2319  **/
2320 int32_t ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2321 {
2322 	uint32_t links2, anlp1_reg, autoc_reg, links;
2323 	int32_t ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2324 
2325 	/*
2326 	 * On backplane, bail out if
2327 	 * - backplane autoneg was not completed, or if
2328 	 * - we are 82599 and link partner is not AN enabled
2329 	 */
2330 	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2331 	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
2332 		ERROR_REPORT1(IXGBE_ERROR_POLLING,
2333 			     "Auto-Negotiation did not complete");
2334 		goto out;
2335 	}
2336 
2337 	if (hw->mac.type == ixgbe_mac_82599EB) {
2338 		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2339 		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
2340 			ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
2341 				     "Link partner is not AN enabled");
2342 			goto out;
2343 		}
2344 	}
2345 	/*
2346 	 * Read the 10g AN autoc and LP ability registers and resolve
2347 	 * local flow control settings accordingly
2348 	 */
2349 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2350 	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2351 
2352 	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2353 		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2354 		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2355 
2356 out:
2357 	return ret_val;
2358 }
2359 
2360 /**
2361  *  ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2362  *  @hw: pointer to hardware structure
2363  *
2364  *  Enable flow control according to IEEE clause 37.
2365  **/
2366 int32_t ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2367 {
2368 	uint16_t technology_ability_reg = 0;
2369 	uint16_t lp_technology_ability_reg = 0;
2370 
2371 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2372 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2373 			     &technology_ability_reg);
2374 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
2375 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2376 			     &lp_technology_ability_reg);
2377 
2378 	return ixgbe_negotiate_fc(hw, (uint32_t)technology_ability_reg,
2379 				  (uint32_t)lp_technology_ability_reg,
2380 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2381 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2382 }
2383 
2384 /**
2385  *  ixgbe_fc_autoneg - Configure flow control
2386  *  @hw: pointer to hardware structure
2387  *
2388  *  Compares our advertised flow control capabilities to those advertised by
2389  *  our link partner, and determines the proper flow control mode to use.
2390  **/
2391 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2392 {
2393 	int32_t ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2394 	ixgbe_link_speed speed;
2395 	bool link_up;
2396 
2397 	DEBUGFUNC("ixgbe_fc_autoneg");
2398 
2399 	/*
2400 	 * AN should have completed when the cable was plugged in.
2401 	 * Look for reasons to bail out.  Bail out if:
2402 	 * - FC autoneg is disabled, or if
2403 	 * - link is not up.
2404 	 */
2405 	if (hw->fc.disable_fc_autoneg) {
2406 		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
2407 			     "Flow control autoneg is disabled");
2408 		goto out;
2409 	}
2410 
2411 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
2412 	if (!link_up) {
2413 		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
2414 		goto out;
2415 	}
2416 
2417 	switch (hw->phy.media_type) {
2418 	/* Autoneg flow control on fiber adapters */
2419 	case ixgbe_media_type_fiber_fixed:
2420 	case ixgbe_media_type_fiber:
2421 		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2422 			ret_val = ixgbe_fc_autoneg_fiber(hw);
2423 		break;
2424 
2425 	/* Autoneg flow control on backplane adapters */
2426 	case ixgbe_media_type_backplane:
2427 		ret_val = ixgbe_fc_autoneg_backplane(hw);
2428 		break;
2429 
2430 	/* Autoneg flow control on copper adapters */
2431 	case ixgbe_media_type_copper:
2432 		if (ixgbe_device_supports_autoneg_fc(hw))
2433 			ret_val = ixgbe_fc_autoneg_copper(hw);
2434 		break;
2435 
2436 	default:
2437 		break;
2438 	}
2439 
2440 out:
2441 	if (ret_val == IXGBE_SUCCESS) {
2442 		hw->fc.fc_was_autonegged = TRUE;
2443 		hw->fc.current_mode = hw->fc.requested_mode;
2444 	} else {
2445 		hw->fc.fc_was_autonegged = FALSE;
2446 	}
2447 }
2448 
2449 /*
2450  * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
2451  * @hw: pointer to hardware structure
2452  *
2453  * System-wide timeout range is encoded in PCIe Device Control2 register.
2454  *
2455  * Add 10% to specified maximum and return the number of times to poll for
2456  * completion timeout, in units of 100 microsec.  Never return less than
2457  * 800 = 80 millisec.
2458  */
2459 static uint32_t ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
2460 {
2461 	int16_t devctl2;
2462 	uint32_t pollcnt;
2463 
2464 	devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
2465 	devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
2466 
2467 	switch (devctl2) {
2468 	case IXGBE_PCIDEVCTRL2_65_130ms:
2469 		pollcnt = 1300;		/* 130 millisec */
2470 		break;
2471 	case IXGBE_PCIDEVCTRL2_260_520ms:
2472 		pollcnt = 5200;		/* 520 millisec */
2473 		break;
2474 	case IXGBE_PCIDEVCTRL2_1_2s:
2475 		pollcnt = 20000;	/* 2 sec */
2476 		break;
2477 	case IXGBE_PCIDEVCTRL2_4_8s:
2478 		pollcnt = 80000;	/* 8 sec */
2479 		break;
2480 	case IXGBE_PCIDEVCTRL2_17_34s:
2481 		pollcnt = 34000;	/* 34 sec */
2482 		break;
2483 	case IXGBE_PCIDEVCTRL2_50_100us:	/* 100 microsecs */
2484 	case IXGBE_PCIDEVCTRL2_1_2ms:		/* 2 millisecs */
2485 	case IXGBE_PCIDEVCTRL2_16_32ms:		/* 32 millisec */
2486 	case IXGBE_PCIDEVCTRL2_16_32ms_def:	/* 32 millisec default */
2487 	default:
2488 		pollcnt = 800;		/* 80 millisec minimum */
2489 		break;
2490 	}
2491 
2492 	/* add 10% to spec maximum */
2493 	return (pollcnt * 11) / 10;
2494 }
2495 
2496 /**
2497  *  ixgbe_disable_pcie_master - Disable PCI-express master access
2498  *  @hw: pointer to hardware structure
2499  *
2500  *  Disables PCI-Express master access and verifies there are no pending
2501  *  requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
2502  *  bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
2503  *  is returned signifying master requests disabled.
2504  **/
2505 int32_t ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2506 {
2507 	int32_t status = IXGBE_SUCCESS;
2508 	uint32_t i, poll;
2509 
2510 	DEBUGFUNC("ixgbe_disable_pcie_master");
2511 
2512 	/* Always set this bit to ensure any future transactions are blocked */
2513 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
2514 
2515 	/* Exit if master requests are blocked */
2516 	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2517 		goto out;
2518 
2519 	/* Poll for master request bit to clear */
2520 	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2521 		usec_delay(100);
2522 		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2523 			goto out;
2524 	}
2525 
2526 	/*
2527 	 * Two consecutive resets are required via CTRL.RST per datasheet
2528 	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
2529 	 * of this need.  The first reset prevents new master requests from
2530 	 * being issued by our device.  We then must wait 1usec or more for any
2531 	 * remaining completions from the PCIe bus to trickle in, and then reset
2532 	 * again to clear out any effects they may have had on our device.
2533 	 */
2534 	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
2535 	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2536 
2537 	/*
2538 	 * Before proceeding, make sure that the PCIe block does not have
2539 	 * transactions pending.
2540 	 */
2541 	poll = ixgbe_pcie_timeout_poll(hw);
2542 	for (i = 0; i < poll; i++) {
2543 		usec_delay(100);
2544 		if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
2545 		    IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
2546 			goto out;
2547 	}
2548 
2549 	ERROR_REPORT1(IXGBE_ERROR_POLLING,
2550 		     "PCIe transaction pending bit also did not clear.\n");
2551 	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
2552 
2553 out:
2554 	return status;
2555 }
2556 
2557 /**
2558  *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
2559  *  @hw: pointer to hardware structure
2560  *  @mask: Mask to specify which semaphore to acquire
2561  *
2562  *  Acquires the SWFW semaphore through the GSSR register for the specified
2563  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
2564  **/
2565 int32_t ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, uint16_t mask)
2566 {
2567 	uint32_t gssr = 0;
2568 	uint32_t swmask = mask;
2569 	uint32_t fwmask = mask << 5;
2570 	uint32_t timeout = 200;
2571 	uint32_t i;
2572 
2573 	DEBUGFUNC("ixgbe_acquire_swfw_sync");
2574 
2575 	for (i = 0; i < timeout; i++) {
2576 		/*
2577 		 * SW NVM semaphore bit is used for access to all
2578 		 * SW_FW_SYNC bits (not just NVM)
2579 		 */
2580 		if (ixgbe_get_eeprom_semaphore(hw))
2581 			return IXGBE_ERR_SWFW_SYNC;
2582 
2583 		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2584 		if (!(gssr & (fwmask | swmask))) {
2585 			gssr |= swmask;
2586 			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2587 			ixgbe_release_eeprom_semaphore(hw);
2588 			return IXGBE_SUCCESS;
2589 		} else {
2590 			/* Resource is currently in use by FW or SW */
2591 			ixgbe_release_eeprom_semaphore(hw);
2592 			msec_delay(5);
2593 		}
2594 	}
2595 
2596 	/* If time expired clear the bits holding the lock and retry */
2597 	if (gssr & (fwmask | swmask))
2598 		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
2599 
2600 	msec_delay(5);
2601 	return IXGBE_ERR_SWFW_SYNC;
2602 }
2603 
2604 /**
2605  *  ixgbe_release_swfw_sync - Release SWFW semaphore
2606  *  @hw: pointer to hardware structure
2607  *  @mask: Mask to specify which semaphore to release
2608  *
2609  *  Releases the SWFW semaphore through the GSSR register for the specified
2610  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
2611  **/
2612 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, uint16_t mask)
2613 {
2614 	uint32_t gssr;
2615 	uint32_t swmask = mask;
2616 
2617 	DEBUGFUNC("ixgbe_release_swfw_sync");
2618 
2619 	ixgbe_get_eeprom_semaphore(hw);
2620 
2621 	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2622 	gssr &= ~swmask;
2623 	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2624 
2625 	ixgbe_release_eeprom_semaphore(hw);
2626 }
2627 
2628 /**
2629  *  ixgbe_disable_sec_rx_path_generic - Stops the receive data path
2630  *  @hw: pointer to hardware structure
2631  *
2632  *  Stops the receive data path and waits for the HW to internally empty
2633  *  the Rx security block
2634  **/
2635 int32_t ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
2636 {
2637 #define IXGBE_MAX_SECRX_POLL 40
2638 
2639 	int i;
2640 	int secrxreg;
2641 
2642 	DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
2643 
2644 
2645 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2646 	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
2647 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2648 	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
2649 		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
2650 		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2651 			break;
2652 		else
2653 			/* Use interrupt-safe sleep just in case */
2654 			usec_delay(1000);
2655 	}
2656 
2657 	/* For informational purposes only */
2658 	if (i >= IXGBE_MAX_SECRX_POLL)
2659 		DEBUGOUT("Rx unit being enabled before security "
2660 			 "path fully disabled.  Continuing with init.\n");
2661 
2662 	return IXGBE_SUCCESS;
2663 }
2664 
2665 /**
2666  *  ixgbe_enable_sec_rx_path_generic - Enables the receive data path
2667  *  @hw: pointer to hardware structure
2668  *
2669  *  Enables the receive data path.
2670  **/
2671 int32_t ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
2672 {
2673 	int secrxreg;
2674 
2675 	DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
2676 
2677 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2678 	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
2679 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2680 	IXGBE_WRITE_FLUSH(hw);
2681 
2682 	return IXGBE_SUCCESS;
2683 }
2684 
2685 /**
2686  *  ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
2687  *  @hw: pointer to hardware structure
2688  *  @regval: register value to write to RXCTRL
2689  *
2690  *  Enables the Rx DMA unit
2691  **/
2692 int32_t ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, uint32_t regval)
2693 {
2694 	DEBUGFUNC("ixgbe_enable_rx_dma_generic");
2695 
2696 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2697 
2698 	return IXGBE_SUCCESS;
2699 }
2700 
2701 /**
2702  *  ixgbe_blink_led_start_generic - Blink LED based on index.
2703  *  @hw: pointer to hardware structure
2704  *  @index: led number to blink
2705  **/
2706 int32_t ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, uint32_t index)
2707 {
2708 	ixgbe_link_speed speed = 0;
2709 	bool link_up = 0;
2710 	uint32_t autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2711 	uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2712 	int32_t ret_val = IXGBE_SUCCESS;
2713 
2714 	DEBUGFUNC("ixgbe_blink_led_start_generic");
2715 
2716 	/*
2717 	 * Link must be up to auto-blink the LEDs;
2718 	 * Force it if link is down.
2719 	 */
2720 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
2721 
2722 	if (!link_up) {
2723 		/* Need the SW/FW semaphore around AUTOC writes if 82599 and
2724 		 * LESM is on.
2725 		 */
2726 		bool got_lock = FALSE;
2727 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
2728 		    ixgbe_verify_lesm_fw_enabled(hw)) {
2729 			ret_val = hw->mac.ops.acquire_swfw_sync(hw,
2730 							IXGBE_GSSR_MAC_CSR_SM);
2731 			if (ret_val != IXGBE_SUCCESS) {
2732 				ret_val = IXGBE_ERR_SWFW_SYNC;
2733 				goto out;
2734 			}
2735 			got_lock = TRUE;
2736 		}
2737 
2738 		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2739 		autoc_reg |= IXGBE_AUTOC_FLU;
2740 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2741 		IXGBE_WRITE_FLUSH(hw);
2742 
2743 		if (got_lock)
2744 			hw->mac.ops.release_swfw_sync(hw,
2745 						      IXGBE_GSSR_MAC_CSR_SM);
2746 		msec_delay(10);
2747 	}
2748 
2749 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
2750 	led_reg |= IXGBE_LED_BLINK(index);
2751 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2752 	IXGBE_WRITE_FLUSH(hw);
2753 
2754 out:
2755 	return ret_val;
2756 }
2757 
2758 /**
2759  *  ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
2760  *  @hw: pointer to hardware structure
2761  *  @index: led number to stop blinking
2762  **/
2763 int32_t ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, uint32_t index)
2764 {
2765 	uint32_t autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2766 	uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2767 	int32_t ret_val = IXGBE_SUCCESS;
2768 	bool got_lock = FALSE;
2769 
2770 	DEBUGFUNC("ixgbe_blink_led_stop_generic");
2771 	/* Need the SW/FW semaphore around AUTOC writes if 82599 and
2772 	 * LESM is on.
2773 	 */
2774 	if ((hw->mac.type == ixgbe_mac_82599EB) &&
2775 	    ixgbe_verify_lesm_fw_enabled(hw)) {
2776 		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
2777 						IXGBE_GSSR_MAC_CSR_SM);
2778 		if (ret_val != IXGBE_SUCCESS) {
2779 			ret_val = IXGBE_ERR_SWFW_SYNC;
2780 			goto out;
2781 		}
2782 		got_lock = TRUE;
2783 	}
2784 
2785 	autoc_reg &= ~IXGBE_AUTOC_FLU;
2786 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2787 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2788 
2789 	if (hw->mac.type == ixgbe_mac_82599EB)
2790 		ixgbe_reset_pipeline(hw);
2791 
2792 	if (got_lock)
2793 		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
2794 
2795 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
2796 	led_reg &= ~IXGBE_LED_BLINK(index);
2797 	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
2798 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2799 	IXGBE_WRITE_FLUSH(hw);
2800 
2801 out:
2802 	return ret_val;
2803 }
2804 
2805 /**
2806  *  ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
2807  *  @hw: pointer to hardware structure
2808  *
2809  *  Read PCIe configuration space, and get the MSI-X vector count from
2810  *  the capabilities table.
2811  **/
2812 uint16_t ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
2813 {
2814 	uint16_t msix_count = 1;
2815 	uint16_t max_msix_count;
2816 	uint16_t pcie_offset;
2817 
2818 	switch (hw->mac.type) {
2819 	case ixgbe_mac_82598EB:
2820 		pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
2821 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
2822 		break;
2823 	case ixgbe_mac_82599EB:
2824 	case ixgbe_mac_X540:
2825 		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
2826 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
2827 		break;
2828 	default:
2829 		return msix_count;
2830 	}
2831 
2832 	DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
2833 	msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
2834 	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
2835 
2836 	/* MSI-X count is zero-based in HW */
2837 	msix_count++;
2838 
2839 	if (msix_count > max_msix_count)
2840 		msix_count = max_msix_count;
2841 
2842 	return msix_count;
2843 }
2844 
2845 /**
2846  *  ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
2847  *  @hw: pointer to hardware structure
2848  *  @addr: Address to put into receive address register
2849  *  @vmdq: VMDq pool to assign
2850  *
2851  *  Puts an ethernet address into a receive address register, or
2852  *  finds the rar that it is aleady in; adds to the pool list
2853  **/
2854 int32_t ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, uint8_t *addr, uint32_t vmdq)
2855 {
2856 	static const uint32_t NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
2857 	uint32_t first_empty_rar = NO_EMPTY_RAR_FOUND;
2858 	uint32_t rar;
2859 	uint32_t rar_low, rar_high;
2860 	uint32_t addr_low, addr_high;
2861 
2862 	DEBUGFUNC("ixgbe_insert_mac_addr_generic");
2863 
2864 	/* swap bytes for HW little endian */
2865 	addr_low  = addr[0] | (addr[1] << 8)
2866 			    | (addr[2] << 16)
2867 			    | (addr[3] << 24);
2868 	addr_high = addr[4] | (addr[5] << 8);
2869 
2870 	/*
2871 	 * Either find the mac_id in rar or find the first empty space.
2872 	 * rar_highwater points to just after the highest currently used
2873 	 * rar in order to shorten the search.  It grows when we add a new
2874 	 * rar to the top.
2875 	 */
2876 	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
2877 		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
2878 
2879 		if (((IXGBE_RAH_AV & rar_high) == 0)
2880 		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
2881 			first_empty_rar = rar;
2882 		} else if ((rar_high & 0xFFFF) == addr_high) {
2883 			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
2884 			if (rar_low == addr_low)
2885 				break;    /* found it already in the rars */
2886 		}
2887 	}
2888 
2889 	if (rar < hw->mac.rar_highwater) {
2890 		/* already there so just add to the pool bits */
2891 		ixgbe_set_vmdq(hw, rar, vmdq);
2892 	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
2893 		/* stick it into first empty RAR slot we found */
2894 		rar = first_empty_rar;
2895 		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2896 	} else if (rar == hw->mac.rar_highwater) {
2897 		/* add it to the top of the list and inc the highwater mark */
2898 		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2899 		hw->mac.rar_highwater++;
2900 	} else if (rar >= hw->mac.num_rar_entries) {
2901 		return IXGBE_ERR_INVALID_MAC_ADDR;
2902 	}
2903 
2904 	/*
2905 	 * If we found rar[0], make sure the default pool bit (we use pool 0)
2906 	 * remains cleared to be sure default pool packets will get delivered
2907 	 */
2908 	if (rar == 0)
2909 		ixgbe_clear_vmdq(hw, rar, 0);
2910 
2911 	return rar;
2912 }
2913 
2914 /**
2915  *  ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
2916  *  @hw: pointer to hardware struct
2917  *  @rar: receive address register index to disassociate
2918  *  @vmdq: VMDq pool index to remove from the rar
2919  **/
2920 int32_t ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
2921 {
2922 	uint32_t mpsar_lo, mpsar_hi;
2923 	uint32_t rar_entries = hw->mac.num_rar_entries;
2924 
2925 	DEBUGFUNC("ixgbe_clear_vmdq_generic");
2926 
2927 	/* Make sure we are using a valid rar index range */
2928 	if (rar >= rar_entries) {
2929 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2930 			     "RAR index %d is out of range.\n", rar);
2931 		return IXGBE_ERR_INVALID_ARGUMENT;
2932 	}
2933 
2934 	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2935 	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2936 
2937 	if (!mpsar_lo && !mpsar_hi)
2938 		goto done;
2939 
2940 	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
2941 		if (mpsar_lo) {
2942 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
2943 			mpsar_lo = 0;
2944 		}
2945 		if (mpsar_hi) {
2946 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
2947 			mpsar_hi = 0;
2948 		}
2949 	} else if (vmdq < 32) {
2950 		mpsar_lo &= ~(1 << vmdq);
2951 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
2952 	} else {
2953 		mpsar_hi &= ~(1 << (vmdq - 32));
2954 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
2955 	}
2956 
2957 	/* was that the last pool using this rar? */
2958 	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
2959 		hw->mac.ops.clear_rar(hw, rar);
2960 done:
2961 	return IXGBE_SUCCESS;
2962 }
2963 
2964 /**
2965  *  ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
2966  *  @hw: pointer to hardware struct
2967  *  @rar: receive address register index to associate with a VMDq index
2968  *  @vmdq: VMDq pool index
2969  **/
2970 int32_t ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
2971 {
2972 	uint32_t mpsar;
2973 	uint32_t rar_entries = hw->mac.num_rar_entries;
2974 
2975 	DEBUGFUNC("ixgbe_set_vmdq_generic");
2976 
2977 	/* Make sure we are using a valid rar index range */
2978 	if (rar >= rar_entries) {
2979 		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2980 			     "RAR index %d is out of range.\n", rar);
2981 		return IXGBE_ERR_INVALID_ARGUMENT;
2982 	}
2983 
2984 	if (vmdq < 32) {
2985 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2986 		mpsar |= 1 << vmdq;
2987 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
2988 	} else {
2989 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2990 		mpsar |= 1 << (vmdq - 32);
2991 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
2992 	}
2993 	return IXGBE_SUCCESS;
2994 }
2995 
2996 /**
2997  *  ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
2998  *  @hw: pointer to hardware structure
2999  **/
3000 int32_t ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3001 {
3002 	int i;
3003 
3004 	DEBUGFUNC("ixgbe_init_uta_tables_generic");
3005 	DEBUGOUT(" Clearing UTA\n");
3006 
3007 	for (i = 0; i < 128; i++)
3008 		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3009 
3010 	return IXGBE_SUCCESS;
3011 }
3012 
3013 /**
3014  *  ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3015  *  @hw: pointer to hardware structure
3016  *  @vlan: VLAN id to write to VLAN filter
3017  *
3018  *  return the VLVF index where this VLAN id should be placed
3019  *
3020  **/
3021 int32_t ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, uint32_t vlan)
3022 {
3023 	uint32_t bits = 0;
3024 	uint32_t first_empty_slot = 0;
3025 	int32_t regindex;
3026 
3027 	/* short cut the special case */
3028 	if (vlan == 0)
3029 		return 0;
3030 
3031 	/*
3032 	  * Search for the vlan id in the VLVF entries. Save off the first empty
3033 	  * slot found along the way
3034 	  */
3035 	for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
3036 		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3037 		if (!bits && !(first_empty_slot))
3038 			first_empty_slot = regindex;
3039 		else if ((bits & 0x0FFF) == vlan)
3040 			break;
3041 	}
3042 
3043 	/*
3044 	  * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
3045 	  * in the VLVF. Else use the first empty VLVF register for this
3046 	  * vlan id.
3047 	  */
3048 	if (regindex >= IXGBE_VLVF_ENTRIES) {
3049 		if (first_empty_slot)
3050 			regindex = first_empty_slot;
3051 		else {
3052 			ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
3053 				     "No space in VLVF.\n");
3054 			regindex = IXGBE_ERR_NO_SPACE;
3055 		}
3056 	}
3057 
3058 	return regindex;
3059 }
3060 
3061 /**
3062  *  ixgbe_set_vfta_generic - Set VLAN filter table
3063  *  @hw: pointer to hardware structure
3064  *  @vlan: VLAN id to write to VLAN filter
3065  *  @vind: VMDq output index that maps queue to VLAN id in VFVFB
3066  *  @vlan_on: boolean flag to turn on/off VLAN in VFVF
3067  *
3068  *  Turn on/off specified VLAN in the VLAN filter table.
3069  **/
3070 int32_t ixgbe_set_vfta_generic(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind,
3071 			       bool vlan_on)
3072 {
3073 	int32_t regindex;
3074 	uint32_t bitindex;
3075 	uint32_t vfta;
3076 	uint32_t targetbit;
3077 	int32_t ret_val = IXGBE_SUCCESS;
3078 	bool vfta_changed = FALSE;
3079 
3080 	DEBUGFUNC("ixgbe_set_vfta_generic");
3081 
3082 	if (vlan > 4095)
3083 		return IXGBE_ERR_PARAM;
3084 
3085 	/*
3086 	 * this is a 2 part operation - first the VFTA, then the
3087 	 * VLVF and VLVFB if VT Mode is set
3088 	 * We don't write the VFTA until we know the VLVF part succeeded.
3089 	 */
3090 
3091 	/* Part 1
3092 	 * The VFTA is a bitstring made up of 128 32-bit registers
3093 	 * that enable the particular VLAN id, much like the MTA:
3094 	 *    bits[11-5]: which register
3095 	 *    bits[4-0]:  which bit in the register
3096 	 */
3097 	regindex = (vlan >> 5) & 0x7F;
3098 	bitindex = vlan & 0x1F;
3099 	targetbit = (1 << bitindex);
3100 	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
3101 
3102 	if (vlan_on) {
3103 		if (!(vfta & targetbit)) {
3104 			vfta |= targetbit;
3105 			vfta_changed = TRUE;
3106 		}
3107 	} else {
3108 		if ((vfta & targetbit)) {
3109 			vfta &= ~targetbit;
3110 			vfta_changed = TRUE;
3111 		}
3112 	}
3113 
3114 	/* Part 2
3115 	 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
3116 	 */
3117 	ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
3118 					 &vfta_changed);
3119 	if (ret_val != IXGBE_SUCCESS)
3120 		return ret_val;
3121 
3122 	if (vfta_changed)
3123 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
3124 
3125 	return IXGBE_SUCCESS;
3126 }
3127 
3128 /**
3129  *  ixgbe_set_vlvf_generic - Set VLAN Pool Filter
3130  *  @hw: pointer to hardware structure
3131  *  @vlan: VLAN id to write to VLAN filter
3132  *  @vind: VMDq output index that maps queue to VLAN id in VFVFB
3133  *  @vlan_on: boolean flag to turn on/off VLAN in VFVF
3134  *  @vfta_changed: pointer to boolean flag which indicates whether VFTA
3135  *                 should be changed
3136  *
3137  *  Turn on/off specified bit in VLVF table.
3138  **/
3139 int32_t ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind,
3140 			       bool vlan_on, bool *vfta_changed)
3141 {
3142 	uint32_t vt;
3143 
3144 	DEBUGFUNC("ixgbe_set_vlvf_generic");
3145 
3146 	if (vlan > 4095)
3147 		return IXGBE_ERR_PARAM;
3148 
3149 	/* If VT Mode is set
3150 	 *   Either vlan_on
3151 	 *     make sure the vlan is in VLVF
3152 	 *     set the vind bit in the matching VLVFB
3153 	 *   Or !vlan_on
3154 	 *     clear the pool bit and possibly the vind
3155 	 */
3156 	vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3157 	if (vt & IXGBE_VT_CTL_VT_ENABLE) {
3158 		int32_t vlvf_index;
3159 		uint32_t bits;
3160 
3161 		vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
3162 		if (vlvf_index < 0)
3163 			return vlvf_index;
3164 
3165 		if (vlan_on) {
3166 			/* set the pool bit */
3167 			if (vind < 32) {
3168 				bits = IXGBE_READ_REG(hw,
3169 						IXGBE_VLVFB(vlvf_index * 2));
3170 				bits |= (1 << vind);
3171 				IXGBE_WRITE_REG(hw,
3172 						IXGBE_VLVFB(vlvf_index * 2),
3173 						bits);
3174 			} else {
3175 				bits = IXGBE_READ_REG(hw,
3176 					IXGBE_VLVFB((vlvf_index * 2) + 1));
3177 				bits |= (1 << (vind - 32));
3178 				IXGBE_WRITE_REG(hw,
3179 					IXGBE_VLVFB((vlvf_index * 2) + 1),
3180 					bits);
3181 			}
3182 		} else {
3183 			/* clear the pool bit */
3184 			if (vind < 32) {
3185 				bits = IXGBE_READ_REG(hw,
3186 						IXGBE_VLVFB(vlvf_index * 2));
3187 				bits &= ~(1 << vind);
3188 				IXGBE_WRITE_REG(hw,
3189 						IXGBE_VLVFB(vlvf_index * 2),
3190 						bits);
3191 				bits |= IXGBE_READ_REG(hw,
3192 					IXGBE_VLVFB((vlvf_index * 2) + 1));
3193 			} else {
3194 				bits = IXGBE_READ_REG(hw,
3195 					IXGBE_VLVFB((vlvf_index * 2) + 1));
3196 				bits &= ~(1 << (vind - 32));
3197 				IXGBE_WRITE_REG(hw,
3198 					IXGBE_VLVFB((vlvf_index * 2) + 1),
3199 					bits);
3200 				bits |= IXGBE_READ_REG(hw,
3201 						IXGBE_VLVFB(vlvf_index * 2));
3202 			}
3203 		}
3204 
3205 		/*
3206 		 * If there are still bits set in the VLVFB registers
3207 		 * for the VLAN ID indicated we need to see if the
3208 		 * caller is requesting that we clear the VFTA entry bit.
3209 		 * If the caller has requested that we clear the VFTA
3210 		 * entry bit but there are still pools/VFs using this VLAN
3211 		 * ID entry then ignore the request.  We're not worried
3212 		 * about the case where we're turning the VFTA VLAN ID
3213 		 * entry bit on, only when requested to turn it off as
3214 		 * there may be multiple pools and/or VFs using the
3215 		 * VLAN ID entry.  In that case we cannot clear the
3216 		 * VFTA bit until all pools/VFs using that VLAN ID have also
3217 		 * been cleared.  This will be indicated by "bits" being
3218 		 * zero.
3219 		 */
3220 		if (bits) {
3221 			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
3222 					(IXGBE_VLVF_VIEN | vlan));
3223 			if ((!vlan_on) && (vfta_changed != NULL)) {
3224 				/* someone wants to clear the vfta entry
3225 				 * but some pools/VFs are still using it.
3226 				 * Ignore it. */
3227 				*vfta_changed = FALSE;
3228 			}
3229 		} else
3230 			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
3231 	}
3232 
3233 	return IXGBE_SUCCESS;
3234 }
3235 
3236 /**
3237  *  ixgbe_clear_vfta_generic - Clear VLAN filter table
3238  *  @hw: pointer to hardware structure
3239  *
3240  *  Clears the VLAN filer table, and the VMDq index associated with the filter
3241  **/
3242 int32_t ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3243 {
3244 	uint32_t offset;
3245 
3246 	DEBUGFUNC("ixgbe_clear_vfta_generic");
3247 
3248 	for (offset = 0; offset < hw->mac.vft_size; offset++)
3249 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3250 
3251 	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3252 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3253 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
3254 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
3255 	}
3256 
3257 	return IXGBE_SUCCESS;
3258 }
3259 
3260 /**
3261  *  ixgbe_check_mac_link_generic - Determine link and speed status
3262  *  @hw: pointer to hardware structure
3263  *  @speed: pointer to link speed
3264  *  @link_up: TRUE when link is up
3265  *  @link_up_wait_to_complete: bool used to wait for link up or not
3266  *
3267  *  Reads the links register to determine if link is up and the current speed
3268  **/
3269 int32_t ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3270 				     bool *link_up, bool link_up_wait_to_complete)
3271 {
3272 	uint32_t links_reg, links_orig;
3273 	uint32_t i;
3274 
3275 	DEBUGFUNC("ixgbe_check_mac_link_generic");
3276 
3277 	/* clear the old state */
3278 	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
3279 
3280 	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3281 
3282 	if (links_orig != links_reg) {
3283 		DEBUGOUT2("LINKS changed from %08X to %08X\n",
3284 			  links_orig, links_reg);
3285 	}
3286 
3287 	if (link_up_wait_to_complete) {
3288 		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
3289 			if (links_reg & IXGBE_LINKS_UP) {
3290 				*link_up = TRUE;
3291 				break;
3292 			} else {
3293 				*link_up = FALSE;
3294 			}
3295 			msec_delay(100);
3296 			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3297 		}
3298 	} else {
3299 		if (links_reg & IXGBE_LINKS_UP)
3300 			*link_up = TRUE;
3301 		else
3302 			*link_up = FALSE;
3303 	}
3304 
3305 	if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3306 	    IXGBE_LINKS_SPEED_10G_82599)
3307 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
3308 	else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3309 		 IXGBE_LINKS_SPEED_1G_82599)
3310 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
3311 	else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3312 		 IXGBE_LINKS_SPEED_100_82599)
3313 		*speed = IXGBE_LINK_SPEED_100_FULL;
3314 	else
3315 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
3316 
3317 	return IXGBE_SUCCESS;
3318 }
3319 
3320 /**
3321  *  ixgbe_get_device_caps_generic - Get additional device capabilities
3322  *  @hw: pointer to hardware structure
3323  *  @device_caps: the EEPROM word with the extra device capabilities
3324  *
3325  *  This function will read the EEPROM location for the device capabilities,
3326  *  and return the word through device_caps.
3327  **/
3328 int32_t ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, uint16_t *device_caps)
3329 {
3330 	DEBUGFUNC("ixgbe_get_device_caps_generic");
3331 
3332 	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
3333 
3334 	return IXGBE_SUCCESS;
3335 }
3336 
3337 /**
3338  *  ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
3339  *  @hw: pointer to hardware structure
3340  *
3341  **/
3342 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
3343 {
3344 	uint32_t regval;
3345 	uint32_t i;
3346 
3347 	DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
3348 
3349 	/* Enable relaxed ordering */
3350 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
3351 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
3352 		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
3353 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
3354 	}
3355 
3356 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
3357 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
3358 		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
3359 			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
3360 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
3361 	}
3362 
3363 }
3364 
3365 /**
3366  * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
3367  * @hw: pointer to the hardware structure
3368  *
3369  * The 82599 and x540 MACs can experience issues if TX work is still pending
3370  * when a reset occurs.  This function prevents this by flushing the PCIe
3371  * buffers on the system.
3372  **/
3373 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
3374 {
3375 	uint32_t gcr_ext, hlreg0;
3376 
3377 	/*
3378 	 * If double reset is not requested then all transactions should
3379 	 * already be clear and as such there is no work to do
3380 	 */
3381 	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
3382 		return;
3383 
3384 	/*
3385 	 * Set loopback enable to prevent any transmits from being sent
3386 	 * should the link come up.  This assumes that the RXCTRL.RXEN bit
3387 	 * has already been cleared.
3388 	 */
3389 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3390 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
3391 
3392 	/* initiate cleaning flow for buffers in the PCIe transaction layer */
3393 	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
3394 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
3395 			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
3396 
3397 	/* Flush all writes and allow 20usec for all transactions to clear */
3398 	IXGBE_WRITE_FLUSH(hw);
3399 	usec_delay(20);
3400 
3401 	/* restore previous register values */
3402 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
3403 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3404 }
3405 
3406 /* MAC Operations */
3407 
3408 /**
3409  *  ixgbe_init_shared_code - Initialize the shared code
3410  *  @hw: pointer to hardware structure
3411  *
3412  *  This will assign function pointers and assign the MAC type and PHY code.
3413  *  Does not touch the hardware. This function must be called prior to any
3414  *  other function in the shared code. The ixgbe_hw structure should be
3415  *  memset to 0 prior to calling this function.  The following fields in
3416  *  hw structure should be filled in prior to calling this function:
3417  *  hw_addr, back, device_id, vendor_id, subsystem_device_id,
3418  *  subsystem_vendor_id, and revision_id
3419  **/
3420 int32_t ixgbe_init_shared_code(struct ixgbe_hw *hw)
3421 {
3422 	int32_t status;
3423 
3424 	DEBUGFUNC("ixgbe_init_shared_code");
3425 
3426 	switch (hw->mac.type) {
3427 	case ixgbe_mac_82598EB:
3428 		status = ixgbe_init_ops_82598(hw);
3429 		break;
3430 	case ixgbe_mac_82599EB:
3431 		status = ixgbe_init_ops_82599(hw);
3432 		break;
3433 	case ixgbe_mac_X540:
3434 		status = ixgbe_init_ops_X540(hw);
3435 		break;
3436 	default:
3437 		status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
3438 		break;
3439 	}
3440 
3441 	return status;
3442 }
3443 
3444 /**
3445  *  ixgbe_init_hw - Initialize the hardware
3446  *  @hw: pointer to hardware structure
3447  *
3448  *  Initialize the hardware by resetting and then starting the hardware
3449  **/
3450 int32_t ixgbe_init_hw(struct ixgbe_hw *hw)
3451 {
3452 	if (hw->mac.ops.init_hw)
3453 		return hw->mac.ops.init_hw(hw);
3454 	else
3455 		return IXGBE_NOT_IMPLEMENTED;
3456 }
3457 
3458 /**
3459  *  ixgbe_get_media_type - Get media type
3460  *  @hw: pointer to hardware structure
3461  *
3462  *  Returns the media type (fiber, copper, backplane)
3463  **/
3464 enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw)
3465 {
3466 	if (hw->mac.ops.get_media_type)
3467 		return hw->mac.ops.get_media_type(hw);
3468 	else
3469 		return ixgbe_media_type_unknown;
3470 }
3471 
3472 /**
3473  *  ixgbe_identify_phy - Get PHY type
3474  *  @hw: pointer to hardware structure
3475  *
3476  *  Determines the physical layer module found on the current adapter.
3477  **/
3478 int32_t ixgbe_identify_phy(struct ixgbe_hw *hw)
3479 {
3480 	int32_t status = IXGBE_SUCCESS;
3481 
3482 	if (hw->phy.type == ixgbe_phy_unknown) {
3483 		if (hw->phy.ops.identify)
3484 			status = hw->phy.ops.identify(hw);
3485 		else
3486 			status = IXGBE_NOT_IMPLEMENTED;
3487 	}
3488 
3489 	return status;
3490 }
3491 
3492 /**
3493  *  ixgbe_check_link - Get link and speed status
3494  *  @hw: pointer to hardware structure
3495  *
3496  *  Reads the links register to determine if link is up and the current speed
3497  **/
3498 int32_t ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3499 			 bool *link_up, bool link_up_wait_to_complete)
3500 {
3501 	if (hw->mac.ops.check_link)
3502 		return hw->mac.ops.check_link(hw, speed, link_up,
3503 					      link_up_wait_to_complete);
3504 	else
3505 		return IXGBE_NOT_IMPLEMENTED;
3506 }
3507 
3508 /**
3509  *  ixgbe_flap_tx_laser - flap Tx laser to start autotry process
3510  *  @hw: pointer to hardware structure
3511  *
3512  *  When the driver changes the link speeds that it can support then
3513  *  flap the tx laser to alert the link partner to start autotry
3514  *  process on its end.
3515  **/
3516 void ixgbe_flap_tx_laser(struct ixgbe_hw *hw)
3517 {
3518 	if (hw->mac.ops.flap_tx_laser)
3519 		hw->mac.ops.flap_tx_laser(hw);
3520 }
3521 
3522 /**
3523  *  ixgbe_set_rar - Set Rx address register
3524  *  @hw: pointer to hardware structure
3525  *  @index: Receive address register to write
3526  *  @addr: Address to put into receive address register
3527  *  @vmdq: VMDq "set"
3528  *  @enable_addr: set flag that address is active
3529  *
3530  *  Puts an ethernet address into a receive address register.
3531  **/
3532 int32_t ixgbe_set_rar(struct ixgbe_hw *hw, uint32_t index, uint8_t *addr,
3533 		      uint32_t vmdq, uint32_t enable_addr)
3534 {
3535 	if (hw->mac.ops.set_rar)
3536 		return hw->mac.ops.set_rar(hw, index, addr, vmdq, enable_addr);
3537 	else
3538 		return IXGBE_NOT_IMPLEMENTED;
3539 }
3540 
3541 /**
3542  *  ixgbe_set_vmdq - Associate a VMDq index with a receive address
3543  *  @hw: pointer to hardware structure
3544  *  @rar: receive address register index to associate with VMDq index
3545  *  @vmdq: VMDq set or pool index
3546  **/
3547 int32_t ixgbe_set_vmdq(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
3548 {
3549 	if (hw->mac.ops.set_vmdq)
3550 		return hw->mac.ops.set_vmdq(hw, rar, vmdq);
3551 	else
3552 		return IXGBE_NOT_IMPLEMENTED;
3553 }
3554 
3555 /**
3556  *  ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address
3557  *  @hw: pointer to hardware structure
3558  *  @rar: receive address register index to disassociate with VMDq index
3559  *  @vmdq: VMDq set or pool index
3560  **/
3561 int32_t ixgbe_clear_vmdq(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
3562 {
3563 	if (hw->mac.ops.clear_vmdq)
3564 		return hw->mac.ops.clear_vmdq(hw, rar, vmdq);
3565 	else
3566 		return IXGBE_NOT_IMPLEMENTED;
3567 }
3568 
3569 /**
3570  *  ixgbe_init_uta_tables - Initializes Unicast Table Arrays.
3571  *  @hw: pointer to hardware structure
3572  *
3573  *  Initializes the Unicast Table Arrays to zero on device load.  This
3574  *  is part of the Rx init addr execution path.
3575  **/
3576 int32_t ixgbe_init_uta_tables(struct ixgbe_hw *hw)
3577 {
3578 	if (hw->mac.ops.init_uta_tables)
3579 		return hw->mac.ops.init_uta_tables(hw);
3580 	else
3581 		return IXGBE_NOT_IMPLEMENTED;
3582 }
3583 
3584 bool ixgbe_verify_lesm_fw_enabled(struct ixgbe_hw *hw)
3585 {
3586 	if (hw->mac.ops.verify_lesm_fw_enabled)
3587 		return hw->mac.ops.verify_lesm_fw_enabled(hw);
3588 	else
3589 		return IXGBE_NOT_IMPLEMENTED;
3590 }
3591 
3592 int32_t ixgbe_reset_pipeline(struct ixgbe_hw *hw)
3593 {
3594 	if (hw->mac.ops.reset_pipeline)
3595 		return hw->mac.ops.reset_pipeline(hw);
3596 	else
3597 		return IXGBE_NOT_IMPLEMENTED;
3598 }
3599 
3600 /*
3601  * MBX: Mailbox handling
3602  */
3603 
3604 /**
3605  *  ixgbe_read_mbx - Reads a message from the mailbox
3606  *  @hw: pointer to the HW structure
3607  *  @msg: The message buffer
3608  *  @size: Length of buffer
3609  *  @mbx_id: id of mailbox to read
3610  *
3611  *  returns SUCCESS if it successfuly read message from buffer
3612  **/
3613 int32_t ixgbe_read_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
3614 {
3615 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3616 	int32_t ret_val = IXGBE_ERR_MBX;
3617 
3618 	DEBUGFUNC("ixgbe_read_mbx");
3619 
3620 	/* limit read to size of mailbox */
3621 	if (size > mbx->size)
3622 		size = mbx->size;
3623 
3624 	if (mbx->ops.read)
3625 		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
3626 
3627 	return ret_val;
3628 }
3629 
3630 /**
3631  *  ixgbe_write_mbx - Write a message to the mailbox
3632  *  @hw: pointer to the HW structure
3633  *  @msg: The message buffer
3634  *  @size: Length of buffer
3635  *  @mbx_id: id of mailbox to write
3636  *
3637  *  returns SUCCESS if it successfully copied message into the buffer
3638  **/
3639 int32_t ixgbe_write_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
3640 {
3641 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3642 	int32_t ret_val = IXGBE_SUCCESS;
3643 
3644 	DEBUGFUNC("ixgbe_write_mbx");
3645 
3646 	if (size > mbx->size)
3647 		ret_val = IXGBE_ERR_MBX;
3648 
3649 	else if (mbx->ops.write)
3650 		ret_val = mbx->ops.write(hw, msg, size, mbx_id);
3651 
3652 	return ret_val;
3653 }
3654 
3655 /**
3656  *  ixgbe_check_for_msg - checks to see if someone sent us mail
3657  *  @hw: pointer to the HW structure
3658  *  @mbx_id: id of mailbox to check
3659  *
3660  *  returns SUCCESS if the Status bit was found or else ERR_MBX
3661  **/
3662 int32_t ixgbe_check_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id)
3663 {
3664 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3665 	int32_t ret_val = IXGBE_ERR_MBX;
3666 
3667 	DEBUGFUNC("ixgbe_check_for_msg");
3668 
3669 	if (mbx->ops.check_for_msg)
3670 		ret_val = mbx->ops.check_for_msg(hw, mbx_id);
3671 
3672 	return ret_val;
3673 }
3674 
3675 /**
3676  *  ixgbe_check_for_ack - checks to see if someone sent us ACK
3677  *  @hw: pointer to the HW structure
3678  *  @mbx_id: id of mailbox to check
3679  *
3680  *  returns SUCCESS if the Status bit was found or else ERR_MBX
3681  **/
3682 int32_t ixgbe_check_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id)
3683 {
3684 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3685 	int32_t ret_val = IXGBE_ERR_MBX;
3686 
3687 	DEBUGFUNC("ixgbe_check_for_ack");
3688 
3689 	if (mbx->ops.check_for_ack)
3690 		ret_val = mbx->ops.check_for_ack(hw, mbx_id);
3691 
3692 	return ret_val;
3693 }
3694 
3695 /**
3696  *  ixgbe_check_for_rst - checks to see if other side has reset
3697  *  @hw: pointer to the HW structure
3698  *  @mbx_id: id of mailbox to check
3699  *
3700  *  returns SUCCESS if the Status bit was found or else ERR_MBX
3701  **/
3702 int32_t ixgbe_check_for_rst(struct ixgbe_hw *hw, uint16_t mbx_id)
3703 {
3704 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3705 	int32_t ret_val = IXGBE_ERR_MBX;
3706 
3707 	DEBUGFUNC("ixgbe_check_for_rst");
3708 
3709 	if (mbx->ops.check_for_rst)
3710 		ret_val = mbx->ops.check_for_rst(hw, mbx_id);
3711 
3712 	return ret_val;
3713 }
3714 
3715 /**
3716  *  ixgbe_poll_for_msg - Wait for message notification
3717  *  @hw: pointer to the HW structure
3718  *  @mbx_id: id of mailbox to write
3719  *
3720  *  returns SUCCESS if it successfully received a message notification
3721  **/
3722 int32_t ixgbe_poll_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id)
3723 {
3724 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3725 	int countdown = mbx->timeout;
3726 
3727 	DEBUGFUNC("ixgbe_poll_for_msg");
3728 
3729 	if (!countdown || !mbx->ops.check_for_msg)
3730 		goto out;
3731 
3732 	while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
3733 		countdown--;
3734 		if (!countdown)
3735 			break;
3736 		usec_delay(mbx->usec_delay);
3737 	}
3738 
3739 out:
3740 	return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
3741 }
3742 
3743 /**
3744  *  ixgbe_poll_for_ack - Wait for message acknowledgement
3745  *  @hw: pointer to the HW structure
3746  *  @mbx_id: id of mailbox to write
3747  *
3748  *  returns SUCCESS if it successfully received a message acknowledgement
3749  **/
3750 int32_t ixgbe_poll_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id)
3751 {
3752 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3753 	int countdown = mbx->timeout;
3754 
3755 	DEBUGFUNC("ixgbe_poll_for_ack");
3756 
3757 	if (!countdown || !mbx->ops.check_for_ack)
3758 		goto out;
3759 
3760 	while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
3761 		countdown--;
3762 		if (!countdown)
3763 			break;
3764 		usec_delay(mbx->usec_delay);
3765 	}
3766 
3767 out:
3768 	return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
3769 }
3770 
3771 /**
3772  *  ixgbe_read_posted_mbx - Wait for message notification and receive message
3773  *  @hw: pointer to the HW structure
3774  *  @msg: The message buffer
3775  *  @size: Length of buffer
3776  *  @mbx_id: id of mailbox to write
3777  *
3778  *  returns SUCCESS if it successfully received a message notification and
3779  *  copied it into the receive buffer.
3780  **/
3781 int32_t ixgbe_read_posted_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
3782 {
3783 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3784 	int32_t ret_val = IXGBE_ERR_MBX;
3785 
3786 	DEBUGFUNC("ixgbe_read_posted_mbx");
3787 
3788 	if (!mbx->ops.read)
3789 		goto out;
3790 
3791 	ret_val = ixgbe_poll_for_msg(hw, mbx_id);
3792 
3793 	/* if ack received read message, otherwise we timed out */
3794 	if (!ret_val)
3795 		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
3796 out:
3797 	return ret_val;
3798 }
3799 
3800 /**
3801  *  ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
3802  *  @hw: pointer to the HW structure
3803  *  @msg: The message buffer
3804  *  @size: Length of buffer
3805  *  @mbx_id: id of mailbox to write
3806  *
3807  *  returns SUCCESS if it successfully copied message into the buffer and
3808  *  received an ack to that message within delay * timeout period
3809  **/
3810 int32_t ixgbe_write_posted_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
3811 			   uint16_t mbx_id)
3812 {
3813 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3814 	int32_t ret_val = IXGBE_ERR_MBX;
3815 
3816 	DEBUGFUNC("ixgbe_write_posted_mbx");
3817 
3818 	/* exit if either we can't write or there isn't a defined timeout */
3819 	if (!mbx->ops.write || !mbx->timeout)
3820 		goto out;
3821 
3822 	/* send msg */
3823 	ret_val = mbx->ops.write(hw, msg, size, mbx_id);
3824 
3825 	/* if msg sent wait until we receive an ack */
3826 	if (!ret_val)
3827 		ret_val = ixgbe_poll_for_ack(hw, mbx_id);
3828 out:
3829 	return ret_val;
3830 }
3831 
3832 /**
3833  *  ixgbe_init_mbx_ops_generic - Initialize MB function pointers
3834  *  @hw: pointer to the HW structure
3835  *
3836  *  Setups up the mailbox read and write message function pointers
3837  **/
3838 void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
3839 {
3840 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3841 
3842 	mbx->ops.read_posted = ixgbe_read_posted_mbx;
3843 	mbx->ops.write_posted = ixgbe_write_posted_mbx;
3844 }
3845 
3846 /**
3847  *  ixgbe_read_v2p_mailbox - read v2p mailbox
3848  *  @hw: pointer to the HW structure
3849  *
3850  *  This function is used to read the v2p mailbox without losing the read to
3851  *  clear status bits.
3852  **/
3853 uint32_t ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw)
3854 {
3855 	uint32_t v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
3856 
3857 	v2p_mailbox |= hw->mbx.v2p_mailbox;
3858 	hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
3859 
3860 	return v2p_mailbox;
3861 }
3862 
3863 int32_t ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, uint32_t mask, int32_t index)
3864 {
3865 	uint32_t mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
3866 	int32_t ret_val = IXGBE_ERR_MBX;
3867 
3868 	if (mbvficr & mask) {
3869 		ret_val = IXGBE_SUCCESS;
3870 		IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
3871 	}
3872 
3873 	return ret_val;
3874 }
3875 
3876 /**
3877  *  ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
3878  *  @hw: pointer to the HW structure
3879  *  @vf_number: the VF index
3880  *
3881  *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
3882  **/
3883 int32_t ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, uint16_t vf_number)
3884 {
3885 	int32_t ret_val = IXGBE_ERR_MBX;
3886 	int32_t index = IXGBE_MBVFICR_INDEX(vf_number);
3887 	uint32_t vf_bit = vf_number % 16;
3888 
3889 	DEBUGFUNC("ixgbe_check_for_msg_pf");
3890 
3891 	if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
3892 				    index)) {
3893 		ret_val = IXGBE_SUCCESS;
3894 		hw->mbx.stats.reqs++;
3895 	}
3896 
3897 	return ret_val;
3898 }
3899 
3900 /**
3901  *  ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
3902  *  @hw: pointer to the HW structure
3903  *  @vf_number: the VF index
3904  *
3905  *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
3906  **/
3907 int32_t ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, uint16_t vf_number)
3908 {
3909 	int32_t ret_val = IXGBE_ERR_MBX;
3910 	int32_t index = IXGBE_MBVFICR_INDEX(vf_number);
3911 	uint32_t vf_bit = vf_number % 16;
3912 
3913 	DEBUGFUNC("ixgbe_check_for_ack_pf");
3914 
3915 	if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
3916 				    index)) {
3917 		ret_val = IXGBE_SUCCESS;
3918 		hw->mbx.stats.acks++;
3919 	}
3920 
3921 	return ret_val;
3922 }
3923 
3924 /**
3925  *  ixgbe_check_for_rst_pf - checks to see if the VF has reset
3926  *  @hw: pointer to the HW structure
3927  *  @vf_number: the VF index
3928  *
3929  *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
3930  **/
3931 int32_t ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, uint16_t vf_number)
3932 {
3933 	uint32_t reg_offset = (vf_number < 32) ? 0 : 1;
3934 	uint32_t vf_shift = vf_number % 32;
3935 	uint32_t vflre = 0;
3936 	int32_t ret_val = IXGBE_ERR_MBX;
3937 
3938 	DEBUGFUNC("ixgbe_check_for_rst_pf");
3939 
3940 	switch (hw->mac.type) {
3941 	case ixgbe_mac_82599EB:
3942 		vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
3943 		break;
3944 	case ixgbe_mac_X540:
3945 		vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
3946 		break;
3947 	default:
3948 		break;
3949 	}
3950 
3951 	if (vflre & (1 << vf_shift)) {
3952 		ret_val = IXGBE_SUCCESS;
3953 		IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
3954 		hw->mbx.stats.rsts++;
3955 	}
3956 
3957 	return ret_val;
3958 }
3959 
3960 /**
3961  *  ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
3962  *  @hw: pointer to the HW structure
3963  *  @vf_number: the VF index
3964  *
3965  *  return SUCCESS if we obtained the mailbox lock
3966  **/
3967 int32_t ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, uint16_t vf_number)
3968 {
3969 	int32_t ret_val = IXGBE_ERR_MBX;
3970 	uint32_t p2v_mailbox;
3971 
3972 	DEBUGFUNC("ixgbe_obtain_mbx_lock_pf");
3973 
3974 	/* Take ownership of the buffer */
3975 	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
3976 
3977 	/* reserve mailbox for vf use */
3978 	p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
3979 	if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
3980 		ret_val = IXGBE_SUCCESS;
3981 
3982 	return ret_val;
3983 }
3984 
3985 /**
3986  *  ixgbe_write_mbx_pf - Places a message in the mailbox
3987  *  @hw: pointer to the HW structure
3988  *  @msg: The message buffer
3989  *  @size: Length of buffer
3990  *  @vf_number: the VF index
3991  *
3992  *  returns SUCCESS if it successfully copied message into the buffer
3993  **/
3994 int32_t ixgbe_write_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
3995 			   uint16_t vf_number)
3996 {
3997 	int32_t ret_val;
3998 	uint16_t i;
3999 
4000 	DEBUGFUNC("ixgbe_write_mbx_pf");
4001 
4002 	/* lock the mailbox to prevent pf/vf race condition */
4003 	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
4004 	if (ret_val)
4005 		goto out_no_write;
4006 
4007 	/* flush msg and acks as we are overwriting the message buffer */
4008 	ixgbe_check_for_msg_pf(hw, vf_number);
4009 	ixgbe_check_for_ack_pf(hw, vf_number);
4010 
4011 	/* copy the caller specified message to the mailbox memory buffer */
4012 	for (i = 0; i < size; i++)
4013 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
4014 
4015 	/* Interrupt VF to tell it a message has been sent and release buffer*/
4016 	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
4017 
4018 	/* update stats */
4019 	hw->mbx.stats.msgs_tx++;
4020 
4021 out_no_write:
4022 	return ret_val;
4023 
4024 }
4025 
4026 /**
4027  *  ixgbe_read_mbx_pf - Read a message from the mailbox
4028  *  @hw: pointer to the HW structure
4029  *  @msg: The message buffer
4030  *  @size: Length of buffer
4031  *  @vf_number: the VF index
4032  *
4033  *  This function copies a message from the mailbox buffer to the caller's
4034  *  memory buffer.  The presumption is that the caller knows that there was
4035  *  a message due to a VF request so no polling for message is needed.
4036  **/
4037 int32_t ixgbe_read_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
4038 			  uint16_t vf_number)
4039 {
4040 	int32_t ret_val;
4041 	uint16_t i;
4042 
4043 	DEBUGFUNC("ixgbe_read_mbx_pf");
4044 
4045 	/* lock the mailbox to prevent pf/vf race condition */
4046 	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
4047 	if (ret_val)
4048 		goto out_no_read;
4049 
4050 	/* copy the message to the mailbox memory buffer */
4051 	for (i = 0; i < size; i++)
4052 		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
4053 
4054 	/* Acknowledge the message and release buffer */
4055 	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
4056 
4057 	/* update stats */
4058 	hw->mbx.stats.msgs_rx++;
4059 
4060 out_no_read:
4061 	return ret_val;
4062 }
4063 
4064 /**
4065  *  ixgbe_init_mbx_params_pf - set initial values for pf mailbox
4066  *  @hw: pointer to the HW structure
4067  *
4068  *  Initializes the hw->mbx struct to correct values for pf mailbox
4069  */
4070 void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
4071 {
4072 	struct ixgbe_mbx_info *mbx = &hw->mbx;
4073 
4074 	if (hw->mac.type != ixgbe_mac_82599EB &&
4075 	    hw->mac.type != ixgbe_mac_X540)
4076 		return;
4077 
4078 	mbx->timeout = 0;
4079 	mbx->usec_delay = 0;
4080 
4081 	mbx->size = IXGBE_VFMAILBOX_SIZE;
4082 
4083 	mbx->ops.read = ixgbe_read_mbx_pf;
4084 	mbx->ops.write = ixgbe_write_mbx_pf;
4085 	mbx->ops.read_posted = ixgbe_read_posted_mbx;
4086 	mbx->ops.write_posted = ixgbe_write_posted_mbx;
4087 	mbx->ops.check_for_msg = ixgbe_check_for_msg_pf;
4088 	mbx->ops.check_for_ack = ixgbe_check_for_ack_pf;
4089 	mbx->ops.check_for_rst = ixgbe_check_for_rst_pf;
4090 
4091 	mbx->stats.msgs_tx = 0;
4092 	mbx->stats.msgs_rx = 0;
4093 	mbx->stats.reqs = 0;
4094 	mbx->stats.acks = 0;
4095 	mbx->stats.rsts = 0;
4096 }
4097