xref: /openbsd-src/sys/dev/pci/ixgbe.c (revision d13be5d47e4149db2549a9828e244d59dbc43f15)
1 /*	$OpenBSD: ixgbe.c,v 1.6 2011/06/10 12:46:35 claudio Exp $	*/
2 
3 /******************************************************************************
4 
5   Copyright (c) 2001-2009, Intel Corporation
6   All rights reserved.
7 
8   Redistribution and use in source and binary forms, with or without
9   modification, are permitted provided that the following conditions are met:
10 
11    1. Redistributions of source code must retain the above copyright notice,
12       this list of conditions and the following disclaimer.
13 
14    2. Redistributions in binary form must reproduce the above copyright
15       notice, this list of conditions and the following disclaimer in the
16       documentation and/or other materials provided with the distribution.
17 
18    3. Neither the name of the Intel Corporation nor the names of its
19       contributors may be used to endorse or promote products derived from
20       this software without specific prior written permission.
21 
22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32   POSSIBILITY OF SUCH DAMAGE.
33 
34 ******************************************************************************/
35 /*$FreeBSD: src/sys/dev/ixgbe/ixgbe_common.c,v 1.9 2009/12/07 21:30:54 jfv Exp $*/
36 
37 #include <dev/pci/ixgbe.h>
38 
39 int32_t ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
40 int32_t ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
41 void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
42 int32_t ixgbe_ready_eeprom(struct ixgbe_hw *hw);
43 void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
44 void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, uint16_t data,
45                                         uint16_t count);
46 uint16_t ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, uint16_t count);
47 void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec);
48 void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec);
49 void ixgbe_release_eeprom(struct ixgbe_hw *hw);
50 
51 int32_t ixgbe_mta_vector(struct ixgbe_hw *hw, uint8_t *mc_addr);
52 int32_t ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
53 int32_t ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
54 int32_t ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
55 int32_t ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
56 int32_t ixgbe_negotiate_fc(struct ixgbe_hw *hw, uint32_t adv_reg, uint32_t lp_reg,
57                               uint32_t adv_sym, uint32_t adv_asm, uint32_t lp_sym, uint32_t lp_asm);
58 
59 
60 int32_t ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, uint32_t vlan);
61 
62 /* MBX */
63 int32_t ixgbe_poll_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id);
64 int32_t ixgbe_poll_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id);
65 uint32_t ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw);
66 int32_t ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, uint32_t mask);
67 int32_t ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, uint16_t mbx_id);
68 int32_t ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, uint16_t mbx_id);
69 int32_t ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, uint16_t mbx_id);
70 int32_t ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw);
71 int32_t ixgbe_write_mbx_vf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
72                               uint16_t mbx_id);
73 int32_t ixgbe_read_mbx_vf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
74                               uint16_t mbx_id);
75 int32_t ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, uint32_t mask, int32_t index);
76 int32_t ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, uint16_t vf_number);
77 int32_t ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, uint16_t vf_number);
78 int32_t ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, uint16_t vf_number);
79 int32_t ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, uint16_t vf_number);
80 int32_t ixgbe_write_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
81                              uint16_t vf_number);
82 int32_t ixgbe_read_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
83                              uint16_t vf_number);
84 
85 
86 /**
87  *  ixgbe_init_ops_generic - Inits function ptrs
88  *  @hw: pointer to the hardware structure
89  *
90  *  Initialize the function pointers.
91  **/
92 int32_t ixgbe_init_ops_generic(struct ixgbe_hw *hw)
93 {
94 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
95 	struct ixgbe_mac_info *mac = &hw->mac;
96 	uint32_t eec = IXGBE_READ_REG(hw, IXGBE_EEC);
97 
98 	/* EEPROM */
99 	eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
100 	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
101 	if (eec & (1 << 8))
102 		eeprom->ops.read = &ixgbe_read_eerd_generic;
103 	else
104 		eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
105 	eeprom->ops.write = &ixgbe_write_eeprom_generic;
106 	eeprom->ops.validate_checksum =
107 	                              &ixgbe_validate_eeprom_checksum_generic;
108 	eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
109 	eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
110 
111 	/* MAC */
112 	mac->ops.init_hw = &ixgbe_init_hw_generic;
113 	mac->ops.reset_hw = NULL;
114 	mac->ops.start_hw = &ixgbe_start_hw_generic;
115 	mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
116 	mac->ops.get_media_type = NULL;
117 	mac->ops.get_supported_physical_layer = NULL;
118 	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
119 	mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
120 	mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
121 	mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
122 	mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
123 	mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
124 	mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
125 
126 	/* LEDs */
127 	mac->ops.led_on = &ixgbe_led_on_generic;
128 	mac->ops.led_off = &ixgbe_led_off_generic;
129 	mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
130 	mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
131 
132 	/* RAR, Multicast, VLAN */
133 	mac->ops.set_rar = &ixgbe_set_rar_generic;
134 	mac->ops.clear_rar = &ixgbe_clear_rar_generic;
135 	mac->ops.insert_mac_addr = NULL;
136 	mac->ops.set_vmdq = NULL;
137 	mac->ops.clear_vmdq = NULL;
138 	mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
139 	mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
140 	mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
141 	mac->ops.enable_mc = &ixgbe_enable_mc_generic;
142 	mac->ops.disable_mc = &ixgbe_disable_mc_generic;
143 	mac->ops.clear_vfta = NULL;
144 	mac->ops.set_vfta = NULL;
145 	mac->ops.init_uta_tables = NULL;
146 
147 	/* Flow Control */
148 	mac->ops.fc_enable = &ixgbe_fc_enable_generic;
149 
150 	/* Link */
151 	mac->ops.get_link_capabilities = NULL;
152 	mac->ops.setup_link = NULL;
153 	mac->ops.check_link = NULL;
154 
155 	return IXGBE_SUCCESS;
156 }
157 
158 /**
159  *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
160  *  @hw: pointer to hardware structure
161  *
162  *  Starts the hardware by filling the bus info structure and media type, clears
163  *  all on chip counters, initializes receive address registers, multicast
164  *  table, VLAN filter table, calls routine to set up link and flow control
165  *  settings, and leaves transmit and receive units disabled and uninitialized
166  **/
167 int32_t ixgbe_start_hw_generic(struct ixgbe_hw *hw)
168 {
169 	uint32_t ctrl_ext;
170 
171 	/* Set the media type */
172 	hw->phy.media_type = hw->mac.ops.get_media_type(hw);
173 
174 	/* PHY ops initialization must be done in reset_hw() */
175 
176 	/* Clear the VLAN filter table */
177 	hw->mac.ops.clear_vfta(hw);
178 
179 	/* Clear statistics registers */
180 	hw->mac.ops.clear_hw_cntrs(hw);
181 
182 	/* Set No Snoop Disable */
183 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
184 	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
185 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
186 	IXGBE_WRITE_FLUSH(hw);
187 
188 	/* Setup flow control */
189 	ixgbe_setup_fc(hw, 0);
190 
191 	/* Clear adapter stopped flag */
192 	hw->adapter_stopped = FALSE;
193 
194 	return IXGBE_SUCCESS;
195 }
196 
197 /**
198  *  ixgbe_start_hw_gen2 - Init sequence for common device family
199  *  @hw: pointer to hw structure
200  *
201  * Performs the init sequence common to the second generation
202  * of 10 GbE devices.
203  * Devices in the second generation:
204  *     82599
205  *     X540
206  **/
207 int32_t ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
208 {
209 	uint32_t i;
210 	uint32_t regval;
211 
212 	/* Clear the rate limiters */
213 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
214 		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
215 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
216 	}
217 	IXGBE_WRITE_FLUSH(hw);
218 
219 	/* Disable relaxed ordering */
220 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
221 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
222 		regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
223 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
224 	}
225 
226 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
227 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
228 		regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
229 					IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
230 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
231 	}
232 
233 	return IXGBE_SUCCESS;
234 }
235 
236 /**
237  *  ixgbe_init_hw_generic - Generic hardware initialization
238  *  @hw: pointer to hardware structure
239  *
240  *  Initialize the hardware by resetting the hardware, filling the bus info
241  *  structure and media type, clears all on chip counters, initializes receive
242  *  address registers, multicast table, VLAN filter table, calls routine to set
243  *  up link and flow control settings, and leaves transmit and receive units
244  *  disabled and uninitialized
245  **/
246 int32_t ixgbe_init_hw_generic(struct ixgbe_hw *hw)
247 {
248 	int32_t status = IXGBE_SUCCESS;
249 
250 	/* Reset the hardware */
251 	status = hw->mac.ops.reset_hw(hw);
252 
253 	if (status == IXGBE_SUCCESS) {
254 		/* Start the HW */
255 		status = hw->mac.ops.start_hw(hw);
256 	}
257 
258 	return status;
259 }
260 
/**
 *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 *  @hw: pointer to hardware structure
 *
 *  Clears all hardware statistics counters by reading them from the hardware
 *  Statistics counters are clear on read.  The return value of each read is
 *  deliberately discarded; the read itself is the reset.
 **/
int32_t ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	uint16_t i = 0;

	/* CRC, illegal-byte, error-byte and MAC short-packet counters */
	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	/* one missed-packet counter per packet buffer */
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	/* link-level flow control counters; 82599 and later expose the
	 * Rx XON/XOFF counts at different register offsets */
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* per-priority (PFC) flow control counters */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	/* XON-to-XOFF transition counters exist only on 82599 and later */
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	/* Rx packet-size histogram and good/broadcast/multicast counters */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	/* good-octet counters are 64-bit: read low then high halves */
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	/* Rx error and management-packet counters */
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	/* Tx packet-size histogram and multicast/broadcast counters */
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	/* per-queue counters; 82599 and later split byte counts into
	 * low/high halves and add a per-queue drop counter */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	return IXGBE_SUCCESS;
}
358 
/**
 *  ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 *  @hw: pointer to hardware structure
 *  @pba_num: stores the part number string from the EEPROM
 *  @pba_num_size: part number string buffer length
 *
 *  Reads the part number string from the EEPROM.  Two on-NVM layouts are
 *  supported: the legacy format, where the two PBA words are hex digits
 *  decoded here into an 11-byte "XXXXXX-0XX" string, and the string
 *  format, where word 0 is a guard value and word 1 points at a
 *  length-prefixed character section.
 **/
int32_t ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, uint8_t *pba_num,
                                  uint32_t pba_num_size)
{
	int32_t ret_val;
	uint16_t data;
	uint16_t pba_ptr;
	uint16_t offset;
	uint16_t length;

	if (pba_num == NULL) {
		DEBUGOUT("PBA string buffer was null\n");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	/*
	 * if data is not ptr guard the PBA must be in legacy format which
	 * means pba_ptr is actually our second data word for the PBA number
	 * and we can decode it into an ascii string
	 */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		DEBUGOUT("NVM PBA number is not stored as string\n");

		/* we will need 11 characters to store the PBA */
		if (pba_num_size < 11) {
			DEBUGOUT("PBA string buffer too small\n");
			return IXGBE_ERR_NO_SPACE;
		}

		/* extract hex string from data and pba_ptr; each entry is
		 * a raw nibble for now, converted to ASCII below */
		pba_num[0] = (data >> 12) & 0xF;
		pba_num[1] = (data >> 8) & 0xF;
		pba_num[2] = (data >> 4) & 0xF;
		pba_num[3] = data & 0xF;
		pba_num[4] = (pba_ptr >> 12) & 0xF;
		pba_num[5] = (pba_ptr >> 8) & 0xF;
		pba_num[6] = '-';
		/* raw nibble 0: becomes the literal '0' after the
		 * hex-to-ASCII pass below */
		pba_num[7] = 0;
		pba_num[8] = (pba_ptr >> 4) & 0xF;
		pba_num[9] = pba_ptr & 0xF;

		/* put a null character on the end of our string */
		pba_num[10] = '\0';

		/* switch all the data but the '-' to hex char; the '-' and
		 * '\0' entries are >= 0x10 so both branches skip them */
		for (offset = 0; offset < 10; offset++) {
			if (pba_num[offset] < 0xA)
				pba_num[offset] += '0';
			else if (pba_num[offset] < 0x10)
				pba_num[offset] += 'A' - 0xA;
		}

		return IXGBE_SUCCESS;
	}

	/* string format: the section at pba_ptr starts with a length word */
	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0) {
		DEBUGOUT("NVM PBA number section invalid length\n");
		return IXGBE_ERR_PBA_SECTION;
	}

	/* check if pba_num buffer is big enough */
	if (pba_num_size  < (((uint32_t)length * 2) - 1)) {
		DEBUGOUT("PBA string buffer too small\n");
		return IXGBE_ERR_NO_SPACE;
	}

	/* trim pba length from start of string */
	pba_ptr++;
	length--;

	/* each EEPROM word carries two characters, high byte first */
	for (offset = 0; offset < length; offset++) {
		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
		if (ret_val) {
			DEBUGOUT("NVM Read Error\n");
			return ret_val;
		}
		pba_num[offset * 2] = (uint8_t)(data >> 8);
		pba_num[(offset * 2) + 1] = (uint8_t)(data & 0xFF);
	}
	pba_num[offset * 2] = '\0';

	return IXGBE_SUCCESS;
}
467 
468 /**
469  *  ixgbe_read_pba_length_generic - Reads part number length from EEPROM
470  *  @hw: pointer to hardware structure
471  *  @pba_num_size: part number string buffer length
472  *
473  *  Reads the part number length from the EEPROM.
474  *  Returns expected buffer size in pba_num_size
475  **/
476 int32_t ixgbe_read_pba_length_generic(struct ixgbe_hw *hw, uint32_t *pba_num_size)
477 {
478 	int32_t ret_val;
479 	uint16_t data;
480 	uint16_t pba_ptr;
481 	uint16_t length;
482 
483 	if (pba_num_size == NULL) {
484 		DEBUGOUT("PBA buffer size was null\n");
485 		return IXGBE_ERR_INVALID_ARGUMENT;
486 	}
487 
488 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
489 	if (ret_val) {
490 		DEBUGOUT("NVM Read Error\n");
491 		return ret_val;
492 	}
493 
494 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
495 	if (ret_val) {
496 		DEBUGOUT("NVM Read Error\n");
497 		return ret_val;
498 	}
499 
500 	 /* if data is not ptr guard the PBA must be in legacy format */
501 	if (data != IXGBE_PBANUM_PTR_GUARD) {
502 		*pba_num_size = 11;
503 		return IXGBE_SUCCESS;
504 	}
505 
506 	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
507 	if (ret_val) {
508 		DEBUGOUT("NVM Read Error\n");
509 		return ret_val;
510 	}
511 
512 	if (length == 0xFFFF || length == 0) {
513 		DEBUGOUT("NVM PBA number section invalid length\n");
514 		return IXGBE_ERR_PBA_SECTION;
515 	}
516 
517 	/*
518 	 * Convert from length in 16bit values to 8bit chars, add 1 for NULL,
519 	 * and subtract 2 because length field is included in length.
520 	 */
521 	*pba_num_size = ((uint32_t)length * 2) - 1;
522 
523 	return IXGBE_SUCCESS;
524 }
525 
526 /**
527  *  ixgbe_read_pba_num_generic - Reads part number from EEPROM
528  *  @hw: pointer to hardware structure
529  *  @pba_num: stores the part number from the EEPROM
530  *
531  *  Reads the part number from the EEPROM.
532  **/
533 int32_t ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, uint32_t *pba_num)
534 {
535 	int32_t ret_val;
536 	uint16_t data;
537 
538 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
539 	if (ret_val) {
540 		DEBUGOUT("NVM Read Error\n");
541 		return ret_val;
542 	} else if (data == IXGBE_PBANUM_PTR_GUARD) {
543 		DEBUGOUT("NVM Not supported\n");
544 		return IXGBE_NOT_IMPLEMENTED;
545 	}
546 	*pba_num = (uint32_t)(data << 16);
547 
548 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
549 	if (ret_val) {
550 		DEBUGOUT("NVM Read Error\n");
551 		return ret_val;
552 	}
553 	*pba_num |= data;
554 
555 	return IXGBE_SUCCESS;
556 }
557 
558 /**
559  *  ixgbe_get_mac_addr_generic - Generic get MAC address
560  *  @hw: pointer to hardware structure
561  *  @mac_addr: Adapter MAC address
562  *
563  *  Reads the adapter's MAC address from first Receive Address Register (RAR0)
564  *  A reset of the adapter must be performed prior to calling this function
565  *  in order for the MAC address to have been loaded from the EEPROM into RAR0
566  **/
567 int32_t ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, uint8_t *mac_addr)
568 {
569 	uint32_t rar_high;
570 	uint32_t rar_low;
571 	uint16_t i;
572 
573 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
574 	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
575 
576 	for (i = 0; i < 4; i++)
577 		mac_addr[i] = (uint8_t)(rar_low >> (i*8));
578 
579 	for (i = 0; i < 2; i++)
580 		mac_addr[i+4] = (uint8_t)(rar_high >> (i*8));
581 
582 	return IXGBE_SUCCESS;
583 }
584 
585 /**
586  *  ixgbe_get_bus_info_generic - Generic set PCI bus info
587  *  @hw: pointer to hardware structure
588  *
589  *  Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
590  **/
591 int32_t ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
592 {
593 	struct ixgbe_mac_info *mac = &hw->mac;
594 	uint16_t link_status;
595 
596 	hw->bus.type = ixgbe_bus_type_pci_express;
597 
598 	/* Get the negotiated link width and speed from PCI config space */
599 	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
600 
601 	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
602 	case IXGBE_PCI_LINK_WIDTH_1:
603 		hw->bus.width = ixgbe_bus_width_pcie_x1;
604 		break;
605 	case IXGBE_PCI_LINK_WIDTH_2:
606 		hw->bus.width = ixgbe_bus_width_pcie_x2;
607 		break;
608 	case IXGBE_PCI_LINK_WIDTH_4:
609 		hw->bus.width = ixgbe_bus_width_pcie_x4;
610 		break;
611 	case IXGBE_PCI_LINK_WIDTH_8:
612 		hw->bus.width = ixgbe_bus_width_pcie_x8;
613 		break;
614 	default:
615 		hw->bus.width = ixgbe_bus_width_unknown;
616 		break;
617 	}
618 
619 	switch (link_status & IXGBE_PCI_LINK_SPEED) {
620 	case IXGBE_PCI_LINK_SPEED_2500:
621 		hw->bus.speed = ixgbe_bus_speed_2500;
622 		break;
623 	case IXGBE_PCI_LINK_SPEED_5000:
624 		hw->bus.speed = ixgbe_bus_speed_5000;
625 		break;
626 	default:
627 		hw->bus.speed = ixgbe_bus_speed_unknown;
628 		break;
629 	}
630 
631 	mac->ops.set_lan_id(hw);
632 
633 	return IXGBE_SUCCESS;
634 }
635 
636 /**
637  *  ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
638  *  @hw: pointer to the HW structure
639  *
640  *  Determines the LAN function id by reading memory-mapped registers
641  *  and swaps the port value if requested.
642  **/
643 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
644 {
645 	struct ixgbe_bus_info *bus = &hw->bus;
646 	uint32_t reg;
647 
648 	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
649 	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
650 	bus->lan_id = bus->func;
651 
652 	/* check for a port swap */
653 	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
654 	if (reg & IXGBE_FACTPS_LFS)
655 		bus->func ^= 0x1;
656 }
657 
658 /**
659  *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
660  *  @hw: pointer to hardware structure
661  *
662  *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
663  *  disables transmit and receive units. The adapter_stopped flag is used by
664  *  the shared code and drivers to determine if the adapter is in a stopped
665  *  state and should not touch the hardware.
666  **/
667 int32_t ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
668 {
669 	uint32_t number_of_queues;
670 	uint32_t reg_val;
671 	uint16_t i;
672 
673 	/*
674 	 * Set the adapter_stopped flag so other driver functions stop touching
675 	 * the hardware
676 	 */
677 	hw->adapter_stopped = TRUE;
678 
679 	/* Disable the receive unit */
680 	reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
681 	reg_val &= ~(IXGBE_RXCTRL_RXEN);
682 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
683 	IXGBE_WRITE_FLUSH(hw);
684 	msec_delay(2);
685 
686 	/* Clear interrupt mask to stop from interrupts being generated */
687 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
688 
689 	/* Clear any pending interrupts */
690 	IXGBE_READ_REG(hw, IXGBE_EICR);
691 
692 	/* Disable the transmit unit.  Each queue must be disabled. */
693 	number_of_queues = hw->mac.max_tx_queues;
694 	for (i = 0; i < number_of_queues; i++) {
695 		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
696 		if (reg_val & IXGBE_TXDCTL_ENABLE) {
697 			reg_val &= ~IXGBE_TXDCTL_ENABLE;
698 			IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), reg_val);
699 		}
700 	}
701 
702 	/*
703 	 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
704 	 * access and verify no pending requests
705 	 */
706 	ixgbe_disable_pcie_master(hw);
707 
708 	return IXGBE_SUCCESS;
709 }
710 
711 /**
712  *  ixgbe_led_on_generic - Turns on the software controllable LEDs.
713  *  @hw: pointer to hardware structure
714  *  @index: led number to turn on
715  **/
716 int32_t ixgbe_led_on_generic(struct ixgbe_hw *hw, uint32_t index)
717 {
718 	uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
719 
720 	/* To turn on the LED, set mode to ON. */
721 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
722 	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
723 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
724 	IXGBE_WRITE_FLUSH(hw);
725 
726 	return IXGBE_SUCCESS;
727 }
728 
729 /**
730  *  ixgbe_led_off_generic - Turns off the software controllable LEDs.
731  *  @hw: pointer to hardware structure
732  *  @index: led number to turn off
733  **/
734 int32_t ixgbe_led_off_generic(struct ixgbe_hw *hw, uint32_t index)
735 {
736 	uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
737 
738 	/* To turn off the LED, set mode to OFF. */
739 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
740 	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
741 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
742 	IXGBE_WRITE_FLUSH(hw);
743 
744 	return IXGBE_SUCCESS;
745 }
746 
747 /**
748  *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
749  *  @hw: pointer to hardware structure
750  *
751  *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
752  *  ixgbe_hw struct in order to set up EEPROM access.
753  **/
754 int32_t ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
755 {
756 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
757 	uint32_t eec;
758 	uint16_t eeprom_size;
759 
760 	if (eeprom->type == ixgbe_eeprom_uninitialized) {
761 		eeprom->type = ixgbe_eeprom_none;
762 		/* Set default semaphore delay to 10ms which is a well
763 		 * tested value */
764 		eeprom->semaphore_delay = 10;
765 
766 		/*
767 		 * Check for EEPROM present first.
768 		 * If not present leave as none
769 		 */
770 		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
771 		if (eec & IXGBE_EEC_PRES) {
772 			eeprom->type = ixgbe_eeprom_spi;
773 
774 			/*
775 			 * SPI EEPROM is assumed here.  This code would need to
776 			 * change if a future EEPROM is not SPI.
777 			 */
778 			eeprom_size = (uint16_t)((eec & IXGBE_EEC_SIZE) >>
779 			                    IXGBE_EEC_SIZE_SHIFT);
780 			eeprom->word_size = 1 << (eeprom_size +
781 			                     IXGBE_EEPROM_WORD_SIZE_BASE_SHIFT);
782 		}
783 
784 		if (eec & IXGBE_EEC_ADDR_SIZE)
785 			eeprom->address_bits = 16;
786 		else
787 			eeprom->address_bits = 8;
788 		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
789 		          "%d\n", eeprom->type, eeprom->word_size,
790 		          eeprom->address_bits);
791 	}
792 
793 	return IXGBE_SUCCESS;
794 }
795 
/**
 *  ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be written to
 *  @data: 16 bit word to be written to the EEPROM
 *
 *  Bit-bangs a single word into a SPI EEPROM: acquire the interface,
 *  send WRITE ENABLE, then the WRITE opcode + byte address, then the
 *  data.  The command sequence and the standby pulses between phases
 *  are order-critical.
 *
 *  If ixgbe_eeprom_update_checksum is not called after this function, the
 *  EEPROM will most likely contain an invalid checksum.
 **/
int32_t ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t data)
{
	int32_t status;
	uint8_t write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	/* lazily discover word_size/address_bits; no-op after first call */
	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/* Prepare the EEPROM for writing  */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		/* device must report ready before a command is issued */
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		ixgbe_standby_eeprom(hw);

		/*  Send the WRITE ENABLE command (8 bit opcode )  */
		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_WREN_OPCODE_SPI,
		                            IXGBE_EEPROM_OPCODE_BITS);

		/* standby pulse separates WREN from the WRITE command */
		ixgbe_standby_eeprom(hw);

		/*
		 * Some SPI eeproms use the 8th address bit embedded in the
		 * opcode
		 */
		if ((hw->eeprom.address_bits == 8) && (offset >= 128))
			write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

		/* Send the Write command (8-bit opcode + addr) */
		ixgbe_shift_out_eeprom_bits(hw, write_opcode,
		                            IXGBE_EEPROM_OPCODE_BITS);
		/* word offset * 2 = byte address on the wire */
		ixgbe_shift_out_eeprom_bits(hw, (uint16_t)(offset*2),
		                            hw->eeprom.address_bits);

		/* Send the data, high byte first (byte-swap the word) */
		data = (data >> 8) | (data << 8);
		ixgbe_shift_out_eeprom_bits(hw, data, 16);
		ixgbe_standby_eeprom(hw);

		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

out:
	return status;
}
861 
/**
 *  ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be read
 *  @data: read 16 bit value from EEPROM
 *
 *  Reads 16 bit value from EEPROM through bit-bang method: acquire the
 *  SPI interface, clock out the READ opcode + byte address, then clock
 *  in the 16 data bits.  The command sequence is order-critical.
 **/
int32_t ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, uint16_t offset,
                                       uint16_t *data)
{
	int32_t status;
	uint16_t word_in;
	uint8_t read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;

	/* lazily discover word_size/address_bits; no-op after first call */
	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/* Prepare the EEPROM for reading  */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		/* device must report ready before a command is issued */
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		ixgbe_standby_eeprom(hw);

		/*
		 * Some SPI eeproms use the 8th address bit embedded in the
		 * opcode
		 */
		if ((hw->eeprom.address_bits == 8) && (offset >= 128))
			read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

		/* Send the READ command (opcode + addr) */
		ixgbe_shift_out_eeprom_bits(hw, read_opcode,
		                            IXGBE_EEPROM_OPCODE_BITS);
		/* word offset * 2 = byte address on the wire */
		ixgbe_shift_out_eeprom_bits(hw, (uint16_t)(offset*2),
		                            hw->eeprom.address_bits);

		/* Read the data; the wire order is high byte first,
		 * so byte-swap into host order. */
		word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
		*data = (word_in >> 8) | (word_in << 8);

		/* End this read operation */
		ixgbe_release_eeprom(hw);
	}

out:
	return status;
}
921 
922 /**
923  *  ixgbe_read_eerd_generic - Read EEPROM word using EERD
924  *  @hw: pointer to hardware structure
925  *  @offset: offset of  word in the EEPROM to read
926  *  @data: word read from the EEPROM
927  *
928  *  Reads a 16 bit word from the EEPROM using the EERD register.
929  **/
930 int32_t ixgbe_read_eerd_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t *data)
931 {
932 	uint32_t eerd;
933 	int32_t status;
934 
935 	hw->eeprom.ops.init_params(hw);
936 
937 	if (offset >= hw->eeprom.word_size) {
938 		status = IXGBE_ERR_EEPROM;
939 		goto out;
940 	}
941 
942 	eerd = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) +
943 	       IXGBE_EEPROM_RW_REG_START;
944 
945 	IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
946 	status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
947 
948 	if (status == IXGBE_SUCCESS)
949 		*data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
950 		         IXGBE_EEPROM_RW_REG_DATA);
951 	else
952 		DEBUGOUT("Eeprom read timed out\n");
953 
954 out:
955 	return status;
956 }
957 
958 /**
959  *  ixgbe_write_eewr_generic - Write EEPROM word using EEWR
960  *  @hw: pointer to hardware structure
961  *  @offset: offset of  word in the EEPROM to write
962  *  @data: word write to the EEPROM
963  *
964  *  Write a 16 bit word to the EEPROM using the EEWR register.
965  **/
966 int32_t ixgbe_write_eewr_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t data)
967 {
968 	uint32_t eewr;
969 	int32_t status;
970 
971 	hw->eeprom.ops.init_params(hw);
972 
973 	if (offset >= hw->eeprom.word_size) {
974 		status = IXGBE_ERR_EEPROM;
975 		goto out;
976 	}
977 
978 	eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
979 	       (data << IXGBE_EEPROM_RW_REG_DATA) | IXGBE_EEPROM_RW_REG_START;
980 
981 	status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
982 	if (status != IXGBE_SUCCESS) {
983 		DEBUGOUT("Eeprom write EEWR timed out\n");
984 		goto out;
985 	}
986 
987 	IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
988 
989 	status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
990 	if (status != IXGBE_SUCCESS) {
991 		DEBUGOUT("Eeprom write EEWR timed out\n");
992 		goto out;
993 	}
994 
995 out:
996 	return status;
997 }
998 
999 /**
1000  *  ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1001  *  @hw: pointer to hardware structure
1002  *  @ee_reg: EEPROM flag for polling
1003  *
1004  *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1005  *  read or write is done respectively.
1006  **/
1007 int32_t ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, uint32_t ee_reg)
1008 {
1009 	uint32_t i;
1010 	uint32_t reg;
1011 	int32_t status = IXGBE_ERR_EEPROM;
1012 
1013 	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1014 		if (ee_reg == IXGBE_NVM_POLL_READ)
1015 			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1016 		else
1017 			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1018 
1019 		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1020 			status = IXGBE_SUCCESS;
1021 			break;
1022 		}
1023 		usec_delay(5);
1024 	}
1025 	return status;
1026 }
1027 
1028 /**
1029  *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1030  *  @hw: pointer to hardware structure
1031  *
1032  *  Prepares EEPROM for access using bit-bang method. This function should
1033  *  be called before issuing a command to the EEPROM.
1034  **/
1035 int32_t ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1036 {
1037 	int32_t status = IXGBE_SUCCESS;
1038 	uint32_t eec;
1039 	uint32_t i;
1040 
1041 	if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != IXGBE_SUCCESS)
1042 		status = IXGBE_ERR_SWFW_SYNC;
1043 
1044 	if (status == IXGBE_SUCCESS) {
1045 		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1046 
1047 		/* Request EEPROM Access */
1048 		eec |= IXGBE_EEC_REQ;
1049 		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1050 
1051 		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1052 			eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1053 			if (eec & IXGBE_EEC_GNT)
1054 				break;
1055 			usec_delay(5);
1056 		}
1057 
1058 		/* Release if grant not acquired */
1059 		if (!(eec & IXGBE_EEC_GNT)) {
1060 			eec &= ~IXGBE_EEC_REQ;
1061 			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1062 			DEBUGOUT("Could not acquire EEPROM grant\n");
1063 
1064 			ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1065 			status = IXGBE_ERR_EEPROM;
1066 		}
1067 
1068 		/* Setup EEPROM for Read/Write */
1069 		if (status == IXGBE_SUCCESS) {
1070 			/* Clear CS and SK */
1071 			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1072 			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1073 			IXGBE_WRITE_FLUSH(hw);
1074 			usec_delay(1);
1075 		}
1076 	}
1077 	return status;
1078 }
1079 
1080 /**
1081  *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
1082  *  @hw: pointer to hardware structure
1083  *
1084  *  Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1085  **/
int32_t ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	int32_t status = IXGBE_ERR_EEPROM;
	uint32_t timeout = 2000;	/* poll iterations, 50us apart */
	uint32_t i;
	uint32_t swsm;

	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(50);
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.  (Firmware can clear SWESMBI behind our
			 * back, hence the read-back check.)
			 */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			usec_delay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			DEBUGOUT("SWESMBI Software EEPROM semaphore "
			         "not granted.\n");
			/* Also drops SMBI, which we do hold at this point */
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		/* NOTE(review): a stuck SMBI bit is not force-cleared here;
		 * the caller simply gets IXGBE_ERR_EEPROM — confirm intended. */
		DEBUGOUT("Software semaphore SMBI between device drivers "
		         "not granted.\n");
	}

	return status;
}
1144 
1145 /**
1146  *  ixgbe_release_eeprom_semaphore - Release hardware semaphore
1147  *  @hw: pointer to hardware structure
1148  *
1149  *  This function clears hardware semaphore bits.
1150  **/
1151 void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1152 {
1153 	uint32_t swsm;
1154 
1155 	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1156 
1157 	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1158 	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1159 	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1160 	IXGBE_WRITE_FLUSH(hw);
1161 }
1162 
1163 /**
1164  *  ixgbe_ready_eeprom - Polls for EEPROM ready
1165  *  @hw: pointer to hardware structure
1166  **/
1167 int32_t ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1168 {
1169 	int32_t status = IXGBE_SUCCESS;
1170 	uint16_t i;
1171 	uint8_t spi_stat_reg;
1172 
1173 	/*
1174 	 * Read "Status Register" repeatedly until the LSB is cleared.  The
1175 	 * EEPROM will signal that the command has been completed by clearing
1176 	 * bit 0 of the internal status register.  If it's not cleared within
1177 	 * 5 milliseconds, then error out.
1178 	 */
1179 	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1180 		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1181 		                            IXGBE_EEPROM_OPCODE_BITS);
1182 		spi_stat_reg = (uint8_t)ixgbe_shift_in_eeprom_bits(hw, 8);
1183 		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1184 			break;
1185 
1186 		usec_delay(5);
1187 		ixgbe_standby_eeprom(hw);
1188 	};
1189 
1190 	/*
1191 	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1192 	 * devices (and only 0-5mSec on 5V devices)
1193 	 */
1194 	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1195 		DEBUGOUT("SPI EEPROM Status error\n");
1196 		status = IXGBE_ERR_EEPROM;
1197 	}
1198 
1199 	return status;
1200 }
1201 
1202 /**
1203  *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1204  *  @hw: pointer to hardware structure
1205  **/
1206 void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1207 {
1208 	uint32_t eec;
1209 
1210 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1211 
1212 	/* Toggle CS to flush commands */
1213 	eec |= IXGBE_EEC_CS;
1214 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1215 	IXGBE_WRITE_FLUSH(hw);
1216 	usec_delay(1);
1217 	eec &= ~IXGBE_EEC_CS;
1218 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1219 	IXGBE_WRITE_FLUSH(hw);
1220 	usec_delay(1);
1221 }
1222 
1223 /**
1224  *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1225  *  @hw: pointer to hardware structure
1226  *  @data: data to send to the EEPROM
1227  *  @count: number of bits to shift out
1228  **/
1229 void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, uint16_t data,
1230                                         uint16_t count)
1231 {
1232 	uint32_t eec;
1233 	uint32_t mask;
1234 	uint32_t i;
1235 
1236 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1237 
1238 	/*
1239 	 * Mask is used to shift "count" bits of "data" out to the EEPROM
1240 	 * one bit at a time.  Determine the starting bit based on count
1241 	 */
1242 	mask = 0x01 << (count - 1);
1243 
1244 	for (i = 0; i < count; i++) {
1245 		/*
1246 		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1247 		 * "1", and then raising and then lowering the clock (the SK
1248 		 * bit controls the clock input to the EEPROM).  A "0" is
1249 		 * shifted out to the EEPROM by setting "DI" to "0" and then
1250 		 * raising and then lowering the clock.
1251 		 */
1252 		if (data & mask)
1253 			eec |= IXGBE_EEC_DI;
1254 		else
1255 			eec &= ~IXGBE_EEC_DI;
1256 
1257 		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1258 		IXGBE_WRITE_FLUSH(hw);
1259 
1260 		usec_delay(1);
1261 
1262 		ixgbe_raise_eeprom_clk(hw, &eec);
1263 		ixgbe_lower_eeprom_clk(hw, &eec);
1264 
1265 		/*
1266 		 * Shift mask to signify next bit of data to shift in to the
1267 		 * EEPROM
1268 		 */
1269 		mask = mask >> 1;
1270 	};
1271 
1272 	/* We leave the "DI" bit set to "0" when we leave this routine. */
1273 	eec &= ~IXGBE_EEC_DI;
1274 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1275 	IXGBE_WRITE_FLUSH(hw);
1276 }
1277 
1278 /**
1279  *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
 *  @hw: pointer to hardware structure
 *  @count: number of bits to shift in
1281  **/
1282 uint16_t ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, uint16_t count)
1283 {
1284 	uint32_t eec;
1285 	uint32_t i;
1286 	uint16_t data = 0;
1287 
1288 	/*
1289 	 * In order to read a register from the EEPROM, we need to shift
1290 	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
1291 	 * the clock input to the EEPROM (setting the SK bit), and then reading
1292 	 * the value of the "DO" bit.  During this "shifting in" process the
1293 	 * "DI" bit should always be clear.
1294 	 */
1295 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1296 
1297 	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
1298 
1299 	for (i = 0; i < count; i++) {
1300 		data = data << 1;
1301 		ixgbe_raise_eeprom_clk(hw, &eec);
1302 
1303 		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1304 
1305 		eec &= ~(IXGBE_EEC_DI);
1306 		if (eec & IXGBE_EEC_DO)
1307 			data |= 1;
1308 
1309 		ixgbe_lower_eeprom_clk(hw, &eec);
1310 	}
1311 
1312 	return data;
1313 }
1314 
1315 /**
1316  *  ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
1317  *  @hw: pointer to hardware structure
1318  *  @eec: EEC register's current value
1319  **/
1320 void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec)
1321 {
1322 	/*
1323 	 * Raise the clock input to the EEPROM
1324 	 * (setting the SK bit), then delay
1325 	 */
1326 	*eec = *eec | IXGBE_EEC_SK;
1327 	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1328 	IXGBE_WRITE_FLUSH(hw);
1329 	usec_delay(1);
1330 }
1331 
1332 /**
1333  *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
1334  *  @hw: pointer to hardware structure
 *  @eec: EEC register's current value
1336  **/
1337 void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec)
1338 {
1339 	/*
1340 	 * Lower the clock input to the EEPROM (clearing the SK bit), then
1341 	 * delay
1342 	 */
1343 	*eec = *eec & ~IXGBE_EEC_SK;
1344 	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1345 	IXGBE_WRITE_FLUSH(hw);
1346 	usec_delay(1);
1347 }
1348 
1349 /**
1350  *  ixgbe_release_eeprom - Release EEPROM, release semaphores
1351  *  @hw: pointer to hardware structure
1352  **/
1353 void ixgbe_release_eeprom(struct ixgbe_hw *hw)
1354 {
1355 	uint32_t eec;
1356 
1357 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1358 
1359 	eec |= IXGBE_EEC_CS;  /* Pull CS high */
1360 	eec &= ~IXGBE_EEC_SK; /* Lower SCK */
1361 
1362 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1363 	IXGBE_WRITE_FLUSH(hw);
1364 
1365 	usec_delay(1);
1366 
1367 	/* Stop requesting EEPROM access */
1368 	eec &= ~IXGBE_EEC_REQ;
1369 	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1370 
1371 	ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1372 
1373 	/* Delay before attempt to obtain semaphore again to allow FW access */
1374 	msec_delay(hw->eeprom.semaphore_delay);
1375 }
1376 
1377 /**
1378  *  ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
1379  *  @hw: pointer to hardware structure
1380  **/
uint16_t ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	uint16_t i;
	uint16_t j;
	uint16_t checksum = 0;
	uint16_t length = 0;
	uint16_t pointer = 0;
	uint16_t word = 0;

	/* Include 0x0-0x3F (everything before the checksum word itself) */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) {
			DEBUGOUT("EEPROM read failed\n");
			break;
		}
		checksum += word;
	}

	/*
	 * Include all data from pointers except for the fw pointer.
	 * NOTE(review): these reads are unchecked — a failed read silently
	 * reuses the previous pointer/length/word value; confirm acceptable.
	 */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		hw->eeprom.ops.read(hw, i, &pointer);

		/* Skip null (0) or unprogrammed (0xFFFF) pointers */
		if (pointer != 0xFFFF && pointer != 0) {
			/* First word of the section is its length */
			hw->eeprom.ops.read(hw, pointer, &length);

			if (length != 0xFFFF && length != 0) {
				/* Sum the section's payload words */
				for (j = pointer+1; j <= pointer+length; j++) {
					hw->eeprom.ops.read(hw, j, &word);
					checksum += word;
				}
			}
		}
	}

	/* Value that makes all covered words sum to IXGBE_EEPROM_SUM */
	checksum = (uint16_t)IXGBE_EEPROM_SUM - checksum;

	return checksum;
}
1420 
1421 /**
1422  *  ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
1423  *  @hw: pointer to hardware structure
1424  *  @checksum_val: calculated checksum
1425  *
1426  *  Performs checksum calculation and validates the EEPROM checksum.  If the
1427  *  caller does not need checksum_val, the value can be NULL.
1428  **/
1429 int32_t ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
1430                                            uint16_t *checksum_val)
1431 {
1432 	int32_t status;
1433 	uint16_t checksum;
1434 	uint16_t read_checksum = 0;
1435 
1436 	/*
1437 	 * Read the first word from the EEPROM. If this times out or fails, do
1438 	 * not continue or we could be in for a very long wait while every
1439 	 * EEPROM read fails
1440 	 */
1441 	status = hw->eeprom.ops.read(hw, 0, &checksum);
1442 
1443 	if (status == IXGBE_SUCCESS) {
1444 		checksum = hw->eeprom.ops.calc_checksum(hw);
1445 
1446 		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
1447 
1448 		/*
1449 		 * Verify read checksum from EEPROM is the same as
1450 		 * calculated checksum
1451 		 */
1452 		if (read_checksum != checksum)
1453 			status = IXGBE_ERR_EEPROM_CHECKSUM;
1454 
1455 		/* If the user cares, return the calculated checksum */
1456 		if (checksum_val)
1457 			*checksum_val = checksum;
1458 	} else {
1459 		DEBUGOUT("EEPROM read failed\n");
1460 	}
1461 
1462 	return status;
1463 }
1464 
1465 /**
1466  *  ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
1467  *  @hw: pointer to hardware structure
1468  **/
1469 int32_t ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1470 {
1471 	int32_t status;
1472 	uint16_t checksum;
1473 
1474 	/*
1475 	 * Read the first word from the EEPROM. If this times out or fails, do
1476 	 * not continue or we could be in for a very long wait while every
1477 	 * EEPROM read fails
1478 	 */
1479 	status = hw->eeprom.ops.read(hw, 0, &checksum);
1480 
1481 	if (status == IXGBE_SUCCESS) {
1482 		checksum = hw->eeprom.ops.calc_checksum(hw);
1483 		status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
1484 		                              checksum);
1485 	} else {
1486 		DEBUGOUT("EEPROM read failed\n");
1487 	}
1488 
1489 	return status;
1490 }
1491 
1492 /**
1493  *  ixgbe_validate_mac_addr - Validate MAC address
1494  *  @mac_addr: pointer to MAC address.
1495  *
1496  *  Tests a MAC address to ensure it is a valid Individual Address
1497  **/
1498 int32_t ixgbe_validate_mac_addr(uint8_t *mac_addr)
1499 {
1500 	int32_t status = IXGBE_SUCCESS;
1501 
1502 	/* Make sure it is not a multicast address */
1503 	if (IXGBE_IS_MULTICAST(mac_addr)) {
1504 		DEBUGOUT("MAC address is multicast\n");
1505 		status = IXGBE_ERR_INVALID_MAC_ADDR;
1506 	/* Not a broadcast address */
1507 	} else if (IXGBE_IS_BROADCAST(mac_addr)) {
1508 		DEBUGOUT("MAC address is broadcast\n");
1509 		status = IXGBE_ERR_INVALID_MAC_ADDR;
1510 	/* Reject the zero address */
1511 	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
1512 	           mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
1513 		DEBUGOUT("MAC address is all zeros\n");
1514 		status = IXGBE_ERR_INVALID_MAC_ADDR;
1515 	}
1516 	return status;
1517 }
1518 
1519 /**
1520  *  ixgbe_set_rar_generic - Set Rx address register
1521  *  @hw: pointer to hardware structure
1522  *  @index: Receive address register to write
1523  *  @addr: Address to put into receive address register
1524  *  @vmdq: VMDq "set" or "pool" index
1525  *  @enable_addr: set flag that address is active
1526  *
1527  *  Puts an ethernet address into a receive address register.
1528  **/
1529 int32_t ixgbe_set_rar_generic(struct ixgbe_hw *hw, uint32_t index, uint8_t *addr, uint32_t vmdq,
1530                           uint32_t enable_addr)
1531 {
1532 	uint32_t rar_low, rar_high;
1533 	uint32_t rar_entries = hw->mac.num_rar_entries;
1534 
1535 	/* Make sure we are using a valid rar index range */
1536 	if (index >= rar_entries) {
1537 		DEBUGOUT1("RAR index %d is out of range.\n", index);
1538 		return IXGBE_ERR_INVALID_ARGUMENT;
1539 	}
1540 
1541 	/* setup VMDq pool selection before this RAR gets enabled */
1542 	hw->mac.ops.set_vmdq(hw, index, vmdq);
1543 
1544 	/* Make sure we are using a valid rar index range */
1545 	/*
1546 	 * HW expects these in little endian so we reverse the byte
1547 	 * order from network order (big endian) to little endian
1548 	 */
1549 	rar_low = ((uint32_t)addr[0] |
1550 	           ((uint32_t)addr[1] << 8) |
1551 	           ((uint32_t)addr[2] << 16) |
1552 	           ((uint32_t)addr[3] << 24));
1553 	/*
1554 	 * Some parts put the VMDq setting in the extra RAH bits,
1555 	 * so save everything except the lower 16 bits that hold part
1556 	 * of the address and the address valid bit.
1557 	 */
1558 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1559 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1560 	rar_high |= ((uint32_t)addr[4] | ((uint32_t)addr[5] << 8));
1561 
1562 	if (enable_addr != 0)
1563 		rar_high |= IXGBE_RAH_AV;
1564 
1565 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
1566 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1567 
1568 	return IXGBE_SUCCESS;
1569 }
1570 
1571 /**
1572  *  ixgbe_clear_rar_generic - Remove Rx address register
1573  *  @hw: pointer to hardware structure
1574  *  @index: Receive address register to write
1575  *
1576  *  Clears an ethernet address from a receive address register.
1577  **/
1578 int32_t ixgbe_clear_rar_generic(struct ixgbe_hw *hw, uint32_t index)
1579 {
1580 	uint32_t rar_high;
1581 	uint32_t rar_entries = hw->mac.num_rar_entries;
1582 
1583 	/* Make sure we are using a valid rar index range */
1584 	if (index >= rar_entries) {
1585 		DEBUGOUT1("RAR index %d is out of range.\n", index);
1586 		return IXGBE_ERR_INVALID_ARGUMENT;
1587 	}
1588 
1589 	/*
1590 	 * Some parts put the VMDq setting in the extra RAH bits,
1591 	 * so save everything except the lower 16 bits that hold part
1592 	 * of the address and the address valid bit.
1593 	 */
1594 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1595 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1596 
1597 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1598 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1599 
1600 	/* clear VMDq pool/queue selection for this RAR */
1601 	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
1602 
1603 	return IXGBE_SUCCESS;
1604 }
1605 
1606 /**
1607  *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
1608  *  @hw: pointer to hardware structure
1609  *
1610  *  Places the MAC address in receive address register 0 and clears the rest
1611  *  of the receive address registers. Clears the multicast table. Assumes
1612  *  the receiver is in reset when the routine is called.
1613  **/
int32_t ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	uint32_t i;
	uint32_t rar_entries = hw->mac.num_rar_entries;

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
	    IXGBE_ERR_INVALID_MAC_ADDR) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
		          hw->mac.addr[0], hw->mac.addr[1],
		          hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
		          hw->mac.addr[4], hw->mac.addr[5]);
	} else {
		/* Setup the receive address (software override into RAR[0]) */
		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
		          hw->mac.addr[0], hw->mac.addr[1],
		          hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
		          hw->mac.addr[4], hw->mac.addr[5]);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for RAR 0 */
		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
	}
	hw->addr_ctrl.overflow_promisc = 0;

	/* RAR[0] (the primary address) is the only entry in use now */
	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA (and program the multicast filter mode) */
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	DEBUGOUT(" Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	/* NOTE(review): ixgbe_hw0 appears to dispatch the optional
	 * init_uta_tables mac op — confirm against the macro definition. */
	ixgbe_hw0(hw, init_uta_tables);

	return IXGBE_SUCCESS;
}
1671 
1672 /**
1673  *  ixgbe_add_uc_addr - Adds a secondary unicast address.
1674  *  @hw: pointer to hardware structure
 *  @addr: new address
 *  @vmdq: VMDq "set" or "pool" index for the address
1676  *
1677  *  Adds it to unused receive address register or goes into promiscuous mode.
1678  **/
1679 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, uint8_t *addr, uint32_t vmdq)
1680 {
1681 	uint32_t rar_entries = hw->mac.num_rar_entries;
1682 	uint32_t rar;
1683 
1684 	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
1685 	          addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
1686 
1687 	/*
1688 	 * Place this address in the RAR if there is room,
1689 	 * else put the controller into promiscuous mode
1690 	 */
1691 	if (hw->addr_ctrl.rar_used_count < rar_entries) {
1692 		rar = hw->addr_ctrl.rar_used_count;
1693 		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
1694 		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
1695 		hw->addr_ctrl.rar_used_count++;
1696 	} else {
1697 		hw->addr_ctrl.overflow_promisc++;
1698 	}
1699 
1700 	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
1701 }
1702 
1703 /**
1704  *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
1705  *  @hw: pointer to hardware structure
1706  *  @addr_list: the list of new addresses
1707  *  @addr_count: number of addresses
1708  *  @next: iterator function to walk the address list
1709  *
1710  *  The given list replaces any existing list.  Clears the secondary addrs from
1711  *  receive address registers.  Uses unused receive address registers for the
1712  *  first secondary addresses, and falls back to promiscuous mode as needed.
1713  *
1714  *  Drivers using secondary unicast addresses must set user_set_promisc when
1715  *  manually putting the device into promiscuous mode.
1716  **/
int32_t ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, uint8_t *addr_list,
                                      uint32_t addr_count, ixgbe_mc_addr_itr next)
{
	uint8_t *addr;
	uint32_t i;
	uint32_t old_promisc_setting = hw->addr_ctrl.overflow_promisc;
	uint32_t uc_addr_in_use;
	uint32_t fctrl;
	uint32_t vmdq;

	/*
	 * Clear accounting of old secondary address list,
	 * don't count RAR[0].
	 * NOTE(review): assumes rar_used_count >= 1 (RAR[0] always in use,
	 * as set by ixgbe_init_rx_addrs_generic) — underflows otherwise.
	 */
	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
	hw->addr_ctrl.overflow_promisc = 0;

	/* Zero out the previously used secondary RAR entries (RAR[1..]) */
	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
	for (i = 0; i < uc_addr_in_use; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
	}

	/* Add the new addresses; overflow bumps addr_ctrl.overflow_promisc */
	for (i = 0; i < addr_count; i++) {
		DEBUGOUT(" Adding the secondary addresses:\n");
		addr = next(hw, &addr_list, &vmdq);
		ixgbe_add_uc_addr(hw, addr, vmdq);
	}

	if (hw->addr_ctrl.overflow_promisc) {
		/* enable promisc if not already in overflow or set by user */
		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Entering address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}
	} else {
		/* only disable if set by overflow, not by user */
		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Leaving address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl &= ~IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}
	}

	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}
1770 
1771 /**
1772  *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
1773  *  @hw: pointer to hardware structure
1774  *  @mc_addr: the multicast address
1775  *
1776  *  Extracts the 12 bits, from a multicast address, to determine which
1777  *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
1778  *  incoming rx multicast addresses, to determine the bit-vector to check in
1779  *  the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
1780  *  by the MO field of the MCSTCTRL. The MO field is set during initialization
1781  *  to mc_filter_type.
1782  **/
1783 int32_t ixgbe_mta_vector(struct ixgbe_hw *hw, uint8_t *mc_addr)
1784 {
1785 	uint32_t vector = 0;
1786 
1787 	switch (hw->mac.mc_filter_type) {
1788 	case 0:   /* use bits [47:36] of the address */
1789 		vector = ((mc_addr[4] >> 4) | (((uint16_t)mc_addr[5]) << 4));
1790 		break;
1791 	case 1:   /* use bits [46:35] of the address */
1792 		vector = ((mc_addr[4] >> 3) | (((uint16_t)mc_addr[5]) << 5));
1793 		break;
1794 	case 2:   /* use bits [45:34] of the address */
1795 		vector = ((mc_addr[4] >> 2) | (((uint16_t)mc_addr[5]) << 6));
1796 		break;
1797 	case 3:   /* use bits [43:32] of the address */
1798 		vector = ((mc_addr[4]) | (((uint16_t)mc_addr[5]) << 8));
1799 		break;
1800 	default:  /* Invalid mc_filter_type */
1801 		DEBUGOUT("MC filter type param set incorrectly\n");
1802 		panic("ixgbe");
1803 		break;
1804 	}
1805 
1806 	/* vector can only be 12-bits or boundary will be exceeded */
1807 	vector &= 0xFFF;
1808 	return vector;
1809 }
1810 
1811 /**
1812  *  ixgbe_set_mta - Set bit-vector in multicast table
1813  *  @hw: pointer to hardware structure
 *  @mc_addr: the multicast address
1815  *
1816  *  Sets the bit-vector in the multicast table.
1817  **/
1818 void ixgbe_set_mta(struct ixgbe_hw *hw, uint8_t *mc_addr)
1819 {
1820 	uint32_t vector;
1821 	uint32_t vector_bit;
1822 	uint32_t vector_reg;
1823 
1824 	hw->addr_ctrl.mta_in_use++;
1825 
1826 	vector = ixgbe_mta_vector(hw, mc_addr);
1827 	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
1828 
1829 	/*
1830 	 * The MTA is a register array of 128 32-bit registers. It is treated
1831 	 * like an array of 4096 bits.  We want to set bit
1832 	 * BitArray[vector_value]. So we figure out what register the bit is
1833 	 * in, read it, OR in the new bit, then write back the new value.  The
1834 	 * register is determined by the upper 7 bits of the vector value and
1835 	 * the bit within that register are determined by the lower 5 bits of
1836 	 * the value.
1837 	 */
1838 	vector_reg = (vector >> 5) & 0x7F;
1839 	vector_bit = vector & 0x1F;
1840 	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
1841 }
1842 
1843 /**
1844  *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
1845  *  @hw: pointer to hardware structure
1846  *  @mc_addr_list: the list of new multicast addresses
1847  *  @mc_addr_count: number of addresses
1848  *  @next: iterator function to walk the multicast address list
1849  *
1850  *  The given list replaces any existing list. Clears the MC addrs from receive
1851  *  address registers and the multicast table. Uses unused receive address
1852  *  registers for the first multicast addresses, and hashes the rest into the
1853  *  multicast table.
1854  **/
1855 int32_t ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, uint8_t *mc_addr_list,
1856                                       uint32_t mc_addr_count, ixgbe_mc_addr_itr next)
1857 {
1858 	uint32_t i;
1859 	uint32_t vmdq;
1860 
1861 	/*
1862 	 * Set the new number of MC addresses that we are being requested to
1863 	 * use.
1864 	 */
1865 	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
1866 	hw->addr_ctrl.mta_in_use = 0;
1867 
1868 	/* Clear mta_shadow */
1869 	DEBUGOUT(" Clearing MTA\n");
1870 	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
1871 
1872 	/* Update mta_shadow */
1873 	for (i = 0; i < mc_addr_count; i++) {
1874 		DEBUGOUT(" Adding the multicast addresses:\n");
1875 		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
1876 	}
1877 
1878 	/* Enable mta */
1879 	for (i = 0; i < hw->mac.mcft_size; i++)
1880 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
1881 				      hw->mac.mta_shadow[i]);
1882 
1883 	if (hw->addr_ctrl.mta_in_use > 0)
1884 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
1885 		                IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
1886 
1887 	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
1888 	return IXGBE_SUCCESS;
1889 }
1890 
1891 /**
1892  *  ixgbe_enable_mc_generic - Enable multicast address in RAR
1893  *  @hw: pointer to hardware structure
1894  *
1895  *  Enables multicast address in RAR and the use of the multicast hash table.
1896  **/
1897 int32_t ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
1898 {
1899 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
1900 
1901 	if (a->mta_in_use > 0)
1902 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
1903 		                hw->mac.mc_filter_type);
1904 
1905 	return IXGBE_SUCCESS;
1906 }
1907 
1908 /**
1909  *  ixgbe_disable_mc_generic - Disable multicast address in RAR
1910  *  @hw: pointer to hardware structure
1911  *
1912  *  Disables multicast address in RAR and the use of the multicast hash table.
1913  **/
1914 int32_t ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
1915 {
1916 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
1917 
1918 	if (a->mta_in_use > 0)
1919 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1920 
1921 	return IXGBE_SUCCESS;
1922 }
1923 
1924 /**
1925  *  ixgbe_fc_enable_generic - Enable flow control
1926  *  @hw: pointer to hardware structure
1927  *  @packetbuf_num: packet buffer number (0-7)
1928  *
1929  *  Enable flow control according to the current settings.
1930  **/
1931 int32_t ixgbe_fc_enable_generic(struct ixgbe_hw *hw, int32_t packetbuf_num)
1932 {
1933 	int32_t ret_val = IXGBE_SUCCESS;
1934 	uint32_t mflcn_reg, fccfg_reg;
1935 	uint32_t reg;
1936 	uint32_t rx_pba_size;
1937 	uint32_t fcrtl, fcrth;
1938 
1939 	/* Negotiate the fc mode to use */
1940 	ret_val = ixgbe_fc_autoneg(hw);
1941 	if (ret_val == IXGBE_ERR_FLOW_CONTROL)
1942 		goto out;
1943 
1944 	/* Disable any previous flow control settings */
1945 	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
1946 	mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);
1947 
1948 	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
1949 	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
1950 
1951 	/*
1952 	 * The possible values of fc.current_mode are:
1953 	 * 0: Flow control is completely disabled
1954 	 * 1: Rx flow control is enabled (we can receive pause frames,
1955 	 *    but not send pause frames).
1956 	 * 2: Tx flow control is enabled (we can send pause frames but
1957 	 *    we do not support receiving pause frames).
1958 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
1959 	 * other: Invalid.
1960 	 */
1961 	switch (hw->fc.current_mode) {
1962 	case ixgbe_fc_none:
1963 		/*
1964 		 * Flow control is disabled by software override or autoneg.
1965 		 * The code below will actually disable it in the HW.
1966 		 */
1967 		break;
1968 	case ixgbe_fc_rx_pause:
1969 		/*
1970 		 * Rx Flow control is enabled and Tx Flow control is
1971 		 * disabled by software override. Since there really
1972 		 * isn't a way to advertise that we are capable of RX
1973 		 * Pause ONLY, we will advertise that we support both
1974 		 * symmetric and asymmetric Rx PAUSE.  Later, we will
1975 		 * disable the adapter's ability to send PAUSE frames.
1976 		 */
1977 		mflcn_reg |= IXGBE_MFLCN_RFCE;
1978 		break;
1979 	case ixgbe_fc_tx_pause:
1980 		/*
1981 		 * Tx Flow control is enabled, and Rx Flow control is
1982 		 * disabled by software override.
1983 		 */
1984 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
1985 		break;
1986 	case ixgbe_fc_full:
1987 		/* Flow control (both Rx and Tx) is enabled by SW override. */
1988 		mflcn_reg |= IXGBE_MFLCN_RFCE;
1989 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
1990 		break;
1991 	default:
1992 		DEBUGOUT("Flow control param set incorrectly\n");
1993 		ret_val = IXGBE_ERR_CONFIG;
1994 		goto out;
1995 		break;
1996 	}
1997 
1998 	/* Set 802.3x based flow control settings. */
1999 	mflcn_reg |= IXGBE_MFLCN_DPF;
2000 	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2001 	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2002 
2003 	rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
2004 	rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
2005 
2006 	fcrth = (rx_pba_size - hw->fc.high_water) << 10;
2007 	fcrtl = (rx_pba_size - hw->fc.low_water) << 10;
2008 
2009 	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
2010 		fcrth |= IXGBE_FCRTH_FCEN;
2011 		if (hw->fc.send_xon)
2012 			fcrtl |= IXGBE_FCRTL_XONE;
2013 	}
2014 
2015 	IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
2016 	IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl);
2017 
2018 	/* Configure pause time (2 TCs per register) */
2019 	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
2020 	if ((packetbuf_num & 1) == 0)
2021 		reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
2022 	else
2023 		reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
2024 	IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
2025 
2026 	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
2027 
2028 out:
2029 	return ret_val;
2030 }
2031 
2032 /**
2033  *  ixgbe_fc_autoneg - Configure flow control
2034  *  @hw: pointer to hardware structure
2035  *
2036  *  Compares our advertised flow control capabilities to those advertised by
2037  *  our link partner, and determines the proper flow control mode to use.
2038  **/
2039 int32_t ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2040 {
2041 	int32_t ret_val = IXGBE_SUCCESS;
2042 	ixgbe_link_speed speed;
2043 	int link_up;
2044 
2045 	if (hw->fc.disable_fc_autoneg)
2046 		goto out;
2047 
2048 	/*
2049 	 * AN should have completed when the cable was plugged in.
2050 	 * Look for reasons to bail out.  Bail out if:
2051 	 * - FC autoneg is disabled, or if
2052 	 * - link is not up.
2053 	 *
2054 	 * Since we're being called from an LSC, link is already known to be up.
2055 	 * So use link_up_wait_to_complete=FALSE.
2056 	 */
2057 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
2058 	if (!link_up) {
2059 		ret_val = IXGBE_ERR_FLOW_CONTROL;
2060 		goto out;
2061 	}
2062 
2063 	switch (hw->phy.media_type) {
2064 	/* Autoneg flow control on fiber adapters */
2065 	case ixgbe_media_type_fiber:
2066 		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2067 			ret_val = ixgbe_fc_autoneg_fiber(hw);
2068 		break;
2069 
2070 	/* Autoneg flow control on backplane adapters */
2071 	case ixgbe_media_type_backplane:
2072 		ret_val = ixgbe_fc_autoneg_backplane(hw);
2073 		break;
2074 
2075 	/* Autoneg flow control on copper adapters */
2076 	case ixgbe_media_type_copper:
2077 		if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
2078 			ret_val = ixgbe_fc_autoneg_copper(hw);
2079 		break;
2080 
2081 	default:
2082 		break;
2083 	}
2084 
2085 out:
2086 	if (ret_val == IXGBE_SUCCESS) {
2087 		hw->fc.fc_was_autonegged = TRUE;
2088 	} else {
2089 		hw->fc.fc_was_autonegged = FALSE;
2090 		hw->fc.current_mode = hw->fc.requested_mode;
2091 	}
2092 	return ret_val;
2093 }
2094 
2095 /**
2096  *  ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2097  *  @hw: pointer to hardware structure
2098  *  @speed:
2099  *  @link_up
2100  *
2101  *  Enable flow control according on 1 gig fiber.
2102  **/
2103 int32_t ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2104 {
2105 	uint32_t pcs_anadv_reg, pcs_lpab_reg, linkstat;
2106 	int32_t ret_val;
2107 
2108 	/*
2109 	 * On multispeed fiber at 1g, bail out if
2110 	 * - link is up but AN did not complete, or if
2111 	 * - link is up and AN completed but timed out
2112 	 */
2113 
2114 	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2115 	if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2116 	    ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
2117 		ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2118 		goto out;
2119 	}
2120 
2121 	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2122 	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2123 
2124 	ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2125 			       pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2126 			       IXGBE_PCS1GANA_ASM_PAUSE,
2127 			       IXGBE_PCS1GANA_SYM_PAUSE,
2128 			       IXGBE_PCS1GANA_ASM_PAUSE);
2129 
2130 out:
2131 	return ret_val;
2132 }
2133 
2134 /**
2135  *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2136  *  @hw: pointer to hardware structure
2137  *
2138  *  Enable flow control according to IEEE clause 37.
2139  **/
2140 int32_t ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2141 {
2142 	uint32_t links2, anlp1_reg, autoc_reg, links;
2143 	int32_t ret_val;
2144 
2145 	/*
2146 	 * On backplane, bail out if
2147 	 * - backplane autoneg was not completed, or if
2148 	 * - we are 82599 and link partner is not AN enabled
2149 	 */
2150 	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2151 	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
2152 		hw->fc.fc_was_autonegged = FALSE;
2153 		hw->fc.current_mode = hw->fc.requested_mode;
2154 		ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2155 		goto out;
2156 	}
2157 
2158 	if (hw->mac.type == ixgbe_mac_82599EB) {
2159 		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2160 		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
2161 			hw->fc.fc_was_autonegged = FALSE;
2162 			hw->fc.current_mode = hw->fc.requested_mode;
2163 			ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2164 			goto out;
2165 		}
2166 	}
2167 	/*
2168 	 * Read the 10g AN autoc and LP ability registers and resolve
2169 	 * local flow control settings accordingly
2170 	 */
2171 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2172 	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2173 
2174 	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2175 		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2176 		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2177 
2178 out:
2179 	return ret_val;
2180 }
2181 
2182 /**
2183  *  ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2184  *  @hw: pointer to hardware structure
2185  *
2186  *  Enable flow control according to IEEE clause 37.
2187  **/
2188 int32_t ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2189 {
2190 	uint16_t technology_ability_reg = 0;
2191 	uint16_t lp_technology_ability_reg = 0;
2192 
2193 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2194 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2195 			     &technology_ability_reg);
2196 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
2197 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2198 			     &lp_technology_ability_reg);
2199 
2200 	return ixgbe_negotiate_fc(hw, (uint32_t)technology_ability_reg,
2201 				  (uint32_t)lp_technology_ability_reg,
2202 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2203 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2204 }
2205 
2206 /**
2207  *  ixgbe_negotiate_fc - Negotiate flow control
2208  *  @hw: pointer to hardware structure
2209  *  @adv_reg: flow control advertised settings
2210  *  @lp_reg: link partner's flow control settings
2211  *  @adv_sym: symmetric pause bit in advertisement
2212  *  @adv_asm: asymmetric pause bit in advertisement
2213  *  @lp_sym: symmetric pause bit in link partner advertisement
2214  *  @lp_asm: asymmetric pause bit in link partner advertisement
2215  *
2216  *  Find the intersection between advertised settings and link partner's
2217  *  advertised settings
2218  **/
2219 int32_t ixgbe_negotiate_fc(struct ixgbe_hw *hw, uint32_t adv_reg, uint32_t lp_reg,
2220 			      uint32_t adv_sym, uint32_t adv_asm, uint32_t lp_sym, uint32_t lp_asm)
2221 {
2222 	if ((!(adv_reg)) ||  (!(lp_reg)))
2223 		return IXGBE_ERR_FC_NOT_NEGOTIATED;
2224 
2225 	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2226 		/*
2227 		 * Now we need to check if the user selected Rx ONLY
2228 		 * of pause frames.  In this case, we had to advertise
2229 		 * FULL flow control because we could not advertise RX
2230 		 * ONLY. Hence, we must now check to see if we need to
2231 		 * turn OFF the TRANSMISSION of PAUSE frames.
2232 		 */
2233 		if (hw->fc.requested_mode == ixgbe_fc_full) {
2234 			hw->fc.current_mode = ixgbe_fc_full;
2235 			DEBUGOUT("Flow Control = FULL.\n");
2236 		} else {
2237 			hw->fc.current_mode = ixgbe_fc_rx_pause;
2238 			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2239 		}
2240 	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2241 		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2242 		hw->fc.current_mode = ixgbe_fc_tx_pause;
2243 		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2244 	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2245 		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2246 		hw->fc.current_mode = ixgbe_fc_rx_pause;
2247 		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2248 	} else {
2249 		hw->fc.current_mode = ixgbe_fc_none;
2250 		DEBUGOUT("Flow Control = NONE.\n");
2251 	}
2252 	return IXGBE_SUCCESS;
2253 }
2254 
2255 /**
2256  *  ixgbe_setup_fc - Set up flow control
2257  *  @hw: pointer to hardware structure
2258  *
2259  *  Called at init time to set up flow control.
2260  **/
2261 int32_t ixgbe_setup_fc(struct ixgbe_hw *hw, int32_t packetbuf_num)
2262 {
2263 	int32_t ret_val = IXGBE_SUCCESS;
2264 	uint32_t reg = 0, reg_bp = 0;;
2265 	uint16_t reg_cu = 0;
2266 
2267 	/* Validate the packetbuf configuration */
2268 	if (packetbuf_num < 0 || packetbuf_num > 7) {
2269 		DEBUGOUT1("Invalid packet buffer number [%d], expected range is"
2270 		          " 0-7\n", packetbuf_num);
2271 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2272 		goto out;
2273 	}
2274 
2275 	/*
2276 	 * Validate the water mark configuration.  Zero water marks are invalid
2277 	 * because it causes the controller to just blast out fc packets.
2278 	 */
2279 	if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
2280 		DEBUGOUT("Invalid water mark configuration\n");
2281 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2282 		goto out;
2283 	}
2284 
2285 	/*
2286 	 * Validate the requested mode.  Strict IEEE mode does not allow
2287 	 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
2288 	 */
2289 	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
2290 		DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
2291 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2292 		goto out;
2293 	}
2294 
2295 	/*
2296 	 * 10gig parts do not have a word in the EEPROM to determine the
2297 	 * default flow control setting, so we explicitly set it to full.
2298 	 */
2299 	if (hw->fc.requested_mode == ixgbe_fc_default)
2300 		hw->fc.requested_mode = ixgbe_fc_full;
2301 
2302 	/*
2303 	 * Set up the 1G and 10G flow control advertisement registers so the
2304 	 * HW will be able to do fc autoneg once the cable is plugged in.  If
2305 	 * we link at 10G, the 1G advertisement is harmless and vice versa.
2306 	 */
2307 
2308 	switch (hw->phy.media_type) {
2309 	case ixgbe_media_type_fiber:
2310 	case ixgbe_media_type_backplane:
2311 		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2312 		reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2313 		break;
2314 
2315 	case ixgbe_media_type_copper:
2316 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2317 					IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
2318 		break;
2319 
2320 	default:
2321 		;
2322 	}
2323 
2324 	/*
2325 	 * The possible values of fc.requested_mode are:
2326 	 * 0: Flow control is completely disabled
2327 	 * 1: Rx flow control is enabled (we can receive pause frames,
2328 	 *    but not send pause frames).
2329 	 * 2: Tx flow control is enabled (we can send pause frames but
2330 	 *    we do not support receiving pause frames).
2331 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2332 	 * other: Invalid.
2333 	 */
2334 	switch (hw->fc.requested_mode) {
2335 	case ixgbe_fc_none:
2336 		/* Flow control completely disabled by software override. */
2337 		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2338 		if (hw->phy.media_type == ixgbe_media_type_backplane)
2339 			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
2340 				    IXGBE_AUTOC_ASM_PAUSE);
2341 		else if (hw->phy.media_type == ixgbe_media_type_copper)
2342 			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2343 		break;
2344 	case ixgbe_fc_rx_pause:
2345 		/*
2346 		 * Rx Flow control is enabled and Tx Flow control is
2347 		 * disabled by software override. Since there really
2348 		 * isn't a way to advertise that we are capable of RX
2349 		 * Pause ONLY, we will advertise that we support both
2350 		 * symmetric and asymmetric Rx PAUSE.  Later, we will
2351 		 * disable the adapter's ability to send PAUSE frames.
2352 		 */
2353 		reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2354 		if (hw->phy.media_type == ixgbe_media_type_backplane)
2355 			reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
2356 				   IXGBE_AUTOC_ASM_PAUSE);
2357 		else if (hw->phy.media_type == ixgbe_media_type_copper)
2358 			reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2359 		break;
2360 	case ixgbe_fc_tx_pause:
2361 		/*
2362 		 * Tx Flow control is enabled, and Rx Flow control is
2363 		 * disabled by software override.
2364 		 */
2365 		reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
2366 		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
2367 		if (hw->phy.media_type == ixgbe_media_type_backplane) {
2368 			reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
2369 			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
2370 		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
2371 			reg_cu |= (IXGBE_TAF_ASM_PAUSE);
2372 			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
2373 		}
2374 		break;
2375 	case ixgbe_fc_full:
2376 		/* Flow control (both Rx and Tx) is enabled by SW override. */
2377 		reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2378 		if (hw->phy.media_type == ixgbe_media_type_backplane)
2379 			reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
2380 				   IXGBE_AUTOC_ASM_PAUSE);
2381 		else if (hw->phy.media_type == ixgbe_media_type_copper)
2382 			reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2383 		break;
2384 	default:
2385 		DEBUGOUT("Flow control param set incorrectly\n");
2386 		ret_val = IXGBE_ERR_CONFIG;
2387 		goto out;
2388 		break;
2389 	}
2390 
2391 	/*
2392 	 * Enable auto-negotiation between the MAC & PHY;
2393 	 * the MAC will advertise clause 37 flow control.
2394 	 */
2395 	IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
2396 	reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
2397 
2398 	/* Disable AN timeout */
2399 	if (hw->fc.strict_ieee)
2400 		reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
2401 
2402 	IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
2403 	DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
2404 
2405 	/*
2406 	 * AUTOC restart handles negotiation of 1G and 10G on backplane
2407 	 * and copper. There is no need to set the PCS1GCTL register.
2408 	 *
2409 	 */
2410 	if (hw->phy.media_type == ixgbe_media_type_backplane) {
2411 		reg_bp |= IXGBE_AUTOC_AN_RESTART;
2412 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
2413 	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
2414 		    (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
2415 		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2416 				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
2417 	}
2418 
2419 	DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
2420 out:
2421 	return ret_val;
2422 }
2423 
2424 /**
2425  *  ixgbe_disable_pcie_master - Disable PCI-express master access
2426  *  @hw: pointer to hardware structure
2427  *
2428  *  Disables PCI-Express master access and verifies there are no pending
2429  *  requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
2430  *  bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
2431  *  is returned signifying master requests disabled.
2432  **/
2433 int32_t ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2434 {
2435 	uint32_t i;
2436 	uint32_t reg_val;
2437 	uint32_t number_of_queues;
2438 	int32_t status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
2439 
2440 	/* Just jump out if bus mastering is already disabled */
2441 	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2442 		goto out;
2443 
2444 	/* Disable the receive unit by stopping each queue */
2445 	number_of_queues = hw->mac.max_rx_queues;
2446 	for (i = 0; i < number_of_queues; i++) {
2447 		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
2448 		if (reg_val & IXGBE_RXDCTL_ENABLE) {
2449 			reg_val &= ~IXGBE_RXDCTL_ENABLE;
2450 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
2451 		}
2452 	}
2453 
2454 	reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL);
2455 	reg_val |= IXGBE_CTRL_GIO_DIS;
2456 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
2457 
2458 	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2459 		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2460 			goto check_device_status;
2461 		usec_delay(100);
2462 	}
2463 
2464 	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
2465 	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
2466 
2467 	/*
2468 	 * Before proceeding, make sure that the PCIe block does not have
2469 	 * transactions pending.
2470 	 */
2471 check_device_status:
2472 	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2473 		if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
2474 			IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
2475 			break;
2476 		usec_delay(100);
2477 	}
2478 
2479 	if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT)
2480 		DEBUGOUT("PCIe transaction pending bit also did not clear.\n");
2481 	else
2482 		goto out;
2483 
2484 	/*
2485 	 * Two consecutive resets are required via CTRL.RST per datasheet
2486 	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
2487 	 * of this need.  The first reset prevents new master requests from
2488 	 * being issued by our device.  We then must wait 1usec for any
2489 	 * remaining completions from the PCIe bus to trickle in, and then reset
2490 	 * again to clear out any effects they may have had on our device.
2491 	 */
2492 	 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2493 
2494 out:
2495 	return status;
2496 }
2497 
2498 
2499 /**
2500  *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
2501  *  @hw: pointer to hardware structure
2502  *  @mask: Mask to specify which semaphore to acquire
2503  *
2504  *  Acquires the SWFW semaphore thought the GSSR register for the specified
2505  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
2506  **/
2507 int32_t ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, uint16_t mask)
2508 {
2509 	uint32_t gssr;
2510 	uint32_t swmask = mask;
2511 	uint32_t fwmask = mask << 5;
2512 	int32_t timeout = 200;
2513 
2514 	while (timeout) {
2515 		/*
2516 		 * SW EEPROM semaphore bit is used for access to all
2517 		 * SW_FW_SYNC/GSSR bits (not just EEPROM)
2518 		 */
2519 		if (ixgbe_get_eeprom_semaphore(hw))
2520 			return IXGBE_ERR_SWFW_SYNC;
2521 
2522 		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2523 		if (!(gssr & (fwmask | swmask)))
2524 			break;
2525 
2526 		/*
2527 		 * Firmware currently using resource (fwmask) or other software
2528 		 * thread currently using resource (swmask)
2529 		 */
2530 		ixgbe_release_eeprom_semaphore(hw);
2531 		msec_delay(5);
2532 		timeout--;
2533 	}
2534 
2535 	if (!timeout) {
2536 		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
2537 		return IXGBE_ERR_SWFW_SYNC;
2538 	}
2539 
2540 	gssr |= swmask;
2541 	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2542 
2543 	ixgbe_release_eeprom_semaphore(hw);
2544 	return IXGBE_SUCCESS;
2545 }
2546 
2547 /**
2548  *  ixgbe_release_swfw_sync - Release SWFW semaphore
2549  *  @hw: pointer to hardware structure
2550  *  @mask: Mask to specify which semaphore to release
2551  *
2552  *  Releases the SWFW semaphore thought the GSSR register for the specified
2553  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
2554  **/
2555 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, uint16_t mask)
2556 {
2557 	uint32_t gssr;
2558 	uint32_t swmask = mask;
2559 
2560 	ixgbe_get_eeprom_semaphore(hw);
2561 
2562 	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2563 	gssr &= ~swmask;
2564 	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2565 
2566 	ixgbe_release_eeprom_semaphore(hw);
2567 }
2568 
2569 /**
2570  *  ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
2571  *  @hw: pointer to hardware structure
2572  *  @regval: register value to write to RXCTRL
2573  *
2574  *  Enables the Rx DMA unit
2575  **/
2576 int32_t ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, uint32_t regval)
2577 {
2578 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2579 
2580 	return IXGBE_SUCCESS;
2581 }
2582 
2583 /**
2584  *  ixgbe_blink_led_start_generic - Blink LED based on index.
2585  *  @hw: pointer to hardware structure
2586  *  @index: led number to blink
2587  **/
2588 int32_t ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, uint32_t index)
2589 {
2590 	ixgbe_link_speed speed = 0;
2591 	int link_up = 0;
2592 	uint32_t autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2593 	uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2594 
2595 	/*
2596 	 * Link must be up to auto-blink the LEDs;
2597 	 * Force it if link is down.
2598 	 */
2599 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
2600 
2601 	if (!link_up) {
2602 		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2603 		autoc_reg |= IXGBE_AUTOC_FLU;
2604 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2605 		msec_delay(10);
2606 	}
2607 
2608 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
2609 	led_reg |= IXGBE_LED_BLINK(index);
2610 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2611 	IXGBE_WRITE_FLUSH(hw);
2612 
2613 	return IXGBE_SUCCESS;
2614 }
2615 
2616 /**
2617  *  ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
2618  *  @hw: pointer to hardware structure
2619  *  @index: led number to stop blinking
2620  **/
2621 int32_t ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, uint32_t index)
2622 {
2623 	uint32_t autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2624 	uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2625 
2626 	autoc_reg &= ~IXGBE_AUTOC_FLU;
2627 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2628 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2629 
2630 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
2631 	led_reg &= ~IXGBE_LED_BLINK(index);
2632 	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
2633 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2634 	IXGBE_WRITE_FLUSH(hw);
2635 
2636 	return IXGBE_SUCCESS;
2637 }
2638 
2639 /**
2640  *  ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
2641  *  @hw: pointer to hardware structure
2642  *
2643  *  Read PCIe configuration space, and get the MSI-X vector count from
2644  *  the capabilities table.
2645  **/
2646 uint32_t ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
2647 {
2648 	uint32_t msix_count = 64;
2649 
2650 	if (hw->mac.msix_vectors_from_pcie) {
2651 		msix_count = IXGBE_READ_PCIE_WORD(hw,
2652 		                                  IXGBE_PCIE_MSIX_82599_CAPS);
2653 		msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
2654 
2655 		/* MSI-X count is zero-based in HW, so increment to give
2656 		 * proper value */
2657 		msix_count++;
2658 	}
2659 
2660 	return msix_count;
2661 }
2662 
2663 /**
2664  *  ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
2665  *  @hw: pointer to hardware structure
2666  *  @addr: Address to put into receive address register
2667  *  @vmdq: VMDq pool to assign
2668  *
2669  *  Puts an ethernet address into a receive address register, or
2670  *  finds the rar that it is aleady in; adds to the pool list
2671  **/
2672 int32_t ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, uint8_t *addr, uint32_t vmdq)
2673 {
2674 	static const uint32_t NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
2675 	uint32_t first_empty_rar = NO_EMPTY_RAR_FOUND;
2676 	uint32_t rar;
2677 	uint32_t rar_low, rar_high;
2678 	uint32_t addr_low, addr_high;
2679 
2680 	/* swap bytes for HW little endian */
2681 	addr_low  = addr[0] | (addr[1] << 8)
2682 			    | (addr[2] << 16)
2683 			    | (addr[3] << 24);
2684 	addr_high = addr[4] | (addr[5] << 8);
2685 
2686 	/*
2687 	 * Either find the mac_id in rar or find the first empty space.
2688 	 * rar_highwater points to just after the highest currently used
2689 	 * rar in order to shorten the search.  It grows when we add a new
2690 	 * rar to the top.
2691 	 */
2692 	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
2693 		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
2694 
2695 		if (((IXGBE_RAH_AV & rar_high) == 0)
2696 		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
2697 			first_empty_rar = rar;
2698 		} else if ((rar_high & 0xFFFF) == addr_high) {
2699 			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
2700 			if (rar_low == addr_low)
2701 				break;    /* found it already in the rars */
2702 		}
2703 	}
2704 
2705 	if (rar < hw->mac.rar_highwater) {
2706 		/* already there so just add to the pool bits */
2707 		ixgbe_hw(hw, set_vmdq, rar, vmdq);
2708 	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
2709 		/* stick it into first empty RAR slot we found */
2710 		rar = first_empty_rar;
2711 		ixgbe_hw(hw, set_rar, rar, addr, vmdq, IXGBE_RAH_AV);
2712 	} else if (rar == hw->mac.rar_highwater) {
2713 		/* add it to the top of the list and inc the highwater mark */
2714 		ixgbe_hw(hw, set_rar, rar, addr, vmdq, IXGBE_RAH_AV);
2715 		hw->mac.rar_highwater++;
2716 	} else if (rar >= hw->mac.num_rar_entries) {
2717 		return IXGBE_ERR_INVALID_MAC_ADDR;
2718 	}
2719 
2720 	/*
2721 	 * If we found rar[0], make sure the default pool bit (we use pool 0)
2722 	 * remains cleared to be sure default pool packets will get delivered
2723 	 */
2724 	if (rar == 0)
2725 		ixgbe_hw(hw, clear_vmdq, rar, 0);
2726 
2727 	return rar;
2728 }
2729 
2730 /**
2731  *  ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
2732  *  @hw: pointer to hardware struct
2733  *  @rar: receive address register index to disassociate
2734  *  @vmdq: VMDq pool index to remove from the rar
2735  **/
2736 int32_t ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
2737 {
2738 	uint32_t mpsar_lo, mpsar_hi;
2739 	uint32_t rar_entries = hw->mac.num_rar_entries;
2740 
2741 	/* Make sure we are using a valid rar index range */
2742 	if (rar >= rar_entries) {
2743 		DEBUGOUT1("RAR index %d is out of range.\n", rar);
2744 		return IXGBE_ERR_INVALID_ARGUMENT;
2745 	}
2746 
2747 	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2748 	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2749 
2750 	if (!mpsar_lo && !mpsar_hi)
2751 		goto done;
2752 
2753 	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
2754 		if (mpsar_lo) {
2755 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
2756 			mpsar_lo = 0;
2757 		}
2758 		if (mpsar_hi) {
2759 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
2760 			mpsar_hi = 0;
2761 		}
2762 	} else if (vmdq < 32) {
2763 		mpsar_lo &= ~(1 << vmdq);
2764 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
2765 	} else {
2766 		mpsar_hi &= ~(1 << (vmdq - 32));
2767 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
2768 	}
2769 
2770 	/* was that the last pool using this rar? */
2771 	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
2772 		hw->mac.ops.clear_rar(hw, rar);
2773 done:
2774 	return IXGBE_SUCCESS;
2775 }
2776 
2777 /**
2778  *  ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
2779  *  @hw: pointer to hardware struct
2780  *  @rar: receive address register index to associate with a VMDq index
2781  *  @vmdq: VMDq pool index
2782  **/
2783 int32_t ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
2784 {
2785 	uint32_t mpsar;
2786 	uint32_t rar_entries = hw->mac.num_rar_entries;
2787 
2788 	/* Make sure we are using a valid rar index range */
2789 	if (rar >= rar_entries) {
2790 		DEBUGOUT1("RAR index %d is out of range.\n", rar);
2791 		return IXGBE_ERR_INVALID_ARGUMENT;
2792 	}
2793 
2794 	if (vmdq < 32) {
2795 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2796 		mpsar |= 1 << vmdq;
2797 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
2798 	} else {
2799 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2800 		mpsar |= 1 << (vmdq - 32);
2801 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
2802 	}
2803 	return IXGBE_SUCCESS;
2804 }
2805 
2806 /**
2807  *  ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
2808  *  @hw: pointer to hardware structure
2809  **/
2810 int32_t ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
2811 {
2812 	int i;
2813 
2814 	DEBUGOUT("Clearing UTA\n");
2815 
2816 	for (i = 0; i < 128; i++)
2817 		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
2818 
2819 	return IXGBE_SUCCESS;
2820 }
2821 
2822 /**
2823  *  ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
2824  *  @hw: pointer to hardware structure
2825  *  @vlan: VLAN id to write to VLAN filter
2826  *
2827  *  return the VLVF index where this VLAN id should be placed
2828  *
2829  **/
2830 int32_t ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, uint32_t vlan)
2831 {
2832 	uint32_t bits = 0;
2833 	uint32_t first_empty_slot = 0;
2834 	int32_t regindex;
2835 
2836 	/* short cut the special case */
2837 	if (vlan == 0)
2838 		return 0;
2839 
2840 	/*
2841 	  * Search for the vlan id in the VLVF entries. Save off the first empty
2842 	  * slot found along the way
2843 	  */
2844 	for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
2845 		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
2846 		if (!bits && !(first_empty_slot))
2847 			first_empty_slot = regindex;
2848 		else if ((bits & 0x0FFF) == vlan)
2849 			break;
2850 	}
2851 
2852 	/*
2853 	  * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
2854 	  * in the VLVF. Else use the first empty VLVF register for this
2855 	  * vlan id.
2856 	  */
2857 	if (regindex >= IXGBE_VLVF_ENTRIES) {
2858 		if (first_empty_slot)
2859 			regindex = first_empty_slot;
2860 		else {
2861 			DEBUGOUT("No space in VLVF.\n");
2862 			regindex = IXGBE_ERR_NO_SPACE;
2863 		}
2864 	}
2865 
2866 	return regindex;
2867 }
2868 
2869 /**
2870  *  ixgbe_set_vfta_generic - Set VLAN filter table
2871  *  @hw: pointer to hardware structure
2872  *  @vlan: VLAN id to write to VLAN filter
2873  *  @vind: VMDq output index that maps queue to VLAN id in VFVFB
2874  *  @vlan_on: boolean flag to turn on/off VLAN in VFVF
2875  *
2876  *  Turn on/off specified VLAN in the VLAN filter table.
2877  **/
2878 int32_t ixgbe_set_vfta_generic(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind,
2879                            int vlan_on)
2880 {
2881 	int32_t regindex;
2882 	uint32_t bitindex;
2883 	uint32_t vfta;
2884 	uint32_t bits;
2885 	uint32_t vt;
2886 	uint32_t targetbit;
2887 	int vfta_changed = FALSE;
2888 
2889 	if (vlan > 4095)
2890 		return IXGBE_ERR_PARAM;
2891 
2892 	/*
2893 	 * this is a 2 part operation - first the VFTA, then the
2894 	 * VLVF and VLVFB if VT Mode is set
2895 	 * We don't write the VFTA until we know the VLVF part succeeded.
2896 	 */
2897 
2898 	/* Part 1
2899 	 * The VFTA is a bitstring made up of 128 32-bit registers
2900 	 * that enable the particular VLAN id, much like the MTA:
2901 	 *    bits[11-5]: which register
2902 	 *    bits[4-0]:  which bit in the register
2903 	 */
2904 	regindex = (vlan >> 5) & 0x7F;
2905 	bitindex = vlan & 0x1F;
2906 	targetbit = (1 << bitindex);
2907 	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
2908 
2909 	if (vlan_on) {
2910 		if (!(vfta & targetbit)) {
2911 			vfta |= targetbit;
2912 			vfta_changed = TRUE;
2913 		}
2914 	} else {
2915 		if ((vfta & targetbit)) {
2916 			vfta &= ~targetbit;
2917 			vfta_changed = TRUE;
2918 		}
2919 	}
2920 
2921 	/* Part 2
2922 	 * If VT Mode is set
2923 	 *   Either vlan_on
2924 	 *     make sure the vlan is in VLVF
2925 	 *     set the vind bit in the matching VLVFB
2926 	 *   Or !vlan_on
2927 	 *     clear the pool bit and possibly the vind
2928 	 */
2929 	vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2930 	if (vt & IXGBE_VT_CTL_VT_ENABLE) {
2931 		int32_t vlvf_index;
2932 
2933 		vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
2934 		if (vlvf_index < 0)
2935 			return vlvf_index;
2936 
2937 		if (vlan_on) {
2938 			/* set the pool bit */
2939 			if (vind < 32) {
2940 				bits = IXGBE_READ_REG(hw,
2941 						IXGBE_VLVFB(vlvf_index*2));
2942 				bits |= (1 << vind);
2943 				IXGBE_WRITE_REG(hw,
2944 						IXGBE_VLVFB(vlvf_index*2),
2945 						bits);
2946 			} else {
2947 				bits = IXGBE_READ_REG(hw,
2948 						IXGBE_VLVFB((vlvf_index*2)+1));
2949 				bits |= (1 << (vind-32));
2950 				IXGBE_WRITE_REG(hw,
2951 						IXGBE_VLVFB((vlvf_index*2)+1),
2952 						bits);
2953 			}
2954 		} else {
2955 			/* clear the pool bit */
2956 			if (vind < 32) {
2957 				bits = IXGBE_READ_REG(hw,
2958 						IXGBE_VLVFB(vlvf_index*2));
2959 				bits &= ~(1 << vind);
2960 				IXGBE_WRITE_REG(hw,
2961 						IXGBE_VLVFB(vlvf_index*2),
2962 						bits);
2963 				bits |= IXGBE_READ_REG(hw,
2964 						IXGBE_VLVFB((vlvf_index*2)+1));
2965 			} else {
2966 				bits = IXGBE_READ_REG(hw,
2967 						IXGBE_VLVFB((vlvf_index*2)+1));
2968 				bits &= ~(1 << (vind-32));
2969 				IXGBE_WRITE_REG(hw,
2970 						IXGBE_VLVFB((vlvf_index*2)+1),
2971 						bits);
2972 				bits |= IXGBE_READ_REG(hw,
2973 						IXGBE_VLVFB(vlvf_index*2));
2974 			}
2975 		}
2976 
2977 		/*
2978 		 * If there are still bits set in the VLVFB registers
2979 		 * for the VLAN ID indicated we need to see if the
2980 		 * caller is requesting that we clear the VFTA entry bit.
2981 		 * If the caller has requested that we clear the VFTA
2982 		 * entry bit but there are still pools/VFs using this VLAN
2983 		 * ID entry then ignore the request.  We're not worried
2984 		 * about the case where we're turning the VFTA VLAN ID
2985 		 * entry bit on, only when requested to turn it off as
2986 		 * there may be multiple pools and/or VFs using the
2987 		 * VLAN ID entry.  In that case we cannot clear the
2988 		 * VFTA bit until all pools/VFs using that VLAN ID have also
2989 		 * been cleared.  This will be indicated by "bits" being
2990 		 * zero.
2991 		 */
2992 		if (bits) {
2993 			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
2994 					(IXGBE_VLVF_VIEN | vlan));
2995 			if (!vlan_on) {
2996 				/* someone wants to clear the vfta entry
2997 				 * but some pools/VFs are still using it.
2998 				 * Ignore it. */
2999 				vfta_changed = FALSE;
3000 			}
3001 		}
3002 		else
3003 			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
3004 	}
3005 
3006 	if (vfta_changed)
3007 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
3008 
3009 	return IXGBE_SUCCESS;
3010 }
3011 
3012 /**
3013  *  ixgbe_clear_vfta_generic - Clear VLAN filter table
3014  *  @hw: pointer to hardware structure
3015  *
3016  *  Clears the VLAN filer table, and the VMDq index associated with the filter
3017  **/
3018 int32_t ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3019 {
3020 	uint32_t offset;
3021 
3022 	for (offset = 0; offset < hw->mac.vft_size; offset++)
3023 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3024 
3025 	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3026 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3027 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
3028 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
3029 	}
3030 
3031 	return IXGBE_SUCCESS;
3032 }
3033 
3034 /**
3035  *  ixgbe_check_mac_link_generic - Determine link and speed status
3036  *  @hw: pointer to hardware structure
3037  *  @speed: pointer to link speed
3038  *  @link_up: TRUE when link is up
3039  *  @link_up_wait_to_complete: bool used to wait for link up or not
3040  *
3041  *  Reads the links register to determine if link is up and the current speed
3042  **/
int32_t ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
                               int *link_up, int link_up_wait_to_complete)
{
	uint32_t links_reg, links_orig;
	uint32_t i;

	/* snapshot LINKS, then read it again so a change between the two
	 * reads can be reported below */
	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);

	if (links_orig != links_reg) {
		DEBUGOUT2("LINKS changed from %08X to %08X\n",
		          links_orig, links_reg);
	}

	if (link_up_wait_to_complete) {
		/* poll for link-up, 100ms per iteration, up to
		 * IXGBE_LINK_UP_TIME iterations */
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		/* single non-blocking check */
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}

	/* decode the speed field of the last LINKS value read */
	if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
	    IXGBE_LINKS_SPEED_10G_82599)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
	         IXGBE_LINKS_SPEED_1G_82599)
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
	else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
	         IXGBE_LINKS_SPEED_100_82599)
		*speed = IXGBE_LINK_SPEED_100_FULL;
	else
		*speed = IXGBE_LINK_SPEED_UNKNOWN;

	/* if link is down, zero out the current_mode */
	if (*link_up == FALSE) {
		hw->fc.current_mode = ixgbe_fc_none;
		hw->fc.fc_was_autonegged = FALSE;
	}

	return IXGBE_SUCCESS;
}
3097 
3098 /**
3099  *  ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
3100  *  control
3101  *  @hw: pointer to hardware structure
3102  *
3103  *  There are several phys that do not support autoneg flow control. This
3104  *  function check the device id to see if the associated phy supports
3105  *  autoneg flow control.
3106  **/
3107 int32_t ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
3108 {
3109 	switch (hw->device_id) {
3110 	case IXGBE_DEV_ID_82599_T3_LOM:
3111 		return IXGBE_SUCCESS;
3112 	default:
3113 		return IXGBE_ERR_FC_NOT_SUPPORTED;
3114 	}
3115 }
3116 
3117 /**
3118  *  ixgbe_get_device_caps_generic - Get additional device capabilities
3119  *  @hw: pointer to hardware structure
3120  *  @device_caps: the EEPROM word with the extra device capabilities
3121  *
3122  *  This function will read the EEPROM location for the device capabilities,
3123  *  and return the word through device_caps.
3124  **/
int32_t ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, uint16_t *device_caps)
{
	/* NOTE(review): the EEPROM read status is discarded here, so
	 * *device_caps may be left unset on a failed read — confirm that
	 * callers tolerate this before relying on the value */
	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);

	return IXGBE_SUCCESS;
}
3131 
3132 /**
3133  *  ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
3134  *  @hw: pointer to hardware structure
3135  *
3136  **/
3137 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
3138 {
3139 	uint32_t regval;
3140 	uint32_t i;
3141 
3142 	DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
3143 
3144 	/* Enable relaxed ordering */
3145 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
3146 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
3147 		regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
3148 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
3149 	}
3150 
3151 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
3152 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
3153 		regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
3154 		           IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
3155 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
3156 	}
3157 
3158 }
3159 
3160 /*
3161  * MBX: Mailbox handling
3162  */
3163 
3164 /**
3165  *  ixgbe_read_mbx - Reads a message from the mailbox
3166  *  @hw: pointer to the HW structure
3167  *  @msg: The message buffer
3168  *  @size: Length of buffer
3169  *  @mbx_id: id of mailbox to read
3170  *
3171  *  returns SUCCESS if it successfuly read message from buffer
3172  **/
3173 int32_t ixgbe_read_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
3174 {
3175 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3176 	int32_t ret_val = IXGBE_ERR_MBX;
3177 
3178 	DEBUGFUNC("ixgbe_read_mbx");
3179 
3180 	/* limit read to size of mailbox */
3181 	if (size > mbx->size)
3182 		size = mbx->size;
3183 
3184 	if (mbx->ops.read)
3185 		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
3186 
3187 	return ret_val;
3188 }
3189 
3190 /**
3191  *  ixgbe_write_mbx - Write a message to the mailbox
3192  *  @hw: pointer to the HW structure
3193  *  @msg: The message buffer
3194  *  @size: Length of buffer
3195  *  @mbx_id: id of mailbox to write
3196  *
3197  *  returns SUCCESS if it successfully copied message into the buffer
3198  **/
3199 int32_t ixgbe_write_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
3200 {
3201 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3202 	int32_t ret_val = IXGBE_SUCCESS;
3203 
3204 	DEBUGFUNC("ixgbe_write_mbx");
3205 
3206 	if (size > mbx->size)
3207 		ret_val = IXGBE_ERR_MBX;
3208 
3209 	else if (mbx->ops.write)
3210 		ret_val = mbx->ops.write(hw, msg, size, mbx_id);
3211 
3212 	return ret_val;
3213 }
3214 
3215 /**
3216  *  ixgbe_check_for_msg - checks to see if someone sent us mail
3217  *  @hw: pointer to the HW structure
3218  *  @mbx_id: id of mailbox to check
3219  *
3220  *  returns SUCCESS if the Status bit was found or else ERR_MBX
3221  **/
3222 int32_t ixgbe_check_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id)
3223 {
3224 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3225 	int32_t ret_val = IXGBE_ERR_MBX;
3226 
3227 	DEBUGFUNC("ixgbe_check_for_msg");
3228 
3229 	if (mbx->ops.check_for_msg)
3230 		ret_val = mbx->ops.check_for_msg(hw, mbx_id);
3231 
3232 	return ret_val;
3233 }
3234 
3235 /**
3236  *  ixgbe_check_for_ack - checks to see if someone sent us ACK
3237  *  @hw: pointer to the HW structure
3238  *  @mbx_id: id of mailbox to check
3239  *
3240  *  returns SUCCESS if the Status bit was found or else ERR_MBX
3241  **/
3242 int32_t ixgbe_check_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id)
3243 {
3244 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3245 	int32_t ret_val = IXGBE_ERR_MBX;
3246 
3247 	DEBUGFUNC("ixgbe_check_for_ack");
3248 
3249 	if (mbx->ops.check_for_ack)
3250 		ret_val = mbx->ops.check_for_ack(hw, mbx_id);
3251 
3252 	return ret_val;
3253 }
3254 
3255 /**
3256  *  ixgbe_check_for_rst - checks to see if other side has reset
3257  *  @hw: pointer to the HW structure
3258  *  @mbx_id: id of mailbox to check
3259  *
3260  *  returns SUCCESS if the Status bit was found or else ERR_MBX
3261  **/
3262 int32_t ixgbe_check_for_rst(struct ixgbe_hw *hw, uint16_t mbx_id)
3263 {
3264 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3265 	int32_t ret_val = IXGBE_ERR_MBX;
3266 
3267 	DEBUGFUNC("ixgbe_check_for_rst");
3268 
3269 	if (mbx->ops.check_for_rst)
3270 		ret_val = mbx->ops.check_for_rst(hw, mbx_id);
3271 
3272 	return ret_val;
3273 }
3274 
3275 /**
3276  *  ixgbe_poll_for_msg - Wait for message notification
3277  *  @hw: pointer to the HW structure
3278  *  @mbx_id: id of mailbox to write
3279  *
3280  *  returns SUCCESS if it successfully received a message notification
3281  **/
3282 int32_t ixgbe_poll_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id)
3283 {
3284 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3285 	int countdown = mbx->timeout;
3286 
3287 	DEBUGFUNC("ixgbe_poll_for_msg");
3288 
3289 	if (!countdown || !mbx->ops.check_for_msg)
3290 		goto out;
3291 
3292 	while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
3293 		countdown--;
3294 		if (!countdown)
3295 			break;
3296 		usec_delay(mbx->usec_delay);
3297 	}
3298 
3299 out:
3300 	return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
3301 }
3302 
3303 /**
3304  *  ixgbe_poll_for_ack - Wait for message acknowledgement
3305  *  @hw: pointer to the HW structure
3306  *  @mbx_id: id of mailbox to write
3307  *
3308  *  returns SUCCESS if it successfully received a message acknowledgement
3309  **/
3310 int32_t ixgbe_poll_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id)
3311 {
3312 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3313 	int countdown = mbx->timeout;
3314 
3315 	DEBUGFUNC("ixgbe_poll_for_ack");
3316 
3317 	if (!countdown || !mbx->ops.check_for_ack)
3318 		goto out;
3319 
3320 	while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
3321 		countdown--;
3322 		if (!countdown)
3323 			break;
3324 		usec_delay(mbx->usec_delay);
3325 	}
3326 
3327 out:
3328 	return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
3329 }
3330 
3331 /**
3332  *  ixgbe_read_posted_mbx - Wait for message notification and receive message
3333  *  @hw: pointer to the HW structure
3334  *  @msg: The message buffer
3335  *  @size: Length of buffer
3336  *  @mbx_id: id of mailbox to write
3337  *
3338  *  returns SUCCESS if it successfully received a message notification and
3339  *  copied it into the receive buffer.
3340  **/
3341 int32_t ixgbe_read_posted_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
3342 {
3343 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3344 	int32_t ret_val = IXGBE_ERR_MBX;
3345 
3346 	DEBUGFUNC("ixgbe_read_posted_mbx");
3347 
3348 	if (!mbx->ops.read)
3349 		goto out;
3350 
3351 	ret_val = ixgbe_poll_for_msg(hw, mbx_id);
3352 
3353 	/* if ack received read message, otherwise we timed out */
3354 	if (!ret_val)
3355 		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
3356 out:
3357 	return ret_val;
3358 }
3359 
3360 /**
3361  *  ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
3362  *  @hw: pointer to the HW structure
3363  *  @msg: The message buffer
3364  *  @size: Length of buffer
3365  *  @mbx_id: id of mailbox to write
3366  *
3367  *  returns SUCCESS if it successfully copied message into the buffer and
3368  *  received an ack to that message within delay * timeout period
3369  **/
3370 int32_t ixgbe_write_posted_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
3371                            uint16_t mbx_id)
3372 {
3373 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3374 	int32_t ret_val = IXGBE_ERR_MBX;
3375 
3376 	DEBUGFUNC("ixgbe_write_posted_mbx");
3377 
3378 	/* exit if either we can't write or there isn't a defined timeout */
3379 	if (!mbx->ops.write || !mbx->timeout)
3380 		goto out;
3381 
3382 	/* send msg */
3383 	ret_val = mbx->ops.write(hw, msg, size, mbx_id);
3384 
3385 	/* if msg sent wait until we receive an ack */
3386 	if (!ret_val)
3387 		ret_val = ixgbe_poll_for_ack(hw, mbx_id);
3388 out:
3389 	return ret_val;
3390 }
3391 
3392 /**
3393  *  ixgbe_init_mbx_ops_generic - Initialize MB function pointers
3394  *  @hw: pointer to the HW structure
3395  *
3396  *  Setups up the mailbox read and write message function pointers
3397  **/
3398 void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
3399 {
3400 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3401 
3402 	mbx->ops.read_posted = ixgbe_read_posted_mbx;
3403 	mbx->ops.write_posted = ixgbe_write_posted_mbx;
3404 }
3405 
3406 /**
3407  *  ixgbe_read_v2p_mailbox - read v2p mailbox
3408  *  @hw: pointer to the HW structure
3409  *
3410  *  This function is used to read the v2p mailbox without losing the read to
3411  *  clear status bits.
3412  **/
3413 uint32_t ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw)
3414 {
3415 	uint32_t v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
3416 
3417 	v2p_mailbox |= hw->mbx.v2p_mailbox;
3418 	hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
3419 
3420 	return v2p_mailbox;
3421 }
3422 
3423 /**
3424  *  ixgbe_check_for_bit_vf - Determine if a status bit was set
3425  *  @hw: pointer to the HW structure
3426  *  @mask: bitmask for bits to be tested and cleared
3427  *
3428  *  This function is used to check for the read to clear bits within
3429  *  the V2P mailbox.
3430  **/
3431 int32_t ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, uint32_t mask)
3432 {
3433 	uint32_t v2p_mailbox = ixgbe_read_v2p_mailbox(hw);
3434 	int32_t ret_val = IXGBE_ERR_MBX;
3435 
3436 	if (v2p_mailbox & mask)
3437 		ret_val = IXGBE_SUCCESS;
3438 
3439 	hw->mbx.v2p_mailbox &= ~mask;
3440 
3441 	return ret_val;
3442 }
3443 
3444 /**
3445  *  ixgbe_check_for_msg_vf - checks to see if the PF has sent mail
3446  *  @hw: pointer to the HW structure
3447  *  @mbx_id: id of mailbox to check
3448  *
3449  *  returns SUCCESS if the PF has set the Status bit or else ERR_MBX
3450  **/
3451 int32_t ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, uint16_t mbx_id)
3452 {
3453 	int32_t ret_val = IXGBE_ERR_MBX;
3454 
3455 	UNREFERENCED_PARAMETER(mbx_id);
3456 	DEBUGFUNC("ixgbe_check_for_msg_vf");
3457 
3458 	if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
3459 		ret_val = IXGBE_SUCCESS;
3460 		hw->mbx.stats.reqs++;
3461 	}
3462 
3463 	return ret_val;
3464 }
3465 
3466 /**
3467  *  ixgbe_check_for_ack_vf - checks to see if the PF has ACK'd
3468  *  @hw: pointer to the HW structure
3469  *  @mbx_id: id of mailbox to check
3470  *
3471  *  returns SUCCESS if the PF has set the ACK bit or else ERR_MBX
3472  **/
3473 int32_t ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, uint16_t mbx_id)
3474 {
3475 	int32_t ret_val = IXGBE_ERR_MBX;
3476 
3477 	UNREFERENCED_PARAMETER(mbx_id);
3478 	DEBUGFUNC("ixgbe_check_for_ack_vf");
3479 
3480 	if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
3481 		ret_val = IXGBE_SUCCESS;
3482 		hw->mbx.stats.acks++;
3483 	}
3484 
3485 	return ret_val;
3486 }
3487 
3488 /**
3489  *  ixgbe_check_for_rst_vf - checks to see if the PF has reset
3490  *  @hw: pointer to the HW structure
3491  *  @mbx_id: id of mailbox to check
3492  *
3493  *  returns TRUE if the PF has set the reset done bit or else FALSE
3494  **/
3495 int32_t ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, uint16_t mbx_id)
3496 {
3497 	int32_t ret_val = IXGBE_ERR_MBX;
3498 
3499 	UNREFERENCED_PARAMETER(mbx_id);
3500 	DEBUGFUNC("ixgbe_check_for_rst_vf");
3501 
3502 	if (!ixgbe_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
3503 	                                 IXGBE_VFMAILBOX_RSTI))) {
3504 		ret_val = IXGBE_SUCCESS;
3505 		hw->mbx.stats.rsts++;
3506 	}
3507 
3508 	return ret_val;
3509 }
3510 
3511 /**
3512  *  ixgbe_obtain_mbx_lock_vf - obtain mailbox lock
3513  *  @hw: pointer to the HW structure
3514  *
3515  *  return SUCCESS if we obtained the mailbox lock
3516  **/
3517 int32_t ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
3518 {
3519 	int32_t ret_val = IXGBE_ERR_MBX;
3520 
3521 	DEBUGFUNC("ixgbe_obtain_mbx_lock_vf");
3522 
3523 	/* Take ownership of the buffer */
3524 	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
3525 
3526 	/* reserve mailbox for vf use */
3527 	if (ixgbe_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
3528 		ret_val = IXGBE_SUCCESS;
3529 
3530 	return ret_val;
3531 }
3532 
3533 /**
3534  *  ixgbe_write_mbx_vf - Write a message to the mailbox
3535  *  @hw: pointer to the HW structure
3536  *  @msg: The message buffer
3537  *  @size: Length of buffer
3538  *  @mbx_id: id of mailbox to write
3539  *
3540  *  returns SUCCESS if it successfully copied message into the buffer
3541  **/
int32_t ixgbe_write_mbx_vf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
                              uint16_t mbx_id)
{
	int32_t ret_val;
	uint16_t i;

	UNREFERENCED_PARAMETER(mbx_id);

	DEBUGFUNC("ixgbe_write_mbx_vf");

	/* lock the mailbox to prevent pf/vf race condition */
	ret_val = ixgbe_obtain_mbx_lock_vf(hw);
	if (ret_val)
		goto out_no_write;

	/* flush msg and acks as we are overwriting the message buffer */
	ixgbe_check_for_msg_vf(hw, 0);
	ixgbe_check_for_ack_vf(hw, 0);

	/* copy the caller specified message to the mailbox memory buffer.
	 * NOTE(review): size is not clamped to mbx->size here; the generic
	 * ixgbe_write_mbx wrapper is assumed to enforce that — confirm */
	for (i = 0; i < size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);

	/* update stats */
	hw->mbx.stats.msgs_tx++;

	/* Drop VFU and interrupt the PF to tell it a message has been sent */
	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);

out_no_write:
	return ret_val;
}
3574 
3575 /**
3576  *  ixgbe_read_mbx_vf - Reads a message from the inbox intended for vf
3577  *  @hw: pointer to the HW structure
3578  *  @msg: The message buffer
3579  *  @size: Length of buffer
3580  *  @mbx_id: id of mailbox to read
3581  *
3582  *  returns SUCCESS if it successfuly read message from buffer
3583  **/
int32_t ixgbe_read_mbx_vf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
                             uint16_t mbx_id)
{
	int32_t ret_val = IXGBE_SUCCESS;
	uint16_t i;

	DEBUGFUNC("ixgbe_read_mbx_vf");
	UNREFERENCED_PARAMETER(mbx_id);

	/* lock the mailbox to prevent pf/vf race condition */
	ret_val = ixgbe_obtain_mbx_lock_vf(hw);
	if (ret_val)
		goto out_no_read;

	/* copy the message from the mailbox memory buffer.
	 * NOTE(review): size is not clamped here; the generic ixgbe_read_mbx
	 * wrapper is assumed to bound it to mbx->size — confirm */
	for (i = 0; i < size; i++)
		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);

	/* Acknowledge receipt and release mailbox, then we're done */
	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);

	/* update stats */
	hw->mbx.stats.msgs_rx++;

out_no_read:
	return ret_val;
}
3611 
3612 /**
3613  *  ixgbe_init_mbx_params_vf - set initial values for vf mailbox
3614  *  @hw: pointer to the HW structure
3615  *
3616  *  Initializes the hw->mbx struct to correct values for vf mailbox
3617  */
3618 void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw)
3619 {
3620 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3621 
3622 	/* start mailbox as timed out and let the reset_hw call set the timeout
3623 	 * value to begin communications */
3624 	mbx->timeout = 0;
3625 	mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
3626 
3627 	mbx->size = IXGBE_VFMAILBOX_SIZE;
3628 
3629 	mbx->ops.read = ixgbe_read_mbx_vf;
3630 	mbx->ops.write = ixgbe_write_mbx_vf;
3631 	mbx->ops.read_posted = ixgbe_read_posted_mbx;
3632 	mbx->ops.write_posted = ixgbe_write_posted_mbx;
3633 	mbx->ops.check_for_msg = ixgbe_check_for_msg_vf;
3634 	mbx->ops.check_for_ack = ixgbe_check_for_ack_vf;
3635 	mbx->ops.check_for_rst = ixgbe_check_for_rst_vf;
3636 
3637 	mbx->stats.msgs_tx = 0;
3638 	mbx->stats.msgs_rx = 0;
3639 	mbx->stats.reqs = 0;
3640 	mbx->stats.acks = 0;
3641 	mbx->stats.rsts = 0;
3642 }
3643 
3644 int32_t ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, uint32_t mask, int32_t index)
3645 {
3646 	uint32_t mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
3647 	int32_t ret_val = IXGBE_ERR_MBX;
3648 
3649 	if (mbvficr & mask) {
3650 		ret_val = IXGBE_SUCCESS;
3651 		IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
3652 	}
3653 
3654 	return ret_val;
3655 }
3656 
3657 /**
3658  *  ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
3659  *  @hw: pointer to the HW structure
3660  *  @vf_number: the VF index
3661  *
3662  *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
3663  **/
3664 int32_t ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, uint16_t vf_number)
3665 {
3666 	int32_t ret_val = IXGBE_ERR_MBX;
3667 	int32_t index = IXGBE_MBVFICR_INDEX(vf_number);
3668 	uint32_t vf_bit = vf_number % 16;
3669 
3670 	DEBUGFUNC("ixgbe_check_for_msg_pf");
3671 
3672 	if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
3673 	                            index)) {
3674 		ret_val = IXGBE_SUCCESS;
3675 		hw->mbx.stats.reqs++;
3676 	}
3677 
3678 	return ret_val;
3679 }
3680 
3681 /**
3682  *  ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
3683  *  @hw: pointer to the HW structure
3684  *  @vf_number: the VF index
3685  *
3686  *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
3687  **/
3688 int32_t ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, uint16_t vf_number)
3689 {
3690 	int32_t ret_val = IXGBE_ERR_MBX;
3691 	int32_t index = IXGBE_MBVFICR_INDEX(vf_number);
3692 	uint32_t vf_bit = vf_number % 16;
3693 
3694 	DEBUGFUNC("ixgbe_check_for_ack_pf");
3695 
3696 	if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
3697 	                            index)) {
3698 		ret_val = IXGBE_SUCCESS;
3699 		hw->mbx.stats.acks++;
3700 	}
3701 
3702 	return ret_val;
3703 }
3704 
3705 /**
3706  *  ixgbe_check_for_rst_pf - checks to see if the VF has reset
3707  *  @hw: pointer to the HW structure
3708  *  @vf_number: the VF index
3709  *
3710  *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
3711  **/
3712 int32_t ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, uint16_t vf_number)
3713 {
3714 	uint32_t reg_offset = (vf_number < 32) ? 0 : 1;
3715 	uint32_t vf_shift = vf_number % 32;
3716 	uint32_t vflre = 0;
3717 	int32_t ret_val = IXGBE_ERR_MBX;
3718 
3719 	DEBUGFUNC("ixgbe_check_for_rst_pf");
3720 
3721 	switch (hw->mac.type) {
3722 	case ixgbe_mac_82599EB:
3723 		vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
3724 		break;
3725 	default:
3726 		goto out;
3727 		break;
3728 	}
3729 
3730 	if (vflre & (1 << vf_shift)) {
3731 		ret_val = IXGBE_SUCCESS;
3732 		IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
3733 		hw->mbx.stats.rsts++;
3734 	}
3735 
3736 out:
3737 	return ret_val;
3738 }
3739 
3740 /**
3741  *  ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
3742  *  @hw: pointer to the HW structure
3743  *  @vf_number: the VF index
3744  *
3745  *  return SUCCESS if we obtained the mailbox lock
3746  **/
3747 int32_t ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, uint16_t vf_number)
3748 {
3749 	int32_t ret_val = IXGBE_ERR_MBX;
3750 	uint32_t p2v_mailbox;
3751 
3752 	DEBUGFUNC("ixgbe_obtain_mbx_lock_pf");
3753 
3754 	/* Take ownership of the buffer */
3755 	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
3756 
3757 	/* reserve mailbox for vf use */
3758 	p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
3759 	if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
3760 		ret_val = IXGBE_SUCCESS;
3761 
3762 	return ret_val;
3763 }
3764 
3765 /**
3766  *  ixgbe_write_mbx_pf - Places a message in the mailbox
3767  *  @hw: pointer to the HW structure
3768  *  @msg: The message buffer
3769  *  @size: Length of buffer
3770  *  @vf_number: the VF index
3771  *
3772  *  returns SUCCESS if it successfully copied message into the buffer
3773  **/
int32_t ixgbe_write_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
                              uint16_t vf_number)
{
	int32_t ret_val;
	uint16_t i;

	DEBUGFUNC("ixgbe_write_mbx_pf");

	/* lock the mailbox to prevent pf/vf race condition */
	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
	if (ret_val)
		goto out_no_write;

	/* flush msg and acks as we are overwriting the message buffer */
	ixgbe_check_for_msg_pf(hw, vf_number);
	ixgbe_check_for_ack_pf(hw, vf_number);

	/* copy the caller specified message to the mailbox memory buffer.
	 * NOTE(review): size is not clamped here; the generic ixgbe_write_mbx
	 * wrapper is assumed to bound it to mbx->size — confirm */
	for (i = 0; i < size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);

	/* Interrupt VF to tell it a message has been sent and release buffer*/
	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);

	/* update stats */
	hw->mbx.stats.msgs_tx++;

out_no_write:
	return ret_val;

}
3805 
3806 /**
3807  *  ixgbe_read_mbx_pf - Read a message from the mailbox
3808  *  @hw: pointer to the HW structure
3809  *  @msg: The message buffer
3810  *  @size: Length of buffer
3811  *  @vf_number: the VF index
3812  *
3813  *  This function copies a message from the mailbox buffer to the caller's
3814  *  memory buffer.  The presumption is that the caller knows that there was
3815  *  a message due to a VF request so no polling for message is needed.
3816  **/
int32_t ixgbe_read_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
                             uint16_t vf_number)
{
	int32_t ret_val;
	uint16_t i;

	DEBUGFUNC("ixgbe_read_mbx_pf");

	/* lock the mailbox to prevent pf/vf race condition */
	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
	if (ret_val)
		goto out_no_read;

	/* copy the message to the mailbox memory buffer.
	 * NOTE(review): size is not clamped here; the generic ixgbe_read_mbx
	 * wrapper is assumed to bound it to mbx->size — confirm */
	for (i = 0; i < size; i++)
		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);

	/* Acknowledge the message and release buffer */
	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);

	/* update stats */
	hw->mbx.stats.msgs_rx++;

out_no_read:
	return ret_val;
}
3843 
3844 /**
3845  *  ixgbe_init_mbx_params_pf - set initial values for pf mailbox
3846  *  @hw: pointer to the HW structure
3847  *
3848  *  Initializes the hw->mbx struct to correct values for pf mailbox
3849  */
3850 void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
3851 {
3852 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3853 
3854 	if (hw->mac.type != ixgbe_mac_82599EB)
3855 		return;
3856 
3857 	mbx->timeout = 0;
3858 	mbx->usec_delay = 0;
3859 
3860 	mbx->size = IXGBE_VFMAILBOX_SIZE;
3861 
3862 	mbx->ops.read = ixgbe_read_mbx_pf;
3863 	mbx->ops.write = ixgbe_write_mbx_pf;
3864 	mbx->ops.read_posted = ixgbe_read_posted_mbx;
3865 	mbx->ops.write_posted = ixgbe_write_posted_mbx;
3866 	mbx->ops.check_for_msg = ixgbe_check_for_msg_pf;
3867 	mbx->ops.check_for_ack = ixgbe_check_for_ack_pf;
3868 	mbx->ops.check_for_rst = ixgbe_check_for_rst_pf;
3869 
3870 	mbx->stats.msgs_tx = 0;
3871 	mbx->stats.msgs_rx = 0;
3872 	mbx->stats.reqs = 0;
3873 	mbx->stats.acks = 0;
3874 	mbx->stats.rsts = 0;
3875 }
3876 
3877