1 /*	$OpenBSD: ixgbe_82598.c,v 1.6 2011/06/10 12:46:35 claudio Exp $	*/
2 
3 /******************************************************************************
4 
5   Copyright (c) 2001-2009, Intel Corporation
6   All rights reserved.
7 
8   Redistribution and use in source and binary forms, with or without
9   modification, are permitted provided that the following conditions are met:
10 
11    1. Redistributions of source code must retain the above copyright notice,
12       this list of conditions and the following disclaimer.
13 
14    2. Redistributions in binary form must reproduce the above copyright
15       notice, this list of conditions and the following disclaimer in the
16       documentation and/or other materials provided with the distribution.
17 
18    3. Neither the name of the Intel Corporation nor the names of its
19       contributors may be used to endorse or promote products derived from
20       this software without specific prior written permission.
21 
22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32   POSSIBILITY OF SUCH DAMAGE.
33 
34 ******************************************************************************/
35 /*$FreeBSD: src/sys/dev/ixgbe/ixgbe_82598.c,v 1.9 2009/12/07 21:30:54 jfv Exp $*/
36 
37 #include <dev/pci/ixgbe.h>
38 #include <dev/pci/ixgbe_type.h>
39 
40 uint32_t ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
41 int32_t ixgbe_init_ops_82598(struct ixgbe_hw *hw);
42 int32_t ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
43                                       ixgbe_link_speed *speed,
44                                       int *autoneg);
45 enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
46 int32_t ixgbe_fc_enable_82598(struct ixgbe_hw *hw, int32_t packetbuf_num);
47 int32_t ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
48                                int autoneg_wait_to_complete);
49 int32_t ixgbe_validate_link_ready(struct ixgbe_hw *hw);
50 int32_t ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
51                                ixgbe_link_speed *speed,
52                                int *link_up, int link_up_wait_to_complete);
53 int32_t ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
54                                      ixgbe_link_speed speed,
55                                      int autoneg,
56                                      int autoneg_wait_to_complete);
57 int32_t ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
58                                         ixgbe_link_speed speed,
59                                         int autoneg,
60                                         int autoneg_wait_to_complete);
61 int32_t ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
62 int32_t ixgbe_start_hw_82598(struct ixgbe_hw *hw);
63 void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
64 int32_t ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq);
65 int32_t ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq);
66 int32_t ixgbe_set_vfta_82598(struct ixgbe_hw *hw, uint32_t vlan,
67                          uint32_t vind, int vlan_on);
68 int32_t ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
69 int32_t ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, uint32_t reg, uint8_t *val);
70 int32_t ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, uint32_t reg, uint8_t val);
71 int32_t ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, uint8_t byte_offset,
72                                 uint8_t *eeprom_data);
73 uint32_t ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
74 int32_t ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
75 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
76 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
77 
78 /**
79  *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
80  *  @hw: pointer to the HW structure
81  *
82  *  The defaults for 82598 should be in the range of 50us to 50ms;
83  *  however, the hardware default for these parts is 500us to 1ms, which is
84  *  less than the 10ms recommended by the PCIe spec.  To address this we
85  *  need to increase the value to either 10ms-250ms for capability version 1
86  *  config, or 16ms-55ms for version 2.
87  **/
88 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
89 {
90 	uint32_t gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
91 	uint16_t pcie_devctl2;
92 
93 	/* only take action if timeout value is defaulted to 0 */
94 	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
95 		goto out;
96 
97 	/*
98 	 * if capabilities version is type 1 we can write the
99 	 * timeout of 10ms to 250ms through the GCR register
100 	 */
101 	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
102 		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
103 		goto out;
104 	}
105 
106 	/*
107 	 * for version 2 capabilities we need to write the config space
108 	 * directly in order to set the completion timeout value in the
109 	 * 16ms to 55ms range
110 	 */
111 	pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
112 	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
113 	IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
114 out:
115 	/* disable completion timeout resend */
116 	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
117 	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
118 }
119 
120 /**
121  *  ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
122  *  @hw: pointer to hardware structure
123  *
124  *  Read PCIe configuration space, and get the MSI-X vector count from
125  *  the capabilities table.
126  **/
127 uint32_t ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
128 {
129 	uint32_t msix_count = 18;
130 
131 	if (hw->mac.msix_vectors_from_pcie) {
132 		msix_count = IXGBE_READ_PCIE_WORD(hw,
133 		                                  IXGBE_PCIE_MSIX_82598_CAPS);
134 		msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
135 
136 		/* MSI-X count is zero-based in HW, so increment to give
137 		 * proper value */
138 		msix_count++;
139 	}
140 	return msix_count;
141 }
142 
143 /**
144  *  ixgbe_init_ops_82598 - Inits func ptrs and MAC type
145  *  @hw: pointer to hardware structure
146  *
147  *  Initialize the function pointers and assign the MAC type for 82598.
148  *  Does not touch the hardware.
149  **/
150 int32_t ixgbe_init_ops_82598(struct ixgbe_hw *hw)
151 {
152 	struct ixgbe_mac_info *mac = &hw->mac;
153 	struct ixgbe_phy_info *phy = &hw->phy;
154 	int32_t ret_val;
155 
156 	ret_val = ixgbe_init_phy_ops_generic(hw);
157 	ret_val = ixgbe_init_ops_generic(hw);
158 
159 	/* PHY */
160 	phy->ops.init = &ixgbe_init_phy_ops_82598;
161 
162 	/* MAC */
163 	mac->ops.start_hw = &ixgbe_start_hw_82598;
164 	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
165 	mac->ops.reset_hw = &ixgbe_reset_hw_82598;
166 	mac->ops.get_media_type = &ixgbe_get_media_type_82598;
167 	mac->ops.get_supported_physical_layer =
168 	                            &ixgbe_get_supported_physical_layer_82598;
169 	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
170 	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
171 	mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
172 
173 	/* RAR, Multicast, VLAN */
174 	mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
175 	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
176 	mac->ops.set_vfta = &ixgbe_set_vfta_82598;
177 	mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
178 
179 	/* Flow Control */
180 	mac->ops.fc_enable = &ixgbe_fc_enable_82598;
181 
182 	mac->mcft_size       = 128;
183 	mac->vft_size        = 128;
184 	mac->num_rar_entries = 16;
185 	mac->rx_pb_size      = 512;
186 	mac->max_tx_queues   = 32;
187 	mac->max_rx_queues   = 64;
188 	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
189 
190 	/* SFP+ Module */
191 	phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
192 
193 	/* Link */
194 	mac->ops.check_link = &ixgbe_check_mac_link_82598;
195 	mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
196 	mac->ops.flap_tx_laser = NULL;
197 	mac->ops.get_link_capabilities =
198 	                       &ixgbe_get_link_capabilities_82598;
199 
200 	return ret_val;
201 }
202 
203 /**
204  *  ixgbe_init_phy_ops_82598 - PHY/SFP specific init
205  *  @hw: pointer to hardware structure
206  *
207  *  Initialize any function pointers that were not able to be
208  *  set during init_shared_code because the PHY/SFP type was
209  *  not known.  Perform the SFP init if necessary.
210  *
211  **/
212 int32_t ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
213 {
214 	struct ixgbe_mac_info *mac = &hw->mac;
215 	struct ixgbe_phy_info *phy = &hw->phy;
216 	int32_t ret_val = IXGBE_SUCCESS;
217 	uint16_t list_offset, data_offset;
218 
219 	/* Identify the PHY */
220 	phy->ops.identify(hw);
221 
222 	/* Overwrite the link function pointers if copper PHY */
223 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
224 		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
225 		mac->ops.get_link_capabilities =
226 		                  &ixgbe_get_copper_link_capabilities_generic;
227 	}
228 
229 	switch (hw->phy.type) {
230 	case ixgbe_phy_tn:
231 		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
232 		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
233 		phy->ops.get_firmware_version =
234 		             &ixgbe_get_phy_firmware_version_tnx;
235 		break;
236 	case ixgbe_phy_aq:
237 		phy->ops.get_firmware_version =
238 		             &ixgbe_get_phy_firmware_version_generic;
239 		break;
240 	case ixgbe_phy_nl:
241 		phy->ops.reset = &ixgbe_reset_phy_nl;
242 
243 		/* Call SFP+ identify routine to get the SFP+ module type */
244 		ret_val = phy->ops.identify_sfp(hw);
245 		if (ret_val != IXGBE_SUCCESS)
246 			goto out;
247 		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
248 			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
249 			goto out;
250 		}
251 
252 		/* Check to see if SFP+ module is supported */
253 		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
254 		                                            &list_offset,
255 		                                            &data_offset);
256 		if (ret_val != IXGBE_SUCCESS) {
257 			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
258 			goto out;
259 		}
260 		break;
261 	default:
262 		break;
263 	}
264 
265 out:
266 	return ret_val;
267 }
268 
269 /**
270  *  ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
271  *  @hw: pointer to hardware structure
272  *
273  *  Starts the hardware using the generic start_hw function.
274  *  Then sets the PCIe completion timeout.
275  **/
276 int32_t ixgbe_start_hw_82598(struct ixgbe_hw *hw)
277 {
278 	uint32_t regval;
279 	uint32_t i;
280 	int32_t ret_val = IXGBE_SUCCESS;
281 
282 	ret_val = ixgbe_start_hw_generic(hw);
283 
284 	/* Disable relaxed ordering */
285 	for (i = 0; ((i < hw->mac.max_tx_queues) &&
286 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
287 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
288 		regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
289 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
290 	}
291 
292 	for (i = 0; ((i < hw->mac.max_rx_queues) &&
293 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
294 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
295 		regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
296 		            IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
297 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
298 	}
299 
300 	/* set the completion timeout for the interface */
301 	if (ret_val == IXGBE_SUCCESS)
302 		ixgbe_set_pcie_completion_timeout(hw);
303 
304 	return ret_val;
305 }
306 
307 /**
308  *  ixgbe_get_link_capabilities_82598 - Determines link capabilities
309  *  @hw: pointer to hardware structure
310  *  @speed: pointer to link speed
311  *  @autoneg: pointer to boolean; set TRUE when autonegotiation is used
312  *
313  *  Determines the link capabilities by reading the AUTOC register.
314  **/
315 int32_t ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
316                                       ixgbe_link_speed *speed,
317                                       int *autoneg)
318 {
319 	int32_t status = IXGBE_SUCCESS;
320 	uint32_t autoc = 0;
321 
322 	/*
323 	 * Determine link capabilities based on the stored value of AUTOC,
324 	 * which represents EEPROM defaults.  If AUTOC value has not been
325 	 * stored, use the current register value.
326 	 */
327 	if (hw->mac.orig_link_settings_stored)
328 		autoc = hw->mac.orig_autoc;
329 	else
330 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
331 
332 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
333 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
334 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
335 		*autoneg = FALSE;
336 		break;
337 
338 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
339 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
340 		*autoneg = FALSE;
341 		break;
342 
343 	case IXGBE_AUTOC_LMS_1G_AN:
344 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
345 		*autoneg = TRUE;
346 		break;
347 
348 	case IXGBE_AUTOC_LMS_KX4_AN:
349 	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
350 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
351 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
352 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
353 		if (autoc & IXGBE_AUTOC_KX_SUPP)
354 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
355 		*autoneg = TRUE;
356 		break;
357 
358 	default:
359 		status = IXGBE_ERR_LINK_SETUP;
360 		break;
361 	}
362 
363 	return status;
364 }
365 
366 /**
367  *  ixgbe_get_media_type_82598 - Determines media type
368  *  @hw: pointer to hardware structure
369  *
370  *  Returns the media type (fiber, copper, backplane)
371  **/
372 enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
373 {
374 	enum ixgbe_media_type media_type;
375 
376 	/* Detect if there is a copper PHY attached. */
377 	switch (hw->phy.type) {
378 	case ixgbe_phy_cu_unknown:
379 	case ixgbe_phy_tn:
380 	case ixgbe_phy_aq:
381 		media_type = ixgbe_media_type_copper;
382 		goto out;
383 	default:
384 		break;
385 	}
386 
387 	/* Media type for I82598 is based on device ID */
388 	switch (hw->device_id) {
389 	case IXGBE_DEV_ID_82598:
390 	case IXGBE_DEV_ID_82598_BX:
391 		/* Default device ID is mezzanine card KX/KX4 */
392 		media_type = ixgbe_media_type_backplane;
393 		break;
394 	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
395 	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
396 	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
397 	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
398 	case IXGBE_DEV_ID_82598EB_XF_LR:
399 	case IXGBE_DEV_ID_82598EB_SFP_LOM:
400 		media_type = ixgbe_media_type_fiber;
401 		break;
402 	case IXGBE_DEV_ID_82598EB_CX4:
403 	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
404 		media_type = ixgbe_media_type_cx4;
405 		break;
406 	case IXGBE_DEV_ID_82598AT:
407 	case IXGBE_DEV_ID_82598AT2:
408 		media_type = ixgbe_media_type_copper;
409 		break;
410 	default:
411 		media_type = ixgbe_media_type_unknown;
412 		break;
413 	}
414 out:
415 	return media_type;
416 }
417 
418 /**
419  *  ixgbe_fc_enable_82598 - Enable flow control
420  *  @hw: pointer to hardware structure
421  *  @packetbuf_num: packet buffer number (0-7)
422  *
423  *  Enable flow control according to the current settings.
424  **/
425 int32_t ixgbe_fc_enable_82598(struct ixgbe_hw *hw, int32_t packetbuf_num)
426 {
427 	int32_t ret_val = IXGBE_SUCCESS;
428 	uint32_t fctrl_reg;
429 	uint32_t rmcs_reg;
430 	uint32_t reg;
431 	uint32_t rx_pba_size;
432 	uint32_t link_speed = 0;
433 	int link_up;
434 
435 	/*
436 	 * On 82598, having Rx flow control enabled causes resets while
437 	 * running at 1G, so if it is on, turn it off once we know link_speed.
438 	 * For more details see the 82598 Specification Update.
439 	 */
440 	hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
441 	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
442 		switch (hw->fc.requested_mode) {
443 		case ixgbe_fc_full:
444 			hw->fc.requested_mode = ixgbe_fc_tx_pause;
445 			break;
446 		case ixgbe_fc_rx_pause:
447 			hw->fc.requested_mode = ixgbe_fc_none;
448 			break;
449 		default:
450 			/* no change */
451 			break;
452 		}
453 	}
454 
455 	/* Negotiate the fc mode to use */
456 	ret_val = ixgbe_fc_autoneg(hw);
457 	if (ret_val == IXGBE_ERR_FLOW_CONTROL)
458 		goto out;
459 
460 	/* Disable any previous flow control settings */
461 	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
462 	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
463 
464 	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
465 	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
466 
467 	/*
468 	 * The possible values of fc.current_mode are:
469 	 * 0: Flow control is completely disabled
470 	 * 1: Rx flow control is enabled (we can receive pause frames,
471 	 *    but not send pause frames).
472 	 * 2: Tx flow control is enabled (we can send pause frames but
473 	 *     we do not support receiving pause frames).
474 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
475 	 * other: Invalid.
476 	 */
477 	switch (hw->fc.current_mode) {
478 	case ixgbe_fc_none:
479 		/*
480 		 * Flow control is disabled by software override or autoneg.
481 		 * The code below will actually disable it in the HW.
482 		 */
483 		break;
484 	case ixgbe_fc_rx_pause:
485 		/*
486 		 * Rx Flow control is enabled and Tx Flow control is
487 		 * disabled by software override. Since there really
488 		 * isn't a way to advertise that we are capable of RX
489 		 * Pause ONLY, we will advertise that we support both
490 		 * symmetric and asymmetric Rx PAUSE.  Later, we will
491 		 * disable the adapter's ability to send PAUSE frames.
492 		 */
493 		fctrl_reg |= IXGBE_FCTRL_RFCE;
494 		break;
495 	case ixgbe_fc_tx_pause:
496 		/*
497 		 * Tx Flow control is enabled, and Rx Flow control is
498 		 * disabled by software override.
499 		 */
500 		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
501 		break;
502 	case ixgbe_fc_full:
503 		/* Flow control (both Rx and Tx) is enabled by SW override. */
504 		fctrl_reg |= IXGBE_FCTRL_RFCE;
505 		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
506 		break;
507 	default:
508 		DEBUGOUT("Flow control param set incorrectly\n");
509 		ret_val = IXGBE_ERR_CONFIG;
510 		goto out;
511 		break;
512 	}
513 
514 	/* Set 802.3x based flow control settings. */
515 	fctrl_reg |= IXGBE_FCTRL_DPF;
516 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
517 	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
518 
519 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
520 	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
521 		rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
522 		rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
523 
524 		reg = (rx_pba_size - hw->fc.low_water) << 6;
525 		if (hw->fc.send_xon)
526 			reg |= IXGBE_FCRTL_XONE;
527 
528 		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
529 
530 		reg = (rx_pba_size - hw->fc.high_water) << 6;
531 		reg |= IXGBE_FCRTH_FCEN;
532 
533 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
534 	}
535 
536 	/* Configure pause time (2 TCs per register) */
537 	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
538 	if ((packetbuf_num & 1) == 0)
539 		reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
540 	else
541 		reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
542 	IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
543 
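	/* Set the flow control refresh timer to half the pause time. */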
544 	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
545 
546 out:
547 	return ret_val;
548 }
549 
550 /**
551  *  ixgbe_start_mac_link_82598 - Configures MAC link settings
552  *  @hw: pointer to hardware structure
553  *
554  *  Configures link settings based on values in the ixgbe_hw struct.
555  *  Restarts the link.  Performs autonegotiation if needed.
556  **/
557 int32_t ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
558 	                               int autoneg_wait_to_complete)
559 {
560 	uint32_t autoc_reg;
561 	uint32_t links_reg;
562 	uint32_t i;
563 	int32_t status = IXGBE_SUCCESS;
564 
565 	/* Restart link */
566 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
567 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
568 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
569 
570 	/* Only poll for autoneg to complete if specified to do so */
571 	if (autoneg_wait_to_complete) {
572 		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
573 		     IXGBE_AUTOC_LMS_KX4_AN ||
574 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
575 		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
576 			links_reg = 0; /* Just in case Autoneg time = 0 */
577 			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
578 				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
579 				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
580 					break;
581 				msec_delay(100);
582 			}
583 			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
584 				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
585 				DEBUGOUT("Autonegotiation did not complete.\n");
586 			}
587 		}
588 	}
589 
590 	/* Add delay to filter out noise during initial link setup */
591 	msec_delay(50);
592 
593 	return status;
594 }
595 
596 /**
597  *  ixgbe_validate_link_ready - Function looks for phy link
598  *  @hw: pointer to hardware structure
599  *
600  *  Function indicates success when phy link is available. If phy is not ready
601  *  within 5 seconds of the MAC indicating link, the function returns an error.
602  **/
603 int32_t ixgbe_validate_link_ready(struct ixgbe_hw *hw)
604 {
605 	uint32_t timeout;
606 	uint16_t an_reg;
607 
608 	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
609 		return IXGBE_SUCCESS;
610 
611 	for (timeout = 0;
612 	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
613 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
614 		                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
615 
616 		if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
617 		    (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
618 			break;
619 
620 		msec_delay(100);
621 	}
622 
623 	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
624 		DEBUGOUT("Link was indicated but link is down\n");
625 		return IXGBE_ERR_LINK_SETUP;
626 	}
627 
628 	return IXGBE_SUCCESS;
629 }
630 
631 /**
632  *  ixgbe_check_mac_link_82598 - Get link/speed status
633  *  @hw: pointer to hardware structure
634  *  @speed: pointer to link speed
635  *  @link_up: TRUE if link is up, FALSE otherwise
636  *  @link_up_wait_to_complete: bool used to wait for link up or not
637  *
638  *  Reads the links register to determine if link is up and the current speed
639  **/
640 int32_t ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
641                                int *link_up, int link_up_wait_to_complete)
642 {
643 	uint32_t links_reg;
644 	uint32_t i;
645 	uint16_t link_reg, adapt_comp_reg;
646 
647 	/*
648 	 * SERDES PHY requires us to read link status from undocumented
649 	 * register 0xC79F.  Bit 0 set indicates link is up/ready; clear
650 	 * indicates link down.  0xC00C is read to check that the XAUI lanes
651 	 * are active.  Bit 0 clear indicates active; set indicates inactive.
652 	 */
653 	if (hw->phy.type == ixgbe_phy_nl) {
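		/*
		 * Read the link status twice; the first value is discarded
		 * in case a stale status was latched.
		 */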
654 		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
655 		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
656 		hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
657 		                     &adapt_comp_reg);
658 		if (link_up_wait_to_complete) {
659 			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
660 				if ((link_reg & 1) &&
661 				    ((adapt_comp_reg & 1) == 0)) {
662 					*link_up = TRUE;
663 					break;
664 				} else {
665 					*link_up = FALSE;
666 				}
667 				msec_delay(100);
668 				hw->phy.ops.read_reg(hw, 0xC79F,
669 				                     IXGBE_TWINAX_DEV,
670 				                     &link_reg);
671 				hw->phy.ops.read_reg(hw, 0xC00C,
672 				                     IXGBE_TWINAX_DEV,
673 				                     &adapt_comp_reg);
674 			}
675 		} else {
676 			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
677 				*link_up = TRUE;
678 			else
679 				*link_up = FALSE;
680 		}
681 
682 		if (*link_up == FALSE)
683 			goto out;
684 	}
685 
686 	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
687 	if (link_up_wait_to_complete) {
688 		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
689 			if (links_reg & IXGBE_LINKS_UP) {
690 				*link_up = TRUE;
691 				break;
692 			} else {
693 				*link_up = FALSE;
694 			}
695 			msec_delay(100);
696 			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
697 		}
698 	} else {
699 		if (links_reg & IXGBE_LINKS_UP)
700 			*link_up = TRUE;
701 		else
702 			*link_up = FALSE;
703 	}
704 
705 	if (links_reg & IXGBE_LINKS_SPEED)
706 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
707 	else
708 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
709 
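	/* On 82598AT2, also require the external PHY to report link ready. */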
710 	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
711 	    (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
712 		*link_up = FALSE;
713 
714 	/* if link is down, zero out the current_mode */
715 	if (*link_up == FALSE) {
716 		hw->fc.current_mode = ixgbe_fc_none;
717 		hw->fc.fc_was_autonegged = FALSE;
718 	}
719 out:
720 	return IXGBE_SUCCESS;
721 }
722 
723 /**
724  *  ixgbe_setup_mac_link_82598 - Set MAC link speed
725  *  @hw: pointer to hardware structure
726  *  @speed: new link speed
727  *  @autoneg: TRUE if autonegotiation enabled
728  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
729  *
730  *  Set the link speed in the AUTOC register and restarts link.
731  **/
732 int32_t ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
733                                      ixgbe_link_speed speed, int autoneg,
734                                      int autoneg_wait_to_complete)
735 {
736 	int32_t          status            = IXGBE_SUCCESS;
737 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
738 	uint32_t         curr_autoc        = IXGBE_READ_REG(hw, IXGBE_AUTOC);
739 	uint32_t         autoc             = curr_autoc;
740 	uint32_t         link_mode         = autoc & IXGBE_AUTOC_LMS_MASK;
741 
742 	/* Check to see if speed passed in is supported. */
743 	ixgbe_hw(hw, get_link_capabilities, &link_capabilities, &autoneg);
744 	speed &= link_capabilities;
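	/*
	 * If none of the requested speeds is supported, speed is now
	 * IXGBE_LINK_SPEED_UNKNOWN and link setup fails below.
	 */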
745 
746 	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
747 		status = IXGBE_ERR_LINK_SETUP;
748 
749 	/* Set KX4/KX support according to speed requested */
750 	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
751 	         link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
752 		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
753 		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
754 			autoc |= IXGBE_AUTOC_KX4_SUPP;
755 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
756 			autoc |= IXGBE_AUTOC_KX_SUPP;
757 		if (autoc != curr_autoc)
758 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
759 	}
760 
761 	if (status == IXGBE_SUCCESS) {
762 		/*
763 		 * Setup and restart the link based on the new values in
764 		 * ixgbe_hw.  This will write the AUTOC register based on the new
765 		 * stored values.
766 		 */
767 		status = ixgbe_start_mac_link_82598(hw,
768 		                                    autoneg_wait_to_complete);
769 	}
770 
771 	return status;
772 }
773 
774 
775 /**
776  *  ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
777  *  @hw: pointer to hardware structure
778  *  @speed: new link speed
779  *  @autoneg: TRUE if autonegotiation enabled
780  *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
781  *
782  *  Sets the link speed in the AUTOC register in the MAC and restarts link.
783  **/
784 int32_t ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
785                                         ixgbe_link_speed speed,
786                                         int autoneg,
787                                         int autoneg_wait_to_complete)
788 {
789 	int32_t status;
790 
791 	/* Setup the PHY according to input speed */
792 	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
793 	                                      autoneg_wait_to_complete);
794 	/* Set up MAC */
795 	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
796 
797 	return status;
798 }
799 
800 /**
801  *  ixgbe_reset_hw_82598 - Performs hardware reset
802  *  @hw: pointer to hardware structure
803  *
804  *  Resets the hardware by resetting the transmit and receive units, masking
805  *  and clearing all interrupts, performing a PHY reset, and performing a
806  *  link (MAC) reset.
807  **/
808 int32_t ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
809 {
810 	int32_t status = IXGBE_SUCCESS;
811 	int32_t phy_status = IXGBE_SUCCESS;
812 	uint32_t ctrl;
813 	uint32_t gheccr;
814 	uint32_t i;
815 	uint32_t autoc;
816 	uint8_t  analog_val;
817 
818 	/* Call adapter stop to disable tx/rx and clear interrupts */
819 	hw->mac.ops.stop_adapter(hw);
820 
821 	/*
822 	 * Power up the Atlas Tx lanes if they are currently powered down.
823 	 * Atlas Tx lanes are powered down for MAC loopback tests, but
824 	 * they are not automatically restored on reset.
825 	 */
826 	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
827 	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
828 		/* Enable Tx Atlas so packets can be transmitted again */
829 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
830 		                             &analog_val);
831 		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
832 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
833 		                              analog_val);
834 
835 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
836 		                             &analog_val);
837 		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
838 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
839 		                              analog_val);
840 
841 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
842 		                             &analog_val);
843 		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
844 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
845 		                              analog_val);
846 
847 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
848 		                             &analog_val);
849 		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
850 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
851 		                              analog_val);
852 	}
853 
854 	/* Reset PHY */
855 	if (hw->phy.reset_disable == FALSE) {
856 		/* PHY ops must be identified and initialized prior to reset */
857 
858 		/* Init PHY and function pointers, perform SFP setup */
859 		phy_status = hw->phy.ops.init(hw);
860 		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
861 			goto reset_hw_out;
862 		else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
863 			goto no_phy_reset;
864 
865 		hw->phy.ops.reset(hw);
866 	}
867 
868 no_phy_reset:
869 	/*
870 	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
871 	 * access and verify no pending requests before reset
872 	 */
873 	ixgbe_disable_pcie_master(hw);
874 
875 mac_reset_top:
876 	/*
877 	 * Issue global reset to the MAC.  This needs to be a SW reset.
878 	 * If link reset is used, it might reset the MAC when mng is using it
879 	 */
880 	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
881 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
882 	IXGBE_WRITE_FLUSH(hw);
883 
884 	/* Poll for reset bit to self-clear indicating reset is complete */
885 	for (i = 0; i < 10; i++) {
886 		usec_delay(1);
887 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
888 		if (!(ctrl & IXGBE_CTRL_RST))
889 			break;
890 	}
891 	if (ctrl & IXGBE_CTRL_RST) {
892 		status = IXGBE_ERR_RESET_FAILED;
893 		DEBUGOUT("Reset polling failed to complete.\n");
894 	}
895 
896 	/*
897 	 * Double resets are required for recovery from certain error
898 	 * conditions.  Between resets, it is necessary to stall to allow time
899 	 * for any pending HW events to complete.  We use 1usec since that is
900 	 * what is needed for ixgbe_disable_pcie_master().  The second reset
901 	 * then clears out any effects of those events.
902 	 */
903 	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
904 		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
905 		usec_delay(1);
906 		goto mac_reset_top;
907 	}
908 
909 	msec_delay(50);
910 
911 	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
912 	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
913 	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
914 
915 	/*
916 	 * Store the original AUTOC value if it has not been
917 	 * stored off yet.  Otherwise restore the stored original
918 	 * AUTOC value since the reset operation sets it back to defaults.
919 	 */
920 	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
921 	if (hw->mac.orig_link_settings_stored == FALSE) {
922 		hw->mac.orig_autoc = autoc;
923 		hw->mac.orig_link_settings_stored = TRUE;
924 	} else if (autoc != hw->mac.orig_autoc) {
925 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
926 	}
927 
928 	/* Store the permanent mac address */
929 	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
930 
931 	/*
932 	 * Store MAC address from RAR0, clear receive address registers, and
933 	 * clear the multicast table
934 	 */
935 	hw->mac.ops.init_rx_addrs(hw);
936 
937 reset_hw_out:
938 	if (phy_status != IXGBE_SUCCESS)
939 		status = phy_status;
940 
941 	return status;
942 }
943 
944 /**
945  *  ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
946  *  @hw: pointer to hardware struct
947  *  @rar: receive address register index to associate with a VMDq index
948  *  @vmdq: VMDq set index
949  **/
950 int32_t ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
951 {
952 	uint32_t rar_high;
953 	uint32_t rar_entries = hw->mac.num_rar_entries;
954 
955 	/* Make sure we are using a valid rar index range */
956 	if (rar >= rar_entries) {
957 		DEBUGOUT1("RAR index %d is out of range.\n", rar);
958 		return IXGBE_ERR_INVALID_ARGUMENT;
959 	}
960 
961 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
962 	rar_high &= ~IXGBE_RAH_VIND_MASK;
963 	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
964 	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
965 	return IXGBE_SUCCESS;
966 }
967 
968 /**
969  *  ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
970  *  @hw: pointer to hardware struct
971  *  @rar: receive address register index to associate with a VMDq index
972  *  @vmdq: VMDq clear index (not used in 82598, but elsewhere)
973  **/
974 int32_t ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
975 {
976 	uint32_t rar_high;
977 	uint32_t rar_entries = hw->mac.num_rar_entries;
978 
979 	UNREFERENCED_PARAMETER(vmdq);
980 
981 	/* Make sure we are using a valid rar index range */
982 	if (rar >= rar_entries) {
983 		DEBUGOUT1("RAR index %d is out of range.\n", rar);
984 		return IXGBE_ERR_INVALID_ARGUMENT;
985 	}
986 
987 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
988 	if (rar_high & IXGBE_RAH_VIND_MASK) {
989 		rar_high &= ~IXGBE_RAH_VIND_MASK;
990 		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
991 	}
992 
993 	return IXGBE_SUCCESS;
994 }
995 
996 /**
997  *  ixgbe_set_vfta_82598 - Set VLAN filter table
998  *  @hw: pointer to hardware structure
999  *  @vlan: VLAN id to write to VLAN filter
1000  *  @vind: VMDq output index that maps queue to VLAN id in VFTA
1001  *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
1002  *
1003  *  Turn on/off specified VLAN in the VLAN filter table.
1004  **/
1005 int32_t ixgbe_set_vfta_82598(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind,
1006 	                                              int vlan_on)
1007 {
1008 	uint32_t regindex;
1009 	uint32_t bitindex;
1010 	uint32_t bits;
1011 	uint32_t vftabyte;
1012 
1013 	if (vlan > 4095)
1014 		return IXGBE_ERR_PARAM;
1015 
1016 	/* Determine 32-bit word position in array */
1017 	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */
1018 
1019 	/* Determine the location of the (VMD) queue index */
1020 	vftabyte =  ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
1021 	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */
1022 
1023 	/* Set the nibble for VMD queue index */
1024 	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
1025 	bits &= (~(0x0F << bitindex));
1026 	bits |= (vind << bitindex);
1027 	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
1028 
1029 	/* Determine the location of the bit for this VLAN id */
1030 	bitindex = vlan & 0x1F;   /* lower five bits */
1031 
1032 	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1033 	if (vlan_on)
1034 		/* Turn on this VLAN id */
1035 		bits |= (1 << bitindex);
1036 	else
1037 		/* Turn off this VLAN id */
1038 		bits &= ~(1 << bitindex);
1039 	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1040 
1041 	return IXGBE_SUCCESS;
1042 }
1043 
1044 /**
1045  *  ixgbe_clear_vfta_82598 - Clear VLAN filter table
1046  *  @hw: pointer to hardware structure
1047  *
1048  *  Clears the VLAN filter table, and the VMDq index associated with the filter
1049  **/
1050 int32_t ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
1051 {
1052 	uint32_t offset;
1053 	uint32_t vlanbyte;
1054 
1055 	for (offset = 0; offset < hw->mac.vft_size; offset++)
1056 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
1057 
1058 	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
1059 		for (offset = 0; offset < hw->mac.vft_size; offset++)
1060 			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
1061 			                0);
1062 
1063 	return IXGBE_SUCCESS;
1064 }
1065 
1066 /**
1067  *  ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
1068  *  @hw: pointer to hardware structure
1069  *  @reg: analog register to read
1070  *  @val: read value
1071  *
1072  *  Performs a read operation on the specified Atlas analog register.
1073  **/
1074 int32_t ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, uint32_t reg, uint8_t *val)
1075 {
1076 	uint32_t  atlas_ctl;
1077 
1078 	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1079 	                IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1080 	IXGBE_WRITE_FLUSH(hw);
1081 	usec_delay(10);
1082 	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
1083 	*val = (uint8_t)atlas_ctl;
1084 
1085 	return IXGBE_SUCCESS;
1086 }
1087 
1088 /**
1089  *  ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
1090  *  @hw: pointer to hardware structure
1091  *  @reg: atlas register to write
1092  *  @val: value to write
1093  *
1094  *  Performs a write operation on the specified Atlas analog register.
1095  **/
1096 int32_t ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, uint32_t reg, uint8_t val)
1097 {
1098 	uint32_t  atlas_ctl;
1099 
1100 	atlas_ctl = (reg << 8) | val;
1101 	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1102 	IXGBE_WRITE_FLUSH(hw);
1103 	usec_delay(10);
1104 
1105 	return IXGBE_SUCCESS;
1106 }
1107 
1108 /**
1109  *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1110  *  @hw: pointer to hardware structure
1111  *  @byte_offset: EEPROM byte offset to read
1112  *  @eeprom_data: value read
1113  *
1114  *  Performs an 8-bit read of the SFP module's EEPROM over the I2C interface.
1115  **/
1116 int32_t ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, uint8_t byte_offset,
1117                                 uint8_t *eeprom_data)
1118 {
1119 	int32_t status = IXGBE_SUCCESS;
1120 	uint16_t sfp_addr = 0;
1121 	uint16_t sfp_data = 0;
1122 	uint16_t sfp_stat = 0;
1123 	uint32_t i;
1124 
1125 	if (hw->phy.type == ixgbe_phy_nl) {
1126 		/*
1127 		 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
1128 		 * 0xC30D. These registers are used to talk to the SFP+
1129 		 * module's EEPROM through the SDA/SCL (I2C) interface.
1130 		 */
1131 		sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
1132 		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
1133 		hw->phy.ops.write_reg(hw,
1134 		                      IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1135 		                      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1136 		                      sfp_addr);
1137 
1138 		/* Poll status */
1139 		for (i = 0; i < 100; i++) {
1140 			hw->phy.ops.read_reg(hw,
1141 			                     IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1142 			                     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1143 			                     &sfp_stat);
1144 			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1145 			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1146 				break;
1147 			msec_delay(10);
1148 		}
1149 
1150 		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
1151 			DEBUGOUT("EEPROM read did not pass.\n");
1152 			status = IXGBE_ERR_SFP_NOT_PRESENT;
1153 			goto out;
1154 		}
1155 
1156 		/* Read data */
1157 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1158 		                     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
1159 
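		/* The EEPROM byte is in the upper byte of the data register. */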
1160 		*eeprom_data = (uint8_t)(sfp_data >> 8);
1161 	} else {
1162 		status = IXGBE_ERR_PHY;
1163 		goto out;
1164 	}
1165 
1166 out:
1167 	return status;
1168 }
1169 
1170 /**
1171  *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
1172  *  @hw: pointer to hardware structure
1173  *
1174  *  Determines physical layer capabilities of the current configuration.
1175  **/
1176 uint32_t ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1177 {
1178 	uint32_t physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1179 	uint32_t autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1180 	uint32_t pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1181 	uint32_t pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1182 	uint16_t ext_ability = 0;
1183 
1184 	hw->phy.ops.identify(hw);
1185 
1186 	/* Copper PHY must be checked before AUTOC LMS to determine correct
1187 	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
1188 	switch (hw->phy.type) {
1189 	case ixgbe_phy_tn:
1190 	case ixgbe_phy_aq:
1191 	case ixgbe_phy_cu_unknown:
1192 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1193 		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1194 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1195 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1196 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1197 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1198 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1199 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1200 		goto out;
1201 	default:
1202 		break;
1203 	}
1204 
1205 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1206 	case IXGBE_AUTOC_LMS_1G_AN:
1207 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1208 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
1209 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1210 		else
1211 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1212 		break;
1213 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1214 		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
1215 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1216 		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
1217 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1218 		else /* XAUI */
1219 			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1220 		break;
1221 	case IXGBE_AUTOC_LMS_KX4_AN:
1222 	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
1223 		if (autoc & IXGBE_AUTOC_KX_SUPP)
1224 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1225 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
1226 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1227 		break;
1228 	default:
1229 		break;
1230 	}
1231 
1232 	if (hw->phy.type == ixgbe_phy_nl) {
1233 		hw->phy.ops.identify_sfp(hw);
1234 
1235 		switch (hw->phy.sfp_type) {
1236 		case ixgbe_sfp_type_da_cu:
1237 			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1238 			break;
1239 		case ixgbe_sfp_type_sr:
1240 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1241 			break;
1242 		case ixgbe_sfp_type_lr:
1243 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1244 			break;
1245 		default:
1246 			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1247 			break;
1248 		}
1249 	}
1250 
1251 	switch (hw->device_id) {
1252 	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
1253 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1254 		break;
1255 	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
1256 	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
1257 	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
1258 		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1259 		break;
1260 	case IXGBE_DEV_ID_82598EB_XF_LR:
1261 		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1262 		break;
1263 	default:
1264 		break;
1265 	}
1266 
1267 out:
1268 	return physical_layer;
1269 }
1270 
1271 /**
1272  *  ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1273  *  port devices.
1274  *  @hw: pointer to the HW structure
1275  *
1276  *  Calls the common function and corrects an issue with some single-port
1277  *  devices that enable LAN1 but not LAN0.
1278  **/
1279 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1280 {
1281 	struct ixgbe_bus_info *bus = &hw->bus;
1282 	uint16_t pci_gen, pci_ctrl2;
1283 
1284 	ixgbe_set_lan_id_multi_port_pcie(hw);
1285 
1286 	/* check if LAN0 is disabled */
1287 	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1288 	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1289 
1290 		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1291 
1292 		/* if LAN0 is completely disabled force function to 0 */
1293 		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1294 		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1295 		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1296 
1297 			bus->func = 0;
1298 		}
1299 	}
1300 }
1301 
1302 /**
1303  *  ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
1304  *  @hw: pointer to hardware structure
1305  *
1306  **/
1307 void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
1308 {
1309 	uint32_t regval;
1310 	uint32_t i;
1311 
1312 	DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
1313 
1314 	/* Enable relaxed ordering */
1315 	for (i = 0; ((i < hw->mac.max_tx_queues) &&
1316 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1317 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1318 		regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1319 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
1320 	}
1321 
1322 	for (i = 0; ((i < hw->mac.max_rx_queues) &&
1323 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1324 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
1325 		regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
1326 		           IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
1327 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
1328 	}
1329 
1330 }
1331