1 /* $NetBSD: ixgbe_82598.c,v 1.19 2022/06/06 02:16:37 msaitoh Exp $ */
2 
3 /******************************************************************************
4   SPDX-License-Identifier: BSD-3-Clause
5 
6   Copyright (c) 2001-2020, Intel Corporation
7   All rights reserved.
8 
9   Redistribution and use in source and binary forms, with or without
10   modification, are permitted provided that the following conditions are met:
11 
12    1. Redistributions of source code must retain the above copyright notice,
13       this list of conditions and the following disclaimer.
14 
15    2. Redistributions in binary form must reproduce the above copyright
16       notice, this list of conditions and the following disclaimer in the
17       documentation and/or other materials provided with the distribution.
18 
19    3. Neither the name of the Intel Corporation nor the names of its
20       contributors may be used to endorse or promote products derived from
21       this software without specific prior written permission.
22 
23   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
27   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33   POSSIBILITY OF SUCH DAMAGE.
34 
35 ******************************************************************************/
36 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82598.c 331224 2018-03-19 20:55:05Z erj $*/
37 
38 #include <sys/cdefs.h>
39 __KERNEL_RCSID(0, "$NetBSD: ixgbe_82598.c,v 1.19 2022/06/06 02:16:37 msaitoh Exp $");
40 
41 #include "ixgbe_type.h"
42 #include "ixgbe_82598.h"
43 #include "ixgbe_api.h"
44 #include "ixgbe_common.h"
45 #include "ixgbe_phy.h"
46 
47 #define IXGBE_82598_MAX_TX_QUEUES 32
48 #define IXGBE_82598_MAX_RX_QUEUES 64
49 #define IXGBE_82598_RAR_ENTRIES   16
50 #define IXGBE_82598_MC_TBL_SIZE  128
51 #define IXGBE_82598_VFT_TBL_SIZE 128
52 #define IXGBE_82598_RX_PB_SIZE   512
53 
54 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
55 					     ixgbe_link_speed *speed,
56 					     bool *autoneg);
57 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
58 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
59 				      bool autoneg_wait_to_complete);
60 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
61 				      ixgbe_link_speed *speed, bool *link_up,
62 				      bool link_up_wait_to_complete);
63 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
64 				      ixgbe_link_speed speed,
65 				      bool autoneg_wait_to_complete);
66 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
67 					 ixgbe_link_speed speed,
68 					 bool autoneg_wait_to_complete);
69 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
70 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
71 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
72 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
73 				  u32 headroom, int strategy);
74 static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
75 					u8 *sff8472_data);
76 /**
77  * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
78  * @hw: pointer to the HW structure
79  *
80  * The defaults for 82598 should be in the range of 50us to 50ms,
81  * however the hardware default for these parts is 500us to 1ms which is less
82  * than the 10ms recommended by the pci-e spec.  To address this we need to
83  * increase the value to either 10ms to 250ms for capability version 1 config,
84  * or 16ms to 55ms for version 2.
85  **/
86 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
87 {
88 	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
89 	u16 pcie_devctl2;
90 
91 	/* only take action if timeout value is defaulted to 0 */
92 	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
93 		goto out;
94 
95 	/*
96 	 * if capabilities version is type 1 we can write the
97 	 * timeout of 10ms to 250ms through the GCR register
98 	 */
99 	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
100 		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
101 		goto out;
102 	}
103 
104 	/*
105 	 * for version 2 capabilities we need to write the config space
106 	 * directly in order to set the completion timeout value for
107 	 * 16ms to 55ms
108 	 */
109 	pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
110 	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
111 	IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
112 out:
113 	/* disable completion timeout resend */
114 	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
115 	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
116 }
117 
118 /**
119  * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
120  * @hw: pointer to hardware structure
121  *
122  * Initialize the function pointers and assign the MAC type for 82598.
123  * Does not touch the hardware.
124  **/
125 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
126 {
127 	struct ixgbe_mac_info *mac = &hw->mac;
128 	struct ixgbe_phy_info *phy = &hw->phy;
129 	s32 ret_val;
130 
131 	DEBUGFUNC("ixgbe_init_ops_82598");
132 
133 	ret_val = ixgbe_init_phy_ops_generic(hw);
134 	ret_val = ixgbe_init_ops_generic(hw);
135 
136 	/* PHY */
137 	phy->ops.init = ixgbe_init_phy_ops_82598;
138 
139 	/* MAC */
140 	mac->ops.start_hw = ixgbe_start_hw_82598;
141 	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_82598;
142 	mac->ops.reset_hw = ixgbe_reset_hw_82598;
143 	mac->ops.get_media_type = ixgbe_get_media_type_82598;
144 	mac->ops.get_supported_physical_layer =
145 				ixgbe_get_supported_physical_layer_82598;
146 	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598;
147 	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598;
148 	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598;
149 	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82598;
150 
151 	/* RAR, Multicast, VLAN */
152 	mac->ops.set_vmdq = ixgbe_set_vmdq_82598;
153 	mac->ops.clear_vmdq = ixgbe_clear_vmdq_82598;
154 	mac->ops.set_vfta = ixgbe_set_vfta_82598;
155 	mac->ops.set_vlvf = NULL;
156 	mac->ops.clear_vfta = ixgbe_clear_vfta_82598;
157 
158 	/* Flow Control */
159 	mac->ops.fc_enable = ixgbe_fc_enable_82598;
160 
161 	mac->mcft_size		= IXGBE_82598_MC_TBL_SIZE;
162 	mac->vft_size		= IXGBE_82598_VFT_TBL_SIZE;
163 	mac->num_rar_entries	= IXGBE_82598_RAR_ENTRIES;
164 	mac->rx_pb_size		= IXGBE_82598_RX_PB_SIZE;
165 	mac->max_rx_queues	= IXGBE_82598_MAX_RX_QUEUES;
166 	mac->max_tx_queues	= IXGBE_82598_MAX_TX_QUEUES;
167 	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);
168 
169 	/* SFP+ Module */
170 	phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598;
171 	phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_82598;
172 
173 	/* Link */
174 	mac->ops.check_link = ixgbe_check_mac_link_82598;
175 	mac->ops.setup_link = ixgbe_setup_mac_link_82598;
176 	mac->ops.flap_tx_laser = NULL;
177 	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82598;
178 	mac->ops.setup_rxpba = ixgbe_set_rxpba_82598;
179 
180 	/* Manageability interface */
181 	mac->ops.set_fw_drv_ver = NULL;
182 
183 	mac->ops.get_rtrup2tc = NULL;
184 
185 	return ret_val;
186 }
187 
188 /**
189  * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
190  * @hw: pointer to hardware structure
191  *
192  * Initialize any function pointers that were not able to be
193  * set during init_shared_code because the PHY/SFP type was
194  * not known.  Perform the SFP init if necessary.
195  *
196  **/
197 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
198 {
199 	struct ixgbe_mac_info *mac = &hw->mac;
200 	struct ixgbe_phy_info *phy = &hw->phy;
201 	s32 ret_val = IXGBE_SUCCESS;
202 	u16 list_offset, data_offset;
203 
204 	DEBUGFUNC("ixgbe_init_phy_ops_82598");
205 
206 	/* Identify the PHY */
207 	phy->ops.identify(hw);
208 
209 	/* Overwrite the link function pointers if copper PHY */
210 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
211 		mac->ops.setup_link = ixgbe_setup_copper_link_82598;
212 		mac->ops.get_link_capabilities =
213 				ixgbe_get_copper_link_capabilities_generic;
214 	}
215 
216 	switch (hw->phy.type) {
217 	case ixgbe_phy_tn:
218 		phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
219 		phy->ops.check_link = ixgbe_check_phy_link_tnx;
220 		phy->ops.get_firmware_version =
221 					ixgbe_get_phy_firmware_version_tnx;
222 		break;
223 	case ixgbe_phy_nl:
224 		phy->ops.reset = ixgbe_reset_phy_nl;
225 
226 		/* Call SFP+ identify routine to get the SFP+ module type */
227 		ret_val = phy->ops.identify_sfp(hw);
228 		if (ret_val != IXGBE_SUCCESS)
229 			goto out;
230 		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
231 			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
232 			goto out;
233 		}
234 
235 		/* Check to see if SFP+ module is supported */
236 		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
237 							      &list_offset,
238 							      &data_offset);
239 		if (ret_val != IXGBE_SUCCESS) {
240 			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
241 			goto out;
242 		}
243 		break;
244 	default:
245 		break;
246 	}
247 
248 out:
249 	return ret_val;
250 }
251 
252 /**
253  * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
254  * @hw: pointer to hardware structure
255  *
256  * Starts the hardware using the generic start_hw function.
257  * Disables relaxed ordering, then sets the PCIe completion timeout.
258  *
259  **/
260 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
261 {
262 	u32 regval;
263 	u32 i;
264 	s32 ret_val = IXGBE_SUCCESS;
265 
266 	DEBUGFUNC("ixgbe_start_hw_82598");
267 
268 	ret_val = ixgbe_start_hw_generic(hw);
269 	if (ret_val)
270 		return ret_val;
271 
272 	/* Disable relaxed ordering */
273 	for (i = 0; ((i < hw->mac.max_tx_queues) &&
274 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
275 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
276 		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
277 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
278 	}
279 
280 	for (i = 0; ((i < hw->mac.max_rx_queues) &&
281 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
282 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
283 		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
284 			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
285 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
286 	}
287 
288 	/* set the completion timeout for interface */
289 	ixgbe_set_pcie_completion_timeout(hw);
290 
291 	return ret_val;
292 }
293 
294 /**
295  * ixgbe_get_link_capabilities_82598 - Determines link capabilities
296  * @hw: pointer to hardware structure
297  * @speed: pointer to link speed
298  * @autoneg: boolean auto-negotiation value
299  *
300  * Determines the link capabilities by reading the AUTOC register.
301  **/
302 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
303 					     ixgbe_link_speed *speed,
304 					     bool *autoneg)
305 {
306 	s32 status = IXGBE_SUCCESS;
307 	u32 autoc = 0;
308 
309 	DEBUGFUNC("ixgbe_get_link_capabilities_82598");
310 
311 	/*
312 	 * Determine link capabilities based on the stored value of AUTOC,
313 	 * which represents EEPROM defaults.  If AUTOC value has not been
314 	 * stored, use the current register value.
315 	 */
316 	if (hw->mac.orig_link_settings_stored)
317 		autoc = hw->mac.orig_autoc;
318 	else
319 		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
320 
321 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
322 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
323 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
324 		*autoneg = FALSE;
325 		break;
326 
327 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
328 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
329 		*autoneg = FALSE;
330 		break;
331 
332 	case IXGBE_AUTOC_LMS_1G_AN:
333 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
334 		*autoneg = TRUE;
335 		break;
336 
337 	case IXGBE_AUTOC_LMS_KX4_AN:
338 	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
339 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
340 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
341 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
342 		if (autoc & IXGBE_AUTOC_KX_SUPP)
343 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
344 		*autoneg = TRUE;
345 		break;
346 
347 	default:
348 		status = IXGBE_ERR_LINK_SETUP;
349 		break;
350 	}
351 
352 	return status;
353 }
354 
355 /**
356  * ixgbe_get_media_type_82598 - Determines media type
357  * @hw: pointer to hardware structure
358  *
359  * Returns the media type (fiber, copper, backplane)
360  **/
361 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
362 {
363 	enum ixgbe_media_type media_type;
364 
365 	DEBUGFUNC("ixgbe_get_media_type_82598");
366 
367 	/* Detect if there is a copper PHY attached. */
368 	switch (hw->phy.type) {
369 	case ixgbe_phy_cu_unknown:
370 	case ixgbe_phy_tn:
371 		media_type = ixgbe_media_type_copper;
372 		goto out;
373 	default:
374 		break;
375 	}
376 
377 	/* Media type for I82598 is based on device ID */
378 	switch (hw->device_id) {
379 	case IXGBE_DEV_ID_82598:
380 	case IXGBE_DEV_ID_82598_BX:
381 		/* Default device ID is mezzanine card KX/KX4 */
382 		media_type = ixgbe_media_type_backplane;
383 		break;
384 	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
385 	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
386 	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
387 	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
388 	case IXGBE_DEV_ID_82598EB_XF_LR:
389 	case IXGBE_DEV_ID_82598EB_SFP_LOM:
390 		media_type = ixgbe_media_type_fiber;
391 		break;
392 	case IXGBE_DEV_ID_82598EB_CX4:
393 	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
394 		media_type = ixgbe_media_type_cx4;
395 		break;
396 	case IXGBE_DEV_ID_82598AT:
397 	case IXGBE_DEV_ID_82598AT2:
398 		media_type = ixgbe_media_type_copper;
399 		break;
400 	default:
401 		media_type = ixgbe_media_type_unknown;
402 		break;
403 	}
404 out:
405 	return media_type;
406 }
407 
408 /**
409  * ixgbe_fc_enable_82598 - Enable flow control
410  * @hw: pointer to hardware structure
411  *
412  * Enable flow control according to the current settings.
413  **/
414 s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
415 {
416 	s32 ret_val = IXGBE_SUCCESS;
417 	u32 fctrl_reg;
418 	u32 rmcs_reg;
419 	u32 reg;
420 	u32 fcrtl, fcrth;
421 	u32 link_speed = 0;
422 	int i;
423 	bool link_up;
424 
425 	DEBUGFUNC("ixgbe_fc_enable_82598");
426 
427 	/* Validate the water mark configuration */
428 	if (!hw->fc.pause_time) {
429 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
430 		goto out;
431 	}
432 
433 	/* Low water mark of zero causes XOFF floods */
434 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
435 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
436 		    hw->fc.high_water[i]) {
437 			if (!hw->fc.low_water[i] ||
438 			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
439 				DEBUGOUT("Invalid water mark configuration\n");
440 				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
441 				goto out;
442 			}
443 		}
444 	}
445 
446 	/*
447 	 * On 82598 having Rx FC on causes resets while doing 1G
448 	 * so if it's on turn it off once we know link_speed. For
449 	 * more details see 82598 Specification update.
450 	 */
451 	hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
452 	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
453 		switch (hw->fc.requested_mode) {
454 		case ixgbe_fc_full:
455 			hw->fc.requested_mode = ixgbe_fc_tx_pause;
456 			break;
457 		case ixgbe_fc_rx_pause:
458 			hw->fc.requested_mode = ixgbe_fc_none;
459 			break;
460 		default:
461 			/* no change */
462 			break;
463 		}
464 	}
465 
466 	/* Negotiate the fc mode to use */
467 	ixgbe_fc_autoneg(hw);
468 
469 	/* Disable any previous flow control settings */
470 	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
471 	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
472 
473 	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
474 	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
475 
476 	/*
477 	 * The possible values of fc.current_mode are:
478 	 * 0: Flow control is completely disabled
479 	 * 1: Rx flow control is enabled (we can receive pause frames,
480 	 *    but not send pause frames).
481 	 * 2: Tx flow control is enabled (we can send pause frames but
482 	 *     we do not support receiving pause frames).
483 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
484 	 * other: Invalid.
485 	 */
486 	switch (hw->fc.current_mode) {
487 	case ixgbe_fc_none:
488 		/*
489 		 * Flow control is disabled by software override or autoneg.
490 		 * The code below will actually disable it in the HW.
491 		 */
492 		break;
493 	case ixgbe_fc_rx_pause:
494 		/*
495 		 * Rx Flow control is enabled and Tx Flow control is
496 		 * disabled by software override. Since there really
497 		 * isn't a way to advertise that we are capable of RX
498 		 * Pause ONLY, we will advertise that we support both
499 		 * symmetric and asymmetric Rx PAUSE.  Later, we will
500 		 * disable the adapter's ability to send PAUSE frames.
501 		 */
502 		fctrl_reg |= IXGBE_FCTRL_RFCE;
503 		break;
504 	case ixgbe_fc_tx_pause:
505 		/*
506 		 * Tx Flow control is enabled, and Rx Flow control is
507 		 * disabled by software override.
508 		 */
509 		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
510 		break;
511 	case ixgbe_fc_full:
512 		/* Flow control (both Rx and Tx) is enabled by SW override. */
513 		fctrl_reg |= IXGBE_FCTRL_RFCE;
514 		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
515 		break;
516 	default:
517 		DEBUGOUT("Flow control param set incorrectly\n");
518 		ret_val = IXGBE_ERR_CONFIG;
519 		goto out;
520 		break;
521 	}
522 
523 	/* Set 802.3x based flow control settings. */
524 	fctrl_reg |= IXGBE_FCTRL_DPF;
525 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
526 	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
527 
528 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
529 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
530 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
531 		    hw->fc.high_water[i]) {
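			/*
			 * The water marks appear to be kept in kilobytes; the
			 * << 10 below scales them to the byte units that the
			 * FCRTL/FCRTH registers take.
			 */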
532 			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
533 			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
534 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
535 			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
536 		} else {
537 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
538 			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
539 		}
540 
541 	}
542 
543 	/* Configure pause time (2 TCs per register) */
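	/*
	 * Multiplying the 16-bit pause time by 0x00010001 replicates it into
	 * both 16-bit halves of each FCTTV register, one value per TC.
	 */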
544 	reg = (u32)hw->fc.pause_time * 0x00010001;
545 	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
546 		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
547 
548 	/* Configure flow control refresh threshold value */
549 	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
550 
551 out:
552 	return ret_val;
553 }
554 
555 /**
556  * ixgbe_start_mac_link_82598 - Configures MAC link settings
557  * @hw: pointer to hardware structure
558  * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
559  *
560  * Configures link settings based on values in the ixgbe_hw struct.
561  * Restarts the link.  Performs autonegotiation if needed.
562  **/
563 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
564 				      bool autoneg_wait_to_complete)
565 {
566 	u32 autoc_reg;
567 	u32 links_reg;
568 	u32 i;
569 	s32 status = IXGBE_SUCCESS;
570 
571 	DEBUGFUNC("ixgbe_start_mac_link_82598");
572 
573 	/* Restart link */
574 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
575 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
576 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
577 
578 	/* Only poll for autoneg to complete if specified to do so */
579 	if (autoneg_wait_to_complete) {
580 		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
581 		     IXGBE_AUTOC_LMS_KX4_AN ||
582 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
583 		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
584 			links_reg = 0; /* Just in case Autoneg time = 0 */
585 			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
586 				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
587 				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
588 					break;
589 				msec_delay(100);
590 			}
591 			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
592 				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
593 				DEBUGOUT("Autonegotiation did not complete.\n");
594 			}
595 		}
596 	}
597 
598 	/* Add delay to filter out noises during initial link setup */
599 	/* Add delay to filter out noise during initial link setup */
600 
601 	return status;
602 }
603 
604 /**
605  * ixgbe_validate_link_ready - Function looks for phy link
606  * @hw: pointer to hardware structure
607  *
608  * Function indicates success when phy link is available. If phy is not ready
609  * within 5 seconds of MAC indicating link, the function returns error.
610  **/
611 static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
612 {
613 	u32 timeout;
614 	u16 an_reg;
615 
616 	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
617 		return IXGBE_SUCCESS;
618 
619 	for (timeout = 0;
620 	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
621 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
622 				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
623 
624 		if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
625 		    (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
626 			break;
627 
628 		msec_delay(100);
629 	}
630 
631 	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
632 		DEBUGOUT("Link was indicated but link is down\n");
633 		return IXGBE_ERR_LINK_SETUP;
634 	}
635 
636 	return IXGBE_SUCCESS;
637 }
638 
639 /**
640  * ixgbe_check_mac_link_82598 - Get link/speed status
641  * @hw: pointer to hardware structure
642  * @speed: pointer to link speed
643  * @link_up: TRUE if link is up, FALSE otherwise
644  * @link_up_wait_to_complete: bool used to wait for link up or not
645  *
646  * Reads the links register to determine if link is up and the current speed
647  **/
648 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
649 				      ixgbe_link_speed *speed, bool *link_up,
650 				      bool link_up_wait_to_complete)
651 {
652 	u32 links_reg;
653 	u32 i;
654 	u16 link_reg, adapt_comp_reg;
655 
656 	DEBUGFUNC("ixgbe_check_mac_link_82598");
657 
658 	/*
659 	 * SERDES PHY requires us to read link status from undocumented
660 	 * register 0xC79F.  Bit 0 set indicates link is up/ready; clear
661  * indicates link down.  0xC00C is read to check that the XAUI lanes
662 	 * are active.  Bit 0 clear indicates active; set indicates inactive.
663 	 */
664 	if (hw->phy.type == ixgbe_phy_nl) {
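		/*
		 * The link status register is read twice back to back; the
		 * first read presumably flushes a latched (stale) value so
		 * that the second read reflects the current link state.
		 */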
665 		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
666 		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
667 		hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
668 				     &adapt_comp_reg);
669 		if (link_up_wait_to_complete) {
670 			for (i = 0; i < hw->mac.max_link_up_time; i++) {
671 				if ((link_reg & 1) &&
672 				    ((adapt_comp_reg & 1) == 0)) {
673 					*link_up = TRUE;
674 					break;
675 				} else {
676 					*link_up = FALSE;
677 				}
678 				msec_delay(100);
679 				hw->phy.ops.read_reg(hw, 0xC79F,
680 						     IXGBE_TWINAX_DEV,
681 						     &link_reg);
682 				hw->phy.ops.read_reg(hw, 0xC00C,
683 						     IXGBE_TWINAX_DEV,
684 						     &adapt_comp_reg);
685 			}
686 		} else {
687 			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
688 				*link_up = TRUE;
689 			else
690 				*link_up = FALSE;
691 		}
692 
693 		if (*link_up == FALSE)
694 			goto out;
695 	}
696 
697 	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
698 	if (link_up_wait_to_complete) {
699 		for (i = 0; i < hw->mac.max_link_up_time; i++) {
700 			if (links_reg & IXGBE_LINKS_UP) {
701 				*link_up = TRUE;
702 				break;
703 			} else {
704 				*link_up = FALSE;
705 			}
706 			msec_delay(100);
707 			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
708 		}
709 	} else {
710 		if (links_reg & IXGBE_LINKS_UP)
711 			*link_up = TRUE;
712 		else
713 			*link_up = FALSE;
714 	}
715 
716 	if (links_reg & IXGBE_LINKS_SPEED)
717 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
718 	else
719 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
720 
721 	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
722 	    (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
723 		*link_up = FALSE;
724 
725 out:
726 	return IXGBE_SUCCESS;
727 }
728 
729 /**
730  * ixgbe_setup_mac_link_82598 - Set MAC link speed
731  * @hw: pointer to hardware structure
732  * @speed: new link speed
733  * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
734  *
735  * Set the link speed in the AUTOC register and restarts link.
736  **/
737 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
738 				      ixgbe_link_speed speed,
739 				      bool autoneg_wait_to_complete)
740 {
741 	bool autoneg = FALSE;
742 	s32 status = IXGBE_SUCCESS;
743 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
744 	u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
745 	u32 autoc = curr_autoc;
746 	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
747 
748 	DEBUGFUNC("ixgbe_setup_mac_link_82598");
749 
750 	/* Check to see if speed passed in is supported. */
751 	ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
752 	speed &= link_capabilities;
753 
754 	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
755 		status = IXGBE_ERR_LINK_SETUP;
756 
757 	/* Set KX4/KX support according to speed requested */
758 	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
759 		 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
760 		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
761 		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
762 			autoc |= IXGBE_AUTOC_KX4_SUPP;
763 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
764 			autoc |= IXGBE_AUTOC_KX_SUPP;
765 		if (autoc != curr_autoc)
766 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
767 	}
768 
769 	if (status == IXGBE_SUCCESS) {
770 		/*
771 		 * Setup and restart the link based on the new values in
772 		 * ixgbe_hw This will write the AUTOC register based on the new
773 		 * stored values
774 		 */
775 		status = ixgbe_start_mac_link_82598(hw,
776 						    autoneg_wait_to_complete);
777 	}
778 
779 	return status;
780 }
781 
782 
783 /**
784  * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
785  * @hw: pointer to hardware structure
786  * @speed: new link speed
787  * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
788  *
789  * Sets the link speed in the AUTOC register in the MAC and restarts link.
790  **/
791 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
792 					 ixgbe_link_speed speed,
793 					 bool autoneg_wait_to_complete)
794 {
795 	s32 status;
796 
797 	DEBUGFUNC("ixgbe_setup_copper_link_82598");
798 
799 	/* Setup the PHY according to input speed */
800 	status = hw->phy.ops.setup_link_speed(hw, speed,
801 					      autoneg_wait_to_complete);
802 	/* Set up MAC */
803 	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
804 
805 	return status;
806 }
807 
808 /**
809  * ixgbe_reset_hw_82598 - Performs hardware reset
810  * @hw: pointer to hardware structure
811  *
812  * Resets the hardware by resetting the transmit and receive units, masks and
813  * clears all interrupts, performing a PHY reset, and performing a link (MAC)
814  * reset.
815  **/
816 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
817 {
818 	s32 status = IXGBE_SUCCESS;
819 	s32 phy_status = IXGBE_SUCCESS;
820 	u32 ctrl;
821 	u32 gheccr;
822 	u32 i;
823 	u32 autoc;
824 	u8  analog_val;
825 
826 	DEBUGFUNC("ixgbe_reset_hw_82598");
827 
828 	/* Call adapter stop to disable tx/rx and clear interrupts */
829 	status = hw->mac.ops.stop_adapter(hw);
830 	if (status != IXGBE_SUCCESS)
831 		goto reset_hw_out;
832 
833 	/*
834 	 * Power up the Atlas Tx lanes if they are currently powered down.
835 	 * Atlas Tx lanes are powered down for MAC loopback tests, but
836 	 * they are not automatically restored on reset.
837 	 */
838 	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
839 	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
840 		/* Enable Tx Atlas so packets can be transmitted again */
841 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
842 					     &analog_val);
843 		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
844 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
845 					      analog_val);
846 
847 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
848 					     &analog_val);
849 		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
850 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
851 					      analog_val);
852 
853 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
854 					     &analog_val);
855 		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
856 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
857 					      analog_val);
858 
859 		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
860 					     &analog_val);
861 		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
862 		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
863 					      analog_val);
864 	}
865 
866 	/* Reset PHY */
867 	if (hw->phy.reset_disable == FALSE) {
868 		/* PHY ops must be identified and initialized prior to reset */
869 
870 		/* Init PHY and function pointers, perform SFP setup */
871 		phy_status = hw->phy.ops.init(hw);
872 		if ((phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED) ||
873 		    (phy_status == IXGBE_ERR_SFP_NOT_PRESENT))
874 			goto mac_reset_top;
875 
876 		hw->phy.ops.reset(hw);
877 	}
878 
879 mac_reset_top:
880 	/*
881 	 * Issue global reset to the MAC.  This needs to be a SW reset.
882 	 * If link reset is used, it might reset the MAC when mng is using it
883 	 */
884 	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
885 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
886 	IXGBE_WRITE_FLUSH(hw);
887 
888 	/* Poll for reset bit to self-clear indicating reset is complete */
889 	for (i = 0; i < 10; i++) {
890 		usec_delay(1);
891 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
892 		if (!(ctrl & IXGBE_CTRL_RST))
893 			break;
894 	}
895 	if (ctrl & IXGBE_CTRL_RST) {
896 		status = IXGBE_ERR_RESET_FAILED;
897 		DEBUGOUT("Reset polling failed to complete.\n");
898 	}
899 
900 	msec_delay(50);
901 
902 	/*
903 	 * Double resets are required for recovery from certain error
904 	 * conditions.  Between resets, it is necessary to stall to allow time
905 	 * for any pending HW events to complete.
906 	 */
907 	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
908 		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
909 		goto mac_reset_top;
910 	}
911 
912 	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
913 	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
914 	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
915 
916 	/*
917 	 * Store the original AUTOC value if it has not been
918 	 * stored off yet.  Otherwise restore the stored original
919 	 * AUTOC value since the reset operation sets back to defaults.
920 	 */
921 	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
922 	if (hw->mac.orig_link_settings_stored == FALSE) {
923 		hw->mac.orig_autoc = autoc;
924 		hw->mac.orig_link_settings_stored = TRUE;
925 	} else if (autoc != hw->mac.orig_autoc) {
926 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
927 	}
928 
929 	/* Store the permanent mac address */
930 	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
931 
932 	/*
933 	 * Store MAC address from RAR0, clear receive address registers, and
934 	 * clear the multicast table
935 	 */
936 	hw->mac.ops.init_rx_addrs(hw);
937 
938 reset_hw_out:
939 	if (phy_status != IXGBE_SUCCESS)
940 		status = phy_status;
941 
942 	return status;
943 }
944 
945 /**
946  * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
947  * @hw: pointer to hardware struct
948  * @rar: receive address register index to associate with a VMDq index
949  * @vmdq: VMDq set index
950  **/
951 s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
952 {
953 	u32 rar_high;
954 	u32 rar_entries = hw->mac.num_rar_entries;
955 
956 	DEBUGFUNC("ixgbe_set_vmdq_82598");
957 
958 	/* Make sure we are using a valid rar index range */
959 	if (rar >= rar_entries) {
960 		DEBUGOUT1("RAR index %d is out of range.\n", rar);
961 		return IXGBE_ERR_INVALID_ARGUMENT;
962 	}
963 
964 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
965 	rar_high &= ~IXGBE_RAH_VIND_MASK;
966 	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
967 	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
968 	return IXGBE_SUCCESS;
969 }
970 
971 /**
972  * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
973  * @hw: pointer to hardware struct
974  * @rar: receive address register index to associate with a VMDq index
975  * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
976  **/
977 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
978 {
979 	u32 rar_high;
980 	u32 rar_entries = hw->mac.num_rar_entries;
981 
982 	UNREFERENCED_1PARAMETER(vmdq);
983 
984 	/* Make sure we are using a valid rar index range */
985 	if (rar >= rar_entries) {
986 		DEBUGOUT1("RAR index %d is out of range.\n", rar);
987 		return IXGBE_ERR_INVALID_ARGUMENT;
988 	}
989 
990 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
991 	if (rar_high & IXGBE_RAH_VIND_MASK) {
992 		rar_high &= ~IXGBE_RAH_VIND_MASK;
993 		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
994 	}
995 
996 	return IXGBE_SUCCESS;
997 }
998 
999 /**
1000  * ixgbe_set_vfta_82598 - Set VLAN filter table
1001  * @hw: pointer to hardware structure
1002  * @vlan: VLAN id to write to VLAN filter
1003  * @vind: VMDq output index that maps queue to VLAN id in VFTA
1004  * @vlan_on: boolean flag to turn on/off VLAN in VFTA
1005  * @vlvf_bypass: boolean flag - unused
1006  *
1007  * Turn on/off specified VLAN in the VLAN filter table.
1008  **/
1009 s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
1010 			 bool vlan_on, bool vlvf_bypass)
1011 {
1012 	u32 regindex;
1013 	u32 bitindex;
1014 	u32 bits;
1015 	u32 vftabyte;
1016 
1017 	UNREFERENCED_1PARAMETER(vlvf_bypass);
1018 
1019 	DEBUGFUNC("ixgbe_set_vfta_82598");
1020 
1021 	if (vlan > 4095)
1022 		return IXGBE_ERR_PARAM;
1023 
1024 	/* Determine 32-bit word position in array */
1025 	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */
1026 
1027 	/* Determine the location of the (VMD) queue index */
1028 	vftabyte =  ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
1029 	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */
1030 
1031 	/* Set the nibble for VMD queue index */
1032 	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
1033 	bits &= (~(0x0F << bitindex));
1034 	bits |= (vind << bitindex);
1035 	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
1036 
1037 	/* Determine the location of the bit for this VLAN id */
1038 	bitindex = vlan & 0x1F;   /* lower five bits */
1039 
1040 	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1041 	if (vlan_on)
1042 		/* Turn on this VLAN id */
1043 		bits |= (1 << bitindex);
1044 	else
1045 		/* Turn off this VLAN id */
1046 		bits &= ~(1 << bitindex);
1047 	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1048 
1049 	return IXGBE_SUCCESS;
1050 }
1051 
1052 /**
1053  * ixgbe_clear_vfta_82598 - Clear VLAN filter table
1054  * @hw: pointer to hardware structure
1055  *
1056  * Clears the VLAN filter table, and the VMDq index associated with the filter
1057  **/
1058 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
1059 {
1060 	u32 offset;
1061 	u32 vlanbyte;
1062 
1063 	DEBUGFUNC("ixgbe_clear_vfta_82598");
1064 
1065 	for (offset = 0; offset < hw->mac.vft_size; offset++)
1066 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
1067 
1068 	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
1069 		for (offset = 0; offset < hw->mac.vft_size; offset++)
1070 			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
1071 					0);
1072 
1073 	return IXGBE_SUCCESS;
1074 }
1075 
1076 /**
1077  * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
1078  * @hw: pointer to hardware structure
1079  * @reg: analog register to read
1080  * @val: read value
1081  *
1082  * Performs read operation to Atlas analog register specified.
1083  **/
1084 s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
1085 {
1086 	u32  atlas_ctl;
1087 
1088 	DEBUGFUNC("ixgbe_read_analog_reg8_82598");
1089 
1090 	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1091 			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1092 	IXGBE_WRITE_FLUSH(hw);
1093 	usec_delay(10);
1094 	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
1095 	*val = (u8)atlas_ctl;
1096 
1097 	return IXGBE_SUCCESS;
1098 }
1099 
1100 /**
1101  * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
1102  * @hw: pointer to hardware structure
1103  * @reg: atlas register to write
1104  * @val: value to write
1105  *
1106  * Performs write operation to Atlas analog register specified.
1107  **/
1108 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
1109 {
1110 	u32  atlas_ctl;
1111 
1112 	DEBUGFUNC("ixgbe_write_analog_reg8_82598");
1113 
1114 	atlas_ctl = (reg << 8) | val;
1115 	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1116 	IXGBE_WRITE_FLUSH(hw);
1117 	usec_delay(10);
1118 
1119 	return IXGBE_SUCCESS;
1120 }
1121 
1122 /**
1123  * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
1124  * @hw: pointer to hardware structure
1125  * @dev_addr: address to read from
1126  * @byte_offset: byte offset to read from dev_addr
1127  * @eeprom_data: value read
1128  *
1129  * Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
1130  **/
1131 static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
1132 				    u8 byte_offset, u8 *eeprom_data)
1133 {
1134 	s32 status = IXGBE_SUCCESS;
1135 	u16 sfp_addr = 0;
1136 	u16 sfp_data = 0;
1137 	u16 sfp_stat = 0;
1138 	u16 gssr;
1139 	u32 i;
1140 
1141 	DEBUGFUNC("ixgbe_read_i2c_phy_82598");
1142 
1143 	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
1144 		gssr = IXGBE_GSSR_PHY1_SM;
1145 	else
1146 		gssr = IXGBE_GSSR_PHY0_SM;
1147 
1148 	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
1149 		return IXGBE_ERR_SWFW_SYNC;
1150 
1151 	if (hw->phy.type == ixgbe_phy_nl) {
1152 		/*
1153 		 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
1154 		 * 0xC30D. These registers are used to talk to the SFP+
1155 		 * module's EEPROM through the SDA/SCL (I2C) interface.
1156 		 */
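		/*
		 * The address word is assumed to carry the I2C device address
		 * in its upper byte and the EEPROM byte offset in its lower
		 * byte, with IXGBE_I2C_EEPROM_READ_MASK marking the cycle as
		 * a read.
		 */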
1157 		sfp_addr = (dev_addr << 8) + byte_offset;
1158 		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
1159 		hw->phy.ops.write_reg_mdi(hw,
1160 					  IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1161 					  IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1162 					  sfp_addr);
1163 
1164 		/* Poll status */
1165 		for (i = 0; i < 100; i++) {
1166 			hw->phy.ops.read_reg_mdi(hw,
1167 						IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1168 						IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1169 						&sfp_stat);
1170 			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1171 			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1172 				break;
1173 			msec_delay(10);
1174 		}
1175 
1176 		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
1177 			DEBUGOUT("EEPROM read did not pass.\n");
1178 			status = IXGBE_ERR_SFP_NOT_PRESENT;
1179 			goto out;
1180 		}
1181 
1182 		/* Read data */
1183 		hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1184 					IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
1185 
1186 		*eeprom_data = (u8)(sfp_data >> 8);
1187 	} else {
1188 		status = IXGBE_ERR_PHY;
1189 	}
1190 
1191 out:
1192 	hw->mac.ops.release_swfw_sync(hw, gssr);
1193 	return status;
1194 }
1195 
1196 /**
1197  * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1198  * @hw: pointer to hardware structure
1199  * @byte_offset: EEPROM byte offset to read
1200  * @eeprom_data: value read
1201  *
1202  * Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
1203  **/
1204 s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1205 				u8 *eeprom_data)
1206 {
1207 	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
1208 					byte_offset, eeprom_data);
1209 }
1210 
1211 /**
1212  * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
1213  * @hw: pointer to hardware structure
1214  * @byte_offset: byte offset at address 0xA2
1215  * @sff8472_data: value read
1216  *
1217  * Performs 8 bit read operation to SFP module's SFF-8472 data over I2C
1218  **/
1219 static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
1220 					u8 *sff8472_data)
1221 {
1222 	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
1223 					byte_offset, sff8472_data);
1224 }
1225 
1226 /**
1227  * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
1228  * @hw: pointer to hardware structure
1229  *
1230  * Determines physical layer capabilities of the current configuration.
1231  **/
1232 u64 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1233 {
1234 	u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1235 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1236 	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1237 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1238 	u16 ext_ability = 0;
1239 
1240 	DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");
1241 
1242 	hw->phy.ops.identify(hw);
1243 
1244 	/* Copper PHY must be checked before AUTOC LMS to determine correct
1245 	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
1246 	switch (hw->phy.type) {
1247 	case ixgbe_phy_tn:
1248 	case ixgbe_phy_cu_unknown:
1249 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1250 		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1251 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1252 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1253 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1254 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1255 		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1256 			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1257 		goto out;
1258 	default:
1259 		break;
1260 	}
1261 
1262 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1263 	case IXGBE_AUTOC_LMS_1G_AN:
1264 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1265 		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
1266 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1267 		else
1268 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1269 		break;
1270 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1271 		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
1272 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1273 		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
1274 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1275 		else /* XAUI */
1276 			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1277 		break;
1278 	case IXGBE_AUTOC_LMS_KX4_AN:
1279 	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
1280 		if (autoc & IXGBE_AUTOC_KX_SUPP)
1281 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1282 		if (autoc & IXGBE_AUTOC_KX4_SUPP)
1283 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1284 		break;
1285 	default:
1286 		break;
1287 	}
1288 
1289 	if (hw->phy.type == ixgbe_phy_nl) {
1290 		hw->phy.ops.identify_sfp(hw);
1291 
1292 		switch (hw->phy.sfp_type) {
1293 		case ixgbe_sfp_type_da_cu:
1294 			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1295 			break;
1296 		case ixgbe_sfp_type_sr:
1297 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1298 			break;
1299 		case ixgbe_sfp_type_lr:
1300 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1301 			break;
1302 		default:
1303 			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1304 			break;
1305 		}
1306 	}
1307 
1308 	switch (hw->device_id) {
1309 	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
1310 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1311 		break;
1312 	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
1313 	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
1314 	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
1315 		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1316 		break;
1317 	case IXGBE_DEV_ID_82598EB_XF_LR:
1318 		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1319 		break;
1320 	default:
1321 		break;
1322 	}
1323 
1324 out:
1325 	return physical_layer;
1326 }
1327 
1328 /**
1329  * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1330  * port devices.
1331  * @hw: pointer to the HW structure
1332  *
1333  * Calls common function and corrects issue with some single port devices
1334  * that enable LAN1 but not LAN0.
1335  **/
1336 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1337 {
1338 	struct ixgbe_bus_info *bus = &hw->bus;
1339 	u16 pci_gen = 0;
1340 	u16 pci_ctrl2 = 0;
1341 
1342 	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
1343 
1344 	ixgbe_set_lan_id_multi_port_pcie(hw);
1345 
1346 	/* check if LAN0 is disabled */
1347 	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1348 	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1349 
1350 		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1351 
1352 		/* if LAN0 is completely disabled force function to 0 */
1353 		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1354 		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1355 		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1356 
1357 			bus->func = 0;
1358 		}
1359 	}
1360 }
1361 
1362 /**
1363  * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
1364  * @hw: pointer to hardware structure
1365  *
1366  **/
1367 void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
1368 {
1369 	u32 regval;
1370 	u32 i;
1371 
1372 	DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
1373 
1374 	/* Enable relaxed ordering */
1375 	for (i = 0; ((i < hw->mac.max_tx_queues) &&
1376 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1377 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1378 		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1379 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
1380 	}
1381 
1382 	for (i = 0; ((i < hw->mac.max_rx_queues) &&
1383 	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1384 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
1385 		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
1386 			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
1387 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
1388 	}
1389 
1390 }
1391 
1392 /**
1393  * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
1394  * @hw: pointer to hardware structure
1395  * @num_pb: number of packet buffers to allocate
1396  * @headroom: reserve n KB of headroom
1397  * @strategy: packet buffer allocation strategy
1398  **/
1399 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
1400 				  u32 headroom, int strategy)
1401 {
1402 	u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
1403 	u8 i = 0;
1404 	UNREFERENCED_1PARAMETER(headroom);
1405 
1406 	if (!num_pb)
1407 		return;
1408 
1409 	/* Setup Rx packet buffer sizes */
1410 	switch (strategy) {
1411 	case PBA_STRATEGY_WEIGHTED:
1412 		/* Setup the first four at 80KB */
1413 		rxpktsize = IXGBE_RXPBSIZE_80KB;
1414 		for (; i < 4; i++)
1415 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1416 		/* Setup the last four at 48KB...don't re-init i */
1417 		rxpktsize = IXGBE_RXPBSIZE_48KB;
1418 		/* Fall Through */
1419 	case PBA_STRATEGY_EQUAL:
1420 	default:
1421 		/* Divide the remaining Rx packet buffer evenly among the TCs */
1422 		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1423 			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1424 		break;
1425 	}
1426 
1427 	/* Setup Tx packet buffer sizes */
1428 	for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1429 		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
1430 }
1431 
1432 /**
1433  * ixgbe_enable_rx_dma_82598 - Enable the Rx DMA unit
1434  * @hw: pointer to hardware structure
1435  * @regval: register value to write to RXCTRL
1436  *
1437  * Enables the Rx DMA unit
1438  **/
1439 s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval)
1440 {
1441 	DEBUGFUNC("ixgbe_enable_rx_dma_82598");
1442 
1443 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
1444 
1445 	return IXGBE_SUCCESS;
1446 }
1447