1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 *
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22
23 /*
24 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
25 */
26
27 /*
28 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
29 */
30
31 /* IntelVersion: 1.167 scm_061610_003709 */
32
33 #include "ixgbe_type.h"
34 #include "ixgbe_api.h"
35 #include "ixgbe_common.h"
36 #include "ixgbe_phy.h"
37
38 u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
39 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
40 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
41 ixgbe_link_speed *speed, bool *autoneg);
42 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
43 s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num);
44 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
45 bool autoneg_wait_to_complete);
46 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
47 ixgbe_link_speed *speed, bool *link_up, bool link_up_wait_to_complete);
48 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
49 ixgbe_link_speed speed, bool autoneg,
50 bool autoneg_wait_to_complete);
51 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
52 ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete);
53 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
54 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
55 void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
56 s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
57 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
58 s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan,
59 u32 vind, bool vlan_on);
60 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
61 s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
62 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
63 s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
64 u8 *eeprom_data);
65 u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
66 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
67 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
68 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
69 static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw);
70
71 /*
72 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
73 * @hw: pointer to the HW structure
74 *
75 * The defaults for 82598 should be in the range of 50us to 50ms,
76 * however the hardware default for these parts is 500us to 1ms which is less
77 * than the 10ms recommended by the pci-e spec. To address this we need to
78 * increase the value to either 10ms to 250ms for capability version 1 config,
79 * or 16ms to 55ms for version 2.
80 */
81 void
82 ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
83 {
84 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
85 u16 pcie_devctl2;
86
87 /* only take action if timeout value is defaulted to 0 */
88 if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
89 goto out;
90
91 /*
92 	 * if the capabilities version is type 1 we can write the
93 * timeout of 10ms to 250ms through the GCR register
94 */
95 if (!(gcr & IXGBE_GCR_CAP_VER2)) {
96 gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
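		/* the updated GCR value is written back at the out: label below */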
97 goto out;
98 }
99
100 /*
101 * for version 2 capabilities we need to write the config space
102 * directly in order to set the completion timeout value for
103 * 16ms to 55ms
104 */
105 pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
106 pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
107 IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
108 out:
109 /* disable completion timeout resend */
110 gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
111 IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
112 }
113
114 /*
115 * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
116 * @hw: pointer to hardware structure
117 *
118 * Read PCIe configuration space, and get the MSI-X vector count from
119 * the capabilities table.
120 */
121 u32
122 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
123 {
124 u32 msix_count = 18;
125
126 DEBUGFUNC("ixgbe_get_pcie_msix_count_82598");
127
128 if (hw->mac.msix_vectors_from_pcie) {
129 msix_count = IXGBE_READ_PCIE_WORD(hw,
130 IXGBE_PCIE_MSIX_82598_CAPS);
131 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
132
133 /*
134 * MSI-X count is zero-based in HW, so increment to give
135 * proper value
136 */
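		/* e.g. a raw table-size field of 17 (0x11) reports 18 usable vectors */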
137 msix_count++;
138 }
139 return (msix_count);
140 }
141
142 /*
143 * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
144 * @hw: pointer to hardware structure
145 *
146 * Initialize the function pointers and assign the MAC type for 82598.
147 * Does not touch the hardware.
148 */
149 s32
150 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
151 {
152 struct ixgbe_mac_info *mac = &hw->mac;
153 struct ixgbe_phy_info *phy = &hw->phy;
154 s32 ret_val;
155
156 DEBUGFUNC("ixgbe_init_ops_82598");
157
158 ret_val = ixgbe_init_phy_ops_generic(hw);
159 ret_val = ixgbe_init_ops_generic(hw);
160
161 /* PHY */
162 phy->ops.init = &ixgbe_init_phy_ops_82598;
163
164 /* MAC */
165 mac->ops.start_hw = &ixgbe_start_hw_82598;
166 mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
167 mac->ops.reset_hw = &ixgbe_reset_hw_82598;
168 mac->ops.get_media_type = &ixgbe_get_media_type_82598;
169 mac->ops.get_supported_physical_layer =
170 &ixgbe_get_supported_physical_layer_82598;
171 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
172 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
173 mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
174
175 /* RAR, Multicast, VLAN */
176 mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
177 mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
178 mac->ops.set_vfta = &ixgbe_set_vfta_82598;
179 mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
180
181 /* Flow Control */
182 mac->ops.fc_enable = &ixgbe_fc_enable_82598;
183
184 mac->mcft_size = 128;
185 mac->vft_size = 128;
186 mac->num_rar_entries = 16;
187 mac->max_tx_queues = 32;
188 mac->max_rx_queues = 64;
189 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
190
191 /* SFP+ Module */
192 phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
193
194 /* Link */
195 mac->ops.check_link = &ixgbe_check_mac_link_82598;
196 mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
197 mac->ops.flap_tx_laser = NULL;
198 mac->ops.get_link_capabilities =
199 &ixgbe_get_link_capabilities_82598;
200
201 return (ret_val);
202 }
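
/*
 * Illustrative sketch (not part of the upstream driver): once
 * ixgbe_init_ops_82598() has populated the ops tables, callers normally
 * drive the part through those pointers rather than calling the
 * 82598-specific routines directly.  The helper below is hypothetical,
 * shown only for exposition, and is not referenced anywhere in this file.
 */
static s32
example_82598_bringup(struct ixgbe_hw *hw)
{
	ixgbe_link_speed speed;
	bool link_up;
	s32 status;

	status = ixgbe_init_ops_82598(hw);
	if (status != IXGBE_SUCCESS)
		return (status);

	/* Reset, then start, the MAC through the ops table */
	status = hw->mac.ops.reset_hw(hw);
	if (status != IXGBE_SUCCESS)
		return (status);
	status = hw->mac.ops.start_hw(hw);
	if (status != IXGBE_SUCCESS)
		return (status);

	/* Query the current link state without waiting for completion */
	return (hw->mac.ops.check_link(hw, &speed, &link_up, false));
}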
203
204 /*
205 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
206 * @hw: pointer to hardware structure
207 *
208 * Initialize any function pointers that were not able to be
209 * set during init_shared_code because the PHY/SFP type was
210 * not known. Perform the SFP init if necessary.
211 *
212 */
213 s32
214 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
215 {
216 struct ixgbe_mac_info *mac = &hw->mac;
217 struct ixgbe_phy_info *phy = &hw->phy;
218 s32 ret_val = IXGBE_SUCCESS;
219 u16 list_offset, data_offset;
220
221 DEBUGFUNC("ixgbe_init_phy_ops_82598");
222
223 /* Identify the PHY */
224 phy->ops.identify(hw);
225
226 /* Overwrite the link function pointers if copper PHY */
227 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
228 mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
229 mac->ops.get_link_capabilities =
230 &ixgbe_get_copper_link_capabilities_generic;
231 }
232
233 switch (hw->phy.type) {
234 case ixgbe_phy_tn:
235 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
236 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
237 phy->ops.get_firmware_version =
238 &ixgbe_get_phy_firmware_version_tnx;
239 break;
240 case ixgbe_phy_aq:
241 phy->ops.get_firmware_version =
242 &ixgbe_get_phy_firmware_version_generic;
243 break;
244 case ixgbe_phy_nl:
245 phy->ops.reset = &ixgbe_reset_phy_nl;
246
247 /* Call SFP+ identify routine to get the SFP+ module type */
248 ret_val = phy->ops.identify_sfp(hw);
249 if (ret_val != IXGBE_SUCCESS)
250 goto out;
251 else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
252 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
253 goto out;
254 }
255
256 /* Check to see if SFP+ module is supported */
257 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
258 &list_offset, &data_offset);
259 if (ret_val != IXGBE_SUCCESS) {
260 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
261 goto out;
262 }
263 break;
264 default:
265 break;
266 }
267 out:
268 return (ret_val);
269 }
270
271 /*
272 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
273 * @hw: pointer to hardware structure
274 *
275 * Starts the hardware using the generic start_hw function.
276  * Disables relaxed ordering, then sets the PCIe completion timeout.
277 */
278 s32
279 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
280 {
281 u32 regval;
282 u32 i;
283 s32 ret_val = IXGBE_SUCCESS;
284
285 DEBUGFUNC("ixgbe_start_hw_82598");
286
287 ret_val = ixgbe_start_hw_generic(hw);
288
289 /*
290 * Disable relaxed ordering
291 */
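	/*
	 * Platforms that can safely use relaxed ordering may re-enable it
	 * later through ixgbe_enable_relaxed_ordering_82598().
	 */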
292 for (i = 0; ((i < hw->mac.max_tx_queues) &&
293 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
294 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
295 regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
296 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
297 }
298
299 for (i = 0; ((i < hw->mac.max_rx_queues) &&
300 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
301 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
302 regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
303 IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
304 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
305 }
306
307 /* set the completion timeout for interface */
308 if (ret_val == IXGBE_SUCCESS)
309 ixgbe_set_pcie_completion_timeout(hw);
310
311 return (ret_val);
312 }
313
314 /*
315 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
316 * @hw: pointer to hardware structure
317 * @speed: pointer to link speed
318 * @autoneg: boolean auto-negotiation value
319 *
320 * Determines the link capabilities by reading the AUTOC register.
321 */
322 static s32
323 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
324 ixgbe_link_speed *speed, bool *autoneg)
325 {
326 s32 status = IXGBE_SUCCESS;
327 u32 autoc = 0;
328
329 DEBUGFUNC("ixgbe_get_link_capabilities_82598");
330
331 /*
332 * Determine link capabilities based on the stored value of AUTOC,
333 * which represents EEPROM defaults. If AUTOC value has not been
334 * stored, use the current register value.
335 */
336 if (hw->mac.orig_link_settings_stored)
337 autoc = hw->mac.orig_autoc;
338 else
339 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
340
341 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
342 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
343 *speed = IXGBE_LINK_SPEED_1GB_FULL;
344 *autoneg = false;
345 break;
346
347 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
348 *speed = IXGBE_LINK_SPEED_10GB_FULL;
349 *autoneg = false;
350 break;
351
352 case IXGBE_AUTOC_LMS_1G_AN:
353 *speed = IXGBE_LINK_SPEED_1GB_FULL;
354 *autoneg = true;
355 break;
356
357 case IXGBE_AUTOC_LMS_KX4_AN:
358 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
359 *speed = IXGBE_LINK_SPEED_UNKNOWN;
360 if (autoc & IXGBE_AUTOC_KX4_SUPP)
361 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
362 if (autoc & IXGBE_AUTOC_KX_SUPP)
363 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
364 *autoneg = true;
365 break;
366
367 default:
368 status = IXGBE_ERR_LINK_SETUP;
369 break;
370 }
371
372 return (status);
373 }
374
375 /*
376 * ixgbe_get_media_type_82598 - Determines media type
377 * @hw: pointer to hardware structure
378 *
379 * Returns the media type (fiber, copper, backplane)
380 */
381 static enum ixgbe_media_type
382 ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
383 {
384 enum ixgbe_media_type media_type;
385
386 DEBUGFUNC("ixgbe_get_media_type_82598");
387
388 /* Detect if there is a copper PHY attached. */
389 if (hw->phy.type == ixgbe_phy_cu_unknown ||
390 hw->phy.type == ixgbe_phy_tn ||
391 hw->phy.type == ixgbe_phy_aq) {
392 media_type = ixgbe_media_type_copper;
393 goto out;
394 }
395
396 /* Media type for I82598 is based on device ID */
397 switch (hw->device_id) {
398 case IXGBE_DEV_ID_82598:
399 case IXGBE_DEV_ID_82598_BX:
400 /* Default device ID is mezzanine card KX/KX4 */
401 media_type = ixgbe_media_type_backplane;
402 break;
403 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
404 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
405 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
406 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
407 case IXGBE_DEV_ID_82598EB_XF_LR:
408 case IXGBE_DEV_ID_82598EB_SFP_LOM:
409 media_type = ixgbe_media_type_fiber;
410 break;
411 case IXGBE_DEV_ID_82598EB_CX4:
412 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
413 media_type = ixgbe_media_type_cx4;
414 break;
415 case IXGBE_DEV_ID_82598AT:
416 case IXGBE_DEV_ID_82598AT2:
417 media_type = ixgbe_media_type_copper;
418 break;
419 default:
420 media_type = ixgbe_media_type_unknown;
421 break;
422 }
423 out:
424 return (media_type);
425 }
426
427 /*
428 * ixgbe_fc_enable_82598 - Enable flow control
429 * @hw: pointer to hardware structure
430 * @packetbuf_num: packet buffer number (0-7)
431 *
432 * Enable flow control according to the current settings.
433 */
434 s32
435 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
436 {
437 s32 ret_val = IXGBE_SUCCESS;
438 u32 fctrl_reg;
439 u32 rmcs_reg;
440 u32 reg;
441 u32 link_speed = 0;
442 bool link_up;
443
444 DEBUGFUNC("ixgbe_fc_enable_82598");
445
446 /*
447 	 * On 82598, having Rx flow control on causes resets while doing 1G,
448 	 * so if it's on, turn it off once we know link_speed. For
449 	 * more details see the 82598 Specification Update.
450 */
451 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
452 if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
453 switch (hw->fc.requested_mode) {
454 case ixgbe_fc_full:
455 hw->fc.requested_mode = ixgbe_fc_tx_pause;
456 break;
457 case ixgbe_fc_rx_pause:
458 hw->fc.requested_mode = ixgbe_fc_none;
459 break;
460 default:
461 /* no change */
462 break;
463 }
464 }
465
466 /* Negotiate the fc mode to use */
467 ret_val = ixgbe_fc_autoneg(hw);
468 if (ret_val == IXGBE_ERR_FLOW_CONTROL)
469 goto out;
470
471 /* Disable any previous flow control settings */
472 fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
473 fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
474
475 rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
476 rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
477
478 /*
479 * The possible values of fc.current_mode are:
480 * 0: Flow control is completely disabled
481 * 1: Rx flow control is enabled (we can receive pause frames,
482 * but not send pause frames).
483 * 2: Tx flow control is enabled (we can send pause frames but
484 * we do not support receiving pause frames).
485 * 3: Both Rx and Tx flow control (symmetric) are enabled.
486 * other: Invalid.
487 */
488 switch (hw->fc.current_mode) {
489 case ixgbe_fc_none:
490 /*
491 * Flow control is disabled by software override or autoneg.
492 * The code below will actually disable it in the HW.
493 */
494 break;
495 case ixgbe_fc_rx_pause:
496 /*
497 * Rx Flow control is enabled and Tx Flow control is
498 * disabled by software override. Since there really
499 * isn't a way to advertise that we are capable of RX
500 * Pause ONLY, we will advertise that we support both
501 * symmetric and asymmetric Rx PAUSE. Later, we will
502 * disable the adapter's ability to send PAUSE frames.
503 */
504 fctrl_reg |= IXGBE_FCTRL_RFCE;
505 break;
506 case ixgbe_fc_tx_pause:
507 /*
508 * Tx Flow control is enabled, and Rx Flow control is
509 * disabled by software override.
510 */
511 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
512 break;
513 case ixgbe_fc_full:
514 /* Flow control (both Rx and Tx) is enabled by SW override. */
515 fctrl_reg |= IXGBE_FCTRL_RFCE;
516 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
517 break;
518 default:
519 DEBUGOUT("Flow control param set incorrectly\n");
520 ret_val = IXGBE_ERR_CONFIG;
521 goto out;
522 }
523
524 /* Set 802.3x based flow control settings. */
525 fctrl_reg |= IXGBE_FCTRL_DPF;
526 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
527 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
528
529 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
530 if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
531 if (hw->fc.send_xon) {
532 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
533 (hw->fc.low_water | IXGBE_FCRTL_XONE));
534 } else {
535 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
536 hw->fc.low_water);
537 }
538
539 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
540 (hw->fc.high_water | IXGBE_FCRTH_FCEN));
541 }
542
543 /* Configure pause time (2 TCs per register) */
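	/* e.g. packet buffers 4 and 5 share FCTTV(2); odd-numbered buffers occupy bits 31:16 */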
544 reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
545 if ((packetbuf_num & 1) == 0)
546 reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
547 else
548 reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
549 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
550
551 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
552
553 out:
554 return (ret_val);
555 }
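
/*
 * Illustrative sketch (not part of the upstream driver): the hw->fc
 * fields consumed above are expected to be filled in by the caller
 * before fc_enable is invoked.  The watermark and pause-time values
 * below are hypothetical placeholders, not recommended defaults.
 */
static s32
example_82598_fc_setup(struct ixgbe_hw *hw)
{
	hw->fc.requested_mode = ixgbe_fc_full;	/* symmetric pause */
	hw->fc.pause_time = 0x680;		/* pause quanta, example only */
	hw->fc.send_xon = true;
	hw->fc.low_water = 0x20000;		/* hypothetical low watermark */
	hw->fc.high_water = 0x30000;		/* hypothetical high watermark */

	/* Apply the settings to packet buffer 0 */
	return (ixgbe_fc_enable_82598(hw, 0));
}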
556
557 /*
558 * ixgbe_start_mac_link_82598 - Configures MAC link settings
559 * @hw: pointer to hardware structure
560 *
561 * Configures link settings based on values in the ixgbe_hw struct.
562 * Restarts the link. Performs autonegotiation if needed.
563 */
564 static s32
565 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, bool autoneg_wait_to_complete)
566 {
567 u32 autoc_reg;
568 u32 links_reg;
569 u32 i;
570 s32 status = IXGBE_SUCCESS;
571
572 DEBUGFUNC("ixgbe_start_mac_link_82598");
573
574 /* Restart link */
575 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
576 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
577 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
578
579 /* Only poll for autoneg to complete if specified to do so */
580 if (autoneg_wait_to_complete) {
581 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
582 IXGBE_AUTOC_LMS_KX4_AN ||
583 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
584 IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
585 links_reg = 0; /* Just in case Autoneg time = 0 */
586 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
587 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
588 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
589 break;
590 msec_delay(100);
591 }
592 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
593 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
594 DEBUGOUT("Autonegotiation did not complete.\n");
595 }
596 }
597 }
598
599 	/* Add delay to filter out noise during initial link setup */
600 msec_delay(50);
601
602 return (status);
603 }
604
605 /*
606 * ixgbe_check_mac_link_82598 - Get link/speed status
607 * @hw: pointer to hardware structure
608 * @speed: pointer to link speed
609  * @link_up: true if link is up, false otherwise
610 * @link_up_wait_to_complete: bool used to wait for link up or not
611 *
612 * Reads the links register to determine if link is up and the current speed
613 */
614 static s32
615 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
616 bool *link_up, bool link_up_wait_to_complete)
617 {
618 u32 links_reg;
619 u32 i;
620 u16 link_reg, adapt_comp_reg;
621
622 DEBUGFUNC("ixgbe_check_mac_link_82598");
623
624 /*
625 * SERDES PHY requires us to read link status from undocumented
626 * register 0xC79F. Bit 0 set indicates link is up/ready; clear
627 	 * indicates link down. 0xC00C is read to check that the XAUI lanes
628 * are active. Bit 0 clear indicates active; set indicates inactive.
629 */
630 if (hw->phy.type == ixgbe_phy_nl) {
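		/*
		 * The link bit in 0xC79F appears to latch, so the register is
		 * read twice and the second (current) value is used.
		 */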
631 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
632 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
633 hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
634 &adapt_comp_reg);
635 if (link_up_wait_to_complete) {
636 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
637 if ((link_reg & 1) &&
638 ((adapt_comp_reg & 1) == 0)) {
639 *link_up = true;
640 break;
641 } else {
642 *link_up = false;
643 }
644 msec_delay(100);
645 hw->phy.ops.read_reg(hw, 0xC79F,
646 IXGBE_TWINAX_DEV, &link_reg);
647 hw->phy.ops.read_reg(hw, 0xC00C,
648 IXGBE_TWINAX_DEV, &adapt_comp_reg);
649 }
650 } else {
651 if ((link_reg & 1) &&
652 ((adapt_comp_reg & 1) == 0))
653 *link_up = true;
654 else
655 *link_up = false;
656 }
657
658 if (*link_up == false)
659 goto out;
660 }
661
662 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
663 if (link_up_wait_to_complete) {
664 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
665 if (links_reg & IXGBE_LINKS_UP) {
666 *link_up = true;
667 break;
668 } else {
669 *link_up = false;
670 }
671 msec_delay(100);
672 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
673 }
674 } else {
675 if (links_reg & IXGBE_LINKS_UP)
676 *link_up = true;
677 else
678 *link_up = false;
679 }
680
681 if (links_reg & IXGBE_LINKS_SPEED)
682 *speed = IXGBE_LINK_SPEED_10GB_FULL;
683 else
684 *speed = IXGBE_LINK_SPEED_1GB_FULL;
685
686 if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) &&
687 (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
688 *link_up = false;
689
690 /* if link is down, zero out the current_mode */
691 if (*link_up == false) {
692 hw->fc.current_mode = ixgbe_fc_none;
693 hw->fc.fc_was_autonegged = false;
694 }
695
696 out:
697 return (IXGBE_SUCCESS);
698 }
699
700 /*
701 * ixgbe_setup_mac_link_82598 - Set MAC link speed
702 * @hw: pointer to hardware structure
703 * @speed: new link speed
704 * @autoneg: true if autonegotiation enabled
705 * @autoneg_wait_to_complete: true when waiting for completion is needed
706 *
707 * Set the link speed in the AUTOC register and restarts link.
708 */
709 static s32
710 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
711 ixgbe_link_speed speed, bool autoneg,
712 bool autoneg_wait_to_complete)
713 {
714 s32 status = IXGBE_SUCCESS;
715 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
716 u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
717 u32 autoc = curr_autoc;
718 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
719
720 DEBUGFUNC("ixgbe_setup_mac_link_82598");
721
722 /* Check to see if speed passed in is supported. */
723 (void) ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
724 speed &= link_capabilities;
725
726 if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
727 status = IXGBE_ERR_LINK_SETUP;
728 } else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
729 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
730 /* Set KX4/KX support according to speed requested */
731 autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
732 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
733 autoc |= IXGBE_AUTOC_KX4_SUPP;
734 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
735 autoc |= IXGBE_AUTOC_KX_SUPP;
736 if (autoc != curr_autoc)
737 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
738 }
739
740 if (status == IXGBE_SUCCESS) {
741 /*
742 * Setup and restart the link based on the new values in
743 		 * ixgbe_hw.  This will write the AUTOC register based on the new
744 * stored values
745 */
746 status = ixgbe_start_mac_link_82598(hw,
747 autoneg_wait_to_complete);
748 }
749
750 return (status);
751 }
752
753 /*
754 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
755 * @hw: pointer to hardware structure
756 * @speed: new link speed
757 * @autoneg: true if autonegotiation enabled
758 * @autoneg_wait_to_complete: true if waiting is needed to complete
759 *
760 * Sets the link speed in the AUTOC register in the MAC and restarts link.
761 */
762 static s32
763 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
764 ixgbe_link_speed speed,
765 bool autoneg,
766 bool autoneg_wait_to_complete)
767 {
768 s32 status;
769
770 DEBUGFUNC("ixgbe_setup_copper_link_82598");
771
772 /* Setup the PHY according to input speed */
773 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
774 autoneg_wait_to_complete);
775
776 /* Set up MAC */
777 (void) ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
778
779 return (status);
780 }
781
782 /*
783 * ixgbe_reset_hw_82598 - Performs hardware reset
784 * @hw: pointer to hardware structure
785 *
786  * Resets the hardware by resetting the transmit and receive units, masking
787  * and clearing all interrupts, performing a PHY reset, and performing a
788  * link (MAC) reset.
789 */
790 static s32
791 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
792 {
793 s32 status = IXGBE_SUCCESS;
794 s32 phy_status = IXGBE_SUCCESS;
795 u32 ctrl;
796 u32 gheccr;
797 u32 i;
798 u32 autoc;
799 u8 analog_val;
800
801 DEBUGFUNC("ixgbe_reset_hw_82598");
802
803 /* Call adapter stop to disable tx/rx and clear interrupts */
804 hw->mac.ops.stop_adapter(hw);
805
806 /*
807 * Power up the Atlas Tx lanes if they are currently powered down.
808 * Atlas Tx lanes are powered down for MAC loopback tests, but
809 * they are not automatically restored on reset.
810 */
811 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
812 if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
813 /* Enable Tx Atlas so packets can be transmitted again */
814 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
815 &analog_val);
816 analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
817 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
818 analog_val);
819
820 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
821 &analog_val);
822 analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
823 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
824 analog_val);
825
826 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
827 &analog_val);
828 analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
829 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
830 analog_val);
831
832 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
833 &analog_val);
834 analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
835 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
836 analog_val);
837 }
838
839 /* Reset PHY */
840 if (hw->phy.reset_disable == false) {
841 /* PHY ops must be identified and initialized prior to reset */
842
843 /* Init PHY and function pointers, perform SFP setup */
844 phy_status = hw->phy.ops.init(hw);
845 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
846 goto reset_hw_out;
847 else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
848 goto no_phy_reset;
849
850 hw->phy.ops.reset(hw);
851 }
852
853 no_phy_reset:
854 /*
855 	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
856 * access and verify no pending requests before reset
857 */
858 (void) ixgbe_disable_pcie_master(hw);
859
860 mac_reset_top:
861
862 /*
863 * Issue global reset to the MAC. This needs to be a SW reset.
864 	 * If link reset is used, it might reset the MAC while manageability
	 * (mng) firmware is using it.
865 */
866 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
867 IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
868 IXGBE_WRITE_FLUSH(hw);
869
870 /* Poll for reset bit to self-clear indicating reset is complete */
871 for (i = 0; i < 10; i++) {
872 usec_delay(1);
873 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
874 if (!(ctrl & IXGBE_CTRL_RST))
875 break;
876 }
877 if (ctrl & IXGBE_CTRL_RST) {
878 status = IXGBE_ERR_RESET_FAILED;
879 DEBUGOUT("Reset polling failed to complete.\n");
880 }
881
882 /*
883 * Double resets are required for recovery from certain error
884 * conditions. Between resets, it is necessary to stall to allow time
885 * for any pending HW events to complete. We use 1usec since that is
886 * what is needed for ixgbe_disable_pcie_master(). The second reset
887 * then clears out any effects of those events.
888 */
889 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
890 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
891 usec_delay(1);
892 goto mac_reset_top;
893 }
894 msec_delay(50);
895
896 gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
897 gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
898 IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
899
900 /*
901 * Store the original AUTOC value if it has not been
902 * stored off yet. Otherwise restore the stored original
903 	 * AUTOC value, since the reset operation sets it back to defaults.
904 */
905 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
906 if (hw->mac.orig_link_settings_stored == false) {
907 hw->mac.orig_autoc = autoc;
908 hw->mac.orig_link_settings_stored = true;
909 } else if (autoc != hw->mac.orig_autoc) {
910 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
911 }
912
913 /* Store the permanent mac address */
914 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
915
916 /*
917 * Store MAC address from RAR0, clear receive address registers, and
918 * clear the multicast table
919 */
920 hw->mac.ops.init_rx_addrs(hw);
921
922 reset_hw_out:
923 if (phy_status != IXGBE_SUCCESS)
924 status = phy_status;
925
926 return (status);
927 }
928
929 /*
930 * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
931 * @hw: pointer to hardware struct
932 * @rar: receive address register index to associate with a VMDq index
933 * @vmdq: VMDq set index
934 */
935 s32
936 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
937 {
938 u32 rar_high;
939
940 DEBUGFUNC("ixgbe_set_vmdq_82598");
941
942 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
943 rar_high &= ~IXGBE_RAH_VIND_MASK;
944 rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
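	/* the pool index replaces only the VIND field; the station address bits in RAH are untouched */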
945 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
946 return (IXGBE_SUCCESS);
947 }
948
949 /*
950 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
951 * @hw: pointer to hardware struct
952 * @rar: receive address register index to associate with a VMDq index
953 * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
954 */
955 static s32
956 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
957 {
958 u32 rar_high;
959 u32 rar_entries = hw->mac.num_rar_entries;
960
961 UNREFERENCED_PARAMETER(vmdq);
962
963 if (rar < rar_entries) {
964 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
965 if (rar_high & IXGBE_RAH_VIND_MASK) {
966 rar_high &= ~IXGBE_RAH_VIND_MASK;
967 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
968 }
969 } else {
970 DEBUGOUT1("RAR index %d is out of range.\n", rar);
971 }
972
973 return (IXGBE_SUCCESS);
974 }
975
976 /*
977 * ixgbe_set_vfta_82598 - Set VLAN filter table
978 * @hw: pointer to hardware structure
979 * @vlan: VLAN id to write to VLAN filter
980 * @vind: VMDq output index that maps queue to VLAN id in VFTA
981 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
982 *
983 * Turn on/off specified VLAN in the VLAN filter table.
984 */
985 s32
986 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
987 {
988 u32 regindex;
989 u32 bitindex;
990 u32 bits;
991 u32 vftabyte;
992
993 DEBUGFUNC("ixgbe_set_vfta_82598");
994
995 if (vlan > 4095)
996 return (IXGBE_ERR_PARAM);
997
998 /* Determine 32-bit word position in array */
999 regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
1000
1001 /* Determine the location of the (VMD) queue index */
1002 vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
1003 bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
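	/*
	 * Worked example: for vlan = 100, regindex = 3, vftabyte = 0 and
	 * bitindex = 16, so the VMDq pool nibble lives in bits 19:16 of
	 * VFTAVIND(0, 3); further below, the VLAN-enable bit is bit 4
	 * (100 & 0x1F) of VFTA(3).
	 */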
1004
1005 /* Set the nibble for VMD queue index */
1006 bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
1007 bits &= (~(0x0F << bitindex));
1008 bits |= (vind << bitindex);
1009 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
1010
1011 /* Determine the location of the bit for this VLAN id */
1012 bitindex = vlan & 0x1F; /* lower five bits */
1013
1014 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1015 if (vlan_on)
1016 /* Turn on this VLAN id */
1017 bits |= (1 << bitindex);
1018 else
1019 /* Turn off this VLAN id */
1020 bits &= ~(1 << bitindex);
1021 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1022
1023 return (IXGBE_SUCCESS);
1024 }
1025
1026 /*
1027 * ixgbe_clear_vfta_82598 - Clear VLAN filter table
1028 * @hw: pointer to hardware structure
1029 *
1030  * Clears the VLAN filter table, and the VMDq index associated with the filter
1031 */
1032 static s32
1033 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
1034 {
1035 u32 offset;
1036 u32 vlanbyte;
1037
1038 DEBUGFUNC("ixgbe_clear_vfta_82598");
1039
1040 for (offset = 0; offset < hw->mac.vft_size; offset++)
1041 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
1042
1043 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
1044 for (offset = 0; offset < hw->mac.vft_size; offset++)
1045 IXGBE_WRITE_REG(hw,
1046 IXGBE_VFTAVIND(vlanbyte, offset), 0);
1047
1048 return (IXGBE_SUCCESS);
1049 }
1050
1051 /*
1052 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
1053 * @hw: pointer to hardware structure
1054 * @reg: analog register to read
1055 * @val: read value
1056 *
1057  * Performs a read operation on the specified Atlas analog register.
1058 */
1059 s32
1060 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
1061 {
1062 u32 atlas_ctl;
1063
1064 DEBUGFUNC("ixgbe_read_analog_reg8_82598");
1065
1066 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1067 IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1068 IXGBE_WRITE_FLUSH(hw);
1069 usec_delay(10);
1070 atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
1071 *val = (u8)atlas_ctl;
1072
1073 return (IXGBE_SUCCESS);
1074 }
1075
1076 /*
1077 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
1078 * @hw: pointer to hardware structure
1079 * @reg: atlas register to write
1080 * @val: value to write
1081 *
1082  * Performs a write operation to the specified Atlas analog register.
1083 */
1084 s32
1085 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
1086 {
1087 u32 atlas_ctl;
1088
1089 DEBUGFUNC("ixgbe_write_analog_reg8_82598");
1090
1091 atlas_ctl = (reg << 8) | val;
1092 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1093 IXGBE_WRITE_FLUSH(hw);
1094 usec_delay(10);
1095
1096 return (IXGBE_SUCCESS);
1097 }
1098
1099 /*
1100 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1101 * @hw: pointer to hardware structure
1102 * @byte_offset: EEPROM byte offset to read
1103 * @eeprom_data: value read
1104 *
1105  * Performs an 8-bit read from the SFP module's EEPROM over the I2C interface.
1106 */
1107 s32
1108 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1109 u8 *eeprom_data)
1110 {
1111 s32 status = IXGBE_SUCCESS;
1112 u16 sfp_addr = 0;
1113 u16 sfp_data = 0;
1114 u16 sfp_stat = 0;
1115 u32 i;
1116
1117 DEBUGFUNC("ixgbe_read_i2c_eeprom_82598");
1118
1119 if (hw->phy.type == ixgbe_phy_nl) {
1120 /*
1121 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
1122 * 0xC30D. These registers are used to talk to the SFP+
1123 * module's EEPROM through the SDA/SCL (I2C) interface.
1124 */
1125 sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
1126 sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
1127 hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1128 IXGBE_MDIO_PMA_PMD_DEV_TYPE, sfp_addr);
1129
1130 /* Poll status */
1131 for (i = 0; i < 100; i++) {
1132 hw->phy.ops.read_reg(hw,
1133 IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1134 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_stat);
1135 sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1136 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1137 break;
1138 msec_delay(10);
1139 }
1140
1141 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
1142 DEBUGOUT("EEPROM read did not pass.\n");
1143 status = IXGBE_ERR_SFP_NOT_PRESENT;
1144 goto out;
1145 }
1146
1147 /* Read data */
1148 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1149 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
1150
1151 *eeprom_data = (u8)(sfp_data >> 8);
1152 } else {
1153 status = IXGBE_ERR_PHY;
1154 goto out;
1155 }
1156
1157 out:
1158 return (status);
1159 }
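
/*
 * Illustrative usage (not part of the driver): SFP identification code
 * typically pulls individual EEPROM bytes through the routine above, e.g.
 *
 *	u8 id;
 *
 *	if (ixgbe_read_i2c_eeprom_82598(hw, 0, &id) == IXGBE_SUCCESS)
 *		DEBUGOUT1("SFP identifier byte: 0x%x\n", id);
 *
 * (offset 0 is shown only as an example byte offset.)
 */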
1160
1161 /*
1162 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
1163 * @hw: pointer to hardware structure
1164 *
1165 * Determines physical layer capabilities of the current configuration.
1166 */
1167 u32
1168 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1169 {
1170 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1171 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1172 u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1173 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1174 u16 ext_ability = 0;
1175
1176 DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");
1177
1178 hw->phy.ops.identify(hw);
1179
1180 /*
1181 * Copper PHY must be checked before AUTOC LMS to determine correct
1182 * physical layer because 10GBase-T PHYs use LMS = KX4/KX
1183 */
1184 if (hw->phy.type == ixgbe_phy_tn ||
1185 hw->phy.type == ixgbe_phy_cu_unknown) {
1186 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1187 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1188 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1189 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1190 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1191 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1192 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1193 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1194 goto out;
1195 }
1196
1197 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1198 case IXGBE_AUTOC_LMS_1G_AN:
1199 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1200 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
1201 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1202 else
1203 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1204 break;
1205 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1206 if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
1207 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1208 else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
1209 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1210 else /* XAUI */
1211 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1212 break;
1213 case IXGBE_AUTOC_LMS_KX4_AN:
1214 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
1215 if (autoc & IXGBE_AUTOC_KX_SUPP)
1216 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1217 if (autoc & IXGBE_AUTOC_KX4_SUPP)
1218 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1219 break;
1220 default:
1221 break;
1222 }
1223
1224 if (hw->phy.type == ixgbe_phy_nl) {
1225 hw->phy.ops.identify_sfp(hw);
1226
1227 switch (hw->phy.sfp_type) {
1228 case ixgbe_sfp_type_da_cu:
1229 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1230 break;
1231 case ixgbe_sfp_type_sr:
1232 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1233 break;
1234 case ixgbe_sfp_type_lr:
1235 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1236 break;
1237 default:
1238 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1239 break;
1240 }
1241 }
1242
1243 switch (hw->device_id) {
1244 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
1245 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1246 break;
1247 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
1248 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
1249 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
1250 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1251 break;
1252 case IXGBE_DEV_ID_82598EB_XF_LR:
1253 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1254 break;
1255 default:
1256 break;
1257 }
1258
1259 out:
1260 return (physical_layer);
1261 }
1262
1263 /*
1264 * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1265 * port devices.
1266 * @hw: pointer to the HW structure
1267 *
1268 * Calls common function and corrects issue with some single port devices
1269 * that enable LAN1 but not LAN0.
1270 */
1271 void
1272 ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1273 {
1274 struct ixgbe_bus_info *bus = &hw->bus;
1275 u16 pci_gen, pci_ctrl2;
1276
1277 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
1278
1279 ixgbe_set_lan_id_multi_port_pcie(hw);
1280
1281 /* check if LAN0 is disabled */
1282 hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1283 if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1284 hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1285
1286 /* if LAN0 is completely disabled force function to 0 */
1287 if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1288 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1289 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1290 bus->func = 0;
1291 }
1292 }
1293 }
1294
1295 /*
1296 * ixgbe_validate_link_ready - Function looks for phy link
1297 * @hw: pointer to hardware structure
1298 *
1299 * Function indicates success when phy link is available. If phy is not ready
1300  * within 5 seconds of the MAC indicating link, the function returns an error.
1301 */
1302 static s32
1303 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
1304 {
1305 u32 timeout;
1306 u16 an_reg;
1307
1308 if (hw->device_id != IXGBE_DEV_ID_82598AT2)
1309 return (IXGBE_SUCCESS);
1310
1311 for (timeout = 0;
1312 timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
1313 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
1314 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
1315
1316 if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
1317 (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
1318 break;
1319
1320 msec_delay(100);
1321 }
1322
1323 if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
1324 DEBUGOUT("Link was indicated but link is down\n");
1325 return (IXGBE_ERR_LINK_SETUP);
1326 }
1327
1328 return (IXGBE_SUCCESS);
1329 }
1330
1331 /*
1332 * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
1333 * @hw: pointer to hardware structure
1334 */
1335 void
1336 ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
1337 {
1338 u32 regval;
1339 u32 i;
1340
1341 DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
1342
1343 /* Enable relaxed ordering */
1344 for (i = 0; ((i < hw->mac.max_tx_queues) &&
1345 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1346 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1347 regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1348 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
1349 }
1350
1351 for (i = 0; ((i < hw->mac.max_rx_queues) &&
1352 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1353 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
1354 regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
1355 IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
1356 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
1357 }
1358 }
1359