1 /* $NetBSD: ixgbe_x550.c,v 1.28 2023/10/06 14:48:08 msaitoh Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2020, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_x550.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: ixgbe_x550.c,v 1.28 2023/10/06 14:48:08 msaitoh Exp $");
39
40 #include "ixgbe_x550.h"
41 #include "ixgbe_x540.h"
42 #include "ixgbe_type.h"
43 #include "ixgbe_api.h"
44 #include "ixgbe_common.h"
45 #include "ixgbe_phy.h"
46 #include <dev/mii/mii.h>
47
48 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed);
49 static s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
50 ixgbe_link_speed speed,
51 bool autoneg_wait_to_complete);
52 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
53 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
54 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw);
55
56 /**
57 * ixgbe_init_ops_X550 - Inits func ptrs and MAC type
58 * @hw: pointer to hardware structure
59 *
60 * Initialize the function pointers and assign the MAC type for X550.
61 * Does not touch the hardware.
62 **/
63 s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
64 {
65 struct ixgbe_mac_info *mac = &hw->mac;
66 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
67 s32 ret_val;
68
69 DEBUGFUNC("ixgbe_init_ops_X550");
70
71 ret_val = ixgbe_init_ops_X540(hw);
72 mac->ops.dmac_config = ixgbe_dmac_config_X550;
73 mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
74 mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
75 mac->ops.setup_eee = NULL;
76 mac->ops.set_source_address_pruning =
77 ixgbe_set_source_address_pruning_X550;
78 mac->ops.set_ethertype_anti_spoofing =
79 ixgbe_set_ethertype_anti_spoofing_X550;
80
81 mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
82 eeprom->ops.init_params = ixgbe_init_eeprom_params_X550;
83 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
84 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
85 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
86 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
87 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
88 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
89 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
90
91 mac->ops.disable_mdd = ixgbe_disable_mdd_X550;
92 mac->ops.enable_mdd = ixgbe_enable_mdd_X550;
93 mac->ops.mdd_event = ixgbe_mdd_event_X550;
94 mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
95 mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_X550;
96 mac->ops.disable_rx = ixgbe_disable_rx_x550;
97 /* Manageability interface */
98 mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550;
99 switch (hw->device_id) {
100 case IXGBE_DEV_ID_X550EM_X_1G_T:
101 hw->mac.ops.led_on = NULL;
102 hw->mac.ops.led_off = NULL;
103 break;
104 case IXGBE_DEV_ID_X550EM_X_10G_T:
105 case IXGBE_DEV_ID_X550EM_A_10G_T:
106 hw->mac.ops.led_on = ixgbe_led_on_t_X550em;
107 hw->mac.ops.led_off = ixgbe_led_off_t_X550em;
108 break;
109 default:
110 break;
111 }
112 return ret_val;
113 }
114
115 /**
116 * ixgbe_read_cs4227 - Read CS4227 register
117 * @hw: pointer to hardware structure
118 * @reg: register number to read
119 * @value: pointer to receive value read
120 *
121 * Returns status code
122 **/
123 static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
124 {
125 return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
126 }
127
128 /**
129 * ixgbe_write_cs4227 - Write CS4227 register
130 * @hw: pointer to hardware structure
131 * @reg: register number to write
132 * @value: value to write to register
133 *
134 * Returns status code
135 **/
136 static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
137 {
138 return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
139 }
140
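/*
 * Both helpers above go through the unlocked link.ops accessors, so the
 * caller must already hold the appropriate SWFW semaphore (see
 * ixgbe_check_cs4227() below).  A minimal read-modify-write sketch, assuming
 * the semaphore is held; the bit value is purely illustrative:
 *
 *   u16 val;
 *
 *   if (ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &val) == IXGBE_SUCCESS) {
 *       val |= 0x0001;                      (hypothetical bit)
 *       (void)ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, val);
 *   }
 */
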
141 /**
142 * ixgbe_read_pe - Read register from port expander
143 * @hw: pointer to hardware structure
144 * @reg: register number to read
145 * @value: pointer to receive read value
146 *
147 * Returns status code
148 **/
149 static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
150 {
151 s32 status;
152
153 status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
154 if (status != IXGBE_SUCCESS)
155 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
156 "port expander access failed with %d\n", status);
157 return status;
158 }
159
160 /**
161 * ixgbe_write_pe - Write register to port expander
162 * @hw: pointer to hardware structure
163 * @reg: register number to write
164 * @value: value to write
165 *
166 * Returns status code
167 **/
168 static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
169 {
170 s32 status;
171
172 status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
173 if (status != IXGBE_SUCCESS)
174 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
175 "port expander access failed with %d\n", status);
176 return status;
177 }
178
179 /**
180 * ixgbe_reset_cs4227 - Reset CS4227 using port expander
181 * @hw: pointer to hardware structure
182 *
183 * This function assumes that the caller has acquired the proper semaphore.
184 * Returns error code
185 **/
186 static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
187 {
188 s32 status;
189 u32 retry;
190 u16 value;
191 u8 reg;
192
193 /* Trigger hard reset. */
194 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
195 if (status != IXGBE_SUCCESS)
196 return status;
197 reg |= IXGBE_PE_BIT1;
198 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
199 if (status != IXGBE_SUCCESS)
200 return status;
201
202 status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg);
203 if (status != IXGBE_SUCCESS)
204 return status;
205 reg &= ~IXGBE_PE_BIT1;
206 status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
207 if (status != IXGBE_SUCCESS)
208 return status;
209
210 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
211 if (status != IXGBE_SUCCESS)
212 return status;
213 reg &= ~IXGBE_PE_BIT1;
214 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
215 if (status != IXGBE_SUCCESS)
216 return status;
217
218 usec_delay(IXGBE_CS4227_RESET_HOLD);
219
220 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
221 if (status != IXGBE_SUCCESS)
222 return status;
223 reg |= IXGBE_PE_BIT1;
224 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
225 if (status != IXGBE_SUCCESS)
226 return status;
227
228 /* Wait for the reset to complete. */
229 msec_delay(IXGBE_CS4227_RESET_DELAY);
230 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
231 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS,
232 &value);
233 if (status == IXGBE_SUCCESS &&
234 value == IXGBE_CS4227_EEPROM_LOAD_OK)
235 break;
236 msec_delay(IXGBE_CS4227_CHECK_DELAY);
237 }
238 if (retry == IXGBE_CS4227_RETRIES) {
239 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
240 "CS4227 reset did not complete.");
241 return IXGBE_ERR_PHY;
242 }
243
244 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
245 if (status != IXGBE_SUCCESS ||
246 !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
247 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
248 "CS4227 EEPROM did not load successfully.");
249 return IXGBE_ERR_PHY;
250 }
251
252 return IXGBE_SUCCESS;
253 }
254
255 /**
256 * ixgbe_check_cs4227 - Check CS4227 and reset as needed
257 * @hw: pointer to hardware structure
258 **/
259 static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
260 {
261 s32 status = IXGBE_SUCCESS;
262 u32 swfw_mask = hw->phy.phy_semaphore_mask;
263 u16 value = 0;
264 u8 retry;
265
266 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
267 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
268 if (status != IXGBE_SUCCESS) {
269 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
270 "semaphore failed with %d", status);
271 msec_delay(IXGBE_CS4227_CHECK_DELAY);
272 continue;
273 }
274
275 /* Get status of reset flow. */
276 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
277
278 if (status == IXGBE_SUCCESS &&
279 value == IXGBE_CS4227_RESET_COMPLETE)
280 goto out;
281
282 if (status != IXGBE_SUCCESS ||
283 value != IXGBE_CS4227_RESET_PENDING)
284 break;
285
286 /* Reset is pending. Wait and check again. */
287 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
288 msec_delay(IXGBE_CS4227_CHECK_DELAY);
289 }
290
291 /* If still pending, assume other instance failed. */
292 if (retry == IXGBE_CS4227_RETRIES) {
293 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
294 if (status != IXGBE_SUCCESS) {
295 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
296 "semaphore failed with %d", status);
297 return;
298 }
299 }
300
301 /* Reset the CS4227. */
302 status = ixgbe_reset_cs4227(hw);
303 if (status != IXGBE_SUCCESS) {
304 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
305 "CS4227 reset failed: %d", status);
306 goto out;
307 }
308
309 /* Reset takes so long, temporarily release semaphore in case the
310 * other driver instance is waiting for the reset indication.
311 */
312 ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
313 IXGBE_CS4227_RESET_PENDING);
314 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
315 msec_delay(10);
316 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
317 if (status != IXGBE_SUCCESS) {
318 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
319 "semaphore failed with %d", status);
320 return;
321 }
322
323 /* Record completion for next time. */
324 status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
325 IXGBE_CS4227_RESET_COMPLETE);
326
327 out:
328 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
329 msec_delay(hw->eeprom.semaphore_delay);
330 }
331
332 /**
333 * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
334 * @hw: pointer to hardware structure
335 **/
336 static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
337 {
338 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
339
340 if (hw->bus.lan_id) {
341 esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
342 esdp |= IXGBE_ESDP_SDP1_DIR;
343 }
344 esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
345 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
346 IXGBE_WRITE_FLUSH(hw);
347 }
348
349 /**
350 * ixgbe_read_phy_reg_mdi_22 - Read from a clause 22 PHY register without lock
351 * @hw: pointer to hardware structure
352 * @reg_addr: 32 bit address of PHY register to read
353 * @dev_type: always unused
354 * @phy_data: Pointer to read data from PHY register
355 */
356 static s32 ixgbe_read_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
357 u32 dev_type, u16 *phy_data)
358 {
359 u32 i, data, command;
360 UNREFERENCED_1PARAMETER(dev_type);
361
362 /* Setup and write the read command */
363 command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
364 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
365 IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC |
366 IXGBE_MSCA_MDI_COMMAND;
367
368 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
369
370 /* Check every 10 usec to see if the access completed.
371 * The MDI Command bit will clear when the operation is
372 * complete
373 */
374 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
375 usec_delay(10);
376
377 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
378 if (!(command & IXGBE_MSCA_MDI_COMMAND))
379 break;
380 }
381
382 if (command & IXGBE_MSCA_MDI_COMMAND) {
383 ERROR_REPORT1(IXGBE_ERROR_POLLING,
384 "PHY read command did not complete.\n");
385 return IXGBE_ERR_PHY;
386 }
387
388 /* Read operation is complete. Get the data from MSRWD */
389 data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
390 data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
391 *phy_data = (u16)data;
392
393 return IXGBE_SUCCESS;
394 }
395
396 /**
397 * ixgbe_write_phy_reg_mdi_22 - Write to a clause 22 PHY register without lock
398 * @hw: pointer to hardware structure
399 * @reg_addr: 32 bit PHY register to write
400 * @dev_type: always unused
401 * @phy_data: Data to write to the PHY register
402 */
403 static s32 ixgbe_write_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
404 u32 dev_type, u16 phy_data)
405 {
406 u32 i, command;
407 UNREFERENCED_1PARAMETER(dev_type);
408
409 /* Put the data in the MDI single read and write data register*/
410 IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
411
412 /* Setup and write the write command */
413 command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
414 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
415 IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
416 IXGBE_MSCA_MDI_COMMAND;
417
418 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
419
420 /* Check every 10 usec to see if the access completed.
421 * The MDI Command bit will clear when the operation is
422 * complete
423 */
424 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
425 usec_delay(10);
426
427 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
428 if (!(command & IXGBE_MSCA_MDI_COMMAND))
429 break;
430 }
431
432 if (command & IXGBE_MSCA_MDI_COMMAND) {
433 ERROR_REPORT1(IXGBE_ERROR_POLLING,
434 "PHY write cmd didn't complete\n");
435 return IXGBE_ERR_PHY;
436 }
437
438 return IXGBE_SUCCESS;
439 }
440
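/*
 * The two accessors above are the unlocked clause-22 MDIO primitives; the
 * caller is expected to hold hw->phy.phy_semaphore_mask around them.  A
 * sketch of a read-modify-write that restarts autonegotiation (MII_BMCR,
 * BMCR_AUTOEN and BMCR_STARTNEG come from <dev/mii/mii.h>, included above;
 * error handling is elided):
 *
 *   u16 bmcr;
 *
 *   if (hw->mac.ops.acquire_swfw_sync(hw, hw->phy.phy_semaphore_mask) ==
 *       IXGBE_SUCCESS) {
 *       ixgbe_read_phy_reg_mdi_22(hw, MII_BMCR, 0, &bmcr);
 *       bmcr |= BMCR_AUTOEN | BMCR_STARTNEG;
 *       ixgbe_write_phy_reg_mdi_22(hw, MII_BMCR, 0, bmcr);
 *       hw->mac.ops.release_swfw_sync(hw, hw->phy.phy_semaphore_mask);
 *   }
 */
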
441 /**
442 * ixgbe_identify_phy_x550em - Get PHY type based on device id
443 * @hw: pointer to hardware structure
444 *
445 * Returns error code
446 */
447 static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
448 {
449 hw->mac.ops.set_lan_id(hw);
450
451 ixgbe_read_mng_if_sel_x550em(hw);
452
453 switch (hw->device_id) {
454 case IXGBE_DEV_ID_X550EM_A_SFP:
455 return ixgbe_identify_sfp_module_X550em(hw);
456 case IXGBE_DEV_ID_X550EM_X_SFP:
457 /* set up for CS4227 usage */
458 ixgbe_setup_mux_ctl(hw);
459 ixgbe_check_cs4227(hw);
460 /* fall through */
461 case IXGBE_DEV_ID_X550EM_A_SFP_N:
462 return ixgbe_identify_sfp_module_X550em(hw);
463 break;
464 case IXGBE_DEV_ID_X550EM_X_KX4:
465 hw->phy.type = ixgbe_phy_x550em_kx4;
466 break;
467 case IXGBE_DEV_ID_X550EM_X_XFI:
468 hw->phy.type = ixgbe_phy_x550em_xfi;
469 break;
470 case IXGBE_DEV_ID_X550EM_X_KR:
471 case IXGBE_DEV_ID_X550EM_A_KR:
472 case IXGBE_DEV_ID_X550EM_A_KR_L:
473 hw->phy.type = ixgbe_phy_x550em_kr;
474 break;
475 case IXGBE_DEV_ID_X550EM_A_10G_T:
476 case IXGBE_DEV_ID_X550EM_X_10G_T:
477 return ixgbe_identify_phy_generic(hw);
478 case IXGBE_DEV_ID_X550EM_X_1G_T:
479 hw->phy.type = ixgbe_phy_ext_1g_t;
480 break;
481 case IXGBE_DEV_ID_X550EM_A_1G_T:
482 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
483 hw->phy.type = ixgbe_phy_fw;
484 if (hw->bus.lan_id)
485 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
486 else
487 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
488 break;
489 default:
490 break;
491 }
492 return IXGBE_SUCCESS;
493 }
494
495 /**
496 * ixgbe_fw_phy_activity - Perform an activity on a PHY
497 * @hw: pointer to hardware structure
498 * @activity: activity to perform
499 * @data: Pointer to 4 32-bit words of data
500 */
501 s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
502 u32 (*data)[FW_PHY_ACT_DATA_COUNT])
503 {
504 union {
505 struct ixgbe_hic_phy_activity_req cmd;
506 struct ixgbe_hic_phy_activity_resp rsp;
507 } hic;
508 u16 retries = FW_PHY_ACT_RETRIES;
509 s32 rc;
510 u16 i;
511
512 do {
513 memset(&hic, 0, sizeof(hic));
514 hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
515 hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
516 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
517 hic.cmd.port_number = hw->bus.lan_id;
518 hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity);
519 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
520 hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]);
521
522 rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
523 sizeof(hic.cmd),
524 IXGBE_HI_COMMAND_TIMEOUT,
525 TRUE);
526 if (rc != IXGBE_SUCCESS)
527 return rc;
528 if (hic.rsp.hdr.cmd_or_resp.ret_status ==
529 FW_CEM_RESP_STATUS_SUCCESS) {
530 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
531 (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]);
532 return IXGBE_SUCCESS;
533 }
534 usec_delay(20);
535 --retries;
536 } while (retries > 0);
537
538 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
539 }
540
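/*
 * Usage sketch: callers hand ixgbe_fw_phy_activity() a four-word buffer that
 * carries request data in and response data out (ixgbe_get_phy_id_fw() below
 * is the real consumer).  For example, to query the PHY information:
 *
 *   u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
 *   u16 speeds;
 *
 *   if (ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info) ==
 *       IXGBE_SUCCESS)
 *       speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
 */
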
541 static const struct {
542 u16 fw_speed;
543 ixgbe_link_speed phy_speed;
544 } ixgbe_fw_map[] = {
545 { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
546 { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
547 { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
548 { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
549 { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
550 { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
551 };
552
553 /**
554 * ixgbe_get_phy_id_fw - Get the phy ID via firmware command
555 * @hw: pointer to hardware structure
556 *
557 * Returns error code
558 */
559 static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
560 {
561 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
562 u16 phy_speeds;
563 u16 phy_id_lo;
564 s32 rc;
565 u16 i;
566
567 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
568 if (rc)
569 return rc;
570
571 hw->phy.speeds_supported = 0;
572 phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
573 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
574 if (phy_speeds & ixgbe_fw_map[i].fw_speed)
575 hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
576 }
577
578 #if 0
579 /*
580 * Don't set autoneg_advertised here so as not to be inconsistent
581 * with the if_media value.
582 */
583 if (!hw->phy.autoneg_advertised)
584 hw->phy.autoneg_advertised = hw->phy.speeds_supported;
585 #endif
586
587 hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
588 phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
589 hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
590 hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
591 if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
592 return IXGBE_ERR_PHY_ADDR_INVALID;
593 return IXGBE_SUCCESS;
594 }
595
596 /**
597 * ixgbe_identify_phy_fw - Get PHY type based on firmware command
598 * @hw: pointer to hardware structure
599 *
600 * Returns error code
601 */
602 static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
603 {
604 if (hw->bus.lan_id)
605 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
606 else
607 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
608
609 hw->phy.type = ixgbe_phy_fw;
610 hw->phy.ops.read_reg = NULL;
611 hw->phy.ops.write_reg = NULL;
612 return ixgbe_get_phy_id_fw(hw);
613 }
614
615 /**
616 * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY
617 * @hw: pointer to hardware structure
618 *
619 * Returns error code
620 */
621 s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
622 {
623 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
624
625 setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
626 return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
627 }
628
629 static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
630 u32 device_type, u16 *phy_data)
631 {
632 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data);
633 return IXGBE_NOT_IMPLEMENTED;
634 }
635
636 static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
637 u32 device_type, u16 phy_data)
638 {
639 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data);
640 return IXGBE_NOT_IMPLEMENTED;
641 }
642
643 /**
644 * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
645 * @hw: pointer to the hardware structure
646 * @addr: I2C bus address to read from
647 * @reg: I2C device register to read from
648 * @val: pointer to location to receive read value
649 *
650 * Returns an error code on error.
651 **/
652 static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
653 u16 reg, u16 *val)
654 {
655 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, TRUE);
656 }
657
658 /**
659 * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation
660 * @hw: pointer to the hardware structure
661 * @addr: I2C bus address to read from
662 * @reg: I2C device register to read from
663 * @val: pointer to location to receive read value
664 *
665 * Returns an error code on error.
666 **/
667 static s32
668 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
669 u16 reg, u16 *val)
670 {
671 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
672 }
673
674 /**
675 * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
676 * @hw: pointer to the hardware structure
677 * @addr: I2C bus address to write to
678 * @reg: I2C device register to write to
679 * @val: value to write
680 *
681 * Returns an error code on error.
682 **/
683 static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
684 u8 addr, u16 reg, u16 val)
685 {
686 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, TRUE);
687 }
688
689 /**
690 * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation
691 * @hw: pointer to the hardware structure
692 * @addr: I2C bus address to write to
693 * @reg: I2C device register to write to
694 * @val: value to write
695 *
696 * Returns an error code on error.
697 **/
698 static s32
699 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
700 u8 addr, u16 reg, u16 val)
701 {
702 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
703 }
704
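/*
 * The four wrappers above differ only in whether the underlying
 * ixgbe_*_i2c_combined_generic_int() call takes the lock itself (TRUE) or
 * assumes the caller already holds it (FALSE).  They are installed as
 * hw->link.ops in ixgbe_init_ops_X550EM_x() below, with link->addr pointing
 * at the CS4227, so a typical locked access is a sketch like:
 *
 *   u16 val;
 *
 *   if (hw->link.ops.read_link(hw, hw->link.addr, IXGBE_CS4227_SCRATCH,
 *       &val) == IXGBE_SUCCESS)
 *       (void)hw->link.ops.write_link(hw, hw->link.addr,
 *           IXGBE_CS4227_SCRATCH, val);
 */
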
705 /**
706 * ixgbe_init_ops_X550EM - Inits func ptrs and MAC type
707 * @hw: pointer to hardware structure
708 *
709 * Initialize the function pointers for MAC type X550EM.
710 * Does not touch the hardware.
711 **/
712 s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
713 {
714 struct ixgbe_mac_info *mac = &hw->mac;
715 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
716 struct ixgbe_phy_info *phy = &hw->phy;
717 s32 ret_val;
718
719 DEBUGFUNC("ixgbe_init_ops_X550EM");
720
721 /* Similar to X550 so start there. */
722 ret_val = ixgbe_init_ops_X550(hw);
723
724 /* Since this function eventually calls
725 * ixgbe_init_ops_X540 by design, we are setting
726 * the pointers to NULL explicitly here to overwrite
727 * the values being set in the x540 function.
728 */
729
730 /* Bypass not supported in x550EM */
731 mac->ops.bypass_rw = NULL;
732 mac->ops.bypass_valid_rd = NULL;
733 mac->ops.bypass_set = NULL;
734 mac->ops.bypass_rd_eep = NULL;
735
736 /* FCOE not supported in x550EM */
737 mac->ops.get_san_mac_addr = NULL;
738 mac->ops.set_san_mac_addr = NULL;
739 mac->ops.get_wwn_prefix = NULL;
740 mac->ops.get_fcoe_boot_status = NULL;
741
742 /* IPsec not supported in x550EM */
743 mac->ops.disable_sec_rx_path = NULL;
744 mac->ops.enable_sec_rx_path = NULL;
745
746 /* AUTOC register is not present in x550EM. */
747 mac->ops.prot_autoc_read = NULL;
748 mac->ops.prot_autoc_write = NULL;
749
750 /* X550EM bus type is internal*/
751 hw->bus.type = ixgbe_bus_type_internal;
752 mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
753
754
755 mac->ops.get_media_type = ixgbe_get_media_type_X550em;
756 mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
757 mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em;
758 mac->ops.reset_hw = ixgbe_reset_hw_X550em;
759 mac->ops.get_supported_physical_layer =
760 ixgbe_get_supported_physical_layer_X550em;
761
762 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
763 mac->ops.setup_fc = ixgbe_setup_fc_generic;
764 else
765 mac->ops.setup_fc = ixgbe_setup_fc_X550em;
766
767 /* PHY */
768 phy->ops.init = ixgbe_init_phy_ops_X550em;
769 switch (hw->device_id) {
770 case IXGBE_DEV_ID_X550EM_A_1G_T:
771 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
772 mac->ops.setup_fc = NULL;
773 phy->ops.identify = ixgbe_identify_phy_fw;
774 phy->ops.set_phy_power = NULL;
775 phy->ops.get_firmware_version = NULL;
776 break;
777 case IXGBE_DEV_ID_X550EM_X_1G_T:
778 mac->ops.setup_fc = NULL;
779 phy->ops.identify = ixgbe_identify_phy_x550em;
780 phy->ops.set_phy_power = NULL;
781 break;
782 default:
783 phy->ops.identify = ixgbe_identify_phy_x550em;
784 }
785
786 if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
787 phy->ops.set_phy_power = NULL;
788
789 /* EEPROM */
790 eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
791 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
792 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
793 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
794 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
795 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
796 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
797 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
798
799 return ret_val;
800 }
801
802 #define IXGBE_DENVERTON_WA 1
803
804 /**
805 * ixgbe_setup_fw_link - Setup firmware-controlled PHYs
806 * @hw: pointer to hardware structure
807 */
808 static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
809 {
810 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
811 s32 rc;
812 #ifdef IXGBE_DENVERTON_WA
813 s32 ret_val;
814 u16 phydata;
815 #endif
816 u16 i;
817
818 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
819 return 0;
820
821 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
822 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
823 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
824 return IXGBE_ERR_INVALID_LINK_SETTINGS;
825 }
826
827 switch (hw->fc.requested_mode) {
828 case ixgbe_fc_full:
829 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
830 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
831 break;
832 case ixgbe_fc_rx_pause:
833 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
834 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
835 break;
836 case ixgbe_fc_tx_pause:
837 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
838 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
839 break;
840 default:
841 break;
842 }
843
844 for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
845 if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
846 setup[0] |= (u32)(ixgbe_fw_map[i].fw_speed);
847 }
848 setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
849
850 if (hw->phy.eee_speeds_advertised)
851 setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;
852
853 #ifdef IXGBE_DENVERTON_WA
854 if ((hw->phy.force_10_100_autonego == false)
855 && ((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_100_FULL)
856 || (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_10_FULL))) {
857 /* Don't use auto-nego for 10/100Mbps */
858 setup[0] &= ~FW_PHY_ACT_SETUP_LINK_AN;
859 setup[0] &= ~FW_PHY_ACT_SETUP_LINK_EEE;
860 setup[0] &= ~(FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX
861 << FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT);
862 }
863 #endif
864
865 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
866 if (rc)
867 return rc;
868
869 #ifdef IXGBE_DENVERTON_WA
870 if (hw->phy.force_10_100_autonego == true)
871 goto out;
872
873 ret_val = ixgbe_read_phy_reg_x550a(hw, MII_BMCR, 0, &phydata);
874 if (ret_val != 0)
875 goto out;
876
877 /*
878 * Broken firmware sets BMCR register incorrectly if
879 * FW_PHY_ACT_SETUP_LINK_AN isn't set.
880 * a) FDX may not be set.
881 * b) BMCR_SPEED1 (bit 6) is always cleared.
882 * +--------+------+-----------+------+---------------------------+
883 * |request | BMCR | BMCR spd  | BMCR |                           |
884 * |        | (HEX)| (in bits) | FDX  |                           |
885 * +--------+------+-----------+------+---------------------------+
886 * | 10M    | 0000 | 10M(00)   |  0   |                           |
887 * | 10M    | 2000 | 100M(01)  |  0   | (I've never observed this)|
888 * | 10M    | 2100 | 100M(01)  |  1   |                           |
889 * | 100M   | 0000 | 10M(00)   |  0   |                           |
890 * | 100M   | 0100 | 10M(00)   |  1   |                           |
891 * +--------+------+-----------+------+---------------------------+
892 */
893 if (((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_100_FULL)
894 && (((phydata & BMCR_FDX) == 0) || (BMCR_SPEED(phydata) == 0)))
895 || ((hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_10_FULL)
896 && (((phydata & BMCR_FDX) == 0)
897 || (BMCR_SPEED(phydata) != BMCR_S10)))) {
898 phydata = BMCR_FDX;
899 switch (hw->phy.autoneg_advertised) {
900 case IXGBE_LINK_SPEED_10_FULL:
901 phydata |= BMCR_S10;
902 break;
903 case IXGBE_LINK_SPEED_100_FULL:
904 phydata |= BMCR_S100;
905 break;
906 case IXGBE_LINK_SPEED_1GB_FULL:
907 panic("%s: 1GB_FULL is set", __func__);
908 break;
909 default:
910 break;
911 }
912 ret_val = ixgbe_write_phy_reg_x550a(hw, MII_BMCR, 0, phydata);
913 if (ret_val != 0)
914 return ret_val;
915 }
916 out:
917 #endif
918 if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
919 return IXGBE_ERR_OVERTEMP;
920 return IXGBE_SUCCESS;
921 }
922
923 /**
924 * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
925 * @hw: pointer to hardware structure
926 *
927 * Called at init time to set up flow control.
928 */
929 static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
930 {
931 if (hw->fc.requested_mode == ixgbe_fc_default)
932 hw->fc.requested_mode = ixgbe_fc_full;
933
934 return ixgbe_setup_fw_link(hw);
935 }
936
937 /**
938 * ixgbe_setup_eee_fw - Enable/disable EEE support
939 * @hw: pointer to the HW structure
940 * @enable_eee: boolean flag to enable EEE
941 *
942 * Enable/disable EEE based on enable_eee flag.
943 * This function controls EEE for firmware-based PHY implementations.
944 */
945 static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee)
946 {
947 if (!!hw->phy.eee_speeds_advertised == enable_eee)
948 return IXGBE_SUCCESS;
949 if (enable_eee)
950 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
951 else
952 hw->phy.eee_speeds_advertised = 0;
953 return hw->phy.ops.setup_link(hw);
954 }
955
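/*
 * ixgbe_setup_eee_fw() is installed as mac->ops.setup_eee for the X550EM_a
 * 1G copper devices in ixgbe_init_ops_X550EM_a() below, so a driver toggles
 * EEE with a single ops call.  A sketch (error handling elided):
 *
 *   if (hw->mac.ops.setup_eee != NULL)
 *       (void)hw->mac.ops.setup_eee(hw, TRUE);     (enable EEE)
 *   if (hw->mac.ops.setup_eee != NULL)
 *       (void)hw->mac.ops.setup_eee(hw, FALSE);    (disable EEE)
 */
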
956 /**
957 * ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type
958 * @hw: pointer to hardware structure
959 *
960 * Initialize the function pointers for MAC type X550EM_a.
961 * Does not touch the hardware.
962 **/
963 s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw)
964 {
965 struct ixgbe_mac_info *mac = &hw->mac;
966 s32 ret_val;
967
968 DEBUGFUNC("ixgbe_init_ops_X550EM_a");
969
970 /* Start with generic X550EM init */
971 ret_val = ixgbe_init_ops_X550EM(hw);
972
973 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
974 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) {
975 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
976 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
977 } else {
978 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a;
979 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a;
980 }
981 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a;
982 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a;
983
984 switch (mac->ops.get_media_type(hw)) {
985 case ixgbe_media_type_fiber:
986 mac->ops.setup_fc = NULL;
987 mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a;
988 break;
989 case ixgbe_media_type_backplane:
990 mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a;
991 mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a;
992 break;
993 default:
994 break;
995 }
996
997 switch (hw->device_id) {
998 case IXGBE_DEV_ID_X550EM_A_1G_T:
999 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
1000 mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
1001 mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
1002 mac->ops.setup_eee = ixgbe_setup_eee_fw;
1003 hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
1004 IXGBE_LINK_SPEED_1GB_FULL;
1005 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
1006 break;
1007 default:
1008 break;
1009 }
1010
1011 return ret_val;
1012 }
1013
1014 /**
1015 * ixgbe_init_ops_X550EM_x - Inits func ptrs and MAC type
1016 * @hw: pointer to hardware structure
1017 *
1018 * Initialize the function pointers for MAC type X550EM_x.
1019 * Does not touch the hardware.
1020 **/
1021 s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
1022 {
1023 struct ixgbe_mac_info *mac = &hw->mac;
1024 struct ixgbe_link_info *link = &hw->link;
1025 s32 ret_val;
1026
1027 DEBUGFUNC("ixgbe_init_ops_X550EM_x");
1028
1029 /* Start with generic X550EM init */
1030 ret_val = ixgbe_init_ops_X550EM(hw);
1031
1032 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
1033 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
1034 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em;
1035 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em;
1036 link->ops.read_link = ixgbe_read_i2c_combined_generic;
1037 link->ops.read_link_unlocked = ixgbe_read_i2c_combined_generic_unlocked;
1038 link->ops.write_link = ixgbe_write_i2c_combined_generic;
1039 link->ops.write_link_unlocked =
1040 ixgbe_write_i2c_combined_generic_unlocked;
1041 link->addr = IXGBE_CS4227;
1042
1043 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) {
1044 mac->ops.setup_fc = NULL;
1045 mac->ops.setup_eee = NULL;
1046 mac->ops.init_led_link_act = NULL;
1047 }
1048
1049 return ret_val;
1050 }
1051
1052 /**
1053 * ixgbe_dmac_config_X550
1054 * @hw: pointer to hardware structure
1055 *
1056 * Configure DMA coalescing. If enabling DMAC, DMAC is activated.
1057 * When disabling DMAC, the DMAC enable bit is cleared.
1058 **/
1059 s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw)
1060 {
1061 u32 reg, high_pri_tc;
1062
1063 DEBUGFUNC("ixgbe_dmac_config_X550");
1064
1065 /* Disable DMA coalescing before configuring */
1066 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1067 reg &= ~IXGBE_DMACR_DMAC_EN;
1068 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1069
1070 /* Disable DMA Coalescing if the watchdog timer is 0 */
1071 if (!hw->mac.dmac_config.watchdog_timer)
1072 goto out;
1073
1074 ixgbe_dmac_config_tcs_X550(hw);
1075
1076 /* Configure DMA Coalescing Control Register */
1077 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1078
1079 /* Set the watchdog timer in units of 40.96 usec */
1080 reg &= ~IXGBE_DMACR_DMACWT_MASK;
1081 reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096;
1082
1083 reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK;
1084 /* If fcoe is enabled, set high priority traffic class */
1085 if (hw->mac.dmac_config.fcoe_en) {
1086 high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc;
1087 reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) &
1088 IXGBE_DMACR_HIGH_PRI_TC_MASK);
1089 }
1090 reg |= IXGBE_DMACR_EN_MNG_IND;
1091
1092 /* Enable DMA coalescing after configuration */
1093 reg |= IXGBE_DMACR_DMAC_EN;
1094 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1095
1096 out:
1097 return IXGBE_SUCCESS;
1098 }
1099
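/*
 * The parameters consumed above live in hw->mac.dmac_config.  A sketch of
 * enabling coalescing from a driver, with illustrative values: the watchdog
 * is given in microseconds and converted to DMACR's 40.96 usec units by the
 * code above, and a watchdog of 0 leaves coalescing disabled:
 *
 *   hw->mac.dmac_config.watchdog_timer = 700;    (usec, example value)
 *   hw->mac.dmac_config.fcoe_en = FALSE;
 *   hw->mac.dmac_config.link_speed = IXGBE_LINK_SPEED_10GB_FULL;
 *   hw->mac.dmac_config.num_tcs = 1;
 *   (void)hw->mac.ops.dmac_config(hw);
 */
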
1100 /**
1101 * ixgbe_dmac_config_tcs_X550
1102 * @hw: pointer to hardware structure
1103 *
1104 * Configure DMA coalescing threshold per TC. The dmac enable bit must
1105 * be cleared before configuring.
1106 **/
1107 s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
1108 {
1109 u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb;
1110
1111 DEBUGFUNC("ixgbe_dmac_config_tcs_X550");
1112
1113 /* Configure DMA coalescing enabled */
1114 switch (hw->mac.dmac_config.link_speed) {
1115 case IXGBE_LINK_SPEED_10_FULL:
1116 case IXGBE_LINK_SPEED_100_FULL:
1117 pb_headroom = IXGBE_DMACRXT_100M;
1118 break;
1119 case IXGBE_LINK_SPEED_1GB_FULL:
1120 pb_headroom = IXGBE_DMACRXT_1G;
1121 break;
1122 default:
1123 pb_headroom = IXGBE_DMACRXT_10G;
1124 break;
1125 }
1126
1127 maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >>
1128 IXGBE_MHADD_MFS_SHIFT) / 1024);
1129
1130 /* Set the per Rx packet buffer receive threshold */
1131 for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
1132 reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc));
1133 reg &= ~IXGBE_DMCTH_DMACRXT_MASK;
1134
1135 if (tc < hw->mac.dmac_config.num_tcs) {
1136 /* Get Rx PB size */
1137 rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc));
1138 rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >>
1139 IXGBE_RXPBSIZE_SHIFT;
1140
1141 /* Calculate receive buffer threshold in kilobytes */
1142 if (rx_pb_size > pb_headroom)
1143 rx_pb_size = rx_pb_size - pb_headroom;
1144 else
1145 rx_pb_size = 0;
1146
1147 /* Minimum of MFS shall be set for DMCTH */
1148 reg |= (rx_pb_size > maxframe_size_kb) ?
1149 rx_pb_size : maxframe_size_kb;
1150 }
1151 IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg);
1152 }
1153 return IXGBE_SUCCESS;
1154 }
1155
1156 /**
1157 * ixgbe_dmac_update_tcs_X550
1158 * @hw: pointer to hardware structure
1159 *
1160 * Disables dmac, updates per TC settings, and then enables dmac.
1161 **/
1162 s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw)
1163 {
1164 u32 reg;
1165
1166 DEBUGFUNC("ixgbe_dmac_update_tcs_X550");
1167
1168 /* Disable DMA coalescing before configuring */
1169 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1170 reg &= ~IXGBE_DMACR_DMAC_EN;
1171 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1172
1173 ixgbe_dmac_config_tcs_X550(hw);
1174
1175 /* Enable DMA coalescing after configuration */
1176 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
1177 reg |= IXGBE_DMACR_DMAC_EN;
1178 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
1179
1180 return IXGBE_SUCCESS;
1181 }
1182
1183 /**
1184 * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
1185 * @hw: pointer to hardware structure
1186 *
1187 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1188 * ixgbe_hw struct in order to set up EEPROM access.
1189 **/
1190 s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
1191 {
1192 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1193 u32 eec;
1194 u16 eeprom_size;
1195
1196 DEBUGFUNC("ixgbe_init_eeprom_params_X550");
1197
1198 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1199 eeprom->semaphore_delay = 10;
1200 eeprom->type = ixgbe_flash;
1201
1202 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1203 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1204 IXGBE_EEC_SIZE_SHIFT);
1205 eeprom->word_size = 1 << (eeprom_size +
1206 IXGBE_EEPROM_WORD_SIZE_SHIFT);
1207
1208 DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
1209 eeprom->type, eeprom->word_size);
1210 }
1211
1212 return IXGBE_SUCCESS;
1213 }
1214
1215 /**
1216 * ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning
1217 * @hw: pointer to hardware structure
1218 * @enable: enable or disable source address pruning
1219 * @pool: Rx pool to set source address pruning for
1220 **/
1221 void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
1222 unsigned int pool)
1223 {
1224 u64 pfflp;
1225
1226 /* max rx pool is 63 */
1227 if (pool > 63)
1228 return;
1229
1230 pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
1231 pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
1232
1233 if (enable)
1234 pfflp |= (1ULL << pool);
1235 else
1236 pfflp &= ~(1ULL << pool);
1237
1238 IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
1239 IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
1240 }
1241
1242 /**
1243 * ixgbe_set_ethertype_anti_spoofing_X550 - Configure Ethertype anti-spoofing
1244 * @hw: pointer to hardware structure
1245 * @enable: enable or disable switch for Ethertype anti-spoofing
1246 * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
1247 *
1248 **/
1249 void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
1250 bool enable, int vf)
1251 {
1252 int vf_target_reg = vf >> 3;
1253 int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
1254 u32 pfvfspoof;
1255
1256 DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
1257
1258 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
1259 if (enable)
1260 pfvfspoof |= (1 << vf_target_shift);
1261 else
1262 pfvfspoof &= ~(1 << vf_target_shift);
1263
1264 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
1265 }
1266
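/*
 * Worked example of the register/bit selection above: for vf = 10,
 * vf_target_reg = 10 >> 3 = 1 and vf_target_shift = (10 % 8) +
 * IXGBE_SPOOF_ETHERTYPEAS_SHIFT, so a call such as
 *
 *   hw->mac.ops.set_ethertype_anti_spoofing(hw, TRUE, 10);
 *
 * sets that bit in PFVFSPOOF(1).
 */
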
1267 /**
1268 * ixgbe_iosf_wait - Wait for IOSF command completion
1269 * @hw: pointer to hardware structure
1270 * @ctrl: pointer to location to receive final IOSF control value
1271 *
1272 * Returns failing status on timeout
1273 *
1274 * Note: ctrl can be NULL if the IOSF control register value is not needed
1275 **/
1276 static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
1277 {
1278 u32 i, command = 0;
1279
1280 /* Check every 10 usec to see if the address cycle completed.
1281 * The SB IOSF BUSY bit will clear when the operation is
1282 * complete
1283 */
1284 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
1285 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
1286 if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
1287 break;
1288 usec_delay(10);
1289 }
1290 if (ctrl)
1291 *ctrl = command;
1292 if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
1293 ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n");
1294 return IXGBE_ERR_PHY;
1295 }
1296
1297 return IXGBE_SUCCESS;
1298 }
1299
1300 /**
1301 * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register
1302 * of the IOSF device
1303 * @hw: pointer to hardware structure
1304 * @reg_addr: 32 bit PHY register to write
1305 * @device_type: 3 bit device type
1306 * @data: Data to write to the register
1307 **/
1308 s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1309 u32 device_type, u32 data)
1310 {
1311 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1312 u32 command, error __unused;
1313 s32 ret;
1314
1315 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1316 if (ret != IXGBE_SUCCESS)
1317 return ret;
1318
1319 ret = ixgbe_iosf_wait(hw, NULL);
1320 if (ret != IXGBE_SUCCESS)
1321 goto out;
1322
1323 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1324 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1325
1326 /* Write IOSF control register */
1327 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1328
1329 /* Write IOSF data register */
1330 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
1331
1332 ret = ixgbe_iosf_wait(hw, &command);
1333
1334 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1335 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1336 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1337 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1338 "Failed to write, error %x\n", error);
1339 ret = IXGBE_ERR_PHY;
1340 }
1341
1342 out:
1343 ixgbe_release_swfw_semaphore(hw, gssr);
1344 return ret;
1345 }
1346
1347 /**
1348 * ixgbe_read_iosf_sb_reg_x550 - Reads specified register of the IOSF device
1349 * @hw: pointer to hardware structure
1350 * @reg_addr: 32 bit PHY register to write
1351 * @device_type: 3 bit device type
1352 * @data: Pointer to read data from the register
1353 **/
1354 s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1355 u32 device_type, u32 *data)
1356 {
1357 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1358 u32 command, error __unused;
1359 s32 ret;
1360
1361 ret = ixgbe_acquire_swfw_semaphore(hw, gssr);
1362 if (ret != IXGBE_SUCCESS)
1363 return ret;
1364
1365 ret = ixgbe_iosf_wait(hw, NULL);
1366 if (ret != IXGBE_SUCCESS)
1367 goto out;
1368
1369 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1370 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1371
1372 /* Write IOSF control register */
1373 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1374
1375 ret = ixgbe_iosf_wait(hw, &command);
1376
1377 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1378 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1379 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1380 ERROR_REPORT2(IXGBE_ERROR_POLLING,
1381 "Failed to read, error %x\n", error);
1382 ret = IXGBE_ERR_PHY;
1383 }
1384
1385 if (ret == IXGBE_SUCCESS)
1386 *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
1387
1388 out:
1389 ixgbe_release_swfw_semaphore(hw, gssr);
1390 return ret;
1391 }
1392
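/*
 * The read/write pair above is typically used for read-modify-write of KR
 * PHY registers behind the IOSF sideband.  A sketch using the link-control
 * register that ixgbe_restart_an_internal_phy_x550em() below also touches
 * (error handling elided):
 *
 *   u32 ctrl;
 *
 *   if (ixgbe_read_iosf_sb_reg_x550(hw,
 *       IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
 *       IXGBE_SB_IOSF_TARGET_KR_PHY, &ctrl) == IXGBE_SUCCESS) {
 *       ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
 *       (void)ixgbe_write_iosf_sb_reg_x550(hw,
 *           IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
 *           IXGBE_SB_IOSF_TARGET_KR_PHY, ctrl);
 *   }
 */
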
1393 /**
1394 * ixgbe_get_phy_token - Get the token for shared phy access
1395 * @hw: Pointer to hardware structure
1396 */
1397
1398 s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
1399 {
1400 struct ixgbe_hic_phy_token_req token_cmd;
1401 s32 status;
1402
1403 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1404 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1405 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1406 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1407 token_cmd.port_number = hw->bus.lan_id;
1408 token_cmd.command_type = FW_PHY_TOKEN_REQ;
1409 token_cmd.pad = 0;
1410 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1411 sizeof(token_cmd),
1412 IXGBE_HI_COMMAND_TIMEOUT,
1413 TRUE);
1414 if (status) {
1415 DEBUGOUT1("Issuing host interface command failed with Status = %d\n",
1416 status);
1417 return status;
1418 }
1419 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1420 return IXGBE_SUCCESS;
1421 if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) {
1422 DEBUGOUT1("Host interface command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n",
1423 token_cmd.hdr.cmd_or_resp.ret_status);
1424 return IXGBE_ERR_FW_RESP_INVALID;
1425 }
1426
1427 DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n");
1428 return IXGBE_ERR_TOKEN_RETRY;
1429 }
1430
1431 /**
1432 * ixgbe_put_phy_token - Put the token for shared phy access
1433 * @hw: Pointer to hardware structure
1434 */
1435
1436 s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
1437 {
1438 struct ixgbe_hic_phy_token_req token_cmd;
1439 s32 status;
1440
1441 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
1442 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
1443 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
1444 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1445 token_cmd.port_number = hw->bus.lan_id;
1446 token_cmd.command_type = FW_PHY_TOKEN_REL;
1447 token_cmd.pad = 0;
1448 status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd,
1449 sizeof(token_cmd),
1450 IXGBE_HI_COMMAND_TIMEOUT,
1451 TRUE);
1452 if (status)
1453 return status;
1454 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
1455 return IXGBE_SUCCESS;
1456
1457 DEBUGOUT("Put PHY Token host interface command failed");
1458 return IXGBE_ERR_FW_RESP_INVALID;
1459 }
1460
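/*
 * Shared-PHY accesses on X550EM_a bracket the PHY traffic with the token
 * calls above, retrying while firmware reports IXGBE_ERR_TOKEN_RETRY.  A
 * sketch of the pattern; the retry count and delay are illustrative and the
 * PHY access itself is elided:
 *
 *   s32 rv;
 *   int i;
 *
 *   for (i = 0; i < 4; i++) {
 *       rv = ixgbe_get_phy_token(hw);
 *       if (rv != IXGBE_ERR_TOKEN_RETRY)
 *           break;
 *       msec_delay(5);
 *   }
 *   if (rv == IXGBE_SUCCESS) {
 *       ... access the shared PHY ...
 *       (void)ixgbe_put_phy_token(hw);
 *   }
 */
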
1461 /**
1462 * ixgbe_write_iosf_sb_reg_x550a - Writes a value to specified register
1463 * of the IOSF device
1464 * @hw: pointer to hardware structure
1465 * @reg_addr: 32 bit PHY register to write
1466 * @device_type: 3 bit device type
1467 * @data: Data to write to the register
1468 **/
1469 s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1470 u32 device_type, u32 data)
1471 {
1472 struct ixgbe_hic_internal_phy_req write_cmd;
1473 s32 status;
1474 UNREFERENCED_1PARAMETER(device_type);
1475
1476 memset(&write_cmd, 0, sizeof(write_cmd));
1477 write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1478 write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1479 write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1480 write_cmd.port_number = hw->bus.lan_id;
1481 write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
1482 write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1483 write_cmd.write_data = IXGBE_CPU_TO_BE32(data);
1484
1485 status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd,
1486 sizeof(write_cmd),
1487 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
1488
1489 return status;
1490 }
1491
1492 /**
1493 * ixgbe_read_iosf_sb_reg_x550a - Reads specified register of the IOSF device
1494 * @hw: pointer to hardware structure
1495 * @reg_addr: 32 bit PHY register to write
1496 * @device_type: 3 bit device type
1497 * @data: Pointer to read data from the register
1498 **/
1499 s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
1500 u32 device_type, u32 *data)
1501 {
1502 union {
1503 struct ixgbe_hic_internal_phy_req cmd;
1504 struct ixgbe_hic_internal_phy_resp rsp;
1505 } hic;
1506 s32 status;
1507 UNREFERENCED_1PARAMETER(device_type);
1508
1509 memset(&hic, 0, sizeof(hic));
1510 hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
1511 hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
1512 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1513 hic.cmd.port_number = hw->bus.lan_id;
1514 hic.cmd.command_type = FW_INT_PHY_REQ_READ;
1515 hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
1516
1517 status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
1518 sizeof(hic.cmd),
1519 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
1520
1521 /* Extract the register value from the response. */
1522 *data = IXGBE_BE32_TO_CPU(hic.rsp.read_data);
1523
1524 return status;
1525 }
1526
1527 /**
1528 * ixgbe_disable_mdd_X550
1529 * @hw: pointer to hardware structure
1530 *
1531 * Disable malicious driver detection
1532 **/
1533 void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw)
1534 {
1535 u32 reg;
1536
1537 DEBUGFUNC("ixgbe_disable_mdd_X550");
1538
1539 /* Disable MDD for TX DMA and interrupt */
1540 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1541 reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1542 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1543
1544 /* Disable MDD for RX and interrupt */
1545 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1546 reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1547 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1548 }
1549
1550 /**
1551 * ixgbe_enable_mdd_X550
1552 * @hw: pointer to hardware structure
1553 *
1554 * Enable malicious driver detection
1555 **/
1556 void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw)
1557 {
1558 u32 reg;
1559
1560 DEBUGFUNC("ixgbe_enable_mdd_X550");
1561
1562 /* Enable MDD for TX DMA and interrupt */
1563 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1564 reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
1565 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1566
1567 /* Enable MDD for RX and interrupt */
1568 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1569 reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
1570 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
1571 }
1572
1573 /**
1574 * ixgbe_restore_mdd_vf_X550
1575 * @hw: pointer to hardware structure
1576 * @vf: vf index
1577 *
1578 * Restore VF that was disabled during malicious driver detection event
1579 **/
1580 void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf)
1581 {
1582 u32 idx, reg, num_qs, start_q, bitmask;
1583
1584 DEBUGFUNC("ixgbe_restore_mdd_vf_X550");
1585
1586 /* Map VF to queues */
1587 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1588 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1589 case IXGBE_MRQC_VMDQRT8TCEN:
1590 num_qs = 8; /* 16 VFs / pools */
1591 bitmask = 0x000000FF;
1592 break;
1593 case IXGBE_MRQC_VMDQRSS32EN:
1594 case IXGBE_MRQC_VMDQRT4TCEN:
1595 num_qs = 4; /* 32 VFs / pools */
1596 bitmask = 0x0000000F;
1597 break;
1598 default: /* 64 VFs / pools */
1599 num_qs = 2;
1600 bitmask = 0x00000003;
1601 break;
1602 }
1603 start_q = vf * num_qs;
1604
1605 /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
1606 idx = start_q / 32;
1607 reg = 0;
1608 reg |= (bitmask << (start_q % 32));
1609 IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg);
1610 IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg);
1611 }
1612
1613 /**
1614 * ixgbe_mdd_event_X550
1615 * @hw: pointer to hardware structure
1616 * @vf_bitmap: vf bitmap of malicious vfs
1617 *
1618 * Handle malicious driver detection event.
1619 **/
1620 void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap)
1621 {
1622 u32 wqbr;
1623 u32 i, j, reg, q, shift, vf, idx;
1624
1625 DEBUGFUNC("ixgbe_mdd_event_X550");
1626
1627 /* figure out pool size for mapping to vf's */
1628 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
1629 switch (reg & IXGBE_MRQC_MRQE_MASK) {
1630 case IXGBE_MRQC_VMDQRT8TCEN:
1631 shift = 3; /* 16 VFs / pools */
1632 break;
1633 case IXGBE_MRQC_VMDQRSS32EN:
1634 case IXGBE_MRQC_VMDQRT4TCEN:
1635 shift = 2; /* 32 VFs / pools */
1636 break;
1637 default:
1638 shift = 1; /* 64 VFs / pools */
1639 break;
1640 }
1641
1642 /* Read WQBR_TX and WQBR_RX and check for malicious queues */
1643 for (i = 0; i < 4; i++) {
1644 wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i));
1645 wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
1646
1647 if (!wqbr)
1648 continue;
1649
1650 /* Get malicious queue */
1651 for (j = 0; j < 32 && wqbr; j++) {
1652
1653 if (!(wqbr & (1 << j)))
1654 continue;
1655
1656 /* Get queue from bitmask */
1657 q = j + (i * 32);
1658
1659 /* Map queue to vf */
1660 vf = (q >> shift);
1661
1662 /* Set vf bit in vf_bitmap */
1663 idx = vf / 32;
1664 vf_bitmap[idx] |= (1 << (vf % 32));
1665 wqbr &= ~(1 << j);
1666 }
1667 }
1668 }
1669
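/*
 * A sketch of how an interrupt handler might consume the two MDD helpers
 * above: collect the bitmap of offending VFs, then re-enable each one's
 * queues (whether to restore at all is policy left to the caller):
 *
 *   u32 vf_bitmap[2] = { 0, 0 };    (64 pools -> two 32-bit words)
 *   u32 vf;
 *
 *   hw->mac.ops.mdd_event(hw, vf_bitmap);
 *   for (vf = 0; vf < 64; vf++) {
 *       if (vf_bitmap[vf / 32] & (1u << (vf % 32)))
 *           hw->mac.ops.restore_mdd_vf(hw, vf);
 *   }
 */
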
1670 /**
1671 * ixgbe_get_media_type_X550em - Get media type
1672 * @hw: pointer to hardware structure
1673 *
1674 * Returns the media type (fiber, copper, backplane)
1675 */
1676 enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
1677 {
1678 enum ixgbe_media_type media_type;
1679
1680 DEBUGFUNC("ixgbe_get_media_type_X550em");
1681
1682 /* Detect if there is a copper PHY attached. */
1683 switch (hw->device_id) {
1684 case IXGBE_DEV_ID_X550EM_X_KR:
1685 case IXGBE_DEV_ID_X550EM_X_KX4:
1686 case IXGBE_DEV_ID_X550EM_X_XFI:
1687 case IXGBE_DEV_ID_X550EM_A_KR:
1688 case IXGBE_DEV_ID_X550EM_A_KR_L:
1689 media_type = ixgbe_media_type_backplane;
1690 break;
1691 case IXGBE_DEV_ID_X550EM_X_SFP:
1692 case IXGBE_DEV_ID_X550EM_A_SFP:
1693 case IXGBE_DEV_ID_X550EM_A_SFP_N:
1694 case IXGBE_DEV_ID_X550EM_A_QSFP:
1695 case IXGBE_DEV_ID_X550EM_A_QSFP_N:
1696 media_type = ixgbe_media_type_fiber;
1697 break;
1698 case IXGBE_DEV_ID_X550EM_X_1G_T:
1699 case IXGBE_DEV_ID_X550EM_X_10G_T:
1700 case IXGBE_DEV_ID_X550EM_A_10G_T:
1701 media_type = ixgbe_media_type_copper;
1702 break;
1703 case IXGBE_DEV_ID_X550EM_A_SGMII:
1704 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
1705 media_type = ixgbe_media_type_backplane;
1706 hw->phy.type = ixgbe_phy_sgmii;
1707 break;
1708 case IXGBE_DEV_ID_X550EM_A_1G_T:
1709 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
1710 media_type = ixgbe_media_type_copper;
1711 break;
1712 default:
1713 media_type = ixgbe_media_type_unknown;
1714 break;
1715 }
1716 return media_type;
1717 }
1718
1719 /**
1720 * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported
1721 * @hw: pointer to hardware structure
1722 * @linear: TRUE if SFP module is linear
1723 */
1724 static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
1725 {
1726 DEBUGFUNC("ixgbe_supported_sfp_modules_X550em");
1727
1728 switch (hw->phy.sfp_type) {
1729 case ixgbe_sfp_type_not_present:
1730 return IXGBE_ERR_SFP_NOT_PRESENT;
1731 case ixgbe_sfp_type_da_cu_core0:
1732 case ixgbe_sfp_type_da_cu_core1:
1733 *linear = TRUE;
1734 break;
1735 case ixgbe_sfp_type_srlr_core0:
1736 case ixgbe_sfp_type_srlr_core1:
1737 case ixgbe_sfp_type_da_act_lmt_core0:
1738 case ixgbe_sfp_type_da_act_lmt_core1:
1739 case ixgbe_sfp_type_1g_sx_core0:
1740 case ixgbe_sfp_type_1g_sx_core1:
1741 case ixgbe_sfp_type_1g_lx_core0:
1742 case ixgbe_sfp_type_1g_lx_core1:
1743 *linear = FALSE;
1744 break;
1745 case ixgbe_sfp_type_unknown:
1746 case ixgbe_sfp_type_1g_cu_core0:
1747 case ixgbe_sfp_type_1g_cu_core1:
1748 default:
1749 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1750 }
1751
1752 return IXGBE_SUCCESS;
1753 }
1754
1755 /**
1756 * ixgbe_identify_sfp_module_X550em - Identifies SFP modules
1757 * @hw: pointer to hardware structure
1758 *
1759 * Searches for and identifies the SFP module and assigns appropriate PHY type.
1760 **/
1761 s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw)
1762 {
1763 s32 status;
1764 bool linear;
1765
1766 DEBUGFUNC("ixgbe_identify_sfp_module_X550em");
1767
1768 status = ixgbe_identify_module_generic(hw);
1769
1770 if (status != IXGBE_SUCCESS)
1771 return status;
1772
1773 /* Check if SFP module is supported */
1774 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1775
1776 return status;
1777 }
1778
1779 /**
1780 * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops
1781 * @hw: pointer to hardware structure
1782 */
1783 s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
1784 {
1785 s32 status;
1786 bool linear;
1787
1788 DEBUGFUNC("ixgbe_setup_sfp_modules_X550em");
1789
1790 /* Check if SFP module is supported */
1791 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
1792
1793 if (status != IXGBE_SUCCESS)
1794 return status;
1795
1796 ixgbe_init_mac_link_ops_X550em(hw);
1797 hw->phy.ops.reset = NULL;
1798
1799 return IXGBE_SUCCESS;
1800 }
1801
1802 /**
1803 * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the
1804 * internal PHY
1805 * @hw: pointer to hardware structure
1806 **/
1807 static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
1808 {
1809 s32 status;
1810 u32 link_ctrl;
1811
1812 /* Restart auto-negotiation. */
1813 status = hw->mac.ops.read_iosf_sb_reg(hw,
1814 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1815 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl);
1816
1817 if (status) {
1818 DEBUGOUT("Auto-negotiation did not complete\n");
1819 return status;
1820 }
1821
1822 link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
1823 status = hw->mac.ops.write_iosf_sb_reg(hw,
1824 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1825 IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl);
1826
1827 if (hw->mac.type == ixgbe_mac_X550EM_a) {
1828 u32 flx_mask_st20;
1829
1830 /* Indicate to FW that AN restart has been asserted */
1831 status = hw->mac.ops.read_iosf_sb_reg(hw,
1832 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1833 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20);
1834
1835 if (status) {
1836 DEBUGOUT("Auto-negotiation did not complete\n");
1837 return status;
1838 }
1839
1840 flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART;
1841 status = hw->mac.ops.write_iosf_sb_reg(hw,
1842 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1843 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20);
1844 }
1845
1846 return status;
1847 }
1848
1849 /**
1850 * ixgbe_setup_sgmii - Set up link for sgmii
1851 * @hw: pointer to hardware structure
1852 * @speed: new link speed
1853 * @autoneg_wait: TRUE when waiting for completion is needed
1854 */
1855 static s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1856 bool autoneg_wait)
1857 {
1858 struct ixgbe_mac_info *mac = &hw->mac;
1859 u32 lval, sval, flx_val;
1860 s32 rc;
1861
1862 rc = mac->ops.read_iosf_sb_reg(hw,
1863 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1864 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1865 if (rc)
1866 return rc;
1867
1868 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1869 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1870 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1871 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1872 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1873 rc = mac->ops.write_iosf_sb_reg(hw,
1874 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1875 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1876 if (rc)
1877 return rc;
1878
1879 rc = mac->ops.read_iosf_sb_reg(hw,
1880 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1881 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1882 if (rc)
1883 return rc;
1884
1885 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1886 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1887 rc = mac->ops.write_iosf_sb_reg(hw,
1888 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1889 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1890 if (rc)
1891 return rc;
1892
1893 rc = mac->ops.read_iosf_sb_reg(hw,
1894 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1895 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1896 if (rc)
1897 return rc;
1898
1899 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1900 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
1901 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1902 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1903 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1904
1905 rc = mac->ops.write_iosf_sb_reg(hw,
1906 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1907 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1908 if (rc)
1909 return rc;
1910
1911 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1912 if (rc)
1913 return rc;
1914
1915 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1916 }
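/*
 * Note: ixgbe_setup_sgmii() above forces the internal SGMII link to 1G
 * (FORCE_SPEED_1G plus the 10/100 force bits in KRM_SGMII_CTRL), whereas
 * ixgbe_setup_sgmii_fw() below clears those force bits and selects
 * SPEED_AN, so the speed is left to the firmware-managed external 1G PHY
 * to negotiate.
 */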
1917
1918 /**
1919 * ixgbe_setup_sgmii_fw - Set up link for internal PHY SGMII auto-negotiation
1920 * @hw: pointer to hardware structure
1921 * @speed: new link speed
1922 * @autoneg_wait: TRUE when waiting for completion is needed
1923 */
1924 static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1925 bool autoneg_wait)
1926 {
1927 struct ixgbe_mac_info *mac = &hw->mac;
1928 u32 lval, sval, flx_val;
1929 s32 rc;
1930
1931 rc = mac->ops.read_iosf_sb_reg(hw,
1932 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1933 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1934 if (rc)
1935 return rc;
1936
1937 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1938 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1939 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1940 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1941 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1942 rc = mac->ops.write_iosf_sb_reg(hw,
1943 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1944 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1945 if (rc)
1946 return rc;
1947
1948 rc = mac->ops.read_iosf_sb_reg(hw,
1949 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1950 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1951 if (rc)
1952 return rc;
1953
1954 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1955 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1956 rc = mac->ops.write_iosf_sb_reg(hw,
1957 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1958 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1959 if (rc)
1960 return rc;
1961
1962 rc = mac->ops.write_iosf_sb_reg(hw,
1963 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1964 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1965 if (rc)
1966 return rc;
1967
1968 rc = mac->ops.read_iosf_sb_reg(hw,
1969 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1970 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1971 if (rc)
1972 return rc;
1973
1974 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1975 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
1976 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1977 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1978 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1979
1980 rc = mac->ops.write_iosf_sb_reg(hw,
1981 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1982 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
1983 if (rc)
1984 return rc;
1985
1986 rc = ixgbe_restart_an_internal_phy_x550em(hw);
1987
1988 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1989 }
1990
1991 /**
1992 * ixgbe_init_mac_link_ops_X550em - init mac link function pointers
1993 * @hw: pointer to hardware structure
1994 */
1995 void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
1996 {
1997 struct ixgbe_mac_info *mac = &hw->mac;
1998
1999 DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
2000
2001 switch (hw->mac.ops.get_media_type(hw)) {
2002 case ixgbe_media_type_fiber:
2003 /* CS4227 does not support autoneg, so disable the laser control
2004 * functions for SFP+ fiber
2005 */
2006 mac->ops.disable_tx_laser = NULL;
2007 mac->ops.enable_tx_laser = NULL;
2008 mac->ops.flap_tx_laser = NULL;
2009 mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
2010 mac->ops.set_rate_select_speed =
2011 ixgbe_set_soft_rate_select_speed;
2012
2013 if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) ||
2014 (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP))
2015 mac->ops.setup_mac_link =
2016 ixgbe_setup_mac_link_sfp_x550a;
2017 else
2018 mac->ops.setup_mac_link =
2019 ixgbe_setup_mac_link_sfp_x550em;
2020 break;
2021 case ixgbe_media_type_copper:
2022 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T)
2023 break;
2024 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2025 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
2026 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
2027 mac->ops.setup_link = ixgbe_setup_sgmii_fw;
2028 mac->ops.check_link =
2029 ixgbe_check_mac_link_generic;
2030 } else {
2031 mac->ops.setup_link =
2032 ixgbe_setup_mac_link_t_X550em;
2033 }
2034 } else {
2035 mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
2036 mac->ops.check_link = ixgbe_check_link_t_X550em;
2037 }
2038 break;
2039 case ixgbe_media_type_backplane:
2040 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
2041 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
2042 mac->ops.setup_link = ixgbe_setup_sgmii;
2043 break;
2044 default:
2045 break;
2046 }
2047 }
2048
2049 /**
2050 * ixgbe_get_link_capabilities_X550em - Determines link capabilities
2051 * @hw: pointer to hardware structure
2052 * @speed: pointer to link speed
2053 * @autoneg: TRUE when autoneg or autotry is enabled
2054 */
2055 s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
2056 ixgbe_link_speed *speed,
2057 bool *autoneg)
2058 {
2059 DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
2060
2061 if (hw->phy.type == ixgbe_phy_fw) {
2062 *autoneg = TRUE;
2063 *speed = hw->phy.speeds_supported;
2064 return 0;
2065 }
2066
2067 /* SFP */
2068 if (hw->phy.media_type == ixgbe_media_type_fiber) {
2069
2070 /* CS4227 SFP must not enable auto-negotiation */
2071 *autoneg = FALSE;
2072
2073 /* Check if 1G SFP module. */
2074 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
2075 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1
2076 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
2077 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
2078 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2079 return IXGBE_SUCCESS;
2080 }
2081
2082 /* Link capabilities are based on SFP */
2083 if (hw->phy.multispeed_fiber)
2084 *speed = IXGBE_LINK_SPEED_10GB_FULL |
2085 IXGBE_LINK_SPEED_1GB_FULL;
2086 else
2087 *speed = IXGBE_LINK_SPEED_10GB_FULL;
2088 } else {
2089 *autoneg = TRUE;
2090
2091 switch (hw->phy.type) {
2092 case ixgbe_phy_x550em_xfi:
2093 *speed = IXGBE_LINK_SPEED_1GB_FULL |
2094 IXGBE_LINK_SPEED_10GB_FULL;
2095 *autoneg = FALSE;
2096 break;
2097 case ixgbe_phy_ext_1g_t:
2098 case ixgbe_phy_sgmii:
2099 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2100 break;
2101 case ixgbe_phy_x550em_kr:
2102 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2103 /* check different backplane modes */
2104 if (hw->phy.nw_mng_if_sel &
2105 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
2106 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
2107 break;
2108 } else if (hw->device_id ==
2109 IXGBE_DEV_ID_X550EM_A_KR_L) {
2110 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2111 break;
2112 }
2113 }
2114 /* fall through */
2115 default:
2116 *speed = IXGBE_LINK_SPEED_10GB_FULL |
2117 IXGBE_LINK_SPEED_1GB_FULL;
2118 break;
2119 }
2120 }
2121
2122 return IXGBE_SUCCESS;
2123 }
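/*
 * Usage sketch (illustrative only, not part of the original source): a
 * caller typically queries the capabilities and intersects them with the
 * user-advertised speeds before configuring the link, e.g.
 *
 *	ixgbe_link_speed speed;
 *	bool autoneg;
 *
 *	if (ixgbe_get_link_capabilities_X550em(hw, &speed, &autoneg) ==
 *	    IXGBE_SUCCESS)
 *		speed &= hw->phy.autoneg_advertised;
 *
 * The exact caller-side handling is driver specific.
 */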
2124
2125 /**
2126 * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
2127 * @hw: pointer to hardware structure
2128 * @lsc: pointer to boolean flag which indicates whether external Base T
2129 * PHY interrupt is lsc
2130 *
2131 * Determine if external Base T PHY interrupt cause is high temperature
2132 * failure alarm or link status change.
2133 *
2134 * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
2135 * failure alarm, else return PHY access status.
2136 */
2137 static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
2138 {
2139 u32 status;
2140 u16 reg;
2141
2142 *lsc = FALSE;
2143
2144 /* Vendor alarm triggered */
2145 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2146 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2147 &reg);
2148
2149 if (status != IXGBE_SUCCESS ||
2150 !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
2151 return status;
2152
2153 /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
2154 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
2155 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2156 &reg);
2157
2158 if (status != IXGBE_SUCCESS ||
2159 !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2160 IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
2161 return status;
2162
2163 /* Global alarm triggered */
2164 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
2165 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2166 &reg);
2167
2168 if (status != IXGBE_SUCCESS)
2169 return status;
2170
2171 /* If high temperature failure, then return over temp error and exit */
2172 if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
2173 /* power down the PHY in case the PHY FW didn't already */
2174 ixgbe_set_copper_phy_power(hw, FALSE);
2175 return IXGBE_ERR_OVERTEMP;
2176 } else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
2177 /* device fault alarm triggered */
2178 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG,
2179 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2180 &reg);
2181
2182 if (status != IXGBE_SUCCESS)
2183 return status;
2184
2185 /* if device fault was due to high temp alarm handle and exit */
2186 if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
2187 /* power down the PHY in case the PHY FW didn't */
2188 ixgbe_set_copper_phy_power(hw, FALSE);
2189 return IXGBE_ERR_OVERTEMP;
2190 }
2191 }
2192
2193 /* Vendor alarm 2 triggered */
2194 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2195 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2196
2197 if (status != IXGBE_SUCCESS ||
2198 !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
2199 return status;
2200
2201 /* link connect/disconnect event occurred */
2202 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
2203 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2204
2205 if (status != IXGBE_SUCCESS)
2206 return status;
2207
2208 /* Indicate LSC */
2209 if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
2210 *lsc = TRUE;
2211
2212 return IXGBE_SUCCESS;
2213 }
2214
2215 /**
2216 * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
2217 * @hw: pointer to hardware structure
2218 *
2219 * Enable link status change and temperature failure alarm for the external
2220 * Base T PHY
2221 *
2222 * Returns PHY access status
2223 */
2224 static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
2225 {
2226 u32 status;
2227 u16 reg;
2228 bool lsc;
2229
2230 /* Clear interrupt flags */
2231 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
2232
2233 /* Enable link status change alarm */
2234
2235 /* Enable the LASI interrupts on X552 devices to receive notifications
2236 * of the link configurations of the external PHY and correspondingly
2237 * support the configuration of the internal iXFI link, since iXFI does
2238 * not support auto-negotiation. This is not required for X553 devices
2239 * with KR support, which performs auto-negotiation and is used
2240 * as the internal link to the external PHY. Hence a check is added here
2241 * to avoid enabling LASI interrupts for X553 devices.
2242 */
2243 if (hw->mac.type != ixgbe_mac_X550EM_a) {
2244 status = hw->phy.ops.read_reg(hw,
2245 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2246 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
2247
2248 if (status != IXGBE_SUCCESS)
2249 return status;
2250
2251 reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
2252
2253 status = hw->phy.ops.write_reg(hw,
2254 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2255 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
2256
2257 if (status != IXGBE_SUCCESS)
2258 return status;
2259 }
2260
2261 /* Enable high temperature failure and global fault alarms */
2262 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2263 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2264 &reg);
2265
2266 if (status != IXGBE_SUCCESS)
2267 return status;
2268
2269 reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN |
2270 IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN);
2271
2272 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2273 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2274 reg);
2275
2276 if (status != IXGBE_SUCCESS)
2277 return status;
2278
2279 /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
2280 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2281 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2282 &reg);
2283
2284 if (status != IXGBE_SUCCESS)
2285 return status;
2286
2287 reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2288 IXGBE_MDIO_GLOBAL_ALARM_1_INT);
2289
2290 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2291 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2292 reg);
2293
2294 if (status != IXGBE_SUCCESS)
2295 return status;
2296
2297 /* Enable chip-wide vendor alarm */
2298 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2299 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2300 &reg);
2301
2302 if (status != IXGBE_SUCCESS)
2303 return status;
2304
2305 reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
2306
2307 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2308 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2309 reg);
2310
2311 return status;
2312 }
2313
2314 /**
2315 * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
2316 * @hw: pointer to hardware structure
2317 * @speed: link speed
2318 *
2319 * Configures the integrated KR PHY.
2320 **/
2321 static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
2322 ixgbe_link_speed speed)
2323 {
2324 s32 status;
2325 u32 reg_val;
2326
2327 status = hw->mac.ops.read_iosf_sb_reg(hw,
2328 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2329 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2330 if (status)
2331 return status;
2332
2333 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2334 reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
2335 IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
2336
2337 /* Advertise 10G support. */
2338 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
2339 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
2340
2341 /* Advertise 1G support. */
2342 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
2343 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
2344
2345 status = hw->mac.ops.write_iosf_sb_reg(hw,
2346 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2347 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2348
2349 if (hw->mac.type == ixgbe_mac_X550EM_a) {
2350 /* Set lane mode to KR auto negotiation */
2351 status = hw->mac.ops.read_iosf_sb_reg(hw,
2352 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2353 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2354
2355 if (status)
2356 return status;
2357
2358 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2359 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
2360 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2361 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2362 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2363
2364 status = hw->mac.ops.write_iosf_sb_reg(hw,
2365 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2366 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2367 }
2368
2369 return ixgbe_restart_an_internal_phy_x550em(hw);
2370 }
2371
2372 /**
2373 * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
2374 * @hw: pointer to hardware structure
2375 */
2376 static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
2377 {
2378 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2379 s32 rc;
2380
2381 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
2382 return IXGBE_SUCCESS;
2383
2384 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
2385 if (rc)
2386 return rc;
2387 memset(store, 0, sizeof(store));
2388
2389 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
2390 if (rc)
2391 return rc;
2392
2393 return ixgbe_setup_fw_link(hw);
2394 }
2395
2396 /**
2397 * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
2398 * @hw: pointer to hardware structure
2399 */
2400 static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
2401 {
2402 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
2403 s32 rc;
2404
2405 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
2406 if (rc)
2407 return rc;
2408
2409 if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
2410 ixgbe_shutdown_fw_phy(hw);
2411 return IXGBE_ERR_OVERTEMP;
2412 }
2413 return IXGBE_SUCCESS;
2414 }
2415
2416 /**
2417 * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
2418 * @hw: pointer to hardware structure
2419 *
2420 * Read NW_MNG_IF_SEL register and save field values, and check for valid field
2421 * values.
2422 **/
2423 static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
2424 {
2425 /* Save NW management interface connected on board. This is used
2426 * to determine internal PHY mode.
2427 */
2428 hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
2429
2430 /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set
2431 * PHY address. This register field has only been used for X552.
2432 */
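/*
 * Example of the extraction below (field value chosen purely for
 * illustration): if MDIO_ACT is set and the MDIO_PHY_ADD field of
 * NW_MNG_IF_SEL holds 0x1C, then masking with
 * IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD and shifting right by
 * IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT yields 0x1C, which becomes
 * hw->phy.addr.
 */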
2433 if (hw->mac.type == ixgbe_mac_X550EM_a &&
2434 hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
2435 hw->phy.addr = (hw->phy.nw_mng_if_sel &
2436 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
2437 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
2438 }
2439
2440 return IXGBE_SUCCESS;
2441 }
2442
2443 /**
2444 * ixgbe_init_phy_ops_X550em - PHY/SFP specific init
2445 * @hw: pointer to hardware structure
2446 *
2447 * Initialize any function pointers that were not able to be
2448 * set during init_shared_code because the PHY/SFP type was
2449 * not known. Perform the SFP init if necessary.
2450 */
2451 s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
2452 {
2453 struct ixgbe_phy_info *phy = &hw->phy;
2454 s32 ret_val;
2455
2456 DEBUGFUNC("ixgbe_init_phy_ops_X550em");
2457
2458 hw->mac.ops.set_lan_id(hw);
2459 ixgbe_read_mng_if_sel_x550em(hw);
2460
2461 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
2462 phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2463 ixgbe_setup_mux_ctl(hw);
2464 phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em;
2465 }
2466
2467 switch (hw->device_id) {
2468 case IXGBE_DEV_ID_X550EM_A_1G_T:
2469 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2470 phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22;
2471 phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22;
2472 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2473 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2474 phy->ops.check_overtemp = ixgbe_check_overtemp_fw;
2475 if (hw->bus.lan_id)
2476 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2477 else
2478 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2479
2480 break;
2481 case IXGBE_DEV_ID_X550EM_A_10G_T:
2482 case IXGBE_DEV_ID_X550EM_A_SFP:
2483 hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
2484 hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
2485 if (hw->bus.lan_id)
2486 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
2487 else
2488 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
2489 break;
2490 case IXGBE_DEV_ID_X550EM_X_SFP:
2491 /* set up for CS4227 usage */
2492 hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
2493 break;
2494 case IXGBE_DEV_ID_X550EM_X_1G_T:
2495 phy->ops.read_reg_mdi = NULL;
2496 phy->ops.write_reg_mdi = NULL;
2497 break;
2498 default:
2499 break;
2500 }
2501
2502 /* Identify the PHY or SFP module */
2503 ret_val = phy->ops.identify(hw);
2504 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2505 ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
2506 return ret_val;
2507
2508 /* Setup function pointers based on detected hardware */
2509 ixgbe_init_mac_link_ops_X550em(hw);
2510 if (phy->sfp_type != ixgbe_sfp_type_unknown)
2511 phy->ops.reset = NULL;
2512
2513 /* Set functions pointers based on phy type */
2514 switch (hw->phy.type) {
2515 case ixgbe_phy_x550em_kx4:
2516 phy->ops.setup_link = NULL;
2517 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2518 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2519 break;
2520 case ixgbe_phy_x550em_kr:
2521 phy->ops.setup_link = ixgbe_setup_kr_x550em;
2522 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2523 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2524 break;
2525 case ixgbe_phy_ext_1g_t:
2526 /* link is managed by FW */
2527 phy->ops.setup_link = NULL;
2528 phy->ops.reset = NULL;
2529 break;
2530 case ixgbe_phy_x550em_xfi:
2531 /* link is managed by HW */
2532 phy->ops.setup_link = NULL;
2533 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
2534 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
2535 break;
2536 case ixgbe_phy_x550em_ext_t:
2537 /* If internal link mode is XFI, then setup iXFI internal link,
2538 * else setup KR now.
2539 */
2540 phy->ops.setup_internal_link =
2541 ixgbe_setup_internal_phy_t_x550em;
2542
2543 /* setup SW LPLU only for first revision of X550EM_x */
2544 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
2545 !(IXGBE_FUSES0_REV_MASK &
2546 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
2547 phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
2548
2549 phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
2550 phy->ops.reset = ixgbe_reset_phy_t_X550em;
2551 break;
2552 case ixgbe_phy_sgmii:
2553 phy->ops.setup_link = NULL;
2554 break;
2555 case ixgbe_phy_fw:
2556 phy->ops.setup_link = ixgbe_setup_fw_link;
2557 phy->ops.reset = ixgbe_reset_phy_fw;
2558 break;
2559 default:
2560 break;
2561 }
2562 return ret_val;
2563 }
2564
2565 /**
2566 * ixgbe_set_mdio_speed - Set MDIO clock speed
2567 * @hw: pointer to hardware structure
2568 */
2569 static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
2570 {
2571 u32 hlreg0;
2572
2573 switch (hw->device_id) {
2574 case IXGBE_DEV_ID_X550EM_X_10G_T:
2575 case IXGBE_DEV_ID_X550EM_A_SGMII:
2576 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
2577 case IXGBE_DEV_ID_X550EM_A_10G_T:
2578 case IXGBE_DEV_ID_X550EM_A_SFP:
2579 case IXGBE_DEV_ID_X550EM_A_QSFP:
2580 /* Config MDIO clock speed before the first MDIO PHY access */
2581 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2582 hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
2583 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2584 break;
2585 case IXGBE_DEV_ID_X550EM_A_1G_T:
2586 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2587 /* Select fast MDIO clock speed for these devices */
2588 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2589 hlreg0 |= IXGBE_HLREG0_MDCSPD;
2590 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2591 break;
2592 default:
2593 break;
2594 }
2595 }
2596
2597 /**
2598 * ixgbe_reset_hw_X550em - Perform hardware reset
2599 * @hw: pointer to hardware structure
2600 *
2601 * Resets the hardware by resetting the transmit and receive units, masks
2602 * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
2603 * reset.
2604 */
2605 s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
2606 {
2607 ixgbe_link_speed link_speed;
2608 s32 status;
2609 s32 phy_status = IXGBE_SUCCESS;
2610 u32 ctrl = 0;
2611 u32 i;
2612 bool link_up = FALSE;
2613 u32 swfw_mask = hw->phy.phy_semaphore_mask;
2614
2615 DEBUGFUNC("ixgbe_reset_hw_X550em");
2616
2617 /* Call adapter stop to disable Tx/Rx and clear interrupts */
2618 status = hw->mac.ops.stop_adapter(hw);
2619 if (status != IXGBE_SUCCESS) {
2620 DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status);
2621 return status;
2622 }
2623 /* flush pending Tx transactions */
2624 ixgbe_clear_tx_pending(hw);
2625
2626 ixgbe_set_mdio_speed(hw);
2627
2628 /* PHY ops must be identified and initialized prior to reset */
2629 phy_status = hw->phy.ops.init(hw);
2630
2631 if (phy_status)
2632 DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
2633 phy_status);
2634
2635 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
2636 phy_status == IXGBE_ERR_PHY_ADDR_INVALID) {
2637 DEBUGOUT("Returning from reset HW due to PHY init failure\n");
2638 goto mac_reset_top;
2639 }
2640
2641 /* start the external PHY */
2642 if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
2643 status = ixgbe_init_ext_t_x550em(hw);
2644 if (status) {
2645 DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n",
2646 status);
2647 return status;
2648 }
2649 }
2650
2651 /* Setup SFP module if there is one present. */
2652 if (hw->phy.sfp_setup_needed) {
2653 phy_status = hw->mac.ops.setup_sfp(hw);
2654 hw->phy.sfp_setup_needed = FALSE;
2655 }
2656
2657 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
2658 goto mac_reset_top;
2659
2660 /* Reset PHY */
2661 if (!hw->phy.reset_disable && hw->phy.ops.reset) {
2662 if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP)
2663 return IXGBE_ERR_OVERTEMP;
2664 }
2665
2666 mac_reset_top:
2667 /* Issue global reset to the MAC. Needs to be SW reset if link is up.
2668 * If link reset is used when link is up, it might reset the PHY when
2669 * mng is using it. If link is down or the flag to force full link
2670 * reset is set, then perform link reset.
2671 */
2672 ctrl = IXGBE_CTRL_LNK_RST;
2673 if (!hw->force_full_reset) {
2674 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
2675 if (link_up)
2676 ctrl = IXGBE_CTRL_RST;
2677 }
2678
2679 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
2680 if (status != IXGBE_SUCCESS) {
2681 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
2682 "semaphore failed with %d", status);
2683 return IXGBE_ERR_SWFW_SYNC;
2684 }
2685 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
2686 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
2687 IXGBE_WRITE_FLUSH(hw);
2688 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
2689
2690 /* Poll for reset bit to self-clear meaning reset is complete */
2691 for (i = 0; i < 10; i++) {
2692 usec_delay(1);
2693 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
2694 if (!(ctrl & IXGBE_CTRL_RST_MASK))
2695 break;
2696 }
2697
2698 if (ctrl & IXGBE_CTRL_RST_MASK) {
2699 status = IXGBE_ERR_RESET_FAILED;
2700 DEBUGOUT("Reset polling failed to complete.\n");
2701 }
2702
2703 msec_delay(50);
2704
2705 /* Double resets are required for recovery from certain error
2706 * conditions. Between resets, it is necessary to stall to
2707 * allow time for any pending HW events to complete.
2708 */
2709 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
2710 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2711 goto mac_reset_top;
2712 }
2713
2714 /* Store the permanent mac address */
2715 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
2716
2717 /* Store MAC address from RAR0, clear receive address registers, and
2718 * clear the multicast table. Also reset num_rar_entries to 128,
2719 * since we modify this value when programming the SAN MAC address.
2720 */
2721 hw->mac.num_rar_entries = 128;
2722 hw->mac.ops.init_rx_addrs(hw);
2723
2724 ixgbe_set_mdio_speed(hw);
2725
2726 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
2727 ixgbe_setup_mux_ctl(hw);
2728
2729 if (status != IXGBE_SUCCESS)
2730 DEBUGOUT1("Reset HW failed, STATUS = %d\n", status);
2731
2732 if (phy_status != IXGBE_SUCCESS)
2733 status = phy_status;
2734
2735 return status;
2736 }
2737
2738 /**
2739 * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
2740 * @hw: pointer to hardware structure
2741 */
2742 s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
2743 {
2744 u32 status;
2745 u16 reg;
2746
2747 status = hw->phy.ops.read_reg(hw,
2748 IXGBE_MDIO_TX_VENDOR_ALARMS_3,
2749 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
2750 &reg);
2751
2752 if (status != IXGBE_SUCCESS)
2753 return status;
2754
2755 /* If PHY FW reset completed bit is set then this is the first
2756 * SW instance after a power on so the PHY FW must be un-stalled.
2757 */
2758 if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
2759 status = hw->phy.ops.read_reg(hw,
2760 IXGBE_MDIO_GLOBAL_RES_PR_10,
2761 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2762 &reg);
2763
2764 if (status != IXGBE_SUCCESS)
2765 return status;
2766
2767 reg &= ~IXGBE_MDIO_POWER_UP_STALL;
2768
2769 status = hw->phy.ops.write_reg(hw,
2770 IXGBE_MDIO_GLOBAL_RES_PR_10,
2771 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
2772 reg);
2773
2774 if (status != IXGBE_SUCCESS)
2775 return status;
2776 }
2777
2778 return status;
2779 }
2780
2781 /**
2782 * ixgbe_setup_kr_x550em - Configure the KR PHY.
2783 * @hw: pointer to hardware structure
2784 **/
2785 s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
2786 {
2787 /* leave link alone for 2.5G */
2788 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
2789 return IXGBE_SUCCESS;
2790
2791 if (ixgbe_check_reset_blocked(hw))
2792 return 0;
2793
2794 return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
2795 }
2796
2797 /**
2798 * ixgbe_setup_mac_link_sfp_x550em - Set up the internal/external PHY for SFP
2799 * @hw: pointer to hardware structure
2800 * @speed: new link speed
2801 * @autoneg_wait_to_complete: unused
2802 *
2803 * Configure the external PHY and the integrated KR PHY for SFP support.
2804 **/
2805 s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
2806 ixgbe_link_speed speed,
2807 bool autoneg_wait_to_complete)
2808 {
2809 s32 ret_val;
2810 u16 reg_slice, reg_val;
2811 bool setup_linear = FALSE;
2812 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2813
2814 /* Check if SFP module is supported and linear */
2815 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2816
2817 /* If no SFP module is present, return success, since there is no
2818 * reason to configure the CS4227 and an SFP-not-present error is not
2819 * treated as fatal in the setup MAC link flow.
2820 */
2821 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2822 return IXGBE_SUCCESS;
2823
2824 if (ret_val != IXGBE_SUCCESS)
2825 return ret_val;
2826
2827 /* Configure internal PHY for KR/KX. */
2828 ixgbe_setup_kr_speed_x550em(hw, speed);
2829
2830 /* Configure CS4227 LINE side to proper mode. */
2831 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
2832 (hw->bus.lan_id << 12);
2833 if (setup_linear)
2834 reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2835 else
2836 reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2837 ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
2838 reg_val);
2839 return ret_val;
2840 }
2841
2842 /**
2843 * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode
2844 * @hw: pointer to hardware structure
2845 * @speed: the link speed to force
2846 *
2847 * Configures the integrated PHY for native SFI mode. Used to connect the
2848 * internal PHY directly to an SFP cage, without autonegotiation.
2849 **/
2850 static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
2851 {
2852 struct ixgbe_mac_info *mac = &hw->mac;
2853 s32 status;
2854 u32 reg_val;
2855
2856 /* Disable all AN and force speed to 10G Serial. */
2857 status = mac->ops.read_iosf_sb_reg(hw,
2858 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2859 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2860 if (status != IXGBE_SUCCESS)
2861 return status;
2862
2863 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2864 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2865 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2866 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2867
2868 /* Select forced link speed for internal PHY. */
2869 switch (*speed) {
2870 case IXGBE_LINK_SPEED_10GB_FULL:
2871 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G;
2872 break;
2873 case IXGBE_LINK_SPEED_1GB_FULL:
2874 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
2875 break;
2876 case 0:
2877 /* media none (linkdown) */
2878 break;
2879 default:
2880 /* Other link speeds are not supported by internal PHY. */
2881 return IXGBE_ERR_LINK_SETUP;
2882 }
2883
2884 status = mac->ops.write_iosf_sb_reg(hw,
2885 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2886 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2887
2888 /* Toggle port SW reset by AN reset. */
2889 status = ixgbe_restart_an_internal_phy_x550em(hw);
2890
2891 return status;
2892 }
2893
2894 /**
2895 * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
2896 * @hw: pointer to hardware structure
2897 * @speed: new link speed
2898 * @autoneg_wait_to_complete: unused
2899 *
2900 * Configure the integrated PHY for SFP support.
2901 **/
2902 static s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
2903 ixgbe_link_speed speed,
2904 bool autoneg_wait_to_complete)
2905 {
2906 s32 ret_val;
2907 u16 reg_phy_ext;
2908 bool setup_linear = FALSE;
2909 u32 reg_slice, reg_phy_int, slice_offset;
2910
2911 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
2912
2913 /* Check if SFP module is supported and linear */
2914 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
2915
2916 /* If no SFP module is present, return success, since an SFP-not-present
2917 * error is not treated as fatal in the setup MAC link flow.
2918 */
2919 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
2920 return IXGBE_SUCCESS;
2921
2922 if (ret_val != IXGBE_SUCCESS)
2923 return ret_val;
2924
2925 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
2926 /* Configure internal PHY for native SFI based on module type */
2927 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
2928 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2929 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
2930
2931 if (ret_val != IXGBE_SUCCESS)
2932 return ret_val;
2933
2934 reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA;
2935 if (!setup_linear)
2936 reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR;
2937
2938 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
2939 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2940 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
2941
2942 if (ret_val != IXGBE_SUCCESS)
2943 return ret_val;
2944
2945 /* Setup SFI internal link. */
2946 ret_val = ixgbe_setup_sfi_x550a(hw, &speed);
2947 } else {
2948 /* Configure internal PHY for KR/KX. */
2949 ixgbe_setup_kr_speed_x550em(hw, speed);
2950
2951 if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) {
2952 /* Find Address */
2953 DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n");
2954 return IXGBE_ERR_PHY_ADDR_INVALID;
2955 }
2956
2957 /* Get external PHY SKU id */
2958 ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
2959 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2960
2961 if (ret_val != IXGBE_SUCCESS)
2962 return ret_val;
2963
2964 /* When configuring quad port CS4223, the MAC instance is part
2965 * of the slice offset.
2966 */
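/*
 * Example of the slice offset computed below (illustrative values): for a
 * quad-port CS4223 with bus.instance_id = 1 and bus.lan_id = 0, the offset
 * is (0 + (1 << 1)) << 12 = 0x2000, so the LINE side register written is
 * IXGBE_CS4227_LINE_SPARE24_LSB + 0x2000.
 */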
2967 if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
2968 slice_offset = (hw->bus.lan_id +
2969 (hw->bus.instance_id << 1)) << 12;
2970 else
2971 slice_offset = hw->bus.lan_id << 12;
2972
2973 /* Configure CS4227/CS4223 LINE side to proper mode. */
2974 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
2975
2976 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2977 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2978
2979 if (ret_val != IXGBE_SUCCESS)
2980 return ret_val;
2981
2982 reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) |
2983 (IXGBE_CS4227_EDC_MODE_SR << 1));
2984
2985 if (setup_linear)
2986 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
2987 else
2988 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
2989 ret_val = hw->phy.ops.write_reg(hw, reg_slice,
2990 IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
2991
2992 /* Flush previous write with a read */
2993 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
2994 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
2995 }
2996 return ret_val;
2997 }
2998
2999 /**
3000 * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration
3001 * @hw: pointer to hardware structure
3002 *
3003 * iXFI configuration needed for ixgbe_mac_X550EM_x devices.
3004 **/
3005 static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
3006 {
3007 struct ixgbe_mac_info *mac = &hw->mac;
3008 s32 status;
3009 u32 reg_val;
3010
3011 /* Disable training protocol FSM. */
3012 status = mac->ops.read_iosf_sb_reg(hw,
3013 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3014 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3015 if (status != IXGBE_SUCCESS)
3016 return status;
3017 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
3018 status = mac->ops.write_iosf_sb_reg(hw,
3019 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3020 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3021 if (status != IXGBE_SUCCESS)
3022 return status;
3023
3024 /* Disable Flex from training TXFFE. */
3025 status = mac->ops.read_iosf_sb_reg(hw,
3026 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
3027 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3028 if (status != IXGBE_SUCCESS)
3029 return status;
3030 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
3031 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
3032 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
3033 status = mac->ops.write_iosf_sb_reg(hw,
3034 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
3035 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3036 if (status != IXGBE_SUCCESS)
3037 return status;
3038 status = mac->ops.read_iosf_sb_reg(hw,
3039 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
3040 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3041 if (status != IXGBE_SUCCESS)
3042 return status;
3043 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
3044 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
3045 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
3046 status = mac->ops.write_iosf_sb_reg(hw,
3047 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
3048 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3049 if (status != IXGBE_SUCCESS)
3050 return status;
3051
3052 /* Enable override for coefficients. */
3053 status = mac->ops.read_iosf_sb_reg(hw,
3054 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
3055 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3056 if (status != IXGBE_SUCCESS)
3057 return status;
3058 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
3059 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
3060 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
3061 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
3062 status = mac->ops.write_iosf_sb_reg(hw,
3063 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
3064 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3065 return status;
3066 }
3067
3068 /**
3069 * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
3070 * @hw: pointer to hardware structure
3071 * @speed: the link speed to force
3072 *
3073 * Configures the integrated KR PHY to use iXFI mode. Used to connect an
3074 * internal and external PHY at a specific speed, without autonegotiation.
3075 **/
3076 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
3077 {
3078 struct ixgbe_mac_info *mac = &hw->mac;
3079 s32 status;
3080 u32 reg_val;
3081
3082 /* iXFI is only supported with X552 */
3083 if (mac->type != ixgbe_mac_X550EM_x)
3084 return IXGBE_ERR_LINK_SETUP;
3085
3086 /* Disable AN and force speed to 10G Serial. */
3087 status = mac->ops.read_iosf_sb_reg(hw,
3088 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3089 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3090 if (status != IXGBE_SUCCESS)
3091 return status;
3092
3093 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
3094 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
3095
3096 /* Select forced link speed for internal PHY. */
3097 switch (*speed) {
3098 case IXGBE_LINK_SPEED_10GB_FULL:
3099 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3100 break;
3101 case IXGBE_LINK_SPEED_1GB_FULL:
3102 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
3103 break;
3104 default:
3105 /* Other link speeds are not supported by internal KR PHY. */
3106 return IXGBE_ERR_LINK_SETUP;
3107 }
3108
3109 status = mac->ops.write_iosf_sb_reg(hw,
3110 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3111 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3112 if (status != IXGBE_SUCCESS)
3113 return status;
3114
3115 /* Additional configuration needed for x550em_x */
3116 if (hw->mac.type == ixgbe_mac_X550EM_x) {
3117 status = ixgbe_setup_ixfi_x550em_x(hw);
3118 if (status != IXGBE_SUCCESS)
3119 return status;
3120 }
3121
3122 /* Toggle port SW reset by AN reset. */
3123 status = ixgbe_restart_an_internal_phy_x550em(hw);
3124
3125 return status;
3126 }
3127
3128 /**
3129 * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status
3130 * @hw: address of hardware structure
3131 * @link_up: address of boolean to indicate link status
3132 *
3133 * Returns error code if unable to get link status.
3134 */
3135 static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
3136 {
3137 u32 ret;
3138 u16 autoneg_status;
3139
3140 *link_up = FALSE;
3141
3142 /* read this twice back to back to indicate current status */
3143 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
3144 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3145 &autoneg_status);
3146 if (ret != IXGBE_SUCCESS)
3147 return ret;
3148
3149 ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
3150 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3151 &autoneg_status);
3152 if (ret != IXGBE_SUCCESS)
3153 return ret;
3154
3155 *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
3156
3157 return IXGBE_SUCCESS;
3158 }
3159
3160 /**
3161 * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
3162 * @hw: pointer to hardware structure
3163 *
3164 * Configures the link between the integrated KR PHY and the external X557 PHY
3165 * The driver will call this function when it gets a link status change
3166 * interrupt from the X557 PHY. This function configures the link speed
3167 * between the PHYs to match the link speed of the BASE-T link.
3168 *
3169 * A return of a non-zero value indicates an error, and the base driver should
3170 * not report link up.
3171 */
3172 s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
3173 {
3174 ixgbe_link_speed force_speed;
3175 bool link_up;
3176 u32 status;
3177 u16 speed;
3178
3179 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
3180 return IXGBE_ERR_CONFIG;
3181
3182 if (hw->mac.type == ixgbe_mac_X550EM_x &&
3183 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
3184 /* If link is down, there is no setup necessary so return */
3185 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3186 if (status != IXGBE_SUCCESS)
3187 return status;
3188
3189 if (!link_up)
3190 return IXGBE_SUCCESS;
3191
3192 status = hw->phy.ops.read_reg(hw,
3193 IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3194 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3195 &speed);
3196 if (status != IXGBE_SUCCESS)
3197 return status;
3198
3199 /* If link is still down - no setup is required so return */
3200 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3201 if (status != IXGBE_SUCCESS)
3202 return status;
3203 if (!link_up)
3204 return IXGBE_SUCCESS;
3205
3206 /* clear everything but the speed and duplex bits */
3207 speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
3208
3209 switch (speed) {
3210 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
3211 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
3212 break;
3213 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
3214 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
3215 break;
3216 default:
3217 /* Internal PHY does not support anything else */
3218 return IXGBE_ERR_INVALID_LINK_SETTINGS;
3219 }
3220
3221 return ixgbe_setup_ixfi_x550em(hw, &force_speed);
3222 } else {
3223 speed = IXGBE_LINK_SPEED_10GB_FULL |
3224 IXGBE_LINK_SPEED_1GB_FULL;
3225 return ixgbe_setup_kr_speed_x550em(hw, speed);
3226 }
3227 }
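/*
 * Usage sketch (illustrative, not part of the original source): the base
 * driver's X557 link-status-change handler typically reaches this function
 * through the PHY ops table, e.g.
 *
 *	if (hw->phy.ops.setup_internal_link != NULL)
 *		err = hw->phy.ops.setup_internal_link(hw);
 *
 * and, per the description above, refrains from reporting link up when a
 * non-zero value is returned.
 */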
3228
3229 /**
3230 * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback.
3231 * @hw: pointer to hardware structure
3232 *
3233 * Configures the integrated KR PHY to use internal loopback mode.
3234 **/
3235 s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
3236 {
3237 s32 status;
3238 u32 reg_val;
3239
3240 /* Disable AN and force speed to 10G Serial. */
3241 status = hw->mac.ops.read_iosf_sb_reg(hw,
3242 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3243 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3244 if (status != IXGBE_SUCCESS)
3245 return status;
3246 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
3247 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
3248 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
3249 status = hw->mac.ops.write_iosf_sb_reg(hw,
3250 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
3251 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3252 if (status != IXGBE_SUCCESS)
3253 return status;
3254
3255 /* Set near-end loopback clocks. */
3256 status = hw->mac.ops.read_iosf_sb_reg(hw,
3257 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3258 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3259 if (status != IXGBE_SUCCESS)
3260 return status;
3261 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
3262 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
3263 status = hw->mac.ops.write_iosf_sb_reg(hw,
3264 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
3265 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3266 if (status != IXGBE_SUCCESS)
3267 return status;
3268
3269 /* Set loopback enable. */
3270 status = hw->mac.ops.read_iosf_sb_reg(hw,
3271 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3272 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3273 if (status != IXGBE_SUCCESS)
3274 return status;
3275 reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
3276 status = hw->mac.ops.write_iosf_sb_reg(hw,
3277 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
3278 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3279 if (status != IXGBE_SUCCESS)
3280 return status;
3281
3282 /* Training bypass. */
3283 status = hw->mac.ops.read_iosf_sb_reg(hw,
3284 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3285 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
3286 if (status != IXGBE_SUCCESS)
3287 return status;
3288 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
3289 status = hw->mac.ops.write_iosf_sb_reg(hw,
3290 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
3291 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
3292
3293 return status;
3294 }
3295
3296 /**
3297 * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command.
3298 * The required semaphores are acquired and released within this function.
3299 * @hw: pointer to hardware structure
3300 * @offset: offset of word in the EEPROM to read
3301 * @data: word read from the EEPROM
3302 *
3303 * Reads a 16 bit word from the EEPROM using the hostif.
3304 **/
3305 s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
3306 {
3307 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3308 struct ixgbe_hic_read_shadow_ram buffer;
3309 s32 status;
3310
3311 DEBUGFUNC("ixgbe_read_ee_hostif_X550");
3312 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3313 buffer.hdr.req.buf_lenh = 0;
3314 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3315 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3316
3317 /* convert offset from words to bytes */
3318 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3319 /* one word */
3320 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3321 buffer.pad2 = 0;
3322 buffer.data = 0;
3323 buffer.pad3 = 0;
3324
3325 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3326 if (status)
3327 return status;
3328
3329 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3330 IXGBE_HI_COMMAND_TIMEOUT);
3331 if (!status) {
3332 *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
3333 FW_NVM_DATA_OFFSET);
3334 }
3335
3336 hw->mac.ops.release_swfw_sync(hw, mask);
3337 return status;
3338 }
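/*
 * Usage sketch (illustrative): this helper is normally reached through the
 * EEPROM ops table rather than called directly, e.g.
 *
 *	u16 word;
 *	s32 err = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &word);
 *
 * where ops.read points at ixgbe_read_ee_hostif_X550 on X550-class parts.
 */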
3339
3340 /**
3341 * ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif
3342 * @hw: pointer to hardware structure
3343 * @offset: offset of word in the EEPROM to read
3344 * @words: number of words
3345 * @data: word(s) read from the EEPROM
3346 *
3347 * Reads one or more 16 bit words from the EEPROM using the hostif.
3348 **/
3349 s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3350 u16 offset, u16 words, u16 *data)
3351 {
3352 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
3353 struct ixgbe_hic_read_shadow_ram buffer;
3354 u32 current_word = 0;
3355 u16 words_to_read;
3356 s32 status;
3357 u32 i;
3358
3359 DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550");
3360
3361 /* Take semaphore for the entire operation. */
3362 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
3363 if (status) {
3364 DEBUGOUT("EEPROM read buffer - semaphore failed\n");
3365 return status;
3366 }
3367
3368 while (words) {
3369 if (words > FW_MAX_READ_BUFFER_SIZE / 2)
3370 words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
3371 else
3372 words_to_read = words;
3373
3374 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
3375 buffer.hdr.req.buf_lenh = 0;
3376 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
3377 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3378
3379 /* convert offset from words to bytes */
3380 buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2);
3381 buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2);
3382 buffer.pad2 = 0;
3383 buffer.data = 0;
3384 buffer.pad3 = 0;
3385
3386 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
3387 IXGBE_HI_COMMAND_TIMEOUT);
3388
3389 if (status) {
3390 DEBUGOUT("Host interface command failed\n");
3391 goto out;
3392 }
3393
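/*
 * Each 32-bit read from the FLEX_MNG response area below carries two
 * consecutive 16-bit EEPROM words: the low half is stored first, then the
 * value is shifted right by 16 for the next word. For example, a register
 * value of 0xBEEFCAFE yields data[n] = 0xCAFE and data[n+1] = 0xBEEF
 * (when a second word remains to be read).
 */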
3394 for (i = 0; i < words_to_read; i++) {
3395 u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
3396 2 * i;
3397 u32 value = IXGBE_READ_REG(hw, reg);
3398
3399 data[current_word] = (u16)(value & 0xffff);
3400 current_word++;
3401 i++;
3402 if (i < words_to_read) {
3403 value >>= 16;
3404 data[current_word] = (u16)(value & 0xffff);
3405 current_word++;
3406 }
3407 }
3408 words -= words_to_read;
3409 }
3410
3411 out:
3412 hw->mac.ops.release_swfw_sync(hw, mask);
3413 return status;
3414 }
3415
3416 /**
3417 * ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
3418 * @hw: pointer to hardware structure
3419 * @offset: offset of word in the EEPROM to write
3420 * @data: word to write to the EEPROM
3421 *
3422 * Write a 16 bit word to the EEPROM using the hostif.
3423 **/
3424 s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
3425 u16 data)
3426 {
3427 s32 status;
3428 struct ixgbe_hic_write_shadow_ram buffer;
3429
3430 DEBUGFUNC("ixgbe_write_ee_hostif_data_X550");
3431
3432 buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
3433 buffer.hdr.req.buf_lenh = 0;
3434 buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
3435 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
3436
3437 /* one word */
3438 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
3439 buffer.data = data;
3440 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
3441
3442 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3443 sizeof(buffer),
3444 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
3445 if (status != IXGBE_SUCCESS) {
3446 DEBUGOUT2("for offset %04x failed with status %d\n",
3447 offset, status);
3448 return status;
3449 }
3450 if (buffer.hdr.rsp.buf_lenh_status != FW_CEM_RESP_STATUS_SUCCESS) {
3451 DEBUGOUT2("for offset %04x host interface return status %02x\n",
3452 offset, buffer.hdr.rsp.buf_lenh_status);
3453 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
3454 }
3455
3456 return status;
3457 }
3458
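/*
 * Note: ixgbe_write_ee_hostif_data_X550() assumes the caller already holds
 * the EEPROM semaphore; ixgbe_write_ee_hostif_X550() below is the locked
 * wrapper that takes and releases IXGBE_GSSR_EEP_SM around a single-word
 * write.
 */
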
3459 /**
3460 * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
3461 * @hw: pointer to hardware structure
3462 * @offset: offset of word in the EEPROM to write
3463  * @data: word to write to the EEPROM
3464 *
3465 * Write a 16 bit word to the EEPROM using the hostif.
3466 **/
3467 s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
3468 u16 data)
3469 {
3470 s32 status = IXGBE_SUCCESS;
3471
3472 DEBUGFUNC("ixgbe_write_ee_hostif_X550");
3473
3474 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
3475 IXGBE_SUCCESS) {
3476 status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
3477 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3478 } else {
3479 DEBUGOUT("write ee hostif failed to get semaphore");
3480 status = IXGBE_ERR_SWFW_SYNC;
3481 }
3482
3483 return status;
3484 }
3485
3486 /**
3487 * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
3488 * @hw: pointer to hardware structure
3489 * @offset: offset of word in the EEPROM to write
3490 * @words: number of words
3491  * @data: word(s) to write to the EEPROM
3492  *
3493  * Write one or more 16 bit words to the EEPROM using the hostif.
3494 **/
3495 s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
3496 u16 offset, u16 words, u16 *data)
3497 {
3498 s32 status = IXGBE_SUCCESS;
3499 u32 i = 0;
3500
3501 DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550");
3502
3503 /* Take semaphore for the entire operation. */
3504 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3505 if (status != IXGBE_SUCCESS) {
3506 DEBUGOUT("EEPROM write buffer - semaphore failed\n");
3507 goto out;
3508 }
3509
3510 for (i = 0; i < words; i++) {
3511 status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
3512 data[i]);
3513
3514 if (status != IXGBE_SUCCESS) {
3515 DEBUGOUT("Eeprom buffered write failed\n");
3516 break;
3517 }
3518 }
3519
3520 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
3521 out:
3522
3523 return status;
3524 }
3525
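/*
 * Illustrative sketch (offset and values are placeholders): after words are
 * written to the shadow RAM, the checksum should be recomputed and the
 * flash update requested, e.g.
 *
 *	u16 vals[2] = { 0x1234, 0x5678 };
 *
 *	if (ixgbe_write_ee_hostif_buffer_X550(hw, 0x20, 2, vals) ==
 *	    IXGBE_SUCCESS)
 *		(void)ixgbe_update_eeprom_checksum_X550(hw);
 */
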
3526 /**
3527 * ixgbe_checksum_ptr_x550 - Checksum one pointer region
3528 * @hw: pointer to hardware structure
3529 * @ptr: pointer offset in eeprom
3530  * @size: size of the section pointed to by ptr; if 0, the first word is used as the size
3531  * @csum: address of checksum to update
3532  * @buffer: optional pointer to a buffer holding the EEPROM image; NULL to read from hardware
3533 * @buffer_size: size of buffer
3534 *
3535 * Returns error status for any failure
3536 */
3537 static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
3538 u16 size, u16 *csum, u16 *buffer,
3539 u32 buffer_size)
3540 {
3541 u16 buf[256];
3542 s32 status;
3543 u16 length, bufsz, i, start;
3544 u16 *local_buffer;
3545
3546 bufsz = sizeof(buf) / sizeof(buf[0]);
3547
3548 /* Read a chunk at the pointer location */
3549 if (!buffer) {
3550 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
3551 if (status) {
3552 DEBUGOUT("Failed to read EEPROM image\n");
3553 return status;
3554 }
3555 local_buffer = buf;
3556 } else {
3557 if (buffer_size < ptr)
3558 return IXGBE_ERR_PARAM;
3559 local_buffer = &buffer[ptr];
3560 }
3561
3562 if (size) {
3563 start = 0;
3564 length = size;
3565 } else {
3566 start = 1;
3567 length = local_buffer[0];
3568
3569 /* Skip pointer section if length is invalid. */
3570 if (length == 0xFFFF || length == 0 ||
3571 (ptr + length) >= hw->eeprom.word_size)
3572 return IXGBE_SUCCESS;
3573 }
3574
3575 if (buffer && ((u32)start + (u32)length > buffer_size))
3576 return IXGBE_ERR_PARAM;
3577
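/*
 * When no caller-supplied buffer is used, the loop below walks the region
 * one bufsz-sized chunk at a time: whenever the index reaches the end of
 * buf, ptr is advanced and the next chunk is read from the shadow RAM.
 */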
3578 for (i = start; length; i++, length--) {
3579 if (i == bufsz && !buffer) {
3580 ptr += bufsz;
3581 i = 0;
3582 if (length < bufsz)
3583 bufsz = length;
3584
3585 /* Read a chunk at the pointer location */
3586 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
3587 bufsz, buf);
3588 if (status) {
3589 DEBUGOUT("Failed to read EEPROM image\n");
3590 return status;
3591 }
3592 }
3593 *csum += local_buffer[i];
3594 }
3595 return IXGBE_SUCCESS;
3596 }
3597
3598 /**
3599 * ixgbe_calc_checksum_X550 - Calculates and returns the checksum
3600 * @hw: pointer to hardware structure
3601  * @buffer: optional pointer to a buffer holding the EEPROM image; NULL to read from hardware
3602 * @buffer_size: size of buffer
3603 *
3604 * Returns a negative error code on error, or the 16-bit checksum
3605 **/
3606 s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
3607 {
3608 u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
3609 u16 *local_buffer;
3610 s32 status;
3611 u16 checksum = 0;
3612 u16 pointer, i, size;
3613
3614 DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550");
3615
3616 hw->eeprom.ops.init_params(hw);
3617
3618 if (!buffer) {
3619 /* Read pointer area */
3620 status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
3621 IXGBE_EEPROM_LAST_WORD + 1,
3622 eeprom_ptrs);
3623 if (status) {
3624 DEBUGOUT("Failed to read EEPROM image\n");
3625 return status;
3626 }
3627 local_buffer = eeprom_ptrs;
3628 } else {
3629 if (buffer_size < IXGBE_EEPROM_LAST_WORD)
3630 return IXGBE_ERR_PARAM;
3631 local_buffer = buffer;
3632 }
3633
3634 /*
3635 * For X550 hardware include 0x0-0x41 in the checksum, skip the
3636 * checksum word itself
3637 */
3638 for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
3639 if (i != IXGBE_EEPROM_CHECKSUM)
3640 checksum += local_buffer[i];
3641
3642 /*
3643 * Include all data from pointers 0x3, 0x6-0xE. This excludes the
3644 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
3645 */
3646 for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
3647 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
3648 continue;
3649
3650 pointer = local_buffer[i];
3651
3652 /* Skip pointer section if the pointer is invalid. */
3653 if (pointer == 0xFFFF || pointer == 0 ||
3654 pointer >= hw->eeprom.word_size)
3655 continue;
3656
3657 switch (i) {
3658 case IXGBE_PCIE_GENERAL_PTR:
3659 size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
3660 break;
3661 case IXGBE_PCIE_CONFIG0_PTR:
3662 case IXGBE_PCIE_CONFIG1_PTR:
3663 size = IXGBE_PCIE_CONFIG_SIZE;
3664 break;
3665 default:
3666 size = 0;
3667 break;
3668 }
3669
3670 status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
3671 buffer, buffer_size);
3672 if (status)
3673 return status;
3674 }
3675
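/*
 * The stored checksum is defined so that the 16-bit sum of all checksummed
 * words plus the checksum word itself equals IXGBE_EEPROM_SUM, hence the
 * complement below.
 */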
3676 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
3677
3678 return (s32)checksum;
3679 }
3680
3681 /**
3682 * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
3683 * @hw: pointer to hardware structure
3684 *
3685 * Returns a negative error code on error, or the 16-bit checksum
3686 **/
3687 s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
3688 {
3689 return ixgbe_calc_checksum_X550(hw, NULL, 0);
3690 }
3691
3692 /**
3693 * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
3694 * @hw: pointer to hardware structure
3695 * @checksum_val: calculated checksum
3696 *
3697 * Performs checksum calculation and validates the EEPROM checksum. If the
3698 * caller does not need checksum_val, the value can be NULL.
3699 **/
3700 s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
3701 {
3702 s32 status;
3703 u16 checksum;
3704 u16 read_checksum = 0;
3705
3706 DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550");
3707
3708 /* Read the first word from the EEPROM. If this times out or fails, do
3709 * not continue or we could be in for a very long wait while every
3710 * EEPROM read fails
3711 */
3712 status = hw->eeprom.ops.read(hw, 0, &checksum);
3713 if (status) {
3714 DEBUGOUT("EEPROM read failed\n");
3715 return status;
3716 }
3717
3718 status = hw->eeprom.ops.calc_checksum(hw);
3719 if (status < 0)
3720 return status;
3721
3722 checksum = (u16)(status & 0xffff);
3723
3724 status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3725 &read_checksum);
3726 if (status)
3727 return status;
3728
3729 /* Verify read checksum from EEPROM is the same as
3730 * calculated checksum
3731 */
3732 if (read_checksum != checksum) {
3733 status = IXGBE_ERR_EEPROM_CHECKSUM;
3734 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
3735 "Invalid EEPROM checksum");
3736 }
3737
3738 /* If the user cares, return the calculated checksum */
3739 if (checksum_val)
3740 *checksum_val = checksum;
3741
3742 return status;
3743 }
3744
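/*
 * Illustrative sketch of a caller (error handling abbreviated): validate
 * the image and capture the computed checksum.
 *
 *	u16 csum;
 *
 *	if (ixgbe_validate_eeprom_checksum_X550(hw, &csum) ==
 *	    IXGBE_ERR_EEPROM_CHECKSUM)
 *		DEBUGOUT("EEPROM image is corrupt\n");
 */
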
3745 /**
3746 * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
3747 * @hw: pointer to hardware structure
3748 *
3749 * After writing EEPROM to shadow RAM using EEWR register, software calculates
3750 * checksum and updates the EEPROM and instructs the hardware to update
3751 * the flash.
3752 **/
3753 s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
3754 {
3755 s32 status;
3756 u16 checksum = 0;
3757
3758 DEBUGFUNC("ixgbe_update_eeprom_checksum_X550");
3759
3760 /* Read the first word from the EEPROM. If this times out or fails, do
3761 * not continue or we could be in for a very long wait while every
3762 * EEPROM read fails
3763 */
3764 status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
3765 if (status) {
3766 DEBUGOUT("EEPROM read failed\n");
3767 return status;
3768 }
3769
3770 status = ixgbe_calc_eeprom_checksum_X550(hw);
3771 if (status < 0)
3772 return status;
3773
3774 checksum = (u16)(status & 0xffff);
3775
3776 status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
3777 checksum);
3778 if (status)
3779 return status;
3780
3781 status = ixgbe_update_flash_X550(hw);
3782
3783 return status;
3784 }
3785
3786 /**
3787 * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
3788 * @hw: pointer to hardware structure
3789 *
3790 * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
3791 **/
3792 s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
3793 {
3794 s32 status = IXGBE_SUCCESS;
3795 union ixgbe_hic_hdr2 buffer;
3796
3797 DEBUGFUNC("ixgbe_update_flash_X550");
3798
3799 buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
3800 buffer.req.buf_lenh = 0;
3801 buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
3802 buffer.req.checksum = FW_DEFAULT_CHECKSUM;
3803
3804 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
3805 sizeof(buffer),
3806 IXGBE_HI_COMMAND_TIMEOUT, FALSE);
3807
3808 return status;
3809 }
3810
3811 /**
3812 * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type
3813 * @hw: pointer to hardware structure
3814 *
3815 * Determines physical layer capabilities of the current configuration.
3816 **/
3817 u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
3818 {
3819 u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
3820 u16 ext_ability = 0;
3821
3822 DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
3823
3824 hw->phy.ops.identify(hw);
3825
3826 switch (hw->phy.type) {
3827 case ixgbe_phy_x550em_kr:
3828 if (hw->mac.type == ixgbe_mac_X550EM_a) {
3829 if (hw->phy.nw_mng_if_sel &
3830 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
3831 physical_layer =
3832 IXGBE_PHYSICAL_LAYER_2500BASE_KX;
3833 break;
3834 } else if (hw->device_id ==
3835 IXGBE_DEV_ID_X550EM_A_KR_L) {
3836 physical_layer =
3837 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3838 break;
3839 }
3840 }
3841 /* fall through */
3842 case ixgbe_phy_x550em_xfi:
3843 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
3844 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3845 break;
3846 case ixgbe_phy_x550em_kx4:
3847 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
3848 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3849 break;
3850 case ixgbe_phy_x550em_ext_t:
3851 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
3852 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
3853 &ext_ability);
3854 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
3855 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
3856 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
3857 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3858 break;
3859 case ixgbe_phy_fw:
3860 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL)
3861 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
3862 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL)
3863 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
3864 if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL)
3865 physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
3866 break;
3867 case ixgbe_phy_sgmii:
3868 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
3869 break;
3870 case ixgbe_phy_ext_1g_t:
3871 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
3872 break;
3873 default:
3874 break;
3875 }
3876
3877 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
3878 physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
3879
3880 return physical_layer;
3881 }
3882
3883 /**
3884 * ixgbe_get_bus_info_X550em - Set PCI bus info
3885 * @hw: pointer to hardware structure
3886 *
3887 * Sets bus link width and speed to unknown because X550em is
3888 * not a PCI device.
3889 **/
3890 s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
3891 {
3892
3893 DEBUGFUNC("ixgbe_get_bus_info_x550em");
3894
3895 hw->bus.width = ixgbe_bus_width_unknown;
3896 hw->bus.speed = ixgbe_bus_speed_unknown;
3897
3898 hw->mac.ops.set_lan_id(hw);
3899
3900 return IXGBE_SUCCESS;
3901 }
3902
3903 /**
3904 * ixgbe_disable_rx_x550 - Disable RX unit
3905 * @hw: pointer to hardware structure
3906 *
3907  * Disables the Rx DMA unit for x550
3908 **/
3909 void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
3910 {
3911 u32 rxctrl, pfdtxgswc;
3912 s32 status;
3913 struct ixgbe_hic_disable_rxen fw_cmd;
3914
3915 DEBUGFUNC("ixgbe_disable_rx_dma_x550");
3916
3917 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3918 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3919 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
3920 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
3921 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
3922 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
3923 hw->mac.set_lben = TRUE;
3924 } else {
3925 hw->mac.set_lben = FALSE;
3926 }
3927
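/* Ask firmware to disable RX so it can quiesce the unit; the VT loopback
 * (LBEN) state cleared above is remembered in set_lben so a later RX
 * enable can restore it. If the command fails, the direct RXCTRL write
 * below is the fallback.
 */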
3928 fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
3929 fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
3930 fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
3931 fw_cmd.port_number = (u8)hw->bus.lan_id;
3932
3933 status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
3934 sizeof(struct ixgbe_hic_disable_rxen),
3935 IXGBE_HI_COMMAND_TIMEOUT, TRUE);
3936
3937 /* If we fail - disable RX using register write */
3938 if (status) {
3939 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3940 if (rxctrl & IXGBE_RXCTRL_RXEN) {
3941 rxctrl &= ~IXGBE_RXCTRL_RXEN;
3942 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
3943 }
3944 }
3945 }
3946 }
3947
3948 /**
3949 * ixgbe_enter_lplu_t_x550em - Transition to low power states
3950 * @hw: pointer to hardware structure
3951 *
3952 * Configures Low Power Link Up on transition to low power states
3953 * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
3954 * X557 PHY immediately prior to entering LPLU.
3955 **/
3956 s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
3957 {
3958 u16 an_10g_cntl_reg, autoneg_reg, speed;
3959 s32 status;
3960 ixgbe_link_speed lcd_speed;
3961 u32 save_autoneg;
3962 bool link_up;
3963
3964 /* SW LPLU not required on later HW revisions. */
3965 if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
3966 (IXGBE_FUSES0_REV_MASK &
3967 IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
3968 return IXGBE_SUCCESS;
3969
3970 /* If blocked by MNG FW, then don't restart AN */
3971 if (ixgbe_check_reset_blocked(hw))
3972 return IXGBE_SUCCESS;
3973
3974 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3975 if (status != IXGBE_SUCCESS)
3976 return status;
3977
3978 status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3);
3979
3980 if (status != IXGBE_SUCCESS)
3981 return status;
3982
3983 /* If link is down, LPLU is disabled in NVM, or both WoL and manageability
3984 * are disabled, then force link down by entering low power mode.
3985 */
3986 if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) ||
3987 !(hw->wol_enabled || ixgbe_mng_present(hw)))
3988 return ixgbe_set_copper_phy_power(hw, FALSE);
3989
3990 /* Determine LCD */
3991 status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
3992
3993 if (status != IXGBE_SUCCESS)
3994 return status;
3995
3996 /* If no valid LCD link speed, then force link down and exit. */
3997 if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
3998 return ixgbe_set_copper_phy_power(hw, FALSE);
3999
4000 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
4001 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4002 &speed);
4003
4004 if (status != IXGBE_SUCCESS)
4005 return status;
4006
4007 /* If no link now, speed is invalid so take link down */
4008 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
4009 if (status != IXGBE_SUCCESS)
4010 return ixgbe_set_copper_phy_power(hw, FALSE);
4011
4012 /* clear everything but the speed bits */
4013 speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK;
4014
4015 /* If current speed is already LCD, then exit. */
4016 if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) &&
4017 (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
4018 ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
4019 (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
4020 return status;
4021
4022 /* Clear AN completed indication */
4023 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
4024 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4025 &autoneg_reg);
4026
4027 if (status != IXGBE_SUCCESS)
4028 return status;
4029
4030 status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
4031 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4032 &an_10g_cntl_reg);
4033
4034 if (status != IXGBE_SUCCESS)
4035 return status;
4036
4037 status = hw->phy.ops.read_reg(hw,
4038 IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
4039 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4040 &autoneg_reg);
4041
4042 if (status != IXGBE_SUCCESS)
4043 return status;
4044
4045 save_autoneg = hw->phy.autoneg_advertised;
4046
4047 /* Set up link at the lowest common link speed */
4048 status = hw->mac.ops.setup_link(hw, lcd_speed, FALSE);
4049
4050 /* restore autoneg from before setting lplu speed */
4051 hw->phy.autoneg_advertised = save_autoneg;
4052
4053 return status;
4054 }
4055
4056 /**
4057 * ixgbe_get_lcd_t_x550em - Determine lowest common denominator
4058 * @hw: pointer to hardware structure
4059 * @lcd_speed: pointer to lowest common link speed
4060 *
4061 * Determine lowest common link speed with link partner.
4062 **/
4063 s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed)
4064 {
4065 u16 an_lp_status;
4066 s32 status;
4067 u16 word = hw->eeprom.ctrl_word_3;
4068
4069 *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
4070
4071 status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS,
4072 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4073 &an_lp_status);
4074
4075 if (status != IXGBE_SUCCESS)
4076 return status;
4077
4078 /* If link partner advertised 1G, return 1G */
4079 if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
4080 *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
4081 return status;
4082 }
4083
4084 /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
4085 if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
4086 (word & NVM_INIT_CTRL_3_D10GMP_PORT0))
4087 return status;
4088
4089 /* Link partner not capable of lower speeds, return 10G */
4090 *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
4091 return status;
4092 }
4093
4094 /**
4095 * ixgbe_setup_fc_X550em - Set up flow control
4096 * @hw: pointer to hardware structure
4097 *
4098 * Called at init time to set up flow control.
4099 **/
4100 s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
4101 {
4102 s32 ret_val = IXGBE_SUCCESS;
4103 u32 pause, asm_dir, reg_val;
4104
4105 DEBUGFUNC("ixgbe_setup_fc_X550em");
4106
4107 /* Validate the requested mode */
4108 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4109 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4110 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4111 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4112 goto out;
4113 }
4114
4115 /* 10gig parts do not have a word in the EEPROM to determine the
4116 * default flow control setting, so we explicitly set it to full.
4117 */
4118 if (hw->fc.requested_mode == ixgbe_fc_default)
4119 hw->fc.requested_mode = ixgbe_fc_full;
4120
4121 /* Determine PAUSE and ASM_DIR bits. */
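/*
 * The advertisement mapping used below is:
 *   none     -> PAUSE = 0, ASM_DIR = 0
 *   tx_pause -> PAUSE = 0, ASM_DIR = 1
 *   rx_pause -> treated as full, since Rx-only PAUSE cannot be
 *               advertised by itself
 *   full     -> PAUSE = 1, ASM_DIR = 1
 */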
4122 switch (hw->fc.requested_mode) {
4123 case ixgbe_fc_none:
4124 pause = 0;
4125 asm_dir = 0;
4126 break;
4127 case ixgbe_fc_tx_pause:
4128 pause = 0;
4129 asm_dir = 1;
4130 break;
4131 case ixgbe_fc_rx_pause:
4132 /* Rx Flow control is enabled and Tx Flow control is
4133 * disabled by software override. Since there really
4134 * isn't a way to advertise that we are capable of RX
4135 * Pause ONLY, we will advertise that we support both
4136 * symmetric and asymmetric Rx PAUSE, as such we fall
4137 * through to the fc_full statement. Later, we will
4138 * disable the adapter's ability to send PAUSE frames.
4139 */
4140 case ixgbe_fc_full:
4141 pause = 1;
4142 asm_dir = 1;
4143 break;
4144 default:
4145 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4146 "Flow control param set incorrectly\n");
4147 ret_val = IXGBE_ERR_CONFIG;
4148 goto out;
4149 }
4150
4151 switch (hw->device_id) {
4152 case IXGBE_DEV_ID_X550EM_X_KR:
4153 case IXGBE_DEV_ID_X550EM_A_KR:
4154 case IXGBE_DEV_ID_X550EM_A_KR_L:
4155 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
4156 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4157 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
4158 if (ret_val != IXGBE_SUCCESS)
4159 goto out;
4160 reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4161 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4162 if (pause)
4163 reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4164 if (asm_dir)
4165 reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4166 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
4167 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4168 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
4169
4170 /* This device does not fully support AN. */
4171 hw->fc.disable_fc_autoneg = TRUE;
4172 break;
4173 case IXGBE_DEV_ID_X550EM_X_XFI:
4174 hw->fc.disable_fc_autoneg = TRUE;
4175 break;
4176 default:
4177 break;
4178 }
4179
4180 out:
4181 return ret_val;
4182 }
4183
4184 /**
4185 * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37
4186 * @hw: pointer to hardware structure
4187 *
4188 * Enable flow control according to IEEE clause 37.
4189 **/
4190 void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
4191 {
4192 u32 link_s1, lp_an_page_low, an_cntl_1;
4193 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4194 ixgbe_link_speed speed;
4195 bool link_up;
4196
4197 /* AN should have completed when the cable was plugged in.
4198 * Look for reasons to bail out. Bail out if:
4199 * - FC autoneg is disabled, or if
4200 * - link is not up.
4201 */
4202 if (hw->fc.disable_fc_autoneg) {
4203 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4204 "Flow control autoneg is disabled");
4205 goto out;
4206 }
4207
4208 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
4209 if (!link_up) {
4210 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4211 goto out;
4212 }
4213
4214 /* Check if auto-negotiation has completed */
4215 status = hw->mac.ops.read_iosf_sb_reg(hw,
4216 IXGBE_KRM_LINK_S1(hw->bus.lan_id),
4217 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1);
4218
4219 if (status != IXGBE_SUCCESS ||
4220 (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
4221 DEBUGOUT("Auto-Negotiation did not complete\n");
4222 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4223 goto out;
4224 }
4225
4226 /* Read the 10g AN autoc and LP ability registers and resolve
4227 * local flow control settings accordingly
4228 */
4229 status = hw->mac.ops.read_iosf_sb_reg(hw,
4230 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4231 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1);
4232
4233 if (status != IXGBE_SUCCESS) {
4234 DEBUGOUT("Auto-Negotiation did not complete\n");
4235 goto out;
4236 }
4237
4238 status = hw->mac.ops.read_iosf_sb_reg(hw,
4239 IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id),
4240 IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low);
4241
4242 if (status != IXGBE_SUCCESS) {
4243 DEBUGOUT("Auto-Negotiation did not complete\n");
4244 goto out;
4245 }
4246
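/* Resolve the local and link partner PAUSE/ASM_DIR bits into
 * hw->fc.current_mode.
 */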
4247 status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low,
4248 IXGBE_KRM_AN_CNTL_1_SYM_PAUSE,
4249 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE,
4250 IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE,
4251 IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE);
4252
4253 out:
4254 if (status == IXGBE_SUCCESS) {
4255 hw->fc.fc_was_autonegged = TRUE;
4256 } else {
4257 hw->fc.fc_was_autonegged = FALSE;
4258 hw->fc.current_mode = hw->fc.requested_mode;
4259 }
4260 }
4261
4262 /**
4263 * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings
4264 * @hw: pointer to hardware structure
4265 *
4266 **/
4267 void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
4268 {
4269 hw->fc.fc_was_autonegged = FALSE;
4270 hw->fc.current_mode = hw->fc.requested_mode;
4271 }
4272
4273 /**
4274 * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37
4275 * @hw: pointer to hardware structure
4276 *
4277 * Enable flow control according to IEEE clause 37.
4278 **/
4279 void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
4280 {
4281 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4282 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
4283 ixgbe_link_speed speed;
4284 bool link_up;
4285
4286 /* AN should have completed when the cable was plugged in.
4287 * Look for reasons to bail out. Bail out if:
4288 * - FC autoneg is disabled, or if
4289 * - link is not up.
4290 */
4291 if (hw->fc.disable_fc_autoneg) {
4292 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4293 "Flow control autoneg is disabled");
4294 goto out;
4295 }
4296
4297 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
4298 if (!link_up) {
4299 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
4300 goto out;
4301 }
4302
4303 /* Check if auto-negotiation has completed */
4304 status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
4305 if (status != IXGBE_SUCCESS ||
4306 !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
4307 DEBUGOUT("Auto-Negotiation did not complete\n");
4308 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4309 goto out;
4310 }
4311
4312 /* Negotiate the flow control */
4313 status = ixgbe_negotiate_fc(hw, info[0], info[0],
4314 FW_PHY_ACT_GET_LINK_INFO_FC_RX,
4315 FW_PHY_ACT_GET_LINK_INFO_FC_TX,
4316 FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
4317 FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
4318
4319 out:
4320 if (status == IXGBE_SUCCESS) {
4321 hw->fc.fc_was_autonegged = TRUE;
4322 } else {
4323 hw->fc.fc_was_autonegged = FALSE;
4324 hw->fc.current_mode = hw->fc.requested_mode;
4325 }
4326 }
4327
4328 /**
4329 * ixgbe_setup_fc_backplane_x550em_a - Set up flow control
4330 * @hw: pointer to hardware structure
4331 *
4332 * Called at init time to set up flow control.
4333 **/
4334 s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
4335 {
4336 s32 status = IXGBE_SUCCESS;
4337 u32 an_cntl = 0;
4338
4339 DEBUGFUNC("ixgbe_setup_fc_backplane_x550em_a");
4340
4341 /* Validate the requested mode */
4342 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
4343 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
4344 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
4345 return IXGBE_ERR_INVALID_LINK_SETTINGS;
4346 }
4347
4348 if (hw->fc.requested_mode == ixgbe_fc_default)
4349 hw->fc.requested_mode = ixgbe_fc_full;
4350
4351 /* Set up the 1G and 10G flow control advertisement registers so the
4352 * HW will be able to do FC autoneg once the cable is plugged in. If
4353 * we link at 10G, the 1G advertisement is harmless and vice versa.
4354 */
4355 status = hw->mac.ops.read_iosf_sb_reg(hw,
4356 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4357 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl);
4358
4359 if (status != IXGBE_SUCCESS) {
4360 DEBUGOUT("Auto-Negotiation did not complete\n");
4361 return status;
4362 }
4363
4364 /* The possible values of fc.requested_mode are:
4365 * 0: Flow control is completely disabled
4366 * 1: Rx flow control is enabled (we can receive pause frames,
4367 * but not send pause frames).
4368 * 2: Tx flow control is enabled (we can send pause frames but
4369 * we do not support receiving pause frames).
4370 * 3: Both Rx and Tx flow control (symmetric) are enabled.
4371 * other: Invalid.
4372 */
4373 switch (hw->fc.requested_mode) {
4374 case ixgbe_fc_none:
4375 /* Flow control completely disabled by software override. */
4376 an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4377 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
4378 break;
4379 case ixgbe_fc_tx_pause:
4380 /* Tx Flow control is enabled, and Rx Flow control is
4381 * disabled by software override.
4382 */
4383 an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4384 an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
4385 break;
4386 case ixgbe_fc_rx_pause:
4387 /* Rx Flow control is enabled and Tx Flow control is
4388 * disabled by software override. Since there really
4389 * isn't a way to advertise that we are capable of RX
4390 * Pause ONLY, we will advertise that we support both
4391 * symmetric and asymmetric Rx PAUSE, as such we fall
4392 * through to the fc_full statement. Later, we will
4393 * disable the adapter's ability to send PAUSE frames.
4394 */
4395 case ixgbe_fc_full:
4396 /* Flow control (both Rx and Tx) is enabled by SW override. */
4397 an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
4398 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
4399 break;
4400 default:
4401 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
4402 "Flow control param set incorrectly\n");
4403 return IXGBE_ERR_CONFIG;
4404 }
4405
4406 status = hw->mac.ops.write_iosf_sb_reg(hw,
4407 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
4408 IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl);
4409
4410 /* Restart auto-negotiation. */
4411 status = ixgbe_restart_an_internal_phy_x550em(hw);
4412
4413 return status;
4414 }
4415
4416 /**
4417 * ixgbe_set_mux - Set mux for port 1 access with CS4227
4418 * @hw: pointer to hardware structure
4419 * @state: set mux if 1, clear if 0
4420 */
4421 static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
4422 {
4423 u32 esdp;
4424
4425 if (!hw->bus.lan_id)
4426 return;
4427 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4428 if (state)
4429 esdp |= IXGBE_ESDP_SDP1;
4430 else
4431 esdp &= ~IXGBE_ESDP_SDP1;
4432 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
4433 IXGBE_WRITE_FLUSH(hw);
4434 }
4435
4436 /**
4437 * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore
4438 * @hw: pointer to hardware structure
4439 * @mask: Mask to specify which semaphore to acquire
4440 *
4441 * Acquires the SWFW semaphore and sets the I2C MUX
4442 **/
4443 s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4444 {
4445 s32 status;
4446
4447 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em");
4448
4449 status = ixgbe_acquire_swfw_sync_X540(hw, mask);
4450 if (status)
4451 return status;
4452
4453 if (mask & IXGBE_GSSR_I2C_MASK)
4454 ixgbe_set_mux(hw, 1);
4455
4456 return IXGBE_SUCCESS;
4457 }
4458
4459 /**
4460 * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore
4461 * @hw: pointer to hardware structure
4462 * @mask: Mask to specify which semaphore to release
4463 *
4464  * Releases the SWFW semaphore and clears the I2C MUX
4465 **/
4466 void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
4467 {
4468 DEBUGFUNC("ixgbe_release_swfw_sync_X550em");
4469
4470 if (mask & IXGBE_GSSR_I2C_MASK)
4471 ixgbe_set_mux(hw, 0);
4472
4473 ixgbe_release_swfw_sync_X540(hw, mask);
4474 }
4475
4476 /**
4477 * ixgbe_acquire_swfw_sync_X550a - Acquire SWFW semaphore
4478 * @hw: pointer to hardware structure
4479 * @mask: Mask to specify which semaphore to acquire
4480 *
4481  * Acquires the SWFW semaphore and gets the shared PHY token as needed
4482 */
4483 static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4484 {
4485 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4486 int retries = FW_PHY_TOKEN_RETRIES;
4487 s32 status = IXGBE_SUCCESS;
4488
4489 DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a");
4490
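/*
 * Retry loop: each attempt first takes the requested hardware semaphores
 * (if any), then tries for the shared PHY token. On IXGBE_ERR_TOKEN_RETRY
 * the semaphores are dropped and the whole sequence is retried, up to
 * FW_PHY_TOKEN_RETRIES times; any other token failure aborts immediately.
 */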
4491 while (--retries) {
4492 status = IXGBE_SUCCESS;
4493 if (hmask)
4494 status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
4495 if (status) {
4496 DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n",
4497 status);
4498 return status;
4499 }
4500 if (!(mask & IXGBE_GSSR_TOKEN_SM))
4501 return IXGBE_SUCCESS;
4502
4503 status = ixgbe_get_phy_token(hw);
4504 if (status == IXGBE_ERR_TOKEN_RETRY)
4505 DEBUGOUT1("Could not acquire PHY token, Status = %d\n",
4506 status);
4507
4508 if (status == IXGBE_SUCCESS)
4509 return IXGBE_SUCCESS;
4510
4511 if (hmask)
4512 ixgbe_release_swfw_sync_X540(hw, hmask);
4513
4514 if (status != IXGBE_ERR_TOKEN_RETRY) {
4515 DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n",
4516 status);
4517 return status;
4518 }
4519 }
4520
4521 DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n",
4522 hw->phy.id);
4523 return status;
4524 }
4525
4526 /**
4527 * ixgbe_release_swfw_sync_X550a - Release SWFW semaphore
4528 * @hw: pointer to hardware structure
4529 * @mask: Mask to specify which semaphore to release
4530 *
4531 * Releases the SWFW semaphore and puts the shared phy token as needed
4532 */
4533 static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
4534 {
4535 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
4536
4537 DEBUGFUNC("ixgbe_release_swfw_sync_X550a");
4538
4539 if (mask & IXGBE_GSSR_TOKEN_SM)
4540 ixgbe_put_phy_token(hw);
4541
4542 if (hmask)
4543 ixgbe_release_swfw_sync_X540(hw, hmask);
4544 }
4545
4546 /**
4547 * ixgbe_read_phy_reg_x550a - Reads specified PHY register
4548 * @hw: pointer to hardware structure
4549 * @reg_addr: 32 bit address of PHY register to read
4550 * @device_type: 5 bit device type
4551 * @phy_data: Pointer to read data from PHY register
4552 *
4553 * Reads a value from a specified PHY register using the SWFW lock and PHY
4554  * Token. The PHY Token is needed since the MDIO is shared between two MAC
4555 * instances.
4556 **/
4557 s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4558 u32 device_type, u16 *phy_data)
4559 {
4560 s32 status;
4561 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4562
4563 DEBUGFUNC("ixgbe_read_phy_reg_x550a");
4564
4565 if (hw->mac.ops.acquire_swfw_sync(hw, mask))
4566 return IXGBE_ERR_SWFW_SYNC;
4567
4568 status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
4569
4570 hw->mac.ops.release_swfw_sync(hw, mask);
4571
4572 return status;
4573 }
4574
4575 /**
4576 * ixgbe_write_phy_reg_x550a - Writes specified PHY register
4577 * @hw: pointer to hardware structure
4578 * @reg_addr: 32 bit PHY register to write
4579 * @device_type: 5 bit device type
4580 * @phy_data: Data to write to the PHY register
4581 *
4582 * Writes a value to specified PHY register using the SWFW lock and PHY Token.
4583  * The PHY Token is needed since the MDIO is shared between two MAC instances.
4584 **/
4585 s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
4586 u32 device_type, u16 phy_data)
4587 {
4588 s32 status;
4589 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
4590
4591 DEBUGFUNC("ixgbe_write_phy_reg_x550a");
4592
4593 if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) {
4594 status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type,
4595 phy_data);
4596 hw->mac.ops.release_swfw_sync(hw, mask);
4597 } else {
4598 status = IXGBE_ERR_SWFW_SYNC;
4599 }
4600
4601 return status;
4602 }
4603
4604 /**
4605 * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
4606 * @hw: pointer to hardware structure
4607 *
4608  * Handle external Base T PHY interrupt. If a high temperature
4609  * failure alarm is raised, return an error; if the link status has
4610  * changed, set up the internal/external PHY link.
4611 *
4612 * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
4613 * failure alarm, else return PHY access status.
4614 */
4615 s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
4616 {
4617 bool lsc;
4618 u32 status;
4619
4620 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
4621
4622 if (status != IXGBE_SUCCESS)
4623 return status;
4624
4625 if (lsc)
4626 return ixgbe_setup_internal_phy(hw);
4627
4628 return IXGBE_SUCCESS;
4629 }
4630
4631 /**
4632 * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
4633 * @hw: pointer to hardware structure
4634 * @speed: new link speed
4635 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
4636 *
4637 * Setup internal/external PHY link speed based on link speed, then set
4638 * external PHY auto advertised link speed.
4639 *
4640 * Returns error status for any failure
4641 **/
4642 s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
4643 ixgbe_link_speed speed,
4644 bool autoneg_wait_to_complete)
4645 {
4646 s32 status;
4647 ixgbe_link_speed force_speed;
4648
4649 DEBUGFUNC("ixgbe_setup_mac_link_t_X550em");
4650
4651 /* Set the internal/external PHY link speed to iXFI (10G), unless
4652 * only 1G is auto advertised, in which case set up a KX link.
4653 */
4654 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
4655 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
4656 else
4657 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
4658
4659 /* If X552 and internal link mode is XFI, then setup XFI internal link.
4660 */
4661 if (hw->mac.type == ixgbe_mac_X550EM_x &&
4662 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
4663 status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
4664
4665 if (status != IXGBE_SUCCESS)
4666 return status;
4667 }
4668
4669 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
4670 }
4671
4672 /**
4673 * ixgbe_check_link_t_X550em - Determine link and speed status
4674 * @hw: pointer to hardware structure
4675 * @speed: pointer to link speed
4676 * @link_up: TRUE when link is up
4677 * @link_up_wait_to_complete: bool used to wait for link up or not
4678 *
4679 * Check that both the MAC and X557 external PHY have link.
4680 **/
4681 s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4682 bool *link_up, bool link_up_wait_to_complete)
4683 {
4684 u32 status;
4685 u16 i, autoneg_status = 0;
4686
4687 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
4688 return IXGBE_ERR_CONFIG;
4689
4690 status = ixgbe_check_mac_link_generic(hw, speed, link_up,
4691 link_up_wait_to_complete);
4692
4693 /* If check link fails or MAC link is not up, then return */
4694 if (status != IXGBE_SUCCESS || !(*link_up))
4695 return status;
4696
4697 /* MAC link is up, so check external PHY link.
4698 * The X557 PHY link status is latching low, so it can only be used to
4699 * detect a link drop, not the current link state, unless back-to-back
4700 * reads are performed.
4701 */
4702 for (i = 0; i < 2; i++) {
4703 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
4704 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
4705 &autoneg_status);
4706
4707 if (status != IXGBE_SUCCESS)
4708 return status;
4709 }
4710
4711 /* If external PHY link is not up, then indicate link not up */
4712 if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
4713 *link_up = FALSE;
4714
4715 return IXGBE_SUCCESS;
4716 }
4717
4718 /**
4719 * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
4720 * @hw: pointer to hardware structure
4721 **/
4722 s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
4723 {
4724 s32 status;
4725
4726 status = ixgbe_reset_phy_generic(hw);
4727
4728 if (status != IXGBE_SUCCESS)
4729 return status;
4730
4731 /* Configure Link Status Alarm and Temperature Threshold interrupts */
4732 return ixgbe_enable_lasi_ext_t_x550em(hw);
4733 }
4734
4735 /**
4736 * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs.
4737 * @hw: pointer to hardware structure
4738 * @led_idx: led number to turn on
4739 **/
4740 s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4741 {
4742 u16 phy_data;
4743
4744 DEBUGFUNC("ixgbe_led_on_t_X550em");
4745
4746 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4747 return IXGBE_ERR_PARAM;
4748
4749 /* To turn on the LED, set mode to ON. */
4750 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4751 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4752 phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
4753 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4754 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4755
4756 /* Some designs have the LEDs wired to the MAC */
4757 return ixgbe_led_on_generic(hw, led_idx);
4758 }
4759
4760 /**
4761 * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs.
4762 * @hw: pointer to hardware structure
4763 * @led_idx: led number to turn off
4764 **/
4765 s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
4766 {
4767 u16 phy_data;
4768
4769 DEBUGFUNC("ixgbe_led_off_t_X550em");
4770
4771 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
4772 return IXGBE_ERR_PARAM;
4773
4774 /* To turn off the LED, set mode to OFF. */
4775 ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4776 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
4777 phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
4778 ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
4779 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
4780
4781 /* Some designs have the LEDs wired to the MAC */
4782 return ixgbe_led_off_generic(hw, led_idx);
4783 }
4784
4785 /**
4786 * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
4787 * @hw: pointer to the HW structure
4788 * @maj: driver version major number
4789 * @min: driver version minor number
4790 * @build: driver version build number
4791 * @sub: driver version sub build number
4792 * @len: length of driver_ver string
4793 * @driver_ver: driver string
4794 *
4795 * Sends driver version number to firmware through the manageability
4796 * block. On success return IXGBE_SUCCESS
4797 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4798 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4799 **/
4800 s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
4801 u8 build, u8 sub, u16 len, const char *driver_ver)
4802 {
4803 struct ixgbe_hic_drv_info2 fw_cmd;
4804 s32 ret_val = IXGBE_SUCCESS;
4805 int i;
4806
4807 DEBUGFUNC("ixgbe_set_fw_drv_ver_x550");
4808
4809 if ((len == 0) || (driver_ver == NULL) ||
4810 (len > sizeof(fw_cmd.driver_string)))
4811 return IXGBE_ERR_INVALID_ARGUMENT;
4812
4813 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4814 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
4815 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4816 fw_cmd.port_num = (u8)hw->bus.func;
4817 fw_cmd.ver_maj = maj;
4818 fw_cmd.ver_min = min;
4819 fw_cmd.ver_build = build;
4820 fw_cmd.ver_sub = sub;
4821 fw_cmd.hdr.checksum = 0;
4822 memcpy(fw_cmd.driver_string, driver_ver, len);
4823 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4824 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4825
4826 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4827 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4828 sizeof(fw_cmd),
4829 IXGBE_HI_COMMAND_TIMEOUT,
4830 TRUE);
4831 if (ret_val != IXGBE_SUCCESS)
4832 continue;
4833
4834 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4835 FW_CEM_RESP_STATUS_SUCCESS)
4836 ret_val = IXGBE_SUCCESS;
4837 else
4838 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4839
4840 break;
4841 }
4842
4843 return ret_val;
4844 }
4845
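/*
 * Illustrative call (version numbers and string are placeholders):
 *
 *	static const char drv_ver[] = "ix 1.0.0";
 *
 *	(void)ixgbe_set_fw_drv_ver_x550(hw, 1, 0, 0, 0,
 *	    sizeof(drv_ver), drv_ver);
 */
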
4846 /**
4847 * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode
4848  * @hw: pointer to hardware structure
4849 *
4850 * Returns TRUE if in FW NVM recovery mode.
4851 **/
4852 bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
4853 {
4854 u32 fwsm;
4855
4856 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
4857
4858 return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE);
4859 }
4860