1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24 */
25
26 /*
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 */
29
30 /* IntelVersion: 1.238 scm_061610_003709 */
31
32 #include "ixgbe_common.h"
33 #include "ixgbe_phy.h"
34 #include "ixgbe_api.h"
35
36 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
37 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
38 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
39 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
40 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
41 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
42 u16 count);
43 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
44 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
45 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
46 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
47
48 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
49 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
50 u16 *san_mac_offset);
51 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
52 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
53 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
54 static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
55 static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
56 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
57 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan);
58
59 /*
60 * ixgbe_init_ops_generic - Inits function ptrs
61 * @hw: pointer to the hardware structure
62 *
63 * Initialize the function pointers.
64 */
65 s32
66 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
67 {
68 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
69 struct ixgbe_mac_info *mac = &hw->mac;
70 u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
71
72 DEBUGFUNC("ixgbe_init_ops_generic");
73
74 /* EEPROM */
75 eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
76 /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
77 if (eec & (1 << 8))
78 eeprom->ops.read = &ixgbe_read_eerd_generic;
79 else
80 eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
81 eeprom->ops.write = &ixgbe_write_eeprom_generic;
82 eeprom->ops.validate_checksum =
83 &ixgbe_validate_eeprom_checksum_generic;
84 eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
85 eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
86
87 /* MAC */
88 mac->ops.init_hw = &ixgbe_init_hw_generic;
89 mac->ops.reset_hw = NULL;
90 mac->ops.start_hw = &ixgbe_start_hw_generic;
91 mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
92 mac->ops.get_media_type = NULL;
93 mac->ops.get_supported_physical_layer = NULL;
94 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
95 mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
96 mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
97 mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
98 mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
99 mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
100 mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
101
102 /* LEDs */
103 mac->ops.led_on = &ixgbe_led_on_generic;
104 mac->ops.led_off = &ixgbe_led_off_generic;
105 mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
106 mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
107
108 /* RAR, Multicast, VLAN */
109 mac->ops.set_rar = &ixgbe_set_rar_generic;
110 mac->ops.clear_rar = &ixgbe_clear_rar_generic;
111 mac->ops.insert_mac_addr = NULL;
112 mac->ops.set_vmdq = NULL;
113 mac->ops.clear_vmdq = NULL;
114 mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
115 mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
116 mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
117 mac->ops.enable_mc = &ixgbe_enable_mc_generic;
118 mac->ops.disable_mc = &ixgbe_disable_mc_generic;
119 mac->ops.clear_vfta = NULL;
120 mac->ops.set_vfta = NULL;
121 mac->ops.init_uta_tables = NULL;
122
123 /* Flow Control */
124 mac->ops.fc_enable = &ixgbe_fc_enable_generic;
125
126 /* Link */
127 mac->ops.get_link_capabilities = NULL;
128 mac->ops.setup_link = NULL;
129 mac->ops.check_link = NULL;
130
131 return (IXGBE_SUCCESS);
132 }
133
134 /*
135 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
136 * @hw: pointer to hardware structure
137 *
138 * Starts the hardware by filling the bus info structure and media type, clears
139 * all on chip counters, initializes receive address registers, multicast
140 * table, VLAN filter table, calls routine to set up link and flow control
141 * settings, and leaves transmit and receive units disabled and uninitialized
142 */
143 s32
144 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
145 {
146 u32 ctrl_ext;
147
148 DEBUGFUNC("ixgbe_start_hw_generic");
149
150 /* Set the media type */
151 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
152
153 /* PHY ops initialization must be done in reset_hw() */
154
155 /* Clear the VLAN filter table */
156 hw->mac.ops.clear_vfta(hw);
157
158 /* Clear statistics registers */
159 hw->mac.ops.clear_hw_cntrs(hw);
160
161 /* Set No Snoop Disable */
162 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
163 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
164 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
165 IXGBE_WRITE_FLUSH(hw);
166
167 /* Setup flow control */
168 (void) ixgbe_setup_fc(hw, 0);
169
170 /* Clear adapter stopped flag */
171 hw->adapter_stopped = false;
172
173 return (IXGBE_SUCCESS);
174 }
175
176 /*
177 * ixgbe_init_hw_generic - Generic hardware initialization
178 * @hw: pointer to hardware structure
179 *
180 * Initialize the hardware by resetting the hardware, filling the bus info
181 * structure and media type, clears all on chip counters, initializes receive
182 * address registers, multicast table, VLAN filter table, calls routine to set
183 * up link and flow control settings, and leaves transmit and receive units
184 * disabled and uninitialized
185 */
186 s32
187 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
188 {
189 s32 status = IXGBE_SUCCESS;
190
191 DEBUGFUNC("ixgbe_init_hw_generic");
192
193 /* Reset the hardware */
194 status = hw->mac.ops.reset_hw(hw);
195
196 if (status == IXGBE_SUCCESS) {
197 /* Start the HW */
198 status = hw->mac.ops.start_hw(hw);
199 }
200
201 return (status);
202 }
203
204 /*
205 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
206 * @hw: pointer to hardware structure
207 *
208 * Clears all hardware statistics counters by reading them from the hardware
209 * Statistics counters are clear on read.
210 */
211 s32
212 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
213 {
214 u16 i = 0;
215
216 DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
217
218 (void) IXGBE_READ_REG(hw, IXGBE_CRCERRS);
219 (void) IXGBE_READ_REG(hw, IXGBE_ILLERRC);
220 (void) IXGBE_READ_REG(hw, IXGBE_ERRBC);
221 (void) IXGBE_READ_REG(hw, IXGBE_MSPDC);
222 for (i = 0; i < 8; i++)
223 (void) IXGBE_READ_REG(hw, IXGBE_MPC(i));
224
225 (void) IXGBE_READ_REG(hw, IXGBE_MLFC);
226 (void) IXGBE_READ_REG(hw, IXGBE_MRFC);
227 (void) IXGBE_READ_REG(hw, IXGBE_RLEC);
228 (void) IXGBE_READ_REG(hw, IXGBE_LXONTXC);
229 (void) IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
230 if (hw->mac.type >= ixgbe_mac_82599EB) {
231 (void) IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
232 (void) IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
233 } else {
234 (void) IXGBE_READ_REG(hw, IXGBE_LXONRXC);
235 (void) IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
236 }
237
238 for (i = 0; i < 8; i++) {
239 (void) IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
240 (void) IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
241 if (hw->mac.type >= ixgbe_mac_82599EB) {
242 (void) IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
243 (void) IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
244 } else {
245 (void) IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
246 (void) IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
247 }
248 }
249 if (hw->mac.type >= ixgbe_mac_82599EB)
250 for (i = 0; i < 8; i++)
251 (void) IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
252
253 (void) IXGBE_READ_REG(hw, IXGBE_PRC64);
254 (void) IXGBE_READ_REG(hw, IXGBE_PRC127);
255 (void) IXGBE_READ_REG(hw, IXGBE_PRC255);
256 (void) IXGBE_READ_REG(hw, IXGBE_PRC511);
257 (void) IXGBE_READ_REG(hw, IXGBE_PRC1023);
258 (void) IXGBE_READ_REG(hw, IXGBE_PRC1522);
259 (void) IXGBE_READ_REG(hw, IXGBE_GPRC);
260 (void) IXGBE_READ_REG(hw, IXGBE_BPRC);
261 (void) IXGBE_READ_REG(hw, IXGBE_MPRC);
262 (void) IXGBE_READ_REG(hw, IXGBE_GPTC);
263 (void) IXGBE_READ_REG(hw, IXGBE_GORCL);
264 (void) IXGBE_READ_REG(hw, IXGBE_GORCH);
265 (void) IXGBE_READ_REG(hw, IXGBE_GOTCL);
266 (void) IXGBE_READ_REG(hw, IXGBE_GOTCH);
267 for (i = 0; i < 8; i++)
268 (void) IXGBE_READ_REG(hw, IXGBE_RNBC(i));
269 (void) IXGBE_READ_REG(hw, IXGBE_RUC);
270 (void) IXGBE_READ_REG(hw, IXGBE_RFC);
271 (void) IXGBE_READ_REG(hw, IXGBE_ROC);
272 (void) IXGBE_READ_REG(hw, IXGBE_RJC);
273 (void) IXGBE_READ_REG(hw, IXGBE_MNGPRC);
274 (void) IXGBE_READ_REG(hw, IXGBE_MNGPDC);
275 (void) IXGBE_READ_REG(hw, IXGBE_MNGPTC);
276 (void) IXGBE_READ_REG(hw, IXGBE_TORL);
277 (void) IXGBE_READ_REG(hw, IXGBE_TORH);
278 (void) IXGBE_READ_REG(hw, IXGBE_TPR);
279 (void) IXGBE_READ_REG(hw, IXGBE_TPT);
280 (void) IXGBE_READ_REG(hw, IXGBE_PTC64);
281 (void) IXGBE_READ_REG(hw, IXGBE_PTC127);
282 (void) IXGBE_READ_REG(hw, IXGBE_PTC255);
283 (void) IXGBE_READ_REG(hw, IXGBE_PTC511);
284 (void) IXGBE_READ_REG(hw, IXGBE_PTC1023);
285 (void) IXGBE_READ_REG(hw, IXGBE_PTC1522);
286 (void) IXGBE_READ_REG(hw, IXGBE_MPTC);
287 (void) IXGBE_READ_REG(hw, IXGBE_BPTC);
288 for (i = 0; i < 16; i++) {
289 (void) IXGBE_READ_REG(hw, IXGBE_QPRC(i));
290 (void) IXGBE_READ_REG(hw, IXGBE_QPTC(i));
291 if (hw->mac.type >= ixgbe_mac_82599EB) {
292 (void) IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
293 (void) IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
294 (void) IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
295 (void) IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
296 (void) IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
297 } else {
298 (void) IXGBE_READ_REG(hw, IXGBE_QBRC(i));
299 (void) IXGBE_READ_REG(hw, IXGBE_QBTC(i));
300 }
301 }
302
303 return (IXGBE_SUCCESS);
304 }
305
306 /*
307 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
308 * @hw: pointer to hardware structure
309 * @pba_num: stores the part number from the EEPROM
310 *
311 * Reads the part number from the EEPROM.
312 */
313 s32
314 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
315 {
316 s32 ret_val;
317 u16 data;
318
319 DEBUGFUNC("ixgbe_read_pba_num_generic");
320
321 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
322 if (ret_val) {
323 DEBUGOUT("NVM Read Error\n");
324 return (ret_val);
325 }
326 *pba_num = (u32)(data << 16);
327
328 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
329 if (ret_val) {
330 DEBUGOUT("NVM Read Error\n");
331 return (ret_val);
332 }
333 *pba_num |= data;
334
335 return (IXGBE_SUCCESS);
336 }
337
338 /*
339 * ixgbe_get_mac_addr_generic - Generic get MAC address
340 * @hw: pointer to hardware structure
341 * @mac_addr: Adapter MAC address
342 *
343 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
344 * A reset of the adapter must be performed prior to calling this function
345 * in order for the MAC address to have been loaded from the EEPROM into RAR0
346 */
347 s32
348 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
349 {
350 u32 rar_high;
351 u32 rar_low;
352 u16 i;
353
354 DEBUGFUNC("ixgbe_get_mac_addr_generic");
355
356 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
357 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
358
359 for (i = 0; i < 4; i++)
360 mac_addr[i] = (u8)(rar_low >> (i*8));
361
362 for (i = 0; i < 2; i++)
363 mac_addr[i+4] = (u8)(rar_high >> (i*8));
364
365 return (IXGBE_SUCCESS);
366 }
367
368 /*
369 * ixgbe_get_bus_info_generic - Generic set PCI bus info
370 * @hw: pointer to hardware structure
371 *
372 * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
373 */
374 s32
375 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
376 {
377 struct ixgbe_mac_info *mac = &hw->mac;
378 u16 link_status;
379
380 DEBUGFUNC("ixgbe_get_bus_info_generic");
381
382 hw->bus.type = ixgbe_bus_type_pci_express;
383
384 /* Get the negotiated link width and speed from PCI config space */
385 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
386
387 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
388 case IXGBE_PCI_LINK_WIDTH_1:
389 hw->bus.width = ixgbe_bus_width_pcie_x1;
390 break;
391 case IXGBE_PCI_LINK_WIDTH_2:
392 hw->bus.width = ixgbe_bus_width_pcie_x2;
393 break;
394 case IXGBE_PCI_LINK_WIDTH_4:
395 hw->bus.width = ixgbe_bus_width_pcie_x4;
396 break;
397 case IXGBE_PCI_LINK_WIDTH_8:
398 hw->bus.width = ixgbe_bus_width_pcie_x8;
399 break;
400 default:
401 hw->bus.width = ixgbe_bus_width_unknown;
402 break;
403 }
404
405 switch (link_status & IXGBE_PCI_LINK_SPEED) {
406 case IXGBE_PCI_LINK_SPEED_2500:
407 hw->bus.speed = ixgbe_bus_speed_2500;
408 break;
409 case IXGBE_PCI_LINK_SPEED_5000:
410 hw->bus.speed = ixgbe_bus_speed_5000;
411 break;
412 default:
413 hw->bus.speed = ixgbe_bus_speed_unknown;
414 break;
415 }
416
417 mac->ops.set_lan_id(hw);
418
419 return (IXGBE_SUCCESS);
420 }
421
422
423 /*
424 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
425 * @hw: pointer to the HW structure
426 *
427 * Determines the LAN function id by reading memory-mapped registers
428 * and swaps the port value if requested.
429 */
430 void
431 ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
432 {
433 struct ixgbe_bus_info *bus = &hw->bus;
434 u32 reg;
435
436 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
437
438 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
439 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
440 bus->lan_id = bus->func;
441
442 /* check for a port swap */
443 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
444 if (reg & IXGBE_FACTPS_LFS)
445 bus->func ^= 0x1;
446 }
447
448 /*
449 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
450 * @hw: pointer to hardware structure
451 *
452 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
453 * disables transmit and receive units. The adapter_stopped flag is used by
454 * the shared code and drivers to determine if the adapter is in a stopped
455 * state and should not touch the hardware.
456 */
457 s32
458 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
459 {
460 u32 number_of_queues;
461 u32 reg_val;
462 u16 i;
463
464 DEBUGFUNC("ixgbe_stop_adapter_generic");
465
466 /*
467 * Set the adapter_stopped flag so other driver functions stop touching
468 * the hardware
469 */
470 hw->adapter_stopped = true;
471
472 /* Disable the receive unit */
473 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
474 reg_val &= ~(IXGBE_RXCTRL_RXEN);
475 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
476 IXGBE_WRITE_FLUSH(hw);
477 msec_delay(2);
478
479 /* Clear interrupt mask to stop from interrupts being generated */
480 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
481
482 /* Clear any pending interrupts */
483 (void) IXGBE_READ_REG(hw, IXGBE_EICR);
484
485 /* Disable the transmit unit. Each queue must be disabled. */
486 number_of_queues = hw->mac.max_tx_queues;
487 for (i = 0; i < number_of_queues; i++) {
488 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
489 if (reg_val & IXGBE_TXDCTL_ENABLE) {
490 reg_val &= ~IXGBE_TXDCTL_ENABLE;
491 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), reg_val);
492 }
493 }
494
495 /*
496 	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
497 * access and verify no pending requests
498 */
499 (void) ixgbe_disable_pcie_master(hw);
500
501 return (IXGBE_SUCCESS);
502 }
503
504 /*
505 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
506 * @hw: pointer to hardware structure
507 * @index: led number to turn on
508 */
509 s32
510 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
511 {
512 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
513
514 DEBUGFUNC("ixgbe_led_on_generic");
515
516 /* To turn on the LED, set mode to ON. */
517 led_reg &= ~IXGBE_LED_MODE_MASK(index);
518 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
519 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
520 IXGBE_WRITE_FLUSH(hw);
521
522 return (IXGBE_SUCCESS);
523 }
524
525 /*
526 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
527 * @hw: pointer to hardware structure
528 * @index: led number to turn off
529 */
530 s32
531 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
532 {
533 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
534
535 DEBUGFUNC("ixgbe_led_off_generic");
536
537 /* To turn off the LED, set mode to OFF. */
538 led_reg &= ~IXGBE_LED_MODE_MASK(index);
539 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
540 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
541 IXGBE_WRITE_FLUSH(hw);
542
543 return (IXGBE_SUCCESS);
544 }
545
546 /*
547 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
548 * @hw: pointer to hardware structure
549 *
550 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
551 * ixgbe_hw struct in order to set up EEPROM access.
552 */
553 s32
554 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
555 {
556 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
557 u32 eec;
558 u16 eeprom_size;
559
560 DEBUGFUNC("ixgbe_init_eeprom_params_generic");
561
562 if (eeprom->type == ixgbe_eeprom_uninitialized) {
563 eeprom->type = ixgbe_eeprom_none;
564 /*
565 * Set default semaphore delay to 10ms which is a well
566 * tested value
567 */
568 eeprom->semaphore_delay = 10;
569
570 /*
571 * Check for EEPROM present first.
572 * If not present leave as none
573 */
574 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
575 if (eec & IXGBE_EEC_PRES) {
576 eeprom->type = ixgbe_eeprom_spi;
577
578 /*
579 * SPI EEPROM is assumed here. This code would need to
580 * change if a future EEPROM is not SPI.
581 */
582 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
583 IXGBE_EEC_SIZE_SHIFT);
584 eeprom->word_size = 1 << (eeprom_size +
585 IXGBE_EEPROM_WORD_SIZE_BASE_SHIFT);
586 }
587
588 if (eec & IXGBE_EEC_ADDR_SIZE)
589 eeprom->address_bits = 16;
590 else
591 eeprom->address_bits = 8;
592 DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
593 "%d\n", eeprom->type, eeprom->word_size,
594 eeprom->address_bits);
595 }
596
597 return (IXGBE_SUCCESS);
598 }
599
600 /*
601 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
602 * @hw: pointer to hardware structure
603 * @offset: offset within the EEPROM to be written to
604 * @data: 16 bit word to be written to the EEPROM
605 *
606 * If ixgbe_eeprom_update_checksum is not called after this function, the
607 * EEPROM will most likely contain an invalid checksum.
608 */
609 s32
610 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
611 {
612 s32 status;
613 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
614
615 DEBUGFUNC("ixgbe_write_eeprom_generic");
616
617 hw->eeprom.ops.init_params(hw);
618
619 if (offset >= hw->eeprom.word_size) {
620 status = IXGBE_ERR_EEPROM;
621 goto out;
622 }
623
624 /* Prepare the EEPROM for writing */
625 status = ixgbe_acquire_eeprom(hw);
626
627 if (status == IXGBE_SUCCESS) {
628 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
629 ixgbe_release_eeprom(hw);
630 status = IXGBE_ERR_EEPROM;
631 }
632 }
633
634 if (status == IXGBE_SUCCESS) {
635 ixgbe_standby_eeprom(hw);
636
637 /* Send the WRITE ENABLE command (8 bit opcode ) */
638 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_WREN_OPCODE_SPI,
639 IXGBE_EEPROM_OPCODE_BITS);
640
641 ixgbe_standby_eeprom(hw);
642
643 /*
644 * Some SPI eeproms use the 8th address bit embedded in the
645 * opcode
646 */
647 if ((hw->eeprom.address_bits == 8) && (offset >= 128))
648 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
649
650 /* Send the Write command (8-bit opcode + addr) */
651 ixgbe_shift_out_eeprom_bits(hw, write_opcode,
652 IXGBE_EEPROM_OPCODE_BITS);
653 ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
654 hw->eeprom.address_bits);
655
656 /* Send the data */
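	/*
	 * Note: the bit-bang loop shifts the MSB of 'data' out first, so
	 * the byte swap below puts the word's low byte on the wire (and
	 * at the lower EEPROM byte address) first.
	 */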
657 data = (data >> 8) | (data << 8);
658 ixgbe_shift_out_eeprom_bits(hw, data, 16);
659 ixgbe_standby_eeprom(hw);
660
661 /* Done with writing - release the EEPROM */
662 ixgbe_release_eeprom(hw);
663 }
664
665 out:
666 return (status);
667 }
668
669 /*
670 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
671 * @hw: pointer to hardware structure
672 * @offset: offset within the EEPROM to be read
673 * @data: read 16 bit value from EEPROM
674 *
675 * Reads 16 bit value from EEPROM through bit-bang method
676 */
677 s32
678 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
679 u16 *data)
680 {
681 s32 status;
682 u16 word_in;
683 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
684
685 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
686
687 hw->eeprom.ops.init_params(hw);
688
689 if (offset >= hw->eeprom.word_size) {
690 status = IXGBE_ERR_EEPROM;
691 goto out;
692 }
693
694 /* Prepare the EEPROM for reading */
695 status = ixgbe_acquire_eeprom(hw);
696
697 if (status == IXGBE_SUCCESS) {
698 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
699 ixgbe_release_eeprom(hw);
700 status = IXGBE_ERR_EEPROM;
701 }
702 }
703
704 if (status == IXGBE_SUCCESS) {
705 ixgbe_standby_eeprom(hw);
706
707 /*
708 * Some SPI eeproms use the 8th address bit embedded in the
709 * opcode
710 */
711 if ((hw->eeprom.address_bits == 8) && (offset >= 128))
712 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
713
714 /* Send the READ command (opcode + addr) */
715 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
716 IXGBE_EEPROM_OPCODE_BITS);
717 ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
718 hw->eeprom.address_bits);
719
720 /* Read the data. */
721 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
722 *data = (word_in >> 8) | (word_in << 8);
723
724 /* End this read operation */
725 ixgbe_release_eeprom(hw);
726 }
727
728 out:
729 return (status);
730 }
731
732 /*
733 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
734 * @hw: pointer to hardware structure
735 * @offset: offset of word in the EEPROM to read
736 * @data: word read from the EEPROM
737 *
738 * Reads a 16 bit word from the EEPROM using the EERD register.
739 */
740 s32
741 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
742 {
743 u32 eerd;
744 s32 status;
745
746 DEBUGFUNC("ixgbe_read_eerd_generic");
747
748 hw->eeprom.ops.init_params(hw);
749
750 if (offset >= hw->eeprom.word_size) {
751 status = IXGBE_ERR_EEPROM;
752 goto out;
753 }
754
755 eerd = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) +
756 IXGBE_EEPROM_RW_REG_START;
757
758 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
759 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
760
761 if (status == IXGBE_SUCCESS)
762 *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
763 IXGBE_EEPROM_RW_REG_DATA);
764 else
765 DEBUGOUT("Eeprom read timed out\n");
766
767 out:
768 return (status);
769 }
770
771 /*
772 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
773 * @hw: pointer to hardware structure
774 * @ee_reg: EEPROM flag for polling
775 *
776 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
777 * read or write is done respectively.
778 */
779 s32
780 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
781 {
782 u32 i;
783 u32 reg;
784 s32 status = IXGBE_ERR_EEPROM;
785
786 DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
787
788 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
789 if (ee_reg == IXGBE_NVM_POLL_READ)
790 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
791 else
792 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
793
794 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
795 status = IXGBE_SUCCESS;
796 break;
797 }
798 usec_delay(5);
799 }
800 return (status);
801 }
802
803 /*
804 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
805 * @hw: pointer to hardware structure
806 *
807 * Prepares EEPROM for access using bit-bang method. This function should
808 * be called before issuing a command to the EEPROM.
809 */
810 static s32
811 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
812 {
813 s32 status = IXGBE_SUCCESS;
814 u32 eec;
815 u32 i;
816
817 DEBUGFUNC("ixgbe_acquire_eeprom");
818
819 if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != IXGBE_SUCCESS)
820 status = IXGBE_ERR_SWFW_SYNC;
821
822 if (status == IXGBE_SUCCESS) {
823 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
824
825 /* Request EEPROM Access */
826 eec |= IXGBE_EEC_REQ;
827 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
828
829 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
830 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
831 if (eec & IXGBE_EEC_GNT)
832 break;
833 usec_delay(5);
834 }
835
836 /* Release if grant not acquired */
837 if (!(eec & IXGBE_EEC_GNT)) {
838 eec &= ~IXGBE_EEC_REQ;
839 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
840 DEBUGOUT("Could not acquire EEPROM grant\n");
841
842 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
843 status = IXGBE_ERR_EEPROM;
844 }
845 }
846
847 /* Setup EEPROM for Read/Write */
848 if (status == IXGBE_SUCCESS) {
849 /* Clear CS and SK */
850 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
851 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
852 IXGBE_WRITE_FLUSH(hw);
853 usec_delay(1);
854 }
855 return (status);
856 }
857
858 /*
859 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
860 * @hw: pointer to hardware structure
861 *
862 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
863 */
864 static s32
865 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
866 {
867 s32 status = IXGBE_ERR_EEPROM;
868 u32 timeout = 2000;
869 u32 i;
870 u32 swsm;
871
872 DEBUGFUNC("ixgbe_get_eeprom_semaphore");
873
874 /* Get SMBI software semaphore between device drivers first */
875 for (i = 0; i < timeout; i++) {
876 /*
877 * If the SMBI bit is 0 when we read it, then the bit will be
878 * set and we have the semaphore
879 */
880 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
881 if (!(swsm & IXGBE_SWSM_SMBI)) {
882 status = IXGBE_SUCCESS;
883 break;
884 }
885 usec_delay(50);
886 }
887
888 /* Now get the semaphore between SW/FW through the SWESMBI bit */
889 if (status == IXGBE_SUCCESS) {
890 for (i = 0; i < timeout; i++) {
891 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
892
893 /* Set the SW EEPROM semaphore bit to request access */
894 swsm |= IXGBE_SWSM_SWESMBI;
895 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
896
897 /*
898 * If we set the bit successfully then we got the
899 * semaphore.
900 */
901 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
902 if (swsm & IXGBE_SWSM_SWESMBI)
903 break;
904
905 usec_delay(50);
906 }
907
908 /*
909 * Release semaphores and return error if SW EEPROM semaphore
910 * was not granted because we don't have access to the EEPROM
911 */
912 if (i >= timeout) {
913 DEBUGOUT("SWESMBI Software EEPROM semaphore "
914 "not granted.\n");
915 ixgbe_release_eeprom_semaphore(hw);
916 status = IXGBE_ERR_EEPROM;
917 }
918 } else {
919 DEBUGOUT("Software semaphore SMBI between device drivers "
920 "not granted.\n");
921 }
922
923 return (status);
924 }
925
926 /*
927 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
928 * @hw: pointer to hardware structure
929 *
930 * This function clears hardware semaphore bits.
931 */
932 static void
933 ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
934 {
935 u32 swsm;
936
937 DEBUGFUNC("ixgbe_release_eeprom_semaphore");
938
939 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
940
941 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
942 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
943 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
944 IXGBE_WRITE_FLUSH(hw);
945 }
946
947 /*
948 * ixgbe_ready_eeprom - Polls for EEPROM ready
949 * @hw: pointer to hardware structure
950 */
951 static s32
952 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
953 {
954 s32 status = IXGBE_SUCCESS;
955 u16 i;
956 u8 spi_stat_reg;
957
958 DEBUGFUNC("ixgbe_ready_eeprom");
959
960 /*
961 * Read "Status Register" repeatedly until the LSB is cleared. The
962 * EEPROM will signal that the command has been completed by clearing
963 * bit 0 of the internal status register. If it's not cleared within
964 * 5 milliseconds, then error out.
965 */
966 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
967 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
968 IXGBE_EEPROM_OPCODE_BITS);
969 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
970 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
971 break;
972
973 usec_delay(5);
974 ixgbe_standby_eeprom(hw);
975 };
976
977 /*
978 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
979 * devices (and only 0-5mSec on 5V devices)
980 */
981 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
982 DEBUGOUT("SPI EEPROM Status error\n");
983 status = IXGBE_ERR_EEPROM;
984 }
985
986 return (status);
987 }
988
989 /*
990 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
991 * @hw: pointer to hardware structure
992 */
993 static void
994 ixgbe_standby_eeprom(struct ixgbe_hw *hw)
995 {
996 u32 eec;
997
998 DEBUGFUNC("ixgbe_standby_eeprom");
999
1000 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1001
1002 /* Toggle CS to flush commands */
1003 eec |= IXGBE_EEC_CS;
1004 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1005 IXGBE_WRITE_FLUSH(hw);
1006 usec_delay(1);
1007 eec &= ~IXGBE_EEC_CS;
1008 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1009 IXGBE_WRITE_FLUSH(hw);
1010 usec_delay(1);
1011 }
1012
1013 /*
1014 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1015 * @hw: pointer to hardware structure
1016 * @data: data to send to the EEPROM
1017 * @count: number of bits to shift out
1018 */
1019 static void
1020 ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
1021 u16 count)
1022 {
1023 u32 eec;
1024 u32 mask;
1025 u32 i;
1026
1027 DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
1028
1029 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1030
1031 /*
1032 * Mask is used to shift "count" bits of "data" out to the EEPROM
1033 * one bit at a time. Determine the starting bit based on count
1034 */
1035 mask = 0x01 << (count - 1);
1036
1037 for (i = 0; i < count; i++) {
1038 /*
1039 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1040 * "1", and then raising and then lowering the clock (the SK
1041 * bit controls the clock input to the EEPROM). A "0" is
1042 * shifted out to the EEPROM by setting "DI" to "0" and then
1043 * raising and then lowering the clock.
1044 */
1045 if (data & mask)
1046 eec |= IXGBE_EEC_DI;
1047 else
1048 eec &= ~IXGBE_EEC_DI;
1049
1050 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1051 IXGBE_WRITE_FLUSH(hw);
1052
1053 usec_delay(1);
1054
1055 ixgbe_raise_eeprom_clk(hw, &eec);
1056 ixgbe_lower_eeprom_clk(hw, &eec);
1057
1058 /*
1059 * Shift mask to signify next bit of data to shift in to the
1060 * EEPROM
1061 */
1062 mask = mask >> 1;
1063 };
1064
1065 /* We leave the "DI" bit set to "0" when we leave this routine. */
1066 eec &= ~IXGBE_EEC_DI;
1067 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1068 IXGBE_WRITE_FLUSH(hw);
1069 }
1070
1071 /*
1072 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
1073 * @hw: pointer to hardware structure
1074 */
1075 static u16
1076 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
1077 {
1078 u32 eec;
1079 u32 i;
1080 u16 data = 0;
1081
1082 DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
1083
1084 /*
1085 * In order to read a register from the EEPROM, we need to shift
1086 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
1087 * the clock input to the EEPROM (setting the SK bit), and then reading
1088 * the value of the "DO" bit. During this "shifting in" process the
1089 * "DI" bit should always be clear.
1090 */
1091 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1092
1093 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
1094
1095 for (i = 0; i < count; i++) {
1096 data = data << 1;
1097 ixgbe_raise_eeprom_clk(hw, &eec);
1098
1099 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1100
1101 eec &= ~(IXGBE_EEC_DI);
1102 if (eec & IXGBE_EEC_DO)
1103 data |= 1;
1104
1105 ixgbe_lower_eeprom_clk(hw, &eec);
1106 }
1107
1108 return (data);
1109 }
1110
1111 /*
1112 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
1113 * @hw: pointer to hardware structure
1114 * @eec: EEC register's current value
1115 */
1116 static void
1117 ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
1118 {
1119 DEBUGFUNC("ixgbe_raise_eeprom_clk");
1120
1121 /*
1122 * Raise the clock input to the EEPROM
1123 * (setting the SK bit), then delay
1124 */
1125 *eec = *eec | IXGBE_EEC_SK;
1126 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1127 IXGBE_WRITE_FLUSH(hw);
1128 usec_delay(1);
1129 }
1130
1131 /*
1132 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
1133 * @hw: pointer to hardware structure
1134  * @eec: EEC register's current value
1135 */
1136 static void
1137 ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
1138 {
1139 DEBUGFUNC("ixgbe_lower_eeprom_clk");
1140
1141 /*
1142 * Lower the clock input to the EEPROM (clearing the SK bit), then
1143 * delay
1144 */
1145 *eec = *eec & ~IXGBE_EEC_SK;
1146 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1147 IXGBE_WRITE_FLUSH(hw);
1148 usec_delay(1);
1149 }
1150
1151 /*
1152 * ixgbe_release_eeprom - Release EEPROM, release semaphores
1153 * @hw: pointer to hardware structure
1154 */
1155 static void
1156 ixgbe_release_eeprom(struct ixgbe_hw *hw)
1157 {
1158 u32 eec;
1159
1160 DEBUGFUNC("ixgbe_release_eeprom");
1161
1162 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1163
1164 eec |= IXGBE_EEC_CS; /* Pull CS high */
1165 eec &= ~IXGBE_EEC_SK; /* Lower SCK */
1166
1167 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1168 IXGBE_WRITE_FLUSH(hw);
1169
1170 usec_delay(1);
1171
1172 /* Stop requesting EEPROM access */
1173 eec &= ~IXGBE_EEC_REQ;
1174 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1175
1176 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1177
1178 /* Delay before attempt to obtain semaphore again to allow FW access */
1179 msec_delay(hw->eeprom.semaphore_delay);
1180 }
1181
1182 /*
1183 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
1184 * @hw: pointer to hardware structure
1185 */
1186 u16
1187 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
1188 {
1189 u16 i;
1190 u16 j;
1191 u16 checksum = 0;
1192 u16 length = 0;
1193 u16 pointer = 0;
1194 u16 word = 0;
1195
1196 DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
1197
1198 /* Include 0x0-0x3F in the checksum */
1199 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
1200 if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) {
1201 DEBUGOUT("EEPROM read failed\n");
1202 break;
1203 }
1204 checksum += word;
1205 }
1206
1207 /* Include all data from pointers except for the fw pointer */
1208 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
1209 hw->eeprom.ops.read(hw, i, &pointer);
1210
1211 /* Make sure the pointer seems valid */
1212 if (pointer != 0xFFFF && pointer != 0) {
1213 hw->eeprom.ops.read(hw, pointer, &length);
1214
1215 if (length != 0xFFFF && length != 0) {
1216 for (j = pointer+1; j <= pointer+length; j++) {
1217 hw->eeprom.ops.read(hw, j, &word);
1218 checksum += word;
1219 }
1220 }
1221 }
1222 }
1223
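	/*
	 * The checksum word is defined so that the 16-bit sum of all
	 * checksummed words (including the checksum itself) equals
	 * IXGBE_EEPROM_SUM; compute the value that satisfies that here.
	 */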
1224 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
1225
1226 return (checksum);
1227 }
1228
1229 /*
1230 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
1231 * @hw: pointer to hardware structure
1232 * @checksum_val: calculated checksum
1233 *
1234 * Performs checksum calculation and validates the EEPROM checksum. If the
1235 * caller does not need checksum_val, the value can be NULL.
1236 */
1237 s32
1238 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
1239 u16 *checksum_val)
1240 {
1241 s32 status;
1242 u16 checksum;
1243 u16 read_checksum = 0;
1244
1245 DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
1246
1247 /*
1248 * Read the first word from the EEPROM. If this times out or fails, do
1249 * not continue or we could be in for a very long wait while every
1250 * EEPROM read fails
1251 */
1252 status = hw->eeprom.ops.read(hw, 0, &checksum);
1253
1254 if (status == IXGBE_SUCCESS) {
1255 checksum = hw->eeprom.ops.calc_checksum(hw);
1256
1257 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
1258
1259 /*
1260 * Verify read checksum from EEPROM is the same as
1261 * calculated checksum
1262 */
1263 if (read_checksum != checksum)
1264 status = IXGBE_ERR_EEPROM_CHECKSUM;
1265
1266 /* If the user cares, return the calculated checksum */
1267 if (checksum_val)
1268 *checksum_val = checksum;
1269 } else {
1270 DEBUGOUT("EEPROM read failed\n");
1271 }
1272
1273 return (status);
1274 }
1275
1276 /*
1277 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
1278 * @hw: pointer to hardware structure
1279 */
1280 s32
1281 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1282 {
1283 s32 status;
1284 u16 checksum;
1285
1286 DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
1287
1288 /*
1289 * Read the first word from the EEPROM. If this times out or fails, do
1290 * not continue or we could be in for a very long wait while every
1291 * EEPROM read fails
1292 */
1293 status = hw->eeprom.ops.read(hw, 0, &checksum);
1294
1295 if (status == IXGBE_SUCCESS) {
1296 checksum = hw->eeprom.ops.calc_checksum(hw);
1297 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
1298 checksum);
1299 } else {
1300 DEBUGOUT("EEPROM read failed\n");
1301 }
1302
1303 return (status);
1304 }
1305
1306 /*
1307 * ixgbe_validate_mac_addr - Validate MAC address
1308 * @mac_addr: pointer to MAC address.
1309 *
1310 * Tests a MAC address to ensure it is a valid Individual Address
1311 */
1312 s32
1313 ixgbe_validate_mac_addr(u8 *mac_addr)
1314 {
1315 s32 status = IXGBE_SUCCESS;
1316
1317 DEBUGFUNC("ixgbe_validate_mac_addr");
1318
1319 /* Make sure it is not a multicast address */
1320 if (IXGBE_IS_MULTICAST(mac_addr)) {
1321 DEBUGOUT("MAC address is multicast\n");
1322 status = IXGBE_ERR_INVALID_MAC_ADDR;
1323 /* Not a broadcast address */
1324 } else if (IXGBE_IS_BROADCAST(mac_addr)) {
1325 DEBUGOUT("MAC address is broadcast\n");
1326 status = IXGBE_ERR_INVALID_MAC_ADDR;
1327 /* Reject the zero address */
1328 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
1329 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
1330 DEBUGOUT("MAC address is all zeros\n");
1331 status = IXGBE_ERR_INVALID_MAC_ADDR;
1332 }
1333 return (status);
1334 }
1335
1336 /*
1337 * ixgbe_set_rar_generic - Set Rx address register
1338 * @hw: pointer to hardware structure
1339 * @index: Receive address register to write
1340 * @addr: Address to put into receive address register
1341 * @vmdq: VMDq "set" or "pool" index
1342 * @enable_addr: set flag that address is active
1343 *
1344 * Puts an ethernet address into a receive address register.
1345 */
1346 s32
1347 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
1348 u32 enable_addr)
1349 {
1350 u32 rar_low, rar_high;
1351 u32 rar_entries = hw->mac.num_rar_entries;
1352
1353 DEBUGFUNC("ixgbe_set_rar_generic");
1354
1355 /* setup VMDq pool selection before this RAR gets enabled */
1356 hw->mac.ops.set_vmdq(hw, index, vmdq);
1357
1358 /* Make sure we are using a valid rar index range */
1359 if (index < rar_entries) {
1360 /*
1361 * HW expects these in little endian so we reverse the byte
1362 * order from network order (big endian) to little endian
1363 */
1364 rar_low = ((u32)addr[0] |
1365 ((u32)addr[1] << 8) |
1366 ((u32)addr[2] << 16) |
1367 ((u32)addr[3] << 24));
1368 /*
1369 * Some parts put the VMDq setting in the extra RAH bits,
1370 * so save everything except the lower 16 bits that hold part
1371 * of the address and the address valid bit.
1372 */
1373 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1374 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1375 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
1376
1377 if (enable_addr != 0)
1378 rar_high |= IXGBE_RAH_AV;
1379
1380 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
1381 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1382 } else {
1383 DEBUGOUT1("RAR index %d is out of range.\n", index);
1384 }
1385
1386 return (IXGBE_SUCCESS);
1387 }
1388
1389 /*
1390 * ixgbe_clear_rar_generic - Remove Rx address register
1391 * @hw: pointer to hardware structure
1392 * @index: Receive address register to write
1393 *
1394 * Clears an ethernet address from a receive address register.
1395 */
1396 s32
1397 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
1398 {
1399 u32 rar_high;
1400 u32 rar_entries = hw->mac.num_rar_entries;
1401
1402 DEBUGFUNC("ixgbe_clear_rar_generic");
1403
1404 /* Make sure we are using a valid rar index range */
1405 if (index < rar_entries) {
1406 /*
1407 * Some parts put the VMDq setting in the extra RAH bits,
1408 * so save everything except the lower 16 bits that hold part
1409 * of the address and the address valid bit.
1410 */
1411 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1412 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1413
1414 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1415 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1416 } else {
1417 DEBUGOUT1("RAR index %d is out of range.\n", index);
1418 }
1419
1420 /* clear VMDq pool/queue selection for this RAR */
1421 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
1422
1423 return (IXGBE_SUCCESS);
1424 }
1425
1426 /*
1427 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
1428 * @hw: pointer to hardware structure
1429 *
1430 * Places the MAC address in receive address register 0 and clears the rest
1431 * of the receive address registers. Clears the multicast table. Assumes
1432 * the receiver is in reset when the routine is called.
1433 */
1434 s32
1435 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1436 {
1437 u32 i;
1438 u32 rar_entries = hw->mac.num_rar_entries;
1439
1440 DEBUGFUNC("ixgbe_init_rx_addrs_generic");
1441
1442 /*
1443 * If the current mac address is valid, assume it is a software override
1444 * to the permanent address.
1445 * Otherwise, use the permanent address from the eeprom.
1446 */
1447 if (ixgbe_validate_mac_addr(hw->mac.addr) ==
1448 IXGBE_ERR_INVALID_MAC_ADDR) {
1449 /* Get the MAC address from the RAR0 for later reference */
1450 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
1451
1452 DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
1453 hw->mac.addr[0], hw->mac.addr[1],
1454 hw->mac.addr[2]);
1455 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
1456 hw->mac.addr[4], hw->mac.addr[5]);
1457 } else {
1458 /* Setup the receive address. */
1459 DEBUGOUT("Overriding MAC Address in RAR[0]\n");
1460 DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
1461 hw->mac.addr[0], hw->mac.addr[1],
1462 hw->mac.addr[2]);
1463 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
1464 hw->mac.addr[4], hw->mac.addr[5]);
1465
1466 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1467 }
1468 hw->addr_ctrl.overflow_promisc = 0;
1469
1470 hw->addr_ctrl.rar_used_count = 1;
1471
1472 /* Zero out the other receive addresses. */
1473 DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
1474 for (i = 1; i < rar_entries; i++) {
1475 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
1476 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
1477 }
1478
1479 /* Clear the MTA */
1480 hw->addr_ctrl.mta_in_use = 0;
1481 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1482
1483 DEBUGOUT(" Clearing MTA\n");
1484 for (i = 0; i < hw->mac.mcft_size; i++)
1485 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
1486
1487 (void) ixgbe_init_uta_tables(hw);
1488
1489 return (IXGBE_SUCCESS);
1490 }
1491
1492
1493 /*
1494 * ixgbe_add_uc_addr - Adds a secondary unicast address.
1495 * @hw: pointer to hardware structure
1496 * @addr: new address
1497 *
1498 * Adds it to unused receive address register or goes into promiscuous mode.
1499 */
1500 void
1501 ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
1502 {
1503 u32 rar_entries = hw->mac.num_rar_entries;
1504 u32 rar;
1505
1506 DEBUGFUNC("ixgbe_add_uc_addr");
1507
1508 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
1509 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
1510
1511 /*
1512 * Place this address in the RAR if there is room,
1513 * else put the controller into promiscuous mode
1514 */
1515 if (hw->addr_ctrl.rar_used_count < rar_entries) {
1516 rar = hw->addr_ctrl.rar_used_count;
1517 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
1518 DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
1519 hw->addr_ctrl.rar_used_count++;
1520 } else {
1521 hw->addr_ctrl.overflow_promisc++;
1522 }
1523
1524 DEBUGOUT("ixgbe_add_uc_addr Complete\n");
1525 }
1526
1527 /*
1528 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
1529 * @hw: pointer to hardware structure
1530 * @addr_list: the list of new addresses
1531 * @addr_count: number of addresses
1532 * @next: iterator function to walk the address list
1533 *
1534 * The given list replaces any existing list. Clears the secondary addrs from
1535 * receive address registers. Uses unused receive address registers for the
1536 * first secondary addresses, and falls back to promiscuous mode as needed.
1537 *
1538 * Drivers using secondary unicast addresses must set user_set_promisc when
1539 * manually putting the device into promiscuous mode.
1540 */
1541 s32
1542 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
1543 u32 addr_count, ixgbe_mc_addr_itr next)
1544 {
1545 u8 *addr;
1546 u32 i;
1547 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
1548 u32 uc_addr_in_use;
1549 u32 fctrl;
1550 u32 vmdq;
1551
1552 DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
1553
1554 /*
1555 * Clear accounting of old secondary address list,
1556 * don't count RAR[0]
1557 */
1558 uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
1559 hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
1560 hw->addr_ctrl.overflow_promisc = 0;
1561
1562 /* Zero out the other receive addresses */
1563 DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use + 1);
1564 for (i = 0; i < uc_addr_in_use; i++) {
1565 IXGBE_WRITE_REG(hw, IXGBE_RAL(i + 1), 0);
1566 IXGBE_WRITE_REG(hw, IXGBE_RAH(i + 1), 0);
1567 }
1568
1569 /* Add the new addresses */
1570 for (i = 0; i < addr_count; i++) {
1571 DEBUGOUT(" Adding the secondary addresses:\n");
1572 addr = next(hw, &addr_list, &vmdq);
1573 ixgbe_add_uc_addr(hw, addr, vmdq);
1574 }
1575
1576 if (hw->addr_ctrl.overflow_promisc) {
1577 /* enable promisc if not already in overflow or set by user */
1578 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
1579 DEBUGOUT(" Entering address overflow promisc mode\n");
1580 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1581 fctrl |= IXGBE_FCTRL_UPE;
1582 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1583 }
1584 } else {
1585 /* only disable if set by overflow, not by user */
1586 if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
1587 DEBUGOUT(" Leaving address overflow promisc mode\n");
1588 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1589 fctrl &= ~IXGBE_FCTRL_UPE;
1590 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1591 }
1592 }
1593
1594 DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
1595 return (IXGBE_SUCCESS);
1596 }
1597
1598 /*
1599 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
1600 * @hw: pointer to hardware structure
1601 * @mc_addr: the multicast address
1602 *
1603  * Extracts the 12 bits from a multicast address that determine which
1604  * bit-vector to set in the multicast table. The hardware uses 12 bits of
1605  * incoming rx multicast addresses to determine the bit-vector to check in
1606  * the MTA. Which of the 4 combinations of 12 bits the hardware uses is
1607  * selected by the MO field of MCSTCTRL. The MO field is set during
1608  * initialization to mc_filter_type.
1609 */
1610 static s32
1611 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
1612 {
1613 u32 vector = 0;
1614
1615 DEBUGFUNC("ixgbe_mta_vector");
1616
1617 switch (hw->mac.mc_filter_type) {
1618 case 0: /* use bits [47:36] of the address */
1619 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
1620 break;
1621 case 1: /* use bits [46:35] of the address */
1622 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
1623 break;
1624 case 2: /* use bits [45:34] of the address */
1625 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
1626 break;
1627 case 3: /* use bits [43:32] of the address */
1628 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
1629 break;
1630 default: /* Invalid mc_filter_type */
1631 DEBUGOUT("MC filter type param set incorrectly\n");
1632 ASSERT(0);
1633 break;
1634 }
1635
1636 /* vector can only be 12-bits or boundary will be exceeded */
1637 vector &= 0xFFF;
1638 return (vector);
1639 }
1640
1641 /*
1642 * ixgbe_set_mta - Set bit-vector in multicast table
1643 * @hw: pointer to hardware structure
1644  * @mc_addr: the multicast address
1645 *
1646 * Sets the bit-vector in the multicast table.
1647 */
1648 void
1649 ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
1650 {
1651 u32 vector;
1652 u32 vector_bit;
1653 u32 vector_reg;
1654
1655 DEBUGFUNC("ixgbe_set_mta");
1656
1657 hw->addr_ctrl.mta_in_use++;
1658
1659 vector = ixgbe_mta_vector(hw, mc_addr);
1660 DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
1661
1662 /*
1663 * The MTA is a register array of 128 32-bit registers. It is treated
1664 * like an array of 4096 bits. We want to set bit
1665 * BitArray[vector_value]. So we figure out what register the bit is
1666 * in, read it, OR in the new bit, then write back the new value. The
1667 * register is determined by the upper 7 bits of the vector value and
1668 * the bit within that register are determined by the lower 5 bits of
1669 * the value.
1670 */
1671 vector_reg = (vector >> 5) & 0x7F;
1672 vector_bit = vector & 0x1F;
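	/*
	 * Illustrative example (assuming mc_filter_type 0): for the IPv4
	 * all-hosts group 01:00:5E:00:00:01, mc_addr[4] = 0x00 and
	 * mc_addr[5] = 0x01 give vector = 0x010, so vector_reg = 0 and
	 * vector_bit = 16, i.e. bit 16 of MTA register 0 is set.
	 */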
1673 hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
1674 }
1675
1676 /*
1677 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
1678 * @hw: pointer to hardware structure
1679 * @mc_addr_list: the list of new multicast addresses
1680 * @mc_addr_count: number of addresses
1681 * @next: iterator function to walk the multicast address list
1682 *
1683 * The given list replaces any existing list. Clears the MC addrs from receive
1684 * address registers and the multicast table. Uses unused receive address
1685 * registers for the first multicast addresses, and hashes the rest into the
1686 * multicast table.
1687 */
1688 s32
1689 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
1690 u32 mc_addr_count, ixgbe_mc_addr_itr next)
1691 {
1692 u32 i;
1693 u32 vmdq;
1694
1695 DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
1696
1697 /*
1698 * Set the new number of MC addresses that we are being requested to
1699 * use.
1700 */
1701 hw->addr_ctrl.num_mc_addrs = mc_addr_count;
1702 hw->addr_ctrl.mta_in_use = 0;
1703
1704 /* Clear mta_shadow */
1705 DEBUGOUT(" Clearing MTA\n");
1706 (void) memset(&hw->mac.mta_shadow, 0, sizeof (hw->mac.mta_shadow));
1707
1708 /* Update mta_shadow */
1709 for (i = 0; i < mc_addr_count; i++) {
1710 DEBUGOUT(" Adding the multicast addresses:\n");
1711 ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
1712 }
1713
1714 /* Enable mta */
1715 for (i = 0; i < hw->mac.mcft_size; i++)
1716 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
1717 hw->mac.mta_shadow[i]);
1718
1719 if (hw->addr_ctrl.mta_in_use > 0)
1720 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
1721 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
1722
1723 DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
1724 return (IXGBE_SUCCESS);
1725 }
1726
1727 /*
1728 * ixgbe_enable_mc_generic - Enable multicast address in RAR
1729 * @hw: pointer to hardware structure
1730 *
1731 * Enables multicast address in RAR and the use of the multicast hash table.
1732 */
1733 s32
1734 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
1735 {
1736 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
1737
1738 DEBUGFUNC("ixgbe_enable_mc_generic");
1739
1740 if (a->mta_in_use > 0)
1741 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
1742 hw->mac.mc_filter_type);
1743
1744 return (IXGBE_SUCCESS);
1745 }
1746
1747 /*
1748 * ixgbe_disable_mc_generic - Disable multicast address in RAR
1749 * @hw: pointer to hardware structure
1750 *
1751 * Disables multicast address in RAR and the use of the multicast hash table.
1752 */
1753 s32
1754 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
1755 {
1756 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
1757
1758 DEBUGFUNC("ixgbe_disable_mc_generic");
1759
1760 if (a->mta_in_use > 0)
1761 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1762
1763 return (IXGBE_SUCCESS);
1764 }
1765
1766 /*
1767 * ixgbe_fc_enable_generic - Enable flow control
1768 * @hw: pointer to hardware structure
1769 * @packetbuf_num: packet buffer number (0-7)
1770 *
1771 * Enable flow control according to the current settings.
1772 */
1773 s32
1774 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1775 {
1776 s32 ret_val = IXGBE_SUCCESS;
1777 u32 mflcn_reg, fccfg_reg;
1778 u32 reg;
1779 u32 rx_pba_size;
1780
1781 DEBUGFUNC("ixgbe_fc_enable_generic");
1782
1783 /* Negotiate the fc mode to use */
1784 ret_val = ixgbe_fc_autoneg(hw);
1785 if (ret_val == IXGBE_ERR_FLOW_CONTROL)
1786 goto out;
1787
1788 /* Disable any previous flow control settings */
1789 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
1790 mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);
1791
1792 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
1793 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
1794
1795 /*
1796 * The possible values of fc.current_mode are:
1797 * 0: Flow control is completely disabled
1798 * 1: Rx flow control is enabled (we can receive pause frames,
1799 * but not send pause frames).
1800 * 2: Tx flow control is enabled (we can send pause frames but
1801 * we do not support receiving pause frames).
1802 * 3: Both Rx and Tx flow control (symmetric) are enabled.
1803 * other: Invalid.
1804 */
1805 switch (hw->fc.current_mode) {
1806 case ixgbe_fc_none:
1807 /*
1808 * Flow control is disabled by software override or autoneg.
1809 * The code below will actually disable it in the HW.
1810 */
1811 break;
1812 case ixgbe_fc_rx_pause:
1813 /*
1814 * Rx Flow control is enabled and Tx Flow control is
1815 * disabled by software override. Since there really
1816 * isn't a way to advertise that we are capable of RX
1817 * Pause ONLY, we will advertise that we support both
1818 * symmetric and asymmetric Rx PAUSE. Later, we will
1819 * disable the adapter's ability to send PAUSE frames.
1820 */
1821 mflcn_reg |= IXGBE_MFLCN_RFCE;
1822 break;
1823 case ixgbe_fc_tx_pause:
1824 /*
1825 * Tx Flow control is enabled, and Rx Flow control is
1826 * disabled by software override.
1827 */
1828 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
1829 break;
1830 case ixgbe_fc_full:
1831 /* Flow control (both Rx and Tx) is enabled by SW override. */
1832 mflcn_reg |= IXGBE_MFLCN_RFCE;
1833 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
1834 break;
1835 default:
1836 DEBUGOUT("Flow control param set incorrectly\n");
1837 ret_val = IXGBE_ERR_CONFIG;
1838 goto out;
1839 }
1840
1841 /* Set 802.3x based flow control settings. */
1842 mflcn_reg |= IXGBE_MFLCN_DPF;
1843 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
1844 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
1845
1846 reg = IXGBE_READ_REG(hw, IXGBE_MTQC);
1847 /* Thresholds are different for link flow control when in DCB mode */
1848 if (reg & IXGBE_MTQC_RT_ENA) {
1849 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
1850
1851 /* Always disable XON for LFC when in DCB mode */
1852 reg = (rx_pba_size >> 5) & 0xFFE0;
1853 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), reg);
1854
1855 reg = (rx_pba_size >> 2) & 0xFFE0;
1856 if (hw->fc.current_mode & ixgbe_fc_tx_pause)
1857 reg |= IXGBE_FCRTH_FCEN;
1858 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), reg);
1859 } else {
1860 /*
1861 * Set up and enable Rx high/low water mark thresholds,
1862 * enable XON.
1863 */
1864 if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
1865 if (hw->fc.send_xon) {
1866 IXGBE_WRITE_REG(hw,
1867 IXGBE_FCRTL_82599(packetbuf_num),
1868 (hw->fc.low_water | IXGBE_FCRTL_XONE));
1869 } else {
1870 IXGBE_WRITE_REG(hw,
1871 IXGBE_FCRTL_82599(packetbuf_num),
1872 hw->fc.low_water);
1873 }
1874
1875 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num),
1876 (hw->fc.high_water | IXGBE_FCRTH_FCEN));
1877 }
1878 }
1879
1880 /* Configure pause time (2 TCs per register) */
1881 reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
1882 if ((packetbuf_num & 1) == 0)
1883 reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
1884 else
1885 reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
1886 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
1887
1888 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
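/*
 * Example of the packing above: for packetbuf_num 3 the pause time lands in
 * the upper 16 bits of FCTTV(1), while packetbuf_num 2 would occupy the lower
 * 16 bits of the same register. The refresh value written to FCRTV is half
 * the pause time, presumably so XOFF frames are refreshed well before the
 * link partner's pause timer expires.
 */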
1889
1890 out:
1891 return (ret_val);
1892 }
1893
1894 /*
1895 * ixgbe_fc_autoneg - Configure flow control
1896 * @hw: pointer to hardware structure
1897 *
1898 * Compares our advertised flow control capabilities to those advertised by
1899 * our link partner, and determines the proper flow control mode to use.
1900 */
1901 s32
1902 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
1903 {
1904 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
1905 ixgbe_link_speed speed;
1906 bool link_up;
1907
1908 DEBUGFUNC("ixgbe_fc_autoneg");
1909
1910 if (hw->fc.disable_fc_autoneg)
1911 goto out;
1912
1913 /*
1914 * AN should have completed when the cable was plugged in.
1915 * Look for reasons to bail out. Bail out if:
1916 * - FC autoneg is disabled, or if
1917 * - link is not up.
1918 *
1919 * Since we're being called from an LSC, link is already known to be up.
1920 * So use link_up_wait_to_complete=false.
1921 */
1922 hw->mac.ops.check_link(hw, &speed, &link_up, false);
1923 if (!link_up) {
1924 ret_val = IXGBE_ERR_FLOW_CONTROL;
1925 goto out;
1926 }
1927
1928 switch (hw->phy.media_type) {
1929 /* Autoneg flow control on fiber adapters */
1930 case ixgbe_media_type_fiber:
1931 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
1932 ret_val = ixgbe_fc_autoneg_fiber(hw);
1933 break;
1934
1935 /* Autoneg flow control on backplane adapters */
1936 case ixgbe_media_type_backplane:
1937 ret_val = ixgbe_fc_autoneg_backplane(hw);
1938 break;
1939
1940 /* Autoneg flow control on copper adapters */
1941 case ixgbe_media_type_copper:
1942 if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
1943 ret_val = ixgbe_fc_autoneg_copper(hw);
1944 break;
1945
1946 default:
1947 break;
1948 }
1949
1950 out:
1951 if (ret_val == IXGBE_SUCCESS) {
1952 hw->fc.fc_was_autonegged = true;
1953 } else {
1954 hw->fc.fc_was_autonegged = false;
1955 hw->fc.current_mode = hw->fc.requested_mode;
1956 }
1957 return (ret_val);
1958 }
1959
1960 /*
1961 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
1962 * @hw: pointer to hardware structure
1963 *
1964 * Enable flow control on 1 gig fiber.
1967 */
1968 static s32
1969 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
1970 {
1971 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
1972 s32 ret_val;
1973
1974 /*
1975 * On multispeed fiber at 1g, bail out if
1976 * - link is up but AN did not complete, or if
1977 * - link is up and AN completed but timed out
1978 */
1979
1980 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
1981 if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
1982 ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) != 0)) {
1983 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
1984 goto out;
1985 }
1986
1987 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
1988 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
1989
1990 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
1991 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
1992 IXGBE_PCS1GANA_ASM_PAUSE,
1993 IXGBE_PCS1GANA_SYM_PAUSE,
1994 IXGBE_PCS1GANA_ASM_PAUSE);
1995
1996 out:
1997 return (ret_val);
1998 }
1999
2000 /*
2001 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2002 * @hw: pointer to hardware structure
2003 *
2004 * Enable flow control according to IEEE clause 37.
2005 */
2006 static s32
2007 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2008 {
2009 u32 links2, anlp1_reg, autoc_reg, links;
2010 s32 ret_val;
2011
2012 /*
2013 * On backplane, bail out if
2014 * - backplane autoneg was not completed, or if
2015 * - we are 82599 and link partner is not AN enabled
2016 */
2017 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2018 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
2019 hw->fc.fc_was_autonegged = false;
2020 hw->fc.current_mode = hw->fc.requested_mode;
2021 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2022 goto out;
2023 }
2024
2025 if (hw->mac.type == ixgbe_mac_82599EB) {
2026 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2027 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
2028 hw->fc.fc_was_autonegged = false;
2029 hw->fc.current_mode = hw->fc.requested_mode;
2030 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2031 goto out;
2032 }
2033 }
2034 /*
2035 * Read the 10g AN autoc and LP ability registers and resolve
2036 * local flow control settings accordingly
2037 */
2038 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2039 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2040
2041 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2042 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2043 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2044
2045 out:
2046 return (ret_val);
2047 }
2048
2049 /*
2050 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2051 * @hw: pointer to hardware structure
2052 *
2053 * Enable flow control according to IEEE clause 37.
2054 */
2055 static s32
2056 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2057 {
2058 u16 technology_ability_reg = 0;
2059 u16 lp_technology_ability_reg = 0;
2060
2061 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2062 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &technology_ability_reg);
2063 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
2064 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &lp_technology_ability_reg);
2065
2066 return (ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
2067 (u32)lp_technology_ability_reg,
2068 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2069 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE));
2070 }
2071
2072 /*
2073 * ixgbe_negotiate_fc - Negotiate flow control
2074 * @hw: pointer to hardware structure
2075 * @adv_reg: flow control advertised settings
2076 * @lp_reg: link partner's flow control settings
2077 * @adv_sym: symmetric pause bit in advertisement
2078 * @adv_asm: asymmetric pause bit in advertisement
2079 * @lp_sym: symmetric pause bit in link partner advertisement
2080 * @lp_asm: asymmetric pause bit in link partner advertisement
2081 *
2082 * Find the intersection between advertised settings and link partner's
2083 * advertised settings
2084 */
2085 static s32
2086 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2087 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2088 {
2089 if ((!(adv_reg)) || (!(lp_reg)))
2090 return (IXGBE_ERR_FC_NOT_NEGOTIATED);
2091
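/*
 * The chain below mirrors the pause resolution rules of IEEE 802.3
 * Annex 28B: both sides advertising symmetric PAUSE yields full flow
 * control (or Rx-only, when Rx-only was all we requested); advertising
 * asymmetric-only locally while the partner advertises both yields
 * Tx-only; the mirror case yields Rx-only; anything else resolves to none.
 */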
2092 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2093 /*
2094 * Now we need to check if the user selected Rx-only
2095 * pause frames. In this case, we had to advertise
2096 * FULL flow control because we could not advertise RX
2097 * ONLY. Hence, we must now check to see if we need to
2098 * turn OFF the TRANSMISSION of PAUSE frames.
2099 */
2100 if (hw->fc.requested_mode == ixgbe_fc_full) {
2101 hw->fc.current_mode = ixgbe_fc_full;
2102 DEBUGOUT("Flow Control = FULL.\n");
2103 } else {
2104 hw->fc.current_mode = ixgbe_fc_rx_pause;
2105 DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2106 }
2107 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2108 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2109 hw->fc.current_mode = ixgbe_fc_tx_pause;
2110 DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2111 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2112 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2113 hw->fc.current_mode = ixgbe_fc_rx_pause;
2114 DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2115 } else {
2116 hw->fc.current_mode = ixgbe_fc_none;
2117 DEBUGOUT("Flow Control = NONE.\n");
2118 }
2119 return (IXGBE_SUCCESS);
2120 }
2121
2122 /*
2123 * ixgbe_setup_fc - Set up flow control
2124 * @hw: pointer to hardware structure
2125 *
2126 * Called at init time to set up flow control.
2127 */
2128 s32
2129 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
2130 {
2131 s32 ret_val = IXGBE_SUCCESS;
2132 u32 reg = 0, reg_bp = 0;
2133 u16 reg_cu = 0;
2134
2135 DEBUGFUNC("ixgbe_setup_fc");
2136
2137 /* Validate the packetbuf configuration */
2138 if (packetbuf_num < 0 || packetbuf_num > 7) {
2139 DEBUGOUT1("Invalid packet buffer number [%d], expected range is"
2140 " 0-7\n", packetbuf_num);
2141 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2142 goto out;
2143 }
2144
2145 /*
2146 * Validate the water mark configuration. Zero water marks are invalid
2147 * because they cause the controller to just blast out fc packets.
2148 */
2149 if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
2150 DEBUGOUT("Invalid water mark configuration\n");
2151 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2152 goto out;
2153 }
2154
2155 /*
2156 * Validate the requested mode. Strict IEEE mode does not allow
2157 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
2158 */
2159 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
2160 DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
2161 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2162 goto out;
2163 }
2164
2165 /*
2166 * 10gig parts do not have a word in the EEPROM to determine the
2167 * default flow control setting, so we explicitly set it to full.
2168 */
2169 if (hw->fc.requested_mode == ixgbe_fc_default)
2170 hw->fc.requested_mode = ixgbe_fc_full;
2171
2172 /*
2173 * Set up the 1G and 10G flow control advertisement registers so the
2174 * HW will be able to do fc autoneg once the cable is plugged in. If
2175 * we link at 10G, the 1G advertisement is harmless and vice versa.
2176 */
2177 switch (hw->phy.media_type) {
2178 case ixgbe_media_type_fiber:
2179 case ixgbe_media_type_backplane:
2180 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2181 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2182 break;
2183
2184 case ixgbe_media_type_copper:
2185 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2186 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
2187 break;
2188
2189 default:
2190 ;
2191 }
2192
2193 /*
2194 * The possible values of fc.requested_mode are:
2195 * 0: Flow control is completely disabled
2196 * 1: Rx flow control is enabled (we can receive pause frames,
2197 * but not send pause frames).
2198 * 2: Tx flow control is enabled (we can send pause frames but
2199 * we do not support receiving pause frames).
2200 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2201 * other: Invalid.
2202 */
2203 switch (hw->fc.requested_mode) {
2204 case ixgbe_fc_none:
2205 /* Flow control completely disabled by software override. */
2206 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2207 if (hw->phy.media_type == ixgbe_media_type_backplane)
2208 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
2209 IXGBE_AUTOC_ASM_PAUSE);
2210 else if (hw->phy.media_type == ixgbe_media_type_copper)
2211 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2212 break;
2213 case ixgbe_fc_rx_pause:
2214 /*
2215 * Rx Flow control is enabled and Tx Flow control is
2216 * disabled by software override. Since there really
2217 * isn't a way to advertise that we are capable of RX
2218 * Pause ONLY, we will advertise that we support both
2219 * symmetric and asymmetric Rx PAUSE. Later, we will
2220 * disable the adapter's ability to send PAUSE frames.
2221 */
2222 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2223 if (hw->phy.media_type == ixgbe_media_type_backplane)
2224 reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
2225 IXGBE_AUTOC_ASM_PAUSE);
2226 else if (hw->phy.media_type == ixgbe_media_type_copper)
2227 reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2228 break;
2229 case ixgbe_fc_tx_pause:
2230 /*
2231 * Tx Flow control is enabled, and Rx Flow control is
2232 * disabled by software override.
2233 */
2234 reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
2235 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
2236 if (hw->phy.media_type == ixgbe_media_type_backplane) {
2237 reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
2238 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
2239 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
2240 reg_cu |= (IXGBE_TAF_ASM_PAUSE);
2241 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
2242 }
2243 break;
2244 case ixgbe_fc_full:
2245 /* Flow control (both Rx and Tx) is enabled by SW override. */
2246 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2247 if (hw->phy.media_type == ixgbe_media_type_backplane)
2248 reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
2249 IXGBE_AUTOC_ASM_PAUSE);
2250 else if (hw->phy.media_type == ixgbe_media_type_copper)
2251 reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2252 break;
2253 default:
2254 DEBUGOUT("Flow control param set incorrectly\n");
2255 ret_val = IXGBE_ERR_CONFIG;
2256 goto out;
2257 }
2258
2259 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
2260 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
2261
2262 /* Disable AN timeout */
2263 if (hw->fc.strict_ieee)
2264 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
2265
2266 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
2267 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
2268
2269 /*
2270 * AUTOC restart handles negotiation of 1G and 10G. There is
2271 * no need to set the PCS1GCTL register.
2272 */
2273 if (hw->phy.media_type == ixgbe_media_type_backplane) {
2274 reg_bp |= IXGBE_AUTOC_AN_RESTART;
2275 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
2276 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
2277 (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
2278 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2279 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
2280 }
2281
2282 DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
2283
2284 out:
2285 return (ret_val);
2286 }
2287
2288 /*
2289 * ixgbe_disable_pcie_master - Disable PCI-express master access
2290 * @hw: pointer to hardware structure
2291 *
2292 * Disables PCI-Express master access and verifies there are no pending
2293 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if the master
2294 * disable bit fails to stop pending master requests; otherwise
2295 * IXGBE_SUCCESS is returned, signifying that master requests are disabled.
2296 */
2297 s32
2298 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2299 {
2300 u32 i;
2301 u32 reg_val;
2302 u32 number_of_queues;
2303 s32 status = IXGBE_SUCCESS;
2304
2305 DEBUGFUNC("ixgbe_disable_pcie_master");
2306
2307 /* Just jump out if bus mastering is already disabled */
2308 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2309 goto out;
2310
2311 /* Disable the receive unit by stopping each queue */
2312 number_of_queues = hw->mac.max_rx_queues;
2313 for (i = 0; i < number_of_queues; i++) {
2314 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
2315 if (reg_val & IXGBE_RXDCTL_ENABLE) {
2316 reg_val &= ~IXGBE_RXDCTL_ENABLE;
2317 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
2318 }
2319 }
2320
2321 reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL);
2322 reg_val |= IXGBE_CTRL_GIO_DIS;
2323 IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
2324
2325 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2326 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2327 goto out;
2328 usec_delay(100);
2329 }
2330
2331 DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
2332 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
2333
2334 /*
2335 * The GIO Master Disable bit didn't clear. There are multiple reasons
2336 * for this listed in the datasheet 5.2.5.3.2 Master Disable, and they
2337 * all require a double reset to recover from. Before proceeding, we
2338 * first wait a little more to try to ensure that, at a minimum, the
2339 * PCIe block has no transactions pending.
2340 */
2341 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2342 if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
2343 IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
2344 break;
2345 usec_delay(100);
2346 }
2347
2348 if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT)
2349 DEBUGOUT("PCIe transaction pending bit also did not clear.\n");
2350
2351 /*
2352 * Two consecutive resets are required via CTRL.RST per datasheet
2353 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
2354 * of this need. The first reset prevents new master requests from
2355 * being issued by our device. We then must wait 1usec for any
2356 * remaining completions from the PCIe bus to trickle in, and then reset
2357 * again to clear out any effects they may have had on our device.
2358 */
2359 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2360
2361 out:
2362 return (status);
2363 }
2364
2365 /*
2366 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
2367 * @hw: pointer to hardware structure
2368 * @mask: Mask to specify which semaphore to acquire
2369 *
2370 * Acquires the SWFW semaphore through the GSSR register for the specified
2371 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2372 */
2373 s32
2374 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2375 {
2376 u32 gssr;
2377 u32 swmask = mask;
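/*
 * In GSSR the firmware's ownership flag for each resource sits 5 bits
 * above the corresponding software flag, which is why fwmask is mask << 5.
 */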
2378 u32 fwmask = mask << 5;
2379 s32 timeout = 200;
2380
2381 DEBUGFUNC("ixgbe_acquire_swfw_sync");
2382
2383 while (timeout) {
2384 /*
2385 * SW EEPROM semaphore bit is used for access to all
2386 * SW_FW_SYNC/GSSR bits (not just EEPROM)
2387 */
2388 if (ixgbe_get_eeprom_semaphore(hw))
2389 return (IXGBE_ERR_SWFW_SYNC);
2390
2391 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2392 if (!(gssr & (fwmask | swmask)))
2393 break;
2394
2395 /*
2396 * Firmware currently using resource (fwmask) or other software
2397 * thread currently using resource (swmask)
2398 */
2399 ixgbe_release_eeprom_semaphore(hw);
2400 msec_delay(5);
2401 timeout--;
2402 }
2403
2404 if (!timeout) {
2405 DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
2406 return (IXGBE_ERR_SWFW_SYNC);
2407 }
2408
2409 gssr |= swmask;
2410 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2411
2412 ixgbe_release_eeprom_semaphore(hw);
2413 return (IXGBE_SUCCESS);
2414 }
2415
2416 /*
2417 * ixgbe_release_swfw_sync - Release SWFW semaphore
2418 * @hw: pointer to hardware structure
2419 * @mask: Mask to specify which semaphore to release
2420 *
2421 * Releases the SWFW semaphore through the GSSR register for the specified
2422 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2423 */
2424 void
2425 ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2426 {
2427 u32 gssr;
2428 u32 swmask = mask;
2429
2430 DEBUGFUNC("ixgbe_release_swfw_sync");
2431
2432 (void) ixgbe_get_eeprom_semaphore(hw);
2433
2434 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2435 gssr &= ~swmask;
2436 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2437
2438 ixgbe_release_eeprom_semaphore(hw);
2439 }
2440
2441 /*
2442 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
2443 * @hw: pointer to hardware structure
2444 * @regval: register value to write to RXCTRL
2445 *
2446 * Enables the Rx DMA unit
2447 */
2448 s32
2449 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
2450 {
2451 DEBUGFUNC("ixgbe_enable_rx_dma_generic");
2452
2453 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2454
2455 return (IXGBE_SUCCESS);
2456 }
2457
2458 /*
2459 * ixgbe_blink_led_start_generic - Blink LED based on index.
2460 * @hw: pointer to hardware structure
2461 * @index: led number to blink
2462 */
2463 s32
2464 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2465 {
2466 ixgbe_link_speed speed = 0;
2467 bool link_up = 0;
2468 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2469 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2470
2471 DEBUGFUNC("ixgbe_blink_led_start_generic");
2472
2473 /*
2474 * Link must be up to auto-blink the LEDs;
2475 * Force it if link is down.
2476 */
2477 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2478
2479 if (!link_up) {
2480 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2481 autoc_reg |= IXGBE_AUTOC_FLU;
2482 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2483 msec_delay(10);
2484 }
2485
2486 led_reg &= ~IXGBE_LED_MODE_MASK(index);
2487 led_reg |= IXGBE_LED_BLINK(index);
2488 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2489 IXGBE_WRITE_FLUSH(hw);
2490
2491 return (IXGBE_SUCCESS);
2492 }
2493
2494 /*
2495 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
2496 * @hw: pointer to hardware structure
2497 * @index: led number to stop blinking
2498 */
2499 s32
2500 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
2501 {
2502 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2503 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2504
2505 DEBUGFUNC("ixgbe_blink_led_stop_generic");
2506
2507 autoc_reg &= ~IXGBE_AUTOC_FLU;
2508 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2509 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2510
2511 led_reg &= ~IXGBE_LED_MODE_MASK(index);
2512 led_reg &= ~IXGBE_LED_BLINK(index);
2513 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
2514 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2515 IXGBE_WRITE_FLUSH(hw);
2516
2517 return (IXGBE_SUCCESS);
2518 }
2519
2520 /*
2521 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
2522 * @hw: pointer to hardware structure
2523 * @san_mac_offset: SAN MAC address offset
2524 *
2525 * This function will read the EEPROM location for the SAN MAC address
2526 * pointer, and returns the value at that location. This is used in both
2527 * get and set mac_addr routines.
2528 */
2529 static s32
2530 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, u16 *san_mac_offset)
2531 {
2532 DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
2533
2534 /*
2535 * First read the EEPROM pointer to see if the MAC addresses are
2536 * available.
2537 */
2538 hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
2539
2540 return (IXGBE_SUCCESS);
2541 }
2542
2543 /*
2544 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
2545 * @hw: pointer to hardware structure
2546 * @san_mac_addr: SAN MAC address
2547 *
2548 * Reads the SAN MAC address from the EEPROM, if it's available. This is
2549 * per-port, so set_lan_id() must be called before reading the addresses.
2550 * set_lan_id() is called by identify_sfp(), but this cannot be relied
2551 * upon for non-SFP connections, so we must call it here.
2552 */
2553 s32
2554 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
2555 {
2556 u16 san_mac_data, san_mac_offset;
2557 u8 i;
2558
2559 DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
2560
2561 /*
2562 * First read the EEPROM pointer to see if the MAC addresses are
2563 * available. If they're not, no point in calling set_lan_id() here.
2564 */
2565 (void) ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
2566
2567 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
2568 /*
2569 * No addresses available in this EEPROM. It's not an
2570 * error though, so just wipe the local address and return.
2571 */
2572 for (i = 0; i < 6; i++)
2573 san_mac_addr[i] = 0xFF;
2574
2575 goto san_mac_addr_out;
2576 }
2577
2578 /* make sure we know which port we need to program */
2579 hw->mac.ops.set_lan_id(hw);
2580 /* apply the port offset to the address offset */
2581 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
2582 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
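/*
 * Each EEPROM word holds two bytes of the address, least-significant byte
 * first, so the three reads below assemble the full 6-byte SAN MAC address.
 */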
2583 for (i = 0; i < 3; i++) {
2584 hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
2585 san_mac_addr[i * 2] = (u8)(san_mac_data);
2586 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
2587 san_mac_offset++;
2588 }
2589
2590 san_mac_addr_out:
2591 return (IXGBE_SUCCESS);
2592 }
2593
2594 /*
2595 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
2596 * @hw: pointer to hardware structure
2597 * @san_mac_addr: SAN MAC address
2598 *
2599 * Write a SAN MAC address to the EEPROM.
2600 */
2601 s32
2602 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
2603 {
2604 s32 status = IXGBE_SUCCESS;
2605 u16 san_mac_data, san_mac_offset;
2606 u8 i;
2607
2608 DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
2609
2610 /* Look for SAN mac address pointer. If not defined, return */
2611 (void) ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
2612
2613 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
2614 status = IXGBE_ERR_NO_SAN_ADDR_PTR;
2615 goto san_mac_addr_out;
2616 }
2617
2618 /* Make sure we know which port we need to write */
2619 hw->mac.ops.set_lan_id(hw);
2620 /* Apply the port offset to the address offset */
2621 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
2622 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
2623
2624 for (i = 0; i < 3; i++) {
2625 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
2626 san_mac_data |= (u16)(san_mac_addr[i * 2]);
2627 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
2628 san_mac_offset++;
2629 }
2630
2631 san_mac_addr_out:
2632 return (status);
2633 }
2634
2635 /*
2636 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
2637 * @hw: pointer to hardware structure
2638 *
2639 * Read PCIe configuration space, and get the MSI-X vector count from
2640 * the capabilities table.
2641 */
2642 u32
2643 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
2644 {
2645 u32 msix_count = 64;
2646
2647 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
2648 if (hw->mac.msix_vectors_from_pcie) {
2649 msix_count = IXGBE_READ_PCIE_WORD(hw,
2650 IXGBE_PCIE_MSIX_82599_CAPS);
2651 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
2652
2653 /*
2654 * MSI-X count is zero-based in HW, so increment to give
2655 * proper value.
2656 */
2657 msix_count++;
2658 }
2659
2660 return (msix_count);
2661 }
2662
2663 /*
2664 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
2665 * @hw: pointer to hardware structure
2666 * @addr: Address to put into receive address register
2667 * @vmdq: VMDq pool to assign
2668 *
2669 * Puts an ethernet address into a receive address register, or
2670 * finds the rar that it is already in; adds to the pool list
2671 */
2672 s32
2673 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2674 {
2675 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
2676 u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
2677 u32 rar;
2678 u32 rar_low, rar_high;
2679 u32 addr_low, addr_high;
2680
2681 DEBUGFUNC("ixgbe_insert_mac_addr_generic");
2682
2683 /* swap bytes for HW little endian */
2684 addr_low = addr[0] | (addr[1] << 8)
2685 | (addr[2] << 16)
2686 | (addr[3] << 24);
2687 addr_high = addr[4] | (addr[5] << 8);
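/*
 * For example, the address a0:a1:a2:a3:a4:a5 packs into
 * addr_low (RAL) = 0xa3a2a1a0 and the low 16 bits of addr_high (RAH) = 0xa5a4.
 */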
2688
2689 /*
2690 * Either find the mac_id in rar or find the first empty space.
2691 * rar_highwater points to just after the highest currently used
2692 * rar in order to shorten the search. It grows when we add a new
2693 * rar to the top.
2694 */
2695 for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
2696 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
2697
2698 if (((IXGBE_RAH_AV & rar_high) == 0) &&
2699 first_empty_rar == NO_EMPTY_RAR_FOUND) {
2700 first_empty_rar = rar;
2701 } else if ((rar_high & 0xFFFF) == addr_high) {
2702 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
2703 if (rar_low == addr_low)
2704 break; /* found it already in the rars */
2705 }
2706 }
2707
2708 if (rar < hw->mac.rar_highwater) {
2709 /* already there so just add to the pool bits */
2710 (void) ixgbe_set_vmdq(hw, rar, vmdq);
2711 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
2712 /* stick it into first empty RAR slot we found */
2713 rar = first_empty_rar;
2714 (void) ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2715 } else if (rar == hw->mac.rar_highwater) {
2716 /* add it to the top of the list and inc the highwater mark */
2717 (void) ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2718 hw->mac.rar_highwater++;
2719 } else if (rar >= hw->mac.num_rar_entries) {
2720 return (IXGBE_ERR_INVALID_MAC_ADDR);
2721 }
2722
2723 /*
2724 * If we found rar[0], make sure the default pool bit (we use pool 0)
2725 * remains cleared to be sure default pool packets will get delivered
2726 */
2727 if (rar == 0)
2728 (void) ixgbe_clear_vmdq(hw, rar, 0);
2729
2730 return (rar);
2731 }
2732
2733 /*
2734 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
2735 * @hw: pointer to hardware struct
2736 * @rar: receive address register index to disassociate
2737 * @vmdq: VMDq pool index to remove from the rar
2738 */
2739 s32
2740 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2741 {
2742 u32 mpsar_lo, mpsar_hi;
2743 u32 rar_entries = hw->mac.num_rar_entries;
2744
2745 DEBUGFUNC("ixgbe_clear_vmdq_generic");
2746
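/*
 * Each RAR carries a 64-pool membership bitmap split across two registers:
 * MPSAR_LO covers pools 0-31 and MPSAR_HI covers pools 32-63.
 */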
2747 if (rar < rar_entries) {
2748 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2749 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2750
2751 if (!mpsar_lo && !mpsar_hi)
2752 goto done;
2753
2754 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
2755 if (mpsar_lo) {
2756 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
2757 mpsar_lo = 0;
2758 }
2759 if (mpsar_hi) {
2760 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
2761 mpsar_hi = 0;
2762 }
2763 } else if (vmdq < 32) {
2764 mpsar_lo &= ~(1 << vmdq);
2765 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
2766 } else {
2767 mpsar_hi &= ~(1 << (vmdq - 32));
2768 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
2769 }
2770
2771 /* was that the last pool using this rar? */
2772 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
2773 hw->mac.ops.clear_rar(hw, rar);
2774 } else {
2775 DEBUGOUT1("RAR index %d is out of range.\n", rar);
2776 }
2777
2778 done:
2779 return (IXGBE_SUCCESS);
2780 }
2781
2782 /*
2783 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
2784 * @hw: pointer to hardware struct
2785 * @rar: receive address register index to associate with a VMDq index
2786 * @vmdq: VMDq pool index
2787 */
2788 s32
2789 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2790 {
2791 u32 mpsar;
2792 u32 rar_entries = hw->mac.num_rar_entries;
2793
2794 DEBUGFUNC("ixgbe_set_vmdq_generic");
2795
2796 if (rar < rar_entries) {
2797 if (vmdq < 32) {
2798 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2799 mpsar |= 1 << vmdq;
2800 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
2801 } else {
2802 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2803 mpsar |= 1 << (vmdq - 32);
2804 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
2805 }
2806 } else {
2807 DEBUGOUT1("RAR index %d is out of range.\n", rar);
2808 }
2809 return (IXGBE_SUCCESS);
2810 }
2811
2812 /*
2813 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
2814 * @hw: pointer to hardware structure
2815 */
2816 s32
2817 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
2818 {
2819 int i;
2820
2821 DEBUGFUNC("ixgbe_init_uta_tables_generic");
2822 DEBUGOUT(" Clearing UTA\n");
2823
2824 for (i = 0; i < 128; i++)
2825 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
2826
2827 return (IXGBE_SUCCESS);
2828 }
2829
2830 /*
2831 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
2832 * @hw: pointer to hardware structure
2833 * @vlan: VLAN id to write to VLAN filter
2834 *
2835 * return the VLVF index where this VLAN id should be placed
2836 *
2837 */
2838 s32
2839 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
2840 {
2841 u32 bits = 0;
2842 u32 first_empty_slot = 0;
2843 s32 regindex;
2844
2845 /* short cut the special case */
2846 if (vlan == 0)
2847 return (0);
2848
2849 /*
2850 * Search for the vlan id in the VLVF entries. Save off the first empty
2851 * slot found along the way
2852 */
2853 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
2854 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
2855 if (!bits && !(first_empty_slot))
2856 first_empty_slot = regindex;
2857 else if ((bits & 0x0FFF) == vlan)
2858 break;
2859 }
2860
2861 /*
2862 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
2863 * in the VLVF. Else use the first empty VLVF register for this
2864 * vlan id.
2865 */
2866 if (regindex >= IXGBE_VLVF_ENTRIES) {
2867 if (first_empty_slot)
2868 regindex = first_empty_slot;
2869 else {
2870 DEBUGOUT("No space in VLVF.\n");
2871 regindex = IXGBE_ERR_NO_SPACE;
2872 }
2873 }
2874
2875 return (regindex);
2876 }
2877
2878 /*
2879 * ixgbe_set_vfta_generic - Set VLAN filter table
2880 * @hw: pointer to hardware structure
2881 * @vlan: VLAN id to write to VLAN filter
2882 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
2883 * @vlan_on: boolean flag to turn on/off VLAN in VLVF
2884 *
2885 * Turn on/off specified VLAN in the VLAN filter table.
2886 */
2887 s32
2888 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
2889 {
2890 s32 regindex;
2891 u32 bitindex;
2892 u32 vfta;
2893 u32 bits;
2894 u32 vt;
2895 u32 targetbit;
2896 bool vfta_changed = false;
2897
2898 DEBUGFUNC("ixgbe_set_vfta_generic");
2899
2900 if (vlan > 4095)
2901 return (IXGBE_ERR_PARAM);
2902
2903 /*
2904 * this is a 2 part operation - first the VFTA, then the
2905 * VLVF and VLVFB if VT Mode is set
2906 * We don't write the VFTA until we know the VLVF part succeeded.
2907 */
2908
2909 /*
2910 * Part 1
2911 * The VFTA is a bitstring made up of 128 32-bit registers
2912 * that enable the particular VLAN id, much like the MTA:
2913 * bits[11-5]: which register
2914 * bits[4-0]: which bit in the register
2915 */
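/*
 * For example, VLAN id 100 selects VFTA register 100 >> 5 = 3 and
 * bit 100 & 0x1F = 4 within that register.
 */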
2916 regindex = (vlan >> 5) & 0x7F;
2917 bitindex = vlan & 0x1F;
2918 targetbit = (1 << bitindex);
2919 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
2920
2921 if (vlan_on) {
2922 if (!(vfta & targetbit)) {
2923 vfta |= targetbit;
2924 vfta_changed = true;
2925 }
2926 } else {
2927 if ((vfta & targetbit)) {
2928 vfta &= ~targetbit;
2929 vfta_changed = true;
2930 }
2931 }
2932
2933
2934 /*
2935 * Part 2
2936 * If VT Mode is set
2937 * Either vlan_on
2938 * make sure the vlan is in VLVF
2939 * set the vind bit in the matching VLVFB
2940 * Or !vlan_on
2941 * clear the pool bit and possibly the vind
2942 */
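/*
 * Each VLVF entry owns a pair of VLVFB registers: VLVFB(2 * index) holds
 * the pool bitmap for pools 0-31 and VLVFB(2 * index + 1) for pools 32-63.
 */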
2943 vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2944 if (vt & IXGBE_VT_CTL_VT_ENABLE) {
2945 s32 vlvf_index;
2946
2947 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
2948 if (vlvf_index < 0)
2949 return (vlvf_index);
2950
2951 if (vlan_on) {
2952 /* set the pool bit */
2953 if (vind < 32) {
2954 bits = IXGBE_READ_REG(hw,
2955 IXGBE_VLVFB(vlvf_index * 2));
2956 bits |= (1 << vind);
2957 IXGBE_WRITE_REG(hw,
2958 IXGBE_VLVFB(vlvf_index * 2),
2959 bits);
2960 } else {
2961 bits = IXGBE_READ_REG(hw,
2962 IXGBE_VLVFB((vlvf_index * 2) + 1));
2963 bits |= (1 << (vind - 32));
2964 IXGBE_WRITE_REG(hw,
2965 IXGBE_VLVFB((vlvf_index * 2) + 1),
2966 bits);
2967 }
2968 } else {
2969 /* clear the pool bit */
2970 if (vind < 32) {
2971 bits = IXGBE_READ_REG(hw,
2972 IXGBE_VLVFB(vlvf_index * 2));
2973 bits &= ~(1 << vind);
2974 IXGBE_WRITE_REG(hw,
2975 IXGBE_VLVFB(vlvf_index * 2),
2976 bits);
2977 bits |= IXGBE_READ_REG(hw,
2978 IXGBE_VLVFB((vlvf_index * 2) + 1));
2979 } else {
2980 bits = IXGBE_READ_REG(hw,
2981 IXGBE_VLVFB((vlvf_index * 2) + 1));
2982 bits &= ~(1 << (vind - 32));
2983 IXGBE_WRITE_REG(hw,
2984 IXGBE_VLVFB((vlvf_index * 2) + 1),
2985 bits);
2986 bits |= IXGBE_READ_REG(hw,
2987 IXGBE_VLVFB(vlvf_index * 2));
2988 }
2989 }
2990
2991 /*
2992 * If there are still bits set in the VLVFB registers
2993 * for the VLAN ID indicated we need to see if the
2994 * caller is requesting that we clear the VFTA entry bit.
2995 * If the caller has requested that we clear the VFTA
2996 * entry bit but there are still pools/VFs using this VLAN
2997 * ID entry then ignore the request. We're not worried
2998 * about the case where we're turning the VFTA VLAN ID
2999 * entry bit on, only when requested to turn it off as
3000 * there may be multiple pools and/or VFs using the
3001 * VLAN ID entry. In that case we cannot clear the
3002 * VFTA bit until all pools/VFs using that VLAN ID have also
3003 * been cleared. This will be indicated by "bits" being
3004 * zero.
3005 */
3006 if (bits) {
3007 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
3008 (IXGBE_VLVF_VIEN | vlan));
3009 if (!vlan_on) {
3010 /*
3011 * someone wants to clear the vfta entry
3012 * but some pools/VFs are still using it.
3013 * Ignore it.
3014 */
3015 vfta_changed = false;
3016 }
3017 } else {
3018 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
3019 }
3020 }
3021
3022 if (vfta_changed)
3023 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
3024
3025 return (IXGBE_SUCCESS);
3026 }
3027
3028 /*
3029 * ixgbe_clear_vfta_generic - Clear VLAN filter table
3030 * @hw: pointer to hardware structure
3031 *
3032 * Clears the VLAN filter table, and the VMDq index associated with the filter
3033 */
3034 s32
3035 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3036 {
3037 u32 offset;
3038
3039 DEBUGFUNC("ixgbe_clear_vfta_generic");
3040
3041 for (offset = 0; offset < hw->mac.vft_size; offset++)
3042 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3043
3044 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3045 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3046 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
3047 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
3048 }
3049
3050 return (IXGBE_SUCCESS);
3051 }
3052
3053 /*
3054 * ixgbe_check_mac_link_generic - Determine link and speed status
3055 * @hw: pointer to hardware structure
3056 * @speed: pointer to link speed
3057 * @link_up: true when link is up
3058 * @link_up_wait_to_complete: bool used to wait for link up or not
3059 *
3060 * Reads the links register to determine if link is up and the current speed
3061 */
3062 s32
3063 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3064 bool *link_up, bool link_up_wait_to_complete)
3065 {
3066 u32 links_reg, links_orig;
3067 u32 i;
3068
3069 DEBUGFUNC("ixgbe_check_mac_link_generic");
3070
3071 /* clear the old state */
3072 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
3073
3074 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3075
3076 if (links_orig != links_reg) {
3077 DEBUGOUT2("LINKS changed from %08X to %08X\n",
3078 links_orig, links_reg);
3079 }
3080
3081 if (link_up_wait_to_complete) {
3082 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
3083 if (links_reg & IXGBE_LINKS_UP) {
3084 *link_up = true;
3085 break;
3086 } else {
3087 *link_up = false;
3088 }
3089 msec_delay(100);
3090 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3091 }
3092 } else {
3093 if (links_reg & IXGBE_LINKS_UP)
3094 *link_up = true;
3095 else
3096 *link_up = false;
3097 }
3098
3099 if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3100 IXGBE_LINKS_SPEED_10G_82599)
3101 *speed = IXGBE_LINK_SPEED_10GB_FULL;
3102 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3103 IXGBE_LINKS_SPEED_1G_82599)
3104 *speed = IXGBE_LINK_SPEED_1GB_FULL;
3105 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3106 IXGBE_LINKS_SPEED_100_82599)
3107 *speed = IXGBE_LINK_SPEED_100_FULL;
3108 else
3109 *speed = IXGBE_LINK_SPEED_UNKNOWN;
3110
3111 /* if link is down, zero out the current_mode */
3112 if (*link_up == false) {
3113 hw->fc.current_mode = ixgbe_fc_none;
3114 hw->fc.fc_was_autonegged = false;
3115 }
3116
3117 return (IXGBE_SUCCESS);
3118 }
3119
3120 /*
3121 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
3122 * the EEPROM
3123 * @hw: pointer to hardware structure
3124 * @wwnn_prefix: the alternative WWNN prefix
3125 * @wwpn_prefix: the alternative WWPN prefix
3126 *
3127 * This function will read the EEPROM from the alternative SAN MAC address
3128 * block to check support for the alternative WWNN/WWPN prefix.
3129 */
3130 s32
3131 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
3132 u16 *wwpn_prefix)
3133 {
3134 u16 offset, caps;
3135 u16 alt_san_mac_blk_offset;
3136
3137 DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
3138
3139 /* clear output first */
3140 *wwnn_prefix = 0xFFFF;
3141 *wwpn_prefix = 0xFFFF;
3142
3143 /* check if alternative SAN MAC is supported */
3144 hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
3145 &alt_san_mac_blk_offset);
3146
3147 if ((alt_san_mac_blk_offset == 0) ||
3148 (alt_san_mac_blk_offset == 0xFFFF))
3149 goto wwn_prefix_out;
3150
3151 /* check capability in alternative san mac address block */
3152 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
3153 hw->eeprom.ops.read(hw, offset, &caps);
3154 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
3155 goto wwn_prefix_out;
3156
3157 /* get the corresponding prefix for WWNN/WWPN */
3158 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
3159 hw->eeprom.ops.read(hw, offset, wwnn_prefix);
3160
3161 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
3162 hw->eeprom.ops.read(hw, offset, wwpn_prefix);
3163
3164 wwn_prefix_out:
3165 return (IXGBE_SUCCESS);
3166 }
3167
3168 /*
3169 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
3170 * @hw: pointer to hardware structure
3171 * @bs: the fcoe boot status
3172 *
3173 * This function will read the FCOE boot status from the iSCSI FCOE block
3174 */
3175 s32
3176 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
3177 {
3178 u16 offset, caps, flags;
3179 s32 status;
3180
3181 DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
3182
3183 /* clear output first */
3184 *bs = ixgbe_fcoe_bootstatus_unavailable;
3185
3186 /* check if FCOE IBA block is present */
3187 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
3188 status = hw->eeprom.ops.read(hw, offset, &caps);
3189 if (status != IXGBE_SUCCESS)
3190 goto out;
3191
3192 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
3193 goto out;
3194
3195 /* check if iSCSI FCOE block is populated */
3196 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
3197 if (status != IXGBE_SUCCESS)
3198 goto out;
3199
3200 if ((offset == 0) || (offset == 0xFFFF))
3201 goto out;
3202
3203 /* read fcoe flags in iSCSI FCOE block */
3204 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
3205 status = hw->eeprom.ops.read(hw, offset, &flags);
3206 if (status != IXGBE_SUCCESS)
3207 goto out;
3208
3209 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
3210 *bs = ixgbe_fcoe_bootstatus_enabled;
3211 else
3212 *bs = ixgbe_fcoe_bootstatus_disabled;
3213
3214 out:
3215 return (status);
3216 }
3217
3218 /*
3219 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
3220 * control
3221 * @hw: pointer to hardware structure
3222 *
3223 * There are several phys that do not support autoneg flow control. This
3224 * function checks the device id to see if the associated phy supports
3225 * autoneg flow control.
3226 */
3227 static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
3228 {
3229
3230 DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
3231
3232 switch (hw->device_id) {
3233 case IXGBE_DEV_ID_82599_T3_LOM:
3234 return (IXGBE_SUCCESS);
3235 default:
3236 return (IXGBE_ERR_FC_NOT_SUPPORTED);
3237 }
3238 }
3239