1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 #include "rge.h"
27
/*
 * Register-window address macros: compute the virtual address of an
 * operating register at byte offset <reg>/<offset> within the mapped
 * register window <rgep>->io_regs.
 *
 * Fix: the <rgep> parameter is now parenthesized in each expansion,
 * so the macros remain correct if ever invoked with a non-trivial
 * expression (standard macro hygiene).
 */
#define	REG32(rgep, reg)	((uint32_t *)((rgep)->io_regs+(reg)))
#define	REG16(rgep, reg)	((uint16_t *)((rgep)->io_regs+(reg)))
#define	REG8(rgep, reg)		((uint8_t *)((rgep)->io_regs+(reg)))
#define	PIO_ADDR(rgep, offset)	((void *)((rgep)->io_regs+(offset)))
32
33 /*
34 * Patchable globals:
35 *
36 * rge_autorecover
37 * Enables/disables automatic recovery after fault detection
38 */
39 static uint32_t rge_autorecover = 1;
40
41 /*
42 * globals:
43 */
44 #define RGE_DBG RGE_DBG_REGS /* debug flag for this code */
45 static uint32_t rge_watchdog_count = 1 << 5;
46 static uint32_t rge_rx_watchdog_count = 1 << 3;
47
48 /*
49 * Operating register get/set access routines
50 */
51
52 static uint32_t rge_reg_get32(rge_t *rgep, uintptr_t regno);
53 #pragma inline(rge_reg_get32)
54
55 static uint32_t
rge_reg_get32(rge_t * rgep,uintptr_t regno)56 rge_reg_get32(rge_t *rgep, uintptr_t regno)
57 {
58 RGE_TRACE(("rge_reg_get32($%p, 0x%lx)",
59 (void *)rgep, regno));
60
61 return (ddi_get32(rgep->io_handle, REG32(rgep, regno)));
62 }
63
64 static void rge_reg_put32(rge_t *rgep, uintptr_t regno, uint32_t data);
65 #pragma inline(rge_reg_put32)
66
67 static void
rge_reg_put32(rge_t * rgep,uintptr_t regno,uint32_t data)68 rge_reg_put32(rge_t *rgep, uintptr_t regno, uint32_t data)
69 {
70 RGE_TRACE(("rge_reg_put32($%p, 0x%lx, 0x%x)",
71 (void *)rgep, regno, data));
72
73 ddi_put32(rgep->io_handle, REG32(rgep, regno), data);
74 }
75
76 static void rge_reg_set32(rge_t *rgep, uintptr_t regno, uint32_t bits);
77 #pragma inline(rge_reg_set32)
78
79 static void
rge_reg_set32(rge_t * rgep,uintptr_t regno,uint32_t bits)80 rge_reg_set32(rge_t *rgep, uintptr_t regno, uint32_t bits)
81 {
82 uint32_t regval;
83
84 RGE_TRACE(("rge_reg_set32($%p, 0x%lx, 0x%x)",
85 (void *)rgep, regno, bits));
86
87 regval = rge_reg_get32(rgep, regno);
88 regval |= bits;
89 rge_reg_put32(rgep, regno, regval);
90 }
91
92 static void rge_reg_clr32(rge_t *rgep, uintptr_t regno, uint32_t bits);
93 #pragma inline(rge_reg_clr32)
94
95 static void
rge_reg_clr32(rge_t * rgep,uintptr_t regno,uint32_t bits)96 rge_reg_clr32(rge_t *rgep, uintptr_t regno, uint32_t bits)
97 {
98 uint32_t regval;
99
100 RGE_TRACE(("rge_reg_clr32($%p, 0x%lx, 0x%x)",
101 (void *)rgep, regno, bits));
102
103 regval = rge_reg_get32(rgep, regno);
104 regval &= ~bits;
105 rge_reg_put32(rgep, regno, regval);
106 }
107
108 static uint16_t rge_reg_get16(rge_t *rgep, uintptr_t regno);
109 #pragma inline(rge_reg_get16)
110
111 static uint16_t
rge_reg_get16(rge_t * rgep,uintptr_t regno)112 rge_reg_get16(rge_t *rgep, uintptr_t regno)
113 {
114 RGE_TRACE(("rge_reg_get16($%p, 0x%lx)",
115 (void *)rgep, regno));
116
117 return (ddi_get16(rgep->io_handle, REG16(rgep, regno)));
118 }
119
120 static void rge_reg_put16(rge_t *rgep, uintptr_t regno, uint16_t data);
121 #pragma inline(rge_reg_put16)
122
123 static void
rge_reg_put16(rge_t * rgep,uintptr_t regno,uint16_t data)124 rge_reg_put16(rge_t *rgep, uintptr_t regno, uint16_t data)
125 {
126 RGE_TRACE(("rge_reg_put16($%p, 0x%lx, 0x%x)",
127 (void *)rgep, regno, data));
128
129 ddi_put16(rgep->io_handle, REG16(rgep, regno), data);
130 }
131
132 static uint8_t rge_reg_get8(rge_t *rgep, uintptr_t regno);
133 #pragma inline(rge_reg_get8)
134
135 static uint8_t
rge_reg_get8(rge_t * rgep,uintptr_t regno)136 rge_reg_get8(rge_t *rgep, uintptr_t regno)
137 {
138 RGE_TRACE(("rge_reg_get8($%p, 0x%lx)",
139 (void *)rgep, regno));
140
141 return (ddi_get8(rgep->io_handle, REG8(rgep, regno)));
142 }
143
144 static void rge_reg_put8(rge_t *rgep, uintptr_t regno, uint8_t data);
145 #pragma inline(rge_reg_put8)
146
147 static void
rge_reg_put8(rge_t * rgep,uintptr_t regno,uint8_t data)148 rge_reg_put8(rge_t *rgep, uintptr_t regno, uint8_t data)
149 {
150 RGE_TRACE(("rge_reg_put8($%p, 0x%lx, 0x%x)",
151 (void *)rgep, regno, data));
152
153 ddi_put8(rgep->io_handle, REG8(rgep, regno), data);
154 }
155
156 static void rge_reg_set8(rge_t *rgep, uintptr_t regno, uint8_t bits);
157 #pragma inline(rge_reg_set8)
158
159 static void
rge_reg_set8(rge_t * rgep,uintptr_t regno,uint8_t bits)160 rge_reg_set8(rge_t *rgep, uintptr_t regno, uint8_t bits)
161 {
162 uint8_t regval;
163
164 RGE_TRACE(("rge_reg_set8($%p, 0x%lx, 0x%x)",
165 (void *)rgep, regno, bits));
166
167 regval = rge_reg_get8(rgep, regno);
168 regval |= bits;
169 rge_reg_put8(rgep, regno, regval);
170 }
171
172 static void rge_reg_clr8(rge_t *rgep, uintptr_t regno, uint8_t bits);
173 #pragma inline(rge_reg_clr8)
174
175 static void
rge_reg_clr8(rge_t * rgep,uintptr_t regno,uint8_t bits)176 rge_reg_clr8(rge_t *rgep, uintptr_t regno, uint8_t bits)
177 {
178 uint8_t regval;
179
180 RGE_TRACE(("rge_reg_clr8($%p, 0x%lx, 0x%x)",
181 (void *)rgep, regno, bits));
182
183 regval = rge_reg_get8(rgep, regno);
184 regval &= ~bits;
185 rge_reg_put8(rgep, regno, regval);
186 }
187
188 uint16_t rge_mii_get16(rge_t *rgep, uintptr_t mii);
189 #pragma no_inline(rge_mii_get16)
190
191 uint16_t
rge_mii_get16(rge_t * rgep,uintptr_t mii)192 rge_mii_get16(rge_t *rgep, uintptr_t mii)
193 {
194 uint32_t regval;
195 uint32_t val32;
196 uint32_t i;
197
198 regval = (mii & PHY_REG_MASK) << PHY_REG_SHIFT;
199 rge_reg_put32(rgep, PHY_ACCESS_REG, regval);
200
201 /*
202 * Waiting for PHY reading OK
203 */
204 for (i = 0; i < PHY_RESET_LOOP; i++) {
205 drv_usecwait(1000);
206 val32 = rge_reg_get32(rgep, PHY_ACCESS_REG);
207 if (val32 & PHY_ACCESS_WR_FLAG)
208 return ((uint16_t)(val32 & 0xffff));
209 }
210
211 RGE_REPORT((rgep, "rge_mii_get16(0x%x) fail, val = %x", mii, val32));
212 return ((uint16_t)~0u);
213 }
214
215 void rge_mii_put16(rge_t *rgep, uintptr_t mii, uint16_t data);
216 #pragma no_inline(rge_mii_put16)
217
218 void
rge_mii_put16(rge_t * rgep,uintptr_t mii,uint16_t data)219 rge_mii_put16(rge_t *rgep, uintptr_t mii, uint16_t data)
220 {
221 uint32_t regval;
222 uint32_t val32;
223 uint32_t i;
224
225 regval = (mii & PHY_REG_MASK) << PHY_REG_SHIFT;
226 regval |= data & PHY_DATA_MASK;
227 regval |= PHY_ACCESS_WR_FLAG;
228 rge_reg_put32(rgep, PHY_ACCESS_REG, regval);
229
230 /*
231 * Waiting for PHY writing OK
232 */
233 for (i = 0; i < PHY_RESET_LOOP; i++) {
234 drv_usecwait(1000);
235 val32 = rge_reg_get32(rgep, PHY_ACCESS_REG);
236 if (!(val32 & PHY_ACCESS_WR_FLAG))
237 return;
238 }
239 RGE_REPORT((rgep, "rge_mii_put16(0x%lx, 0x%x) fail",
240 mii, data));
241 }
242
243 void rge_ephy_put16(rge_t *rgep, uintptr_t emii, uint16_t data);
244 #pragma no_inline(rge_ephy_put16)
245
246 void
rge_ephy_put16(rge_t * rgep,uintptr_t emii,uint16_t data)247 rge_ephy_put16(rge_t *rgep, uintptr_t emii, uint16_t data)
248 {
249 uint32_t regval;
250 uint32_t val32;
251 uint32_t i;
252
253 regval = (emii & EPHY_REG_MASK) << EPHY_REG_SHIFT;
254 regval |= data & EPHY_DATA_MASK;
255 regval |= EPHY_ACCESS_WR_FLAG;
256 rge_reg_put32(rgep, EPHY_ACCESS_REG, regval);
257
258 /*
259 * Waiting for PHY writing OK
260 */
261 for (i = 0; i < PHY_RESET_LOOP; i++) {
262 drv_usecwait(1000);
263 val32 = rge_reg_get32(rgep, EPHY_ACCESS_REG);
264 if (!(val32 & EPHY_ACCESS_WR_FLAG))
265 return;
266 }
267 RGE_REPORT((rgep, "rge_ephy_put16(0x%lx, 0x%x) fail",
268 emii, data));
269 }
270
271 /*
272 * Atomically shift a 32-bit word left, returning
273 * the value it had *before* the shift was applied
274 */
275 static uint32_t rge_atomic_shl32(uint32_t *sp, uint_t count);
276 #pragma inline(rge_mii_put16)
277
278 static uint32_t
rge_atomic_shl32(uint32_t * sp,uint_t count)279 rge_atomic_shl32(uint32_t *sp, uint_t count)
280 {
281 uint32_t oldval;
282 uint32_t newval;
283
284 /* ATOMICALLY */
285 do {
286 oldval = *sp;
287 newval = oldval << count;
288 } while (cas32(sp, oldval, newval) != oldval);
289
290 return (oldval);
291 }
292
293 /*
294 * PHY operation routines
295 */
#if	RGE_DEBUGGING

/*
 * rge_phydump() -- dump all 32 MII registers of the PHY to the debug
 * log, eight per line (debug builds only).  Caller holds <genlock>.
 */
void
rge_phydump(rge_t *rgep)
{
	uint16_t regs[32];
	int reg;

	ASSERT(mutex_owned(rgep->genlock));

	for (reg = 0; reg < 32; ++reg)
		regs[reg] = rge_mii_get16(rgep, reg);

	for (reg = 0; reg < 32; reg += 8)
		RGE_DEBUG(("rge_phydump: "
		    "0x%04x %04x %04x %04x %04x %04x %04x %04x",
		    regs[reg+0], regs[reg+1], regs[reg+2], regs[reg+3],
		    regs[reg+4], regs[reg+5], regs[reg+6], regs[reg+7]));
}

#endif	/* RGE_DEBUGGING */
318
/*
 * rge_phy_check() -- periodic PHY health check.
 *
 * While the link is down on an RTL8169S/8110S PHY ("PCS bug"
 * workaround): if 1000FDX is being advertised, count the consecutive
 * link-down ticks and reset the PHY after more than 15 of them.  The
 * counter is cleared whenever the link is up.  Presumably called once
 * per second from the driver's periodic tick — confirm against caller.
 */
static void
rge_phy_check(rge_t *rgep)
{
	uint16_t gig_ctl;

	if (rgep->param_link_up == LINK_STATE_DOWN) {
		/*
		 * RTL8169S/8110S PHY has the "PCS bug". Need reset PHY
		 * every 15 seconds when the link is down & advertise is 1000.
		 */
		if (rgep->chipid.phy_ver == PHY_VER_S) {
			gig_ctl = rge_mii_get16(rgep, MII_1000BASE_T_CONTROL);
			if (gig_ctl & MII_1000BT_CTL_ADV_FDX) {
				rgep->link_down_count++;
				if (rgep->link_down_count > 15) {
					(void) rge_phy_reset(rgep);
					rgep->stats.phy_reset++;
					rgep->link_down_count = 0;
				}
			}
		}
	} else {
		rgep->link_down_count = 0;
	}
}
344
345 /*
346 * Basic low-level function to reset the PHY.
347 * Doesn't incorporate any special-case workarounds.
348 *
349 * Returns TRUE on success, FALSE if the RESET bit doesn't clear
350 */
351 boolean_t
rge_phy_reset(rge_t * rgep)352 rge_phy_reset(rge_t *rgep)
353 {
354 uint16_t control;
355 uint_t count;
356
357 /*
358 * Set the PHY RESET bit, then wait up to 5 ms for it to self-clear
359 */
360 control = rge_mii_get16(rgep, MII_CONTROL);
361 rge_mii_put16(rgep, MII_CONTROL, control | MII_CONTROL_RESET);
362 for (count = 0; count < 5; count++) {
363 drv_usecwait(100);
364 control = rge_mii_get16(rgep, MII_CONTROL);
365 if (BIC(control, MII_CONTROL_RESET))
366 return (B_TRUE);
367 }
368
369 RGE_REPORT((rgep, "rge_phy_reset: FAILED, control now 0x%x", control));
370 return (B_FALSE);
371 }
372
373 /*
374 * Synchronise the PHY's speed/duplex/autonegotiation capabilities
375 * and advertisements with the required settings as specified by the various
376 * param_* variables that can be poked via the NDD interface.
377 *
378 * We always reset the PHY and reprogram *all* the relevant registers,
379 * not just those changed. This should cause the link to go down, and then
380 * back up again once the link is stable and autonegotiation (if enabled)
381 * is complete. We should get a link state change interrupt somewhere along
382 * the way ...
383 *
384 * NOTE: <genlock> must already be held by the caller
385 */
386 void
rge_phy_update(rge_t * rgep)387 rge_phy_update(rge_t *rgep)
388 {
389 boolean_t adv_autoneg;
390 boolean_t adv_pause;
391 boolean_t adv_asym_pause;
392 boolean_t adv_1000fdx;
393 boolean_t adv_1000hdx;
394 boolean_t adv_100fdx;
395 boolean_t adv_100hdx;
396 boolean_t adv_10fdx;
397 boolean_t adv_10hdx;
398
399 uint16_t control;
400 uint16_t gigctrl;
401 uint16_t anar;
402
403 ASSERT(mutex_owned(rgep->genlock));
404
405 RGE_DEBUG(("rge_phy_update: autoneg %d "
406 "pause %d asym_pause %d "
407 "1000fdx %d 1000hdx %d "
408 "100fdx %d 100hdx %d "
409 "10fdx %d 10hdx %d ",
410 rgep->param_adv_autoneg,
411 rgep->param_adv_pause, rgep->param_adv_asym_pause,
412 rgep->param_adv_1000fdx, rgep->param_adv_1000hdx,
413 rgep->param_adv_100fdx, rgep->param_adv_100hdx,
414 rgep->param_adv_10fdx, rgep->param_adv_10hdx));
415
416 control = gigctrl = anar = 0;
417
418 /*
419 * PHY settings are normally based on the param_* variables,
420 * but if any loopback mode is in effect, that takes precedence.
421 *
422 * RGE supports MAC-internal loopback, PHY-internal loopback,
423 * and External loopback at a variety of speeds (with a special
424 * cable). In all cases, autoneg is turned OFF, full-duplex
425 * is turned ON, and the speed/mastership is forced.
426 */
427 switch (rgep->param_loop_mode) {
428 case RGE_LOOP_NONE:
429 default:
430 adv_autoneg = rgep->param_adv_autoneg;
431 adv_pause = rgep->param_adv_pause;
432 adv_asym_pause = rgep->param_adv_asym_pause;
433 adv_1000fdx = rgep->param_adv_1000fdx;
434 adv_1000hdx = rgep->param_adv_1000hdx;
435 adv_100fdx = rgep->param_adv_100fdx;
436 adv_100hdx = rgep->param_adv_100hdx;
437 adv_10fdx = rgep->param_adv_10fdx;
438 adv_10hdx = rgep->param_adv_10hdx;
439 break;
440
441 case RGE_LOOP_INTERNAL_PHY:
442 case RGE_LOOP_INTERNAL_MAC:
443 adv_autoneg = adv_pause = adv_asym_pause = B_FALSE;
444 adv_1000fdx = adv_100fdx = adv_10fdx = B_FALSE;
445 adv_1000hdx = adv_100hdx = adv_10hdx = B_FALSE;
446 rgep->param_link_duplex = LINK_DUPLEX_FULL;
447
448 switch (rgep->param_loop_mode) {
449 case RGE_LOOP_INTERNAL_PHY:
450 if (rgep->chipid.mac_ver != MAC_VER_8101E) {
451 rgep->param_link_speed = 1000;
452 adv_1000fdx = B_TRUE;
453 } else {
454 rgep->param_link_speed = 100;
455 adv_100fdx = B_TRUE;
456 }
457 control = MII_CONTROL_LOOPBACK;
458 break;
459
460 case RGE_LOOP_INTERNAL_MAC:
461 if (rgep->chipid.mac_ver != MAC_VER_8101E) {
462 rgep->param_link_speed = 1000;
463 adv_1000fdx = B_TRUE;
464 } else {
465 rgep->param_link_speed = 100;
466 adv_100fdx = B_TRUE;
467 break;
468 }
469 }
470
471 RGE_DEBUG(("rge_phy_update: autoneg %d "
472 "pause %d asym_pause %d "
473 "1000fdx %d 1000hdx %d "
474 "100fdx %d 100hdx %d "
475 "10fdx %d 10hdx %d ",
476 adv_autoneg,
477 adv_pause, adv_asym_pause,
478 adv_1000fdx, adv_1000hdx,
479 adv_100fdx, adv_100hdx,
480 adv_10fdx, adv_10hdx));
481
482 /*
483 * We should have at least one technology capability set;
484 * if not, we select a default of 1000Mb/s full-duplex
485 */
486 if (!adv_1000fdx && !adv_100fdx && !adv_10fdx &&
487 !adv_1000hdx && !adv_100hdx && !adv_10hdx) {
488 if (rgep->chipid.mac_ver != MAC_VER_8101E)
489 adv_1000fdx = B_TRUE;
490 } else {
491 adv_1000fdx = B_FALSE;
492 adv_100fdx = B_TRUE;
493 }
494 }
495
496 /*
497 * Now transform the adv_* variables into the proper settings
498 * of the PHY registers ...
499 *
500 * If autonegotiation is (now) enabled, we want to trigger
501 * a new autonegotiation cycle once the PHY has been
502 * programmed with the capabilities to be advertised.
503 *
504 * RTL8169/8110 doesn't support 1000Mb/s half-duplex.
505 */
506 if (adv_autoneg)
507 control |= MII_CONTROL_ANE|MII_CONTROL_RSAN;
508
509 if (adv_1000fdx)
510 control |= MII_CONTROL_1GB|MII_CONTROL_FDUPLEX;
511 else if (adv_1000hdx)
512 control |= MII_CONTROL_1GB;
513 else if (adv_100fdx)
514 control |= MII_CONTROL_100MB|MII_CONTROL_FDUPLEX;
515 else if (adv_100hdx)
516 control |= MII_CONTROL_100MB;
517 else if (adv_10fdx)
518 control |= MII_CONTROL_FDUPLEX;
519 else if (adv_10hdx)
520 control |= 0;
521 else
522 { _NOTE(EMPTY); } /* Can't get here anyway ... */
523
524 if (adv_1000fdx) {
525 gigctrl |= MII_1000BT_CTL_ADV_FDX;
526 /*
527 * Chipset limitation: need set other capabilities to true
528 */
529 if (rgep->chipid.is_pcie)
530 adv_1000hdx = B_TRUE;
531 adv_100fdx = B_TRUE;
532 adv_100hdx = B_TRUE;
533 adv_10fdx = B_TRUE;
534 adv_10hdx = B_TRUE;
535 }
536
537 if (adv_1000hdx)
538 gigctrl |= MII_1000BT_CTL_ADV_HDX;
539
540 if (adv_100fdx)
541 anar |= MII_ABILITY_100BASE_TX_FD;
542 if (adv_100hdx)
543 anar |= MII_ABILITY_100BASE_TX;
544 if (adv_10fdx)
545 anar |= MII_ABILITY_10BASE_T_FD;
546 if (adv_10hdx)
547 anar |= MII_ABILITY_10BASE_T;
548
549 if (adv_pause)
550 anar |= MII_ABILITY_PAUSE;
551 if (adv_asym_pause)
552 anar |= MII_ABILITY_ASMPAUSE;
553
554 /*
555 * Munge in any other fixed bits we require ...
556 */
557 anar |= MII_AN_SELECTOR_8023;
558
559 /*
560 * Restart the PHY and write the new values. Note the
561 * time, so that we can say whether subsequent link state
562 * changes can be attributed to our reprogramming the PHY
563 */
564 rge_phy_init(rgep);
565 if (rgep->chipid.mac_ver == MAC_VER_8168B_B ||
566 rgep->chipid.mac_ver == MAC_VER_8168B_C) {
567 /* power up PHY for RTL8168B chipset */
568 rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
569 rge_mii_put16(rgep, PHY_0E_REG, 0x0000);
570 rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
571 }
572 rge_mii_put16(rgep, MII_AN_ADVERT, anar);
573 rge_mii_put16(rgep, MII_1000BASE_T_CONTROL, gigctrl);
574 rge_mii_put16(rgep, MII_CONTROL, control);
575
576 RGE_DEBUG(("rge_phy_update: anar <- 0x%x", anar));
577 RGE_DEBUG(("rge_phy_update: control <- 0x%x", control));
578 RGE_DEBUG(("rge_phy_update: gigctrl <- 0x%x", gigctrl));
579 }
580
void rge_phy_init(rge_t *rgep);
#pragma	no_inline(rge_phy_init)

/*
 * rge_phy_init() -- chip-specific PHY initialisation.
 *
 * Records the PHY's MII address (fixed at 1 here) and then applies
 * the per-MAC-version register write sequence taken verbatim from
 * the Realtek Programming Guide.  The guide gives no explanation of
 * the individual values, so they are reproduced exactly and in
 * order; do not reorder or "clean up" these writes.  (Writes to
 * PHY_1F_REG presumably select PHY register pages — unverified.)
 */
void
rge_phy_init(rge_t *rgep)
{
	rgep->phy_mii_addr = 1;

	/*
	 * Below phy config steps are copied from the Programming Guide
	 * (there's no detail comments for these steps.)
	 */
	switch (rgep->chipid.mac_ver) {
	case MAC_VER_8169S_D:
	case MAC_VER_8169S_E :
		rge_mii_put16(rgep, PHY_1F_REG, 0x0001);
		rge_mii_put16(rgep, PHY_15_REG, 0x1000);
		rge_mii_put16(rgep, PHY_18_REG, 0x65c7);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x0000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0x00a1);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0x0008);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0x1020);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0x1000);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x0800);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x0000);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x7000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0xff41);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0xde60);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0x0140);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0x0077);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x7800);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x7000);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xa000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0xdf01);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0xdf20);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0xff95);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0xfa00);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xa800);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xa000);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xb000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0xff41);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0xde20);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0x0140);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0x00bb);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xb800);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xb000);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xf000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0xdf01);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0xdf20);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0xff95);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0xbf00);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xf800);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xf000);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x0000);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
		rge_mii_put16(rgep, PHY_0B_REG, 0x0000);
		break;

	case MAC_VER_8169SB:
		rge_mii_put16(rgep, PHY_1F_REG, 0x0001);
		rge_mii_put16(rgep, PHY_1B_REG, 0xD41E);
		rge_mii_put16(rgep, PHY_0E_REG, 0x7bff);
		rge_mii_put16(rgep, PHY_GBCR_REG, GBCR_DEFAULT);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0002);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0x90D0);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
		break;

	case MAC_VER_8169SC:
		rge_mii_put16(rgep, PHY_1F_REG, 0x0001);
		rge_mii_put16(rgep, PHY_ANER_REG, 0x0078);
		rge_mii_put16(rgep, PHY_ANNPRR_REG, 0x05dc);
		rge_mii_put16(rgep, PHY_GBCR_REG, 0x2672);
		rge_mii_put16(rgep, PHY_GBSR_REG, 0x6a14);
		rge_mii_put16(rgep, PHY_0B_REG, 0x7cb0);
		rge_mii_put16(rgep, PHY_0C_REG, 0xdb80);
		rge_mii_put16(rgep, PHY_1B_REG, 0xc414);
		rge_mii_put16(rgep, PHY_1C_REG, 0xef03);
		rge_mii_put16(rgep, PHY_1D_REG, 0x3dc8);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0003);
		rge_mii_put16(rgep, PHY_13_REG, 0x0600);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
		break;

	case MAC_VER_8168:
		rge_mii_put16(rgep, PHY_1F_REG, 0x0001);
		rge_mii_put16(rgep, PHY_ANER_REG, 0x00aa);
		rge_mii_put16(rgep, PHY_ANNPTR_REG, 0x3173);
		rge_mii_put16(rgep, PHY_ANNPRR_REG, 0x08fc);
		rge_mii_put16(rgep, PHY_GBCR_REG, 0xe2d0);
		rge_mii_put16(rgep, PHY_0B_REG, 0x941a);
		rge_mii_put16(rgep, PHY_18_REG, 0x65fe);
		rge_mii_put16(rgep, PHY_1C_REG, 0x1e02);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0002);
		rge_mii_put16(rgep, PHY_ANNPTR_REG, 0x103e);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
		break;

	case MAC_VER_8168B_B:
	case MAC_VER_8168B_C:
		rge_mii_put16(rgep, PHY_1F_REG, 0x0001);
		rge_mii_put16(rgep, PHY_0B_REG, 0x94b0);
		rge_mii_put16(rgep, PHY_1B_REG, 0xc416);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0003);
		rge_mii_put16(rgep, PHY_12_REG, 0x6096);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
		break;
	}
}
690
void rge_chip_ident(rge_t *rgep);
#pragma	no_inline(rge_chip_ident)

/*
 * rge_chip_ident() -- identify the chip and derive driver parameters.
 *
 * Records MAC version (from TX_CONFIG_REG), PCIE capability, and PHY
 * version into rgep->chipid; applies chip-specific tuning; then sets
 * the MTU-dependent buffer sizes and the statistics polling interval.
 */
void
rge_chip_ident(rge_t *rgep)
{
	chip_id_t *chip = &rgep->chipid;
	uint32_t val32;
	uint16_t val16;

	/*
	 * Read and record MAC version
	 */
	val32 = rge_reg_get32(rgep, TX_CONFIG_REG);
	val32 &= HW_VERSION_ID_0 | HW_VERSION_ID_1;
	chip->mac_ver = val32;
	/* chip is PCIE iff the PCI Express capability is present */
	chip->is_pcie = pci_lcap_locate(rgep->cfg_handle,
	    PCI_CAP_ID_PCI_E, &val16) == DDI_SUCCESS;

	/*
	 * Workaround for 8101E_C
	 */
	if (chip->mac_ver == MAC_VER_8101E_C) {
		chip->is_pcie = B_FALSE;
	}

	/*
	 * Read and record PHY version
	 */
	val16 = rge_mii_get16(rgep, PHY_ID_REG_2);
	val16 &= PHY_VER_MASK;
	chip->phy_ver = val16;

	/* set pci latency timer */
	if (chip->mac_ver == MAC_VER_8169 ||
	    chip->mac_ver == MAC_VER_8169S_D ||
	    chip->mac_ver == MAC_VER_8169S_E ||
	    chip->mac_ver == MAC_VER_8169SC)
		pci_config_put8(rgep->cfg_handle, PCI_CONF_LATENCY_TIMER, 0x40);

	if (chip->mac_ver == MAC_VER_8169SC) {
		val16 = rge_reg_get16(rgep, RT_CONFIG_1_REG);
		val16 &= 0x0300;
		/*
		 * NOTE(review): after masking with 0x0300, val16 can only
		 * be 0x0000, 0x0100, 0x0200 or 0x0300, so the (val16 == 0x1)
		 * test below can never be true and the 66Mhz setting is
		 * unreachable.  Confirm the intended bit encoding against
		 * the datasheet before changing this.
		 */
		if (val16 == 0x1)	/* 66Mhz PCI */
			rge_reg_put32(rgep, 0x7c, 0x000700ff);
		else if (val16 == 0x0) /* 33Mhz PCI */
			rge_reg_put32(rgep, 0x7c, 0x0007ff00);
	}

	/*
	 * PCIE chipset require the Rx buffer start address must be
	 * 8-byte alignment and the Rx buffer size must be multiple of 8.
	 * We'll just use bcopy in receive procedure for the PCIE chipset.
	 */
	if (chip->is_pcie) {
		rgep->chip_flags |= CHIP_FLAG_FORCE_BCOPY;
		if (rgep->default_mtu > ETHERMTU) {
			rge_notice(rgep, "Jumbo packets not supported "
			    "for this PCIE chipset");
			rgep->default_mtu = ETHERMTU;
		}
	}
	if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)
		rgep->head_room = 0;
	else
		rgep->head_room = RGE_HEADROOM;

	/*
	 * Initialize other variables: clamp the MTU to a sane range,
	 * then size the Tx/Rx buffers accordingly.
	 */
	if (rgep->default_mtu < ETHERMTU || rgep->default_mtu > RGE_JUMBO_MTU)
		rgep->default_mtu = ETHERMTU;
	if (rgep->default_mtu > ETHERMTU) {
		rgep->rxbuf_size = RGE_BUFF_SIZE_JUMBO;
		rgep->txbuf_size = RGE_BUFF_SIZE_JUMBO;
		rgep->ethmax_size = RGE_JUMBO_SIZE;
	} else {
		rgep->rxbuf_size = RGE_BUFF_SIZE_STD;
		rgep->txbuf_size = RGE_BUFF_SIZE_STD;
		rgep->ethmax_size = ETHERMAX;
	}
	chip->rxconfig = RX_CONFIG_DEFAULT;
	chip->txconfig = TX_CONFIG_DEFAULT;

	/* interval to update statistics for polling mode */
	rgep->tick_delta = drv_usectohz(1000*1000/CLK_TICK);

	/* ensure we are not in polling mode */
	rgep->curr_tick = ddi_get_lbolt() - 2*rgep->tick_delta;
	RGE_TRACE(("%s: MAC version = %x, PHY version = %x",
	    rgep->ifname, chip->mac_ver, chip->phy_ver));
}
783
784 /*
785 * Perform first-stage chip (re-)initialisation, using only config-space
786 * accesses:
787 *
788 * + Read the vendor/device/revision/subsystem/cache-line-size registers,
789 * returning the data in the structure pointed to by <idp>.
790 * + Enable Memory Space accesses.
791 * + Enable Bus Mastering according.
792 */
793 void rge_chip_cfg_init(rge_t *rgep, chip_id_t *cidp);
794 #pragma no_inline(rge_chip_cfg_init)
795
796 void
rge_chip_cfg_init(rge_t * rgep,chip_id_t * cidp)797 rge_chip_cfg_init(rge_t *rgep, chip_id_t *cidp)
798 {
799 ddi_acc_handle_t handle;
800 uint16_t commd;
801
802 handle = rgep->cfg_handle;
803
804 /*
805 * Save PCI cache line size and subsystem vendor ID
806 */
807 cidp->command = pci_config_get16(handle, PCI_CONF_COMM);
808 cidp->vendor = pci_config_get16(handle, PCI_CONF_VENID);
809 cidp->device = pci_config_get16(handle, PCI_CONF_DEVID);
810 cidp->subven = pci_config_get16(handle, PCI_CONF_SUBVENID);
811 cidp->subdev = pci_config_get16(handle, PCI_CONF_SUBSYSID);
812 cidp->revision = pci_config_get8(handle, PCI_CONF_REVID);
813 cidp->clsize = pci_config_get8(handle, PCI_CONF_CACHE_LINESZ);
814 cidp->latency = pci_config_get8(handle, PCI_CONF_LATENCY_TIMER);
815
816 /*
817 * Turn on Master Enable (DMA) and IO Enable bits.
818 * Enable PCI Memory Space accesses
819 */
820 commd = cidp->command;
821 commd |= PCI_COMM_ME | PCI_COMM_MAE | PCI_COMM_IO;
822 pci_config_put16(handle, PCI_CONF_COMM, commd);
823
824 RGE_DEBUG(("rge_chip_cfg_init: vendor 0x%x device 0x%x revision 0x%x",
825 cidp->vendor, cidp->device, cidp->revision));
826 RGE_DEBUG(("rge_chip_cfg_init: subven 0x%x subdev 0x%x",
827 cidp->subven, cidp->subdev));
828 RGE_DEBUG(("rge_chip_cfg_init: clsize %d latency %d command 0x%x",
829 cidp->clsize, cidp->latency, cidp->command));
830 }
831
832 int rge_chip_reset(rge_t *rgep);
833 #pragma no_inline(rge_chip_reset)
834
835 int
rge_chip_reset(rge_t * rgep)836 rge_chip_reset(rge_t *rgep)
837 {
838 int i;
839 uint8_t val8;
840
841 /*
842 * Chip should be in STOP state
843 */
844 rge_reg_clr8(rgep, RT_COMMAND_REG,
845 RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);
846
847 /*
848 * Disable interrupt
849 */
850 rgep->int_mask = INT_MASK_NONE;
851 rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask);
852
853 /*
854 * Clear pended interrupt
855 */
856 rge_reg_put16(rgep, INT_STATUS_REG, INT_MASK_ALL);
857
858 /*
859 * Reset chip
860 */
861 rge_reg_set8(rgep, RT_COMMAND_REG, RT_COMMAND_RESET);
862
863 /*
864 * Wait for reset success
865 */
866 for (i = 0; i < CHIP_RESET_LOOP; i++) {
867 drv_usecwait(10);
868 val8 = rge_reg_get8(rgep, RT_COMMAND_REG);
869 if (!(val8 & RT_COMMAND_RESET)) {
870 rgep->rge_chip_state = RGE_CHIP_RESET;
871 return (0);
872 }
873 }
874 RGE_REPORT((rgep, "rge_chip_reset fail."));
875 return (-1);
876 }
877
void rge_chip_init(rge_t *rgep);
#pragma	no_inline(rge_chip_init)

/*
 * rge_chip_init() -- bring the (reset) chip to an operational state:
 * chip-specific tuning, PHY programming, checksum/vlan offload,
 * tally-counter DMA address, packet size limits, Rx/Tx configuration,
 * descriptor ring base addresses, and multicast filter.  The order of
 * these register writes follows the vendor programming sequence and
 * should not be changed.  Caller is expected to hold <genlock>
 * (rge_phy_update() below asserts it).
 */
void
rge_chip_init(rge_t *rgep)
{
	uint32_t val32;
	uint32_t val16;	/* holds 16-bit register values; truncated on put16 */
	uint32_t *hashp;
	chip_id_t *chip = &rgep->chipid;

	/*
	 * Increase the threshold voltage of RX sensitivity
	 */
	if (chip->mac_ver == MAC_VER_8168B_B ||
	    chip->mac_ver == MAC_VER_8168B_C ||
	    chip->mac_ver == MAC_VER_8101E) {
		rge_ephy_put16(rgep, 0x01, 0x1bd3);
	}

	/*
	 * Chip-specific CSI tuning sequence for 8168/8168B_B
	 * (values from the vendor; no public documentation)
	 */
	if (chip->mac_ver == MAC_VER_8168 ||
	    chip->mac_ver == MAC_VER_8168B_B) {
		val16 = rge_reg_get8(rgep, PHY_STATUS_REG);
		val16 = 0x12<<8 | val16;
		rge_reg_put16(rgep, PHY_STATUS_REG, val16);
		rge_reg_put32(rgep, RT_CSI_DATA_REG, 0x00021c01);
		rge_reg_put32(rgep, RT_CSI_ACCESS_REG, 0x8000f088);
		rge_reg_put32(rgep, RT_CSI_DATA_REG, 0x00004000);
		rge_reg_put32(rgep, RT_CSI_ACCESS_REG, 0x8000f0b0);
		rge_reg_put32(rgep, RT_CSI_ACCESS_REG, 0x0000f068);
		val32 = rge_reg_get32(rgep, RT_CSI_DATA_REG);
		val32 |= 0x7000;
		val32 &= 0xffff5fff;
		rge_reg_put32(rgep, RT_CSI_DATA_REG, val32);
		rge_reg_put32(rgep, RT_CSI_ACCESS_REG, 0x8000f068);
	}

	/*
	 * Config MII register
	 */
	rgep->param_link_up = LINK_STATE_DOWN;
	rge_phy_update(rgep);

	/*
	 * Enable Rx checksum offload.
	 * Then for vlan support, we must enable receive vlan de-tagging.
	 * Otherwise, there'll be checksum error.
	 */
	val16 = rge_reg_get16(rgep, CPLUS_COMMAND_REG);
	val16 |= RX_CKSM_OFFLOAD | RX_VLAN_DETAG;
	if (chip->mac_ver == MAC_VER_8169S_D) {
		val16 |= CPLUS_BIT14 | MUL_PCI_RW_ENABLE;
		rge_reg_put8(rgep, RESV_82_REG, 0x01);
	}
	if (chip->mac_ver == MAC_VER_8169S_E ||
	    chip->mac_ver == MAC_VER_8169SC) {
		val16 |= MUL_PCI_RW_ENABLE;
	}
	/* low two bits are always forced off here */
	rge_reg_put16(rgep, CPLUS_COMMAND_REG, val16 & (~0x03));

	/*
	 * Start transmit/receive before set tx/rx configuration register
	 */
	if (!chip->is_pcie)
		rge_reg_set8(rgep, RT_COMMAND_REG,
		    RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);

	/*
	 * Set dump tally counter register
	 * (high 32 bits, then low 32 bits of the stats DMA address)
	 */
	val32 = rgep->dma_area_stats.cookie.dmac_laddress >> 32;
	rge_reg_put32(rgep, DUMP_COUNTER_REG_1, val32);
	val32 = rge_reg_get32(rgep, DUMP_COUNTER_REG_0);
	val32 &= DUMP_COUNTER_REG_RESV;
	val32 |= rgep->dma_area_stats.cookie.dmac_laddress;
	rge_reg_put32(rgep, DUMP_COUNTER_REG_0, val32);

	/*
	 * Change to config register write enable mode
	 */
	rge_reg_set8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);

	/*
	 * Set Tx/Rx maximum packet size
	 */
	if (rgep->default_mtu > ETHERMTU) {
		rge_reg_put8(rgep, TX_MAX_PKTSIZE_REG, TX_PKTSIZE_JUMBO);
		rge_reg_put16(rgep, RX_MAX_PKTSIZE_REG, RX_PKTSIZE_JUMBO);
	} else if (rgep->chipid.mac_ver != MAC_VER_8101E) {
		rge_reg_put8(rgep, TX_MAX_PKTSIZE_REG, TX_PKTSIZE_STD);
		rge_reg_put16(rgep, RX_MAX_PKTSIZE_REG, RX_PKTSIZE_STD);
	} else {
		rge_reg_put8(rgep, TX_MAX_PKTSIZE_REG, TX_PKTSIZE_STD_8101E);
		rge_reg_put16(rgep, RX_MAX_PKTSIZE_REG, RX_PKTSIZE_STD_8101E);
	}

	/*
	 * Set receive configuration register
	 */
	val32 = rge_reg_get32(rgep, RX_CONFIG_REG);
	val32 &= RX_CONFIG_REG_RESV;
	if (rgep->promisc)
		val32 |= RX_ACCEPT_ALL_PKT;
	rge_reg_put32(rgep, RX_CONFIG_REG, val32 | chip->rxconfig);

	/*
	 * Set transmit configuration register
	 */
	val32 = rge_reg_get32(rgep, TX_CONFIG_REG);
	val32 &= TX_CONFIG_REG_RESV;
	rge_reg_put32(rgep, TX_CONFIG_REG, val32 | chip->txconfig);

	/*
	 * Set Tx/Rx descriptor register
	 * (ring base addresses, low then high 32 bits;
	 * the high-priority Tx ring is unused, so zeroed)
	 */
	val32 = rgep->tx_desc.cookie.dmac_laddress;
	rge_reg_put32(rgep, NORMAL_TX_RING_ADDR_LO_REG, val32);
	val32 = rgep->tx_desc.cookie.dmac_laddress >> 32;
	rge_reg_put32(rgep, NORMAL_TX_RING_ADDR_HI_REG, val32);
	rge_reg_put32(rgep, HIGH_TX_RING_ADDR_LO_REG, 0);
	rge_reg_put32(rgep, HIGH_TX_RING_ADDR_HI_REG, 0);
	val32 = rgep->rx_desc.cookie.dmac_laddress;
	rge_reg_put32(rgep, RX_RING_ADDR_LO_REG, val32);
	val32 = rgep->rx_desc.cookie.dmac_laddress >> 32;
	rge_reg_put32(rgep, RX_RING_ADDR_HI_REG, val32);

	/*
	 * Suggested setting from Realtek
	 */
	if (rgep->chipid.mac_ver != MAC_VER_8101E)
		rge_reg_put16(rgep, RESV_E2_REG, 0x282a);
	else
		rge_reg_put16(rgep, RESV_E2_REG, 0x0000);

	/*
	 * Set multicast register
	 * (promiscuous mode accepts everything; otherwise program the
	 * 64-bit multicast hash filter, byte-swapped as required)
	 */
	hashp = (uint32_t *)rgep->mcast_hash;
	if (rgep->promisc) {
		rge_reg_put32(rgep, MULTICAST_0_REG, ~0U);
		rge_reg_put32(rgep, MULTICAST_4_REG, ~0U);
	} else {
		rge_reg_put32(rgep, MULTICAST_0_REG, RGE_BSWAP_32(hashp[0]));
		rge_reg_put32(rgep, MULTICAST_4_REG, RGE_BSWAP_32(hashp[1]));
	}

	/*
	 * Msic register setting:
	 * -- Missed packet counter: clear it
	 * -- TimerInt Register
	 * -- Timer count register
	 */
	rge_reg_put32(rgep, RX_PKT_MISS_COUNT_REG, 0);
	rge_reg_put32(rgep, TIMER_INT_REG, TIMER_INT_NONE);
	rge_reg_put32(rgep, TIMER_COUNT_REG, 0);

	/*
	 * disable the Unicast Wakeup Frame capability
	 */
	rge_reg_clr8(rgep, RT_CONFIG_5_REG, RT_UNI_WAKE_FRAME);

	/*
	 * Return to normal network/host communication mode
	 */
	rge_reg_clr8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
	drv_usecwait(20);
}
1045
1046 /*
1047 * rge_chip_start() -- start the chip transmitting and/or receiving,
1048 * including enabling interrupts
1049 */
void rge_chip_start(rge_t *rgep);
#pragma	no_inline(rge_chip_start)

void
rge_chip_start(rge_t *rgep)
{
	/*
	 * Clear statistics: both the driver's software counters and the
	 * chip's DMA-resident statistics area start from zero.
	 */
	bzero(&rgep->stats, sizeof (rge_stats_t));
	DMA_ZERO(rgep->dma_area_stats);

	/*
	 * Start transmit/receive by setting both enable bits in the
	 * command register.
	 */
	rge_reg_set8(rgep, RT_COMMAND_REG,
	    RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);

	/*
	 * Enable interrupt: the standard mask, plus NO_TXDESC_INT on
	 * PCIE chipsets, plus RX_FIFO_OVERFLOW_INT (used by the rx
	 * watchdog logic -- see rge_intr()/rge_factotum_stall_check()).
	 */
	rgep->int_mask = RGE_INT_MASK;
	if (rgep->chipid.is_pcie) {
		rgep->int_mask |= NO_TXDESC_INT;
	}
	rgep->rx_fifo_ovf = 0;		/* reset rx-stall detector */
	rgep->int_mask |= RX_FIFO_OVERFLOW_INT;
	rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask);

	/*
	 * All done!
	 */
	rgep->rge_chip_state = RGE_CHIP_RUNNING;
}
1084
1085 /*
1086 * rge_chip_stop() -- stop board receiving
1087 *
1088 * Since this function is also invoked by rge_quiesce(), it
1089 * must not block; also, no tracing or logging takes place
1090 * when invoked by rge_quiesce().
1091 */
void rge_chip_stop(rge_t *rgep, boolean_t fault);
#pragma	no_inline(rge_chip_stop)

/*
 * Stop the chip: disable interrupts, clear pending ones, and turn off
 * the transmitter and receiver.  The final state is RGE_CHIP_FAULT
 * when 'fault' is set (so the factotum will later reset the chip),
 * RGE_CHIP_STOPPED otherwise.  Must not block (see the quiesce note
 * in the header comment above).
 */
void
rge_chip_stop(rge_t *rgep, boolean_t fault)
{
	/*
	 * Disable interrupt
	 */
	rgep->int_mask = INT_MASK_NONE;
	rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask);

	/*
	 * Clear pended interrupt
	 * (skipped while suspended, when the hardware isn't ours)
	 */
	if (!rgep->suspended) {
		rge_reg_put16(rgep, INT_STATUS_REG, INT_MASK_ALL);
	}

	/*
	 * Stop the board and disable transmit/receive
	 */
	rge_reg_clr8(rgep, RT_COMMAND_REG,
	    RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);

	if (fault)
		rgep->rge_chip_state = RGE_CHIP_FAULT;
	else
		rgep->rge_chip_state = RGE_CHIP_STOPPED;
}
1122
1123 /*
1124 * rge_get_mac_addr() -- get the MAC address on NIC
1125 */
1126 static void rge_get_mac_addr(rge_t *rgep);
1127 #pragma inline(rge_get_mac_addr)
1128
1129 static void
rge_get_mac_addr(rge_t * rgep)1130 rge_get_mac_addr(rge_t *rgep)
1131 {
1132 uint8_t *macaddr = rgep->netaddr;
1133 uint32_t val32;
1134
1135 /*
1136 * Read first 4-byte of mac address
1137 */
1138 val32 = rge_reg_get32(rgep, ID_0_REG);
1139 macaddr[0] = val32 & 0xff;
1140 val32 = val32 >> 8;
1141 macaddr[1] = val32 & 0xff;
1142 val32 = val32 >> 8;
1143 macaddr[2] = val32 & 0xff;
1144 val32 = val32 >> 8;
1145 macaddr[3] = val32 & 0xff;
1146
1147 /*
1148 * Read last 2-byte of mac address
1149 */
1150 val32 = rge_reg_get32(rgep, ID_4_REG);
1151 macaddr[4] = val32 & 0xff;
1152 val32 = val32 >> 8;
1153 macaddr[5] = val32 & 0xff;
1154 }
1155
1156 static void rge_set_mac_addr(rge_t *rgep);
1157 #pragma inline(rge_set_mac_addr)
1158
1159 static void
rge_set_mac_addr(rge_t * rgep)1160 rge_set_mac_addr(rge_t *rgep)
1161 {
1162 uint8_t *p = rgep->netaddr;
1163 uint32_t val32;
1164
1165 /*
1166 * Change to config register write enable mode
1167 */
1168 rge_reg_set8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
1169
1170 /*
1171 * Get first 4 bytes of mac address
1172 */
1173 val32 = p[3];
1174 val32 = val32 << 8;
1175 val32 |= p[2];
1176 val32 = val32 << 8;
1177 val32 |= p[1];
1178 val32 = val32 << 8;
1179 val32 |= p[0];
1180
1181 /*
1182 * Set first 4 bytes of mac address
1183 */
1184 rge_reg_put32(rgep, ID_0_REG, val32);
1185
1186 /*
1187 * Get last 2 bytes of mac address
1188 */
1189 val32 = p[5];
1190 val32 = val32 << 8;
1191 val32 |= p[4];
1192
1193 /*
1194 * Set last 2 bytes of mac address
1195 */
1196 val32 |= rge_reg_get32(rgep, ID_4_REG) & ~0xffff;
1197 rge_reg_put32(rgep, ID_4_REG, val32);
1198
1199 /*
1200 * Return to normal network/host communication mode
1201 */
1202 rge_reg_clr8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
1203 }
1204
1205 static void rge_set_multi_addr(rge_t *rgep);
1206 #pragma inline(rge_set_multi_addr)
1207
1208 static void
rge_set_multi_addr(rge_t * rgep)1209 rge_set_multi_addr(rge_t *rgep)
1210 {
1211 uint32_t *hashp;
1212
1213 hashp = (uint32_t *)rgep->mcast_hash;
1214
1215 /*
1216 * Change to config register write enable mode
1217 */
1218 if (rgep->chipid.mac_ver == MAC_VER_8169SC) {
1219 rge_reg_set8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
1220 }
1221 if (rgep->promisc) {
1222 rge_reg_put32(rgep, MULTICAST_0_REG, ~0U);
1223 rge_reg_put32(rgep, MULTICAST_4_REG, ~0U);
1224 } else {
1225 rge_reg_put32(rgep, MULTICAST_0_REG, RGE_BSWAP_32(hashp[0]));
1226 rge_reg_put32(rgep, MULTICAST_4_REG, RGE_BSWAP_32(hashp[1]));
1227 }
1228
1229 /*
1230 * Return to normal network/host communication mode
1231 */
1232 if (rgep->chipid.mac_ver == MAC_VER_8169SC) {
1233 rge_reg_clr8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
1234 }
1235 }
1236
1237 static void rge_set_promisc(rge_t *rgep);
1238 #pragma inline(rge_set_promisc)
1239
1240 static void
rge_set_promisc(rge_t * rgep)1241 rge_set_promisc(rge_t *rgep)
1242 {
1243 if (rgep->promisc)
1244 rge_reg_set32(rgep, RX_CONFIG_REG, RX_ACCEPT_ALL_PKT);
1245 else
1246 rge_reg_clr32(rgep, RX_CONFIG_REG, RX_ACCEPT_ALL_PKT);
1247 }
1248
1249 /*
1250 * rge_chip_sync() -- program the chip with the unicast MAC address,
1251 * the multicast hash table, the required level of promiscuity, and
1252 * the current loopback mode ...
1253 */
1254 void rge_chip_sync(rge_t *rgep, enum rge_sync_op todo);
1255 #pragma no_inline(rge_chip_sync)
1256
1257 void
rge_chip_sync(rge_t * rgep,enum rge_sync_op todo)1258 rge_chip_sync(rge_t *rgep, enum rge_sync_op todo)
1259 {
1260 switch (todo) {
1261 case RGE_GET_MAC:
1262 rge_get_mac_addr(rgep);
1263 break;
1264 case RGE_SET_MAC:
1265 /* Reprogram the unicast MAC address(es) ... */
1266 rge_set_mac_addr(rgep);
1267 break;
1268 case RGE_SET_MUL:
1269 /* Reprogram the hashed multicast address table ... */
1270 rge_set_multi_addr(rgep);
1271 break;
1272 case RGE_SET_PROMISC:
1273 /* Set or clear the PROMISCUOUS mode bit */
1274 rge_set_multi_addr(rgep);
1275 rge_set_promisc(rgep);
1276 break;
1277 default:
1278 break;
1279 }
1280 }
1281
void rge_chip_blank(void *arg, time_t ticks, uint_t count, int flag);
#pragma	no_inline(rge_chip_blank)

/*
 * Interrupt-blanking callback: deliberately a no-op for this driver;
 * all arguments are ignored.
 */
/* ARGSUSED */
void
rge_chip_blank(void *arg, time_t ticks, uint_t count, int flag)
{
	_NOTE(ARGUNUSED(arg, ticks, count));
}
1291
void rge_tx_trigger(rge_t *rgep);
#pragma	no_inline(rge_tx_trigger)

/*
 * Prod the chip to poll the normal-priority transmit descriptor ring
 * (used after new descriptors have been queued).
 */
void
rge_tx_trigger(rge_t *rgep)
{
	rge_reg_put8(rgep, TX_RINGS_POLL_REG, NORMAL_TX_RING_POLL);
}
1300
1301 void rge_hw_stats_dump(rge_t *rgep);
1302 #pragma no_inline(rge_tx_trigger)
1303
1304 void
rge_hw_stats_dump(rge_t * rgep)1305 rge_hw_stats_dump(rge_t *rgep)
1306 {
1307 int i = 0;
1308 uint32_t regval = 0;
1309
1310 if (rgep->rge_mac_state == RGE_MAC_STOPPED)
1311 return;
1312
1313 regval = rge_reg_get32(rgep, DUMP_COUNTER_REG_0);
1314 while (regval & DUMP_START) {
1315 drv_usecwait(100);
1316 if (++i > STATS_DUMP_LOOP) {
1317 RGE_DEBUG(("rge h/w statistics dump fail!"));
1318 rgep->rge_chip_state = RGE_CHIP_ERROR;
1319 return;
1320 }
1321 regval = rge_reg_get32(rgep, DUMP_COUNTER_REG_0);
1322 }
1323 DMA_SYNC(rgep->dma_area_stats, DDI_DMA_SYNC_FORKERNEL);
1324
1325 /*
1326 * Start H/W statistics dump for RTL8169 chip
1327 */
1328 rge_reg_set32(rgep, DUMP_COUNTER_REG_0, DUMP_START);
1329 }
1330
1331 /*
1332 * ========== Hardware interrupt handler ==========
1333 */
1334
1335 #undef RGE_DBG
1336 #define RGE_DBG RGE_DBG_INT /* debug flag for this code */
1337
1338 static void rge_wake_factotum(rge_t *rgep);
1339 #pragma inline(rge_wake_factotum)
1340
1341 static void
rge_wake_factotum(rge_t * rgep)1342 rge_wake_factotum(rge_t *rgep)
1343 {
1344 if (rgep->factotum_flag == 0) {
1345 rgep->factotum_flag = 1;
1346 (void) ddi_intr_trigger_softint(rgep->factotum_hdl, NULL);
1347 }
1348 }
1349
1350 /*
1351 * rge_intr() -- handle chip interrupts
1352 */
uint_t rge_intr(caddr_t arg1, caddr_t arg2);
#pragma	no_inline(rge_intr)

/*
 * rge_intr() -- handle chip interrupts
 *
 * Returns DDI_INTR_CLAIMED if any status bit in our interrupt mask
 * was set, DDI_INTR_UNCLAIMED otherwise (including while suspended).
 */
uint_t
rge_intr(caddr_t arg1, caddr_t arg2)
{
	rge_t *rgep = (rge_t *)arg1;
	uint16_t int_status;
	clock_t now;
	uint32_t tx_pkts;
	uint32_t rx_pkts;
	uint32_t poll_rate;
	uint32_t opt_pkts;
	uint32_t opt_intrs;
	boolean_t update_int_mask = B_FALSE;
	uint32_t itimer;

	_NOTE(ARGUNUSED(arg2))

	mutex_enter(rgep->genlock);

	/* Don't touch the hardware while suspended */
	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * Was this interrupt caused by our device...
	 */
	int_status = rge_reg_get16(rgep, INT_STATUS_REG);
	if (!(int_status & rgep->int_mask)) {
		mutex_exit(rgep->genlock);
		return (DDI_INTR_UNCLAIMED);
		/* indicate it wasn't our interrupt */
	}
	rgep->stats.intr++;

	/*
	 * Clear interrupt
	 * For PCIE chipset, we need disable interrupt first.
	 */
	if (rgep->chipid.is_pcie) {
		rge_reg_put16(rgep, INT_MASK_REG, INT_MASK_NONE);
		update_int_mask = B_TRUE;
	}
	rge_reg_put16(rgep, INT_STATUS_REG, int_status);

	/*
	 * Calculate optimal polling interval
	 *
	 * Adaptive interrupt coalescing: at most once per tick_delta,
	 * and only at 100M/1000M link speed, compare the last tick's
	 * Tx/Rx packet counts against a line-rate-derived optimum and
	 * switch between per-packet interrupts and timer-driven
	 * polling accordingly.
	 */
	now = ddi_get_lbolt();
	if (now - rgep->curr_tick >= rgep->tick_delta &&
	    (rgep->param_link_speed == RGE_SPEED_1000M ||
	    rgep->param_link_speed == RGE_SPEED_100M)) {
		/* number of rx and tx packets in the last tick */
		tx_pkts = rgep->stats.opackets - rgep->last_opackets;
		rx_pkts = rgep->stats.rpackets - rgep->last_rpackets;

		rgep->last_opackets = rgep->stats.opackets;
		rgep->last_rpackets = rgep->stats.rpackets;

		/* restore interrupt mask */
		rgep->int_mask |= TX_OK_INT | RX_OK_INT;
		if (rgep->chipid.is_pcie) {
			rgep->int_mask |= NO_TXDESC_INT;
		}

		/* optimal number of packets in a tick */
		if (rgep->param_link_speed == RGE_SPEED_1000M) {
			opt_pkts = (1000*1000*1000/8)/ETHERMTU/CLK_TICK;
		} else {
			opt_pkts = (100*1000*1000/8)/ETHERMTU/CLK_TICK;
		}

		/*
		 * calculate polling interval based on rx and tx packets
		 * in the last tick
		 */
		poll_rate = 0;
		if (now - rgep->curr_tick < 2*rgep->tick_delta) {
			/* Tx side: mask Tx interrupts if over the optimum */
			opt_intrs = opt_pkts/TX_COALESC;
			if (tx_pkts > opt_intrs) {
				poll_rate = max(tx_pkts/TX_COALESC, opt_intrs);
				rgep->int_mask &= ~(TX_OK_INT | NO_TXDESC_INT);
			}

			/* Rx side: likewise for receive interrupts */
			opt_intrs = opt_pkts/RX_COALESC;
			if (rx_pkts > opt_intrs) {
				opt_intrs = max(rx_pkts/RX_COALESC, opt_intrs);
				poll_rate = max(opt_intrs, poll_rate);
				rgep->int_mask &= ~RX_OK_INT;
			}
			/* ensure poll_rate reasonable */
			poll_rate = min(poll_rate, opt_pkts*4);
		}

		if (poll_rate) {
			/* move to polling mode */
			if (rgep->chipid.is_pcie) {
				itimer = (TIMER_CLK_PCIE/CLK_TICK)/poll_rate;
			} else {
				itimer = (TIMER_CLK_PCI/CLK_TICK)/poll_rate;
			}
		} else {
			/* move to normal mode */
			itimer = 0;
		}
		RGE_DEBUG(("%s: poll: itimer:%d int_mask:0x%x",
		    __func__, itimer, rgep->int_mask));
		rge_reg_put32(rgep, TIMER_INT_REG, itimer);

		/* update timestamp for statistics */
		rgep->curr_tick = now;

		/* reset timer */
		int_status |= TIME_OUT_INT;

		update_int_mask = B_TRUE;
	}

	if (int_status & TIME_OUT_INT) {
		rge_reg_put32(rgep, TIMER_COUNT_REG, 0);
	}

	/* flush post writes */
	(void) rge_reg_get16(rgep, INT_STATUS_REG);

	/*
	 * Cable link change interrupt
	 */
	if (int_status & LINK_CHANGE_INT) {
		rge_chip_cyclic(rgep);
	}

	/*
	 * Rx-stall watchdog arming: a FIFO overflow starts detection,
	 * a subsequent normal receive stops it (see
	 * rge_factotum_stall_check()).
	 */
	if (int_status & RX_FIFO_OVERFLOW_INT) {
		/* start rx watchdog timeout detection */
		rgep->rx_fifo_ovf = 1;
		if (rgep->int_mask & RX_FIFO_OVERFLOW_INT) {
			rgep->int_mask &= ~RX_FIFO_OVERFLOW_INT;
			update_int_mask = B_TRUE;
		}
	} else if (int_status & RGE_RX_INT) {
		/* stop rx watchdog timeout detection */
		rgep->rx_fifo_ovf = 0;
		if ((rgep->int_mask & RX_FIFO_OVERFLOW_INT) == 0) {
			rgep->int_mask |= RX_FIFO_OVERFLOW_INT;
			update_int_mask = B_TRUE;
		}
	}

	mutex_exit(rgep->genlock);

	/*
	 * Receive interrupt
	 * (note: genlock is dropped before calling rge_receive())
	 */
	if (int_status & RGE_RX_INT)
		rge_receive(rgep);

	/*
	 * Transmit interrupt
	 */
	if (int_status & TX_ERR_INT) {
		RGE_REPORT((rgep, "tx error happened, resetting the chip "));
		mutex_enter(rgep->genlock);
		rgep->rge_chip_state = RGE_CHIP_ERROR;
		mutex_exit(rgep->genlock);
	} else if ((rgep->chipid.is_pcie && (int_status & NO_TXDESC_INT)) ||
	    ((int_status & TX_OK_INT) && rgep->tx_free < RGE_SEND_SLOTS/8)) {
		/* running low on send slots: schedule a resched softint */
		(void) ddi_intr_trigger_softint(rgep->resched_hdl, NULL);
	}

	/*
	 * System error interrupt
	 */
	if (int_status & SYS_ERR_INT) {
		RGE_REPORT((rgep, "sys error happened, resetting the chip "));
		mutex_enter(rgep->genlock);
		rgep->rge_chip_state = RGE_CHIP_ERROR;
		mutex_exit(rgep->genlock);
	}

	/*
	 * Re-enable interrupt for PCIE chipset or install new int_mask
	 */
	if (update_int_mask)
		rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask);

	return (DDI_INTR_CLAIMED);	/* indicate it was our interrupt */
}
1542
1543 /*
1544 * ========== Factotum, implemented as a softint handler ==========
1545 */
1546
1547 #undef RGE_DBG
1548 #define RGE_DBG RGE_DBG_FACT /* debug flag for this code */
1549
1550 static boolean_t rge_factotum_link_check(rge_t *rgep);
1551 #pragma no_inline(rge_factotum_link_check)
1552
1553 static boolean_t
rge_factotum_link_check(rge_t * rgep)1554 rge_factotum_link_check(rge_t *rgep)
1555 {
1556 uint8_t media_status;
1557 int32_t link;
1558
1559 media_status = rge_reg_get8(rgep, PHY_STATUS_REG);
1560 link = (media_status & PHY_STATUS_LINK_UP) ?
1561 LINK_STATE_UP : LINK_STATE_DOWN;
1562 if (rgep->param_link_up != link) {
1563 /*
1564 * Link change.
1565 */
1566 rgep->param_link_up = link;
1567
1568 if (link == LINK_STATE_UP) {
1569 if (media_status & PHY_STATUS_1000MF) {
1570 rgep->param_link_speed = RGE_SPEED_1000M;
1571 rgep->param_link_duplex = LINK_DUPLEX_FULL;
1572 } else {
1573 rgep->param_link_speed =
1574 (media_status & PHY_STATUS_100M) ?
1575 RGE_SPEED_100M : RGE_SPEED_10M;
1576 rgep->param_link_duplex =
1577 (media_status & PHY_STATUS_DUPLEX_FULL) ?
1578 LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
1579 }
1580 }
1581 return (B_TRUE);
1582 }
1583 return (B_FALSE);
1584 }
1585
1586 /*
1587 * Factotum routine to check for Tx stall, using the 'watchdog' counter
1588 */
static boolean_t rge_factotum_stall_check(rge_t *rgep);
#pragma	no_inline(rge_factotum_stall_check)

/*
 * Check for Rx/Tx stall.  Returns B_TRUE if the chip appears stalled
 * (caller then stops/faults it); caller must hold genlock.
 */
static boolean_t
rge_factotum_stall_check(rge_t *rgep)
{
	uint32_t dogval;

	ASSERT(mutex_owned(rgep->genlock));

	/*
	 * Specific check for RX stall ...
	 * rx_fifo_ovf is set to 1 on an RX FIFO overflow interrupt and
	 * cleared on a normal receive (see rge_intr()).  Shifting it
	 * left once per pass means it only exceeds
	 * rge_rx_watchdog_count after several consecutive passes with
	 * no normal receive -- i.e. a genuine rx hang.
	 */
	rgep->rx_fifo_ovf <<= 1;
	if (rgep->rx_fifo_ovf > rge_rx_watchdog_count) {
		RGE_REPORT((rgep, "rx_hang detected"));
		return (B_TRUE);
	}

	/*
	 * Specific check for Tx stall ...
	 *
	 * The 'watchdog' counter is incremented whenever a packet
	 * is queued, reset to 1 when some (but not all) buffers
	 * are reclaimed, reset to 0 (disabled) when all buffers
	 * are reclaimed, and shifted left here. If it exceeds the
	 * threshold value, the chip is assumed to have stalled and
	 * is put into the ERROR state. The factotum will then reset
	 * it on the next pass.
	 *
	 * All of which should ensure that we don't get into a state
	 * where packets are left pending indefinitely!
	 */
	if (rgep->resched_needed)
		(void) ddi_intr_trigger_softint(rgep->resched_hdl, NULL);
	dogval = rge_atomic_shl32(&rgep->watchdog, 1);
	if (dogval < rge_watchdog_count)
		return (B_FALSE);

	RGE_REPORT((rgep, "Tx stall detected, watchdog code 0x%x", dogval));
	return (B_TRUE);

}
1632
1633 /*
1634 * The factotum is woken up when there's something to do that we'd rather
1635 * not do from inside a hardware interrupt handler or high-level cyclic.
1636 * Its two main tasks are:
1637 * reset & restart the chip after an error
1638 * check the link status whenever necessary
1639 */
uint_t rge_chip_factotum(caddr_t arg1, caddr_t arg2);
#pragma	no_inline(rge_chip_factotum)

/*
 * Factotum softint handler: link-state checking, stall detection,
 * and automatic recovery after a fault (see the header comment
 * above).  Claims the softint only if rge_wake_factotum() latched
 * factotum_flag.
 */
uint_t
rge_chip_factotum(caddr_t arg1, caddr_t arg2)
{
	rge_t *rgep;
	uint_t result;
	boolean_t error;
	boolean_t linkchg;

	rgep = (rge_t *)arg1;
	_NOTE(ARGUNUSED(arg2))

	/* Not woken by rge_wake_factotum(): not our softint */
	if (rgep->factotum_flag == 0)
		return (DDI_INTR_UNCLAIMED);

	rgep->factotum_flag = 0;
	result = DDI_INTR_CLAIMED;
	error = B_FALSE;
	linkchg = B_FALSE;

	mutex_enter(rgep->genlock);
	switch (rgep->rge_chip_state) {
	default:
		break;

	case RGE_CHIP_RUNNING:
		/* Normal operation: check link state and Tx/Rx stalls */
		linkchg = rge_factotum_link_check(rgep);
		error = rge_factotum_stall_check(rgep);
		break;

	case RGE_CHIP_ERROR:
		error = B_TRUE;
		break;

	case RGE_CHIP_FAULT:
		/*
		 * Fault detected, time to reset ...
		 */
		if (rge_autorecover) {
			RGE_REPORT((rgep, "automatic recovery activated"));
			rge_restart(rgep);
		}
		break;
	}

	/*
	 * If an error is detected, stop the chip now, marking it as
	 * faulty, so that it will be reset next time through ...
	 */
	if (error)
		rge_chip_stop(rgep, B_TRUE);
	mutex_exit(rgep->genlock);

	/*
	 * If the link state changed, tell the world about it.
	 * Note: can't do this while still holding the mutex.
	 */
	if (linkchg)
		mac_link_update(rgep->mh, rgep->param_link_up);

	return (result);
}
1704
1705 /*
1706 * High-level cyclic handler
1707 *
1708 * This routine schedules a (low-level) softint callback to the
1709 * factotum, and prods the chip to update the status block (which
1710 * will cause a hardware interrupt when complete).
1711 */
1712 void rge_chip_cyclic(void *arg);
1713 #pragma no_inline(rge_chip_cyclic)
1714
1715 void
rge_chip_cyclic(void * arg)1716 rge_chip_cyclic(void *arg)
1717 {
1718 rge_t *rgep;
1719
1720 rgep = arg;
1721
1722 switch (rgep->rge_chip_state) {
1723 default:
1724 return;
1725
1726 case RGE_CHIP_RUNNING:
1727 rge_phy_check(rgep);
1728 if (rgep->tx_free < RGE_SEND_SLOTS)
1729 rge_send_recycle(rgep);
1730 break;
1731
1732 case RGE_CHIP_FAULT:
1733 case RGE_CHIP_ERROR:
1734 break;
1735 }
1736
1737 rge_wake_factotum(rgep);
1738 }
1739
1740
1741 /*
1742 * ========== Ioctl subfunctions ==========
1743 */
1744
1745 #undef RGE_DBG
1746 #define RGE_DBG RGE_DBG_PPIO /* debug flag for this code */
1747
1748 #if RGE_DEBUGGING || RGE_DO_PPIO
1749
1750 static void rge_chip_peek_cfg(rge_t *rgep, rge_peekpoke_t *ppd);
1751 #pragma no_inline(rge_chip_peek_cfg)
1752
1753 static void
rge_chip_peek_cfg(rge_t * rgep,rge_peekpoke_t * ppd)1754 rge_chip_peek_cfg(rge_t *rgep, rge_peekpoke_t *ppd)
1755 {
1756 uint64_t regval;
1757 uint64_t regno;
1758
1759 RGE_TRACE(("rge_chip_peek_cfg($%p, $%p)",
1760 (void *)rgep, (void *)ppd));
1761
1762 regno = ppd->pp_acc_offset;
1763
1764 switch (ppd->pp_acc_size) {
1765 case 1:
1766 regval = pci_config_get8(rgep->cfg_handle, regno);
1767 break;
1768
1769 case 2:
1770 regval = pci_config_get16(rgep->cfg_handle, regno);
1771 break;
1772
1773 case 4:
1774 regval = pci_config_get32(rgep->cfg_handle, regno);
1775 break;
1776
1777 case 8:
1778 regval = pci_config_get64(rgep->cfg_handle, regno);
1779 break;
1780 }
1781
1782 ppd->pp_acc_data = regval;
1783 }
1784
1785 static void rge_chip_poke_cfg(rge_t *rgep, rge_peekpoke_t *ppd);
1786 #pragma no_inline(rge_chip_poke_cfg)
1787
1788 static void
rge_chip_poke_cfg(rge_t * rgep,rge_peekpoke_t * ppd)1789 rge_chip_poke_cfg(rge_t *rgep, rge_peekpoke_t *ppd)
1790 {
1791 uint64_t regval;
1792 uint64_t regno;
1793
1794 RGE_TRACE(("rge_chip_poke_cfg($%p, $%p)",
1795 (void *)rgep, (void *)ppd));
1796
1797 regno = ppd->pp_acc_offset;
1798 regval = ppd->pp_acc_data;
1799
1800 switch (ppd->pp_acc_size) {
1801 case 1:
1802 pci_config_put8(rgep->cfg_handle, regno, regval);
1803 break;
1804
1805 case 2:
1806 pci_config_put16(rgep->cfg_handle, regno, regval);
1807 break;
1808
1809 case 4:
1810 pci_config_put32(rgep->cfg_handle, regno, regval);
1811 break;
1812
1813 case 8:
1814 pci_config_put64(rgep->cfg_handle, regno, regval);
1815 break;
1816 }
1817 }
1818
1819 static void rge_chip_peek_reg(rge_t *rgep, rge_peekpoke_t *ppd);
1820 #pragma no_inline(rge_chip_peek_reg)
1821
1822 static void
rge_chip_peek_reg(rge_t * rgep,rge_peekpoke_t * ppd)1823 rge_chip_peek_reg(rge_t *rgep, rge_peekpoke_t *ppd)
1824 {
1825 uint64_t regval;
1826 void *regaddr;
1827
1828 RGE_TRACE(("rge_chip_peek_reg($%p, $%p)",
1829 (void *)rgep, (void *)ppd));
1830
1831 regaddr = PIO_ADDR(rgep, ppd->pp_acc_offset);
1832
1833 switch (ppd->pp_acc_size) {
1834 case 1:
1835 regval = ddi_get8(rgep->io_handle, regaddr);
1836 break;
1837
1838 case 2:
1839 regval = ddi_get16(rgep->io_handle, regaddr);
1840 break;
1841
1842 case 4:
1843 regval = ddi_get32(rgep->io_handle, regaddr);
1844 break;
1845
1846 case 8:
1847 regval = ddi_get64(rgep->io_handle, regaddr);
1848 break;
1849 }
1850
1851 ppd->pp_acc_data = regval;
1852 }
1853
1854 static void rge_chip_poke_reg(rge_t *rgep, rge_peekpoke_t *ppd);
1855 #pragma no_inline(rge_chip_peek_reg)
1856
1857 static void
rge_chip_poke_reg(rge_t * rgep,rge_peekpoke_t * ppd)1858 rge_chip_poke_reg(rge_t *rgep, rge_peekpoke_t *ppd)
1859 {
1860 uint64_t regval;
1861 void *regaddr;
1862
1863 RGE_TRACE(("rge_chip_poke_reg($%p, $%p)",
1864 (void *)rgep, (void *)ppd));
1865
1866 regaddr = PIO_ADDR(rgep, ppd->pp_acc_offset);
1867 regval = ppd->pp_acc_data;
1868
1869 switch (ppd->pp_acc_size) {
1870 case 1:
1871 ddi_put8(rgep->io_handle, regaddr, regval);
1872 break;
1873
1874 case 2:
1875 ddi_put16(rgep->io_handle, regaddr, regval);
1876 break;
1877
1878 case 4:
1879 ddi_put32(rgep->io_handle, regaddr, regval);
1880 break;
1881
1882 case 8:
1883 ddi_put64(rgep->io_handle, regaddr, regval);
1884 break;
1885 }
1886 }
1887
1888 static void rge_chip_peek_mii(rge_t *rgep, rge_peekpoke_t *ppd);
1889 #pragma no_inline(rge_chip_peek_mii)
1890
1891 static void
rge_chip_peek_mii(rge_t * rgep,rge_peekpoke_t * ppd)1892 rge_chip_peek_mii(rge_t *rgep, rge_peekpoke_t *ppd)
1893 {
1894 RGE_TRACE(("rge_chip_peek_mii($%p, $%p)",
1895 (void *)rgep, (void *)ppd));
1896
1897 ppd->pp_acc_data = rge_mii_get16(rgep, ppd->pp_acc_offset/2);
1898 }
1899
1900 static void rge_chip_poke_mii(rge_t *rgep, rge_peekpoke_t *ppd);
1901 #pragma no_inline(rge_chip_poke_mii)
1902
1903 static void
rge_chip_poke_mii(rge_t * rgep,rge_peekpoke_t * ppd)1904 rge_chip_poke_mii(rge_t *rgep, rge_peekpoke_t *ppd)
1905 {
1906 RGE_TRACE(("rge_chip_poke_mii($%p, $%p)",
1907 (void *)rgep, (void *)ppd));
1908
1909 rge_mii_put16(rgep, ppd->pp_acc_offset/2, ppd->pp_acc_data);
1910 }
1911
1912 static void rge_chip_peek_mem(rge_t *rgep, rge_peekpoke_t *ppd);
1913 #pragma no_inline(rge_chip_peek_mem)
1914
1915 static void
rge_chip_peek_mem(rge_t * rgep,rge_peekpoke_t * ppd)1916 rge_chip_peek_mem(rge_t *rgep, rge_peekpoke_t *ppd)
1917 {
1918 uint64_t regval;
1919 void *vaddr;
1920
1921 RGE_TRACE(("rge_chip_peek_rge($%p, $%p)",
1922 (void *)rgep, (void *)ppd));
1923
1924 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
1925
1926 switch (ppd->pp_acc_size) {
1927 case 1:
1928 regval = *(uint8_t *)vaddr;
1929 break;
1930
1931 case 2:
1932 regval = *(uint16_t *)vaddr;
1933 break;
1934
1935 case 4:
1936 regval = *(uint32_t *)vaddr;
1937 break;
1938
1939 case 8:
1940 regval = *(uint64_t *)vaddr;
1941 break;
1942 }
1943
1944 RGE_DEBUG(("rge_chip_peek_mem($%p, $%p) peeked 0x%llx from $%p",
1945 (void *)rgep, (void *)ppd, regval, vaddr));
1946
1947 ppd->pp_acc_data = regval;
1948 }
1949
1950 static void rge_chip_poke_mem(rge_t *rgep, rge_peekpoke_t *ppd);
1951 #pragma no_inline(rge_chip_poke_mem)
1952
1953 static void
rge_chip_poke_mem(rge_t * rgep,rge_peekpoke_t * ppd)1954 rge_chip_poke_mem(rge_t *rgep, rge_peekpoke_t *ppd)
1955 {
1956 uint64_t regval;
1957 void *vaddr;
1958
1959 RGE_TRACE(("rge_chip_poke_mem($%p, $%p)",
1960 (void *)rgep, (void *)ppd));
1961
1962 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
1963 regval = ppd->pp_acc_data;
1964
1965 RGE_DEBUG(("rge_chip_poke_mem($%p, $%p) poking 0x%llx at $%p",
1966 (void *)rgep, (void *)ppd, regval, vaddr));
1967
1968 switch (ppd->pp_acc_size) {
1969 case 1:
1970 *(uint8_t *)vaddr = (uint8_t)regval;
1971 break;
1972
1973 case 2:
1974 *(uint16_t *)vaddr = (uint16_t)regval;
1975 break;
1976
1977 case 4:
1978 *(uint32_t *)vaddr = (uint32_t)regval;
1979 break;
1980
1981 case 8:
1982 *(uint64_t *)vaddr = (uint64_t)regval;
1983 break;
1984 }
1985 }
1986
1987 static enum ioc_reply rge_pp_ioctl(rge_t *rgep, int cmd, mblk_t *mp,
1988 struct iocblk *iocp);
1989 #pragma no_inline(rge_pp_ioctl)
1990
1991 static enum ioc_reply
rge_pp_ioctl(rge_t * rgep,int cmd,mblk_t * mp,struct iocblk * iocp)1992 rge_pp_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp)
1993 {
1994 void (*ppfn)(rge_t *rgep, rge_peekpoke_t *ppd);
1995 rge_peekpoke_t *ppd;
1996 dma_area_t *areap;
1997 uint64_t sizemask;
1998 uint64_t mem_va;
1999 uint64_t maxoff;
2000 boolean_t peek;
2001
2002 switch (cmd) {
2003 default:
2004 /* NOTREACHED */
2005 rge_error(rgep, "rge_pp_ioctl: invalid cmd 0x%x", cmd);
2006 return (IOC_INVAL);
2007
2008 case RGE_PEEK:
2009 peek = B_TRUE;
2010 break;
2011
2012 case RGE_POKE:
2013 peek = B_FALSE;
2014 break;
2015 }
2016
2017 /*
2018 * Validate format of ioctl
2019 */
2020 if (iocp->ioc_count != sizeof (rge_peekpoke_t))
2021 return (IOC_INVAL);
2022 if (mp->b_cont == NULL)
2023 return (IOC_INVAL);
2024 ppd = (rge_peekpoke_t *)mp->b_cont->b_rptr;
2025
2026 /*
2027 * Validate request parameters
2028 */
2029 switch (ppd->pp_acc_space) {
2030 default:
2031 return (IOC_INVAL);
2032
2033 case RGE_PP_SPACE_CFG:
2034 /*
2035 * Config space
2036 */
2037 sizemask = 8|4|2|1;
2038 mem_va = 0;
2039 maxoff = PCI_CONF_HDR_SIZE;
2040 ppfn = peek ? rge_chip_peek_cfg : rge_chip_poke_cfg;
2041 break;
2042
2043 case RGE_PP_SPACE_REG:
2044 /*
2045 * Memory-mapped I/O space
2046 */
2047 sizemask = 8|4|2|1;
2048 mem_va = 0;
2049 maxoff = RGE_REGISTER_MAX;
2050 ppfn = peek ? rge_chip_peek_reg : rge_chip_poke_reg;
2051 break;
2052
2053 case RGE_PP_SPACE_MII:
2054 /*
2055 * PHY's MII registers
2056 * NB: all PHY registers are two bytes, but the
2057 * addresses increment in ones (word addressing).
2058 * So we scale the address here, then undo the
2059 * transformation inside the peek/poke functions.
2060 */
2061 ppd->pp_acc_offset *= 2;
2062 sizemask = 2;
2063 mem_va = 0;
2064 maxoff = (MII_MAXREG+1)*2;
2065 ppfn = peek ? rge_chip_peek_mii : rge_chip_poke_mii;
2066 break;
2067
2068 case RGE_PP_SPACE_RGE:
2069 /*
2070 * RGE data structure!
2071 */
2072 sizemask = 8|4|2|1;
2073 mem_va = (uintptr_t)rgep;
2074 maxoff = sizeof (*rgep);
2075 ppfn = peek ? rge_chip_peek_mem : rge_chip_poke_mem;
2076 break;
2077
2078 case RGE_PP_SPACE_STATISTICS:
2079 case RGE_PP_SPACE_TXDESC:
2080 case RGE_PP_SPACE_TXBUFF:
2081 case RGE_PP_SPACE_RXDESC:
2082 case RGE_PP_SPACE_RXBUFF:
2083 /*
2084 * Various DMA_AREAs
2085 */
2086 switch (ppd->pp_acc_space) {
2087 case RGE_PP_SPACE_TXDESC:
2088 areap = &rgep->dma_area_txdesc;
2089 break;
2090 case RGE_PP_SPACE_RXDESC:
2091 areap = &rgep->dma_area_rxdesc;
2092 break;
2093 case RGE_PP_SPACE_STATISTICS:
2094 areap = &rgep->dma_area_stats;
2095 break;
2096 }
2097
2098 sizemask = 8|4|2|1;
2099 mem_va = (uintptr_t)areap->mem_va;
2100 maxoff = areap->alength;
2101 ppfn = peek ? rge_chip_peek_mem : rge_chip_poke_mem;
2102 break;
2103 }
2104
2105 switch (ppd->pp_acc_size) {
2106 default:
2107 return (IOC_INVAL);
2108
2109 case 8:
2110 case 4:
2111 case 2:
2112 case 1:
2113 if ((ppd->pp_acc_size & sizemask) == 0)
2114 return (IOC_INVAL);
2115 break;
2116 }
2117
2118 if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
2119 return (IOC_INVAL);
2120
2121 if (ppd->pp_acc_offset >= maxoff)
2122 return (IOC_INVAL);
2123
2124 if (ppd->pp_acc_offset+ppd->pp_acc_size > maxoff)
2125 return (IOC_INVAL);
2126
2127 /*
2128 * All OK - go do it!
2129 */
2130 ppd->pp_acc_offset += mem_va;
2131 (*ppfn)(rgep, ppd);
2132 return (peek ? IOC_REPLY : IOC_ACK);
2133 }
2134
2135 static enum ioc_reply rge_diag_ioctl(rge_t *rgep, int cmd, mblk_t *mp,
2136 struct iocblk *iocp);
2137 #pragma no_inline(rge_diag_ioctl)
2138
2139 static enum ioc_reply
rge_diag_ioctl(rge_t * rgep,int cmd,mblk_t * mp,struct iocblk * iocp)2140 rge_diag_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp)
2141 {
2142 ASSERT(mutex_owned(rgep->genlock));
2143
2144 switch (cmd) {
2145 default:
2146 /* NOTREACHED */
2147 rge_error(rgep, "rge_diag_ioctl: invalid cmd 0x%x", cmd);
2148 return (IOC_INVAL);
2149
2150 case RGE_DIAG:
2151 /*
2152 * Currently a no-op
2153 */
2154 return (IOC_ACK);
2155
2156 case RGE_PEEK:
2157 case RGE_POKE:
2158 return (rge_pp_ioctl(rgep, cmd, mp, iocp));
2159
2160 case RGE_PHY_RESET:
2161 return (IOC_RESTART_ACK);
2162
2163 case RGE_SOFT_RESET:
2164 case RGE_HARD_RESET:
2165 /*
2166 * Reset and reinitialise the 570x hardware
2167 */
2168 rge_restart(rgep);
2169 return (IOC_ACK);
2170 }
2171
2172 /* NOTREACHED */
2173 }
2174
2175 #endif /* RGE_DEBUGGING || RGE_DO_PPIO */
2176
2177 static enum ioc_reply rge_mii_ioctl(rge_t *rgep, int cmd, mblk_t *mp,
2178 struct iocblk *iocp);
2179 #pragma no_inline(rge_mii_ioctl)
2180
2181 static enum ioc_reply
rge_mii_ioctl(rge_t * rgep,int cmd,mblk_t * mp,struct iocblk * iocp)2182 rge_mii_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp)
2183 {
2184 struct rge_mii_rw *miirwp;
2185
2186 /*
2187 * Validate format of ioctl
2188 */
2189 if (iocp->ioc_count != sizeof (struct rge_mii_rw))
2190 return (IOC_INVAL);
2191 if (mp->b_cont == NULL)
2192 return (IOC_INVAL);
2193 miirwp = (struct rge_mii_rw *)mp->b_cont->b_rptr;
2194
2195 /*
2196 * Validate request parameters ...
2197 */
2198 if (miirwp->mii_reg > MII_MAXREG)
2199 return (IOC_INVAL);
2200
2201 switch (cmd) {
2202 default:
2203 /* NOTREACHED */
2204 rge_error(rgep, "rge_mii_ioctl: invalid cmd 0x%x", cmd);
2205 return (IOC_INVAL);
2206
2207 case RGE_MII_READ:
2208 miirwp->mii_data = rge_mii_get16(rgep, miirwp->mii_reg);
2209 return (IOC_REPLY);
2210
2211 case RGE_MII_WRITE:
2212 rge_mii_put16(rgep, miirwp->mii_reg, miirwp->mii_data);
2213 return (IOC_ACK);
2214 }
2215
2216 /* NOTREACHED */
2217 }
2218
enum ioc_reply rge_chip_ioctl(rge_t *rgep, queue_t *wq, mblk_t *mp,
	struct iocblk *iocp);
#pragma	no_inline(rge_chip_ioctl)

/*
 * Top-level dispatcher for chip-specific ioctls.  Diagnostic commands
 * are handled only when compiled with RGE_DEBUGGING or RGE_DO_PPIO;
 * MII register access is always available.  Caller must hold genlock.
 */
enum ioc_reply
rge_chip_ioctl(rge_t *rgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	int cmd;

	RGE_TRACE(("rge_chip_ioctl($%p, $%p, $%p, $%p)",
	    (void *)rgep, (void *)wq, (void *)mp, (void *)iocp));

	ASSERT(mutex_owned(rgep->genlock));

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		rge_error(rgep, "rge_chip_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case RGE_DIAG:
	case RGE_PEEK:
	case RGE_POKE:
	case RGE_PHY_RESET:
	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
#if	RGE_DEBUGGING || RGE_DO_PPIO
		return (rge_diag_ioctl(rgep, cmd, mp, iocp));
#else
		/* diagnostics not compiled in */
		return (IOC_INVAL);
#endif	/* RGE_DEBUGGING || RGE_DO_PPIO */

	case RGE_MII_READ:
	case RGE_MII_WRITE:
		return (rge_mii_ioctl(rgep, cmd, mp, iocp));

	}

	/* NOTREACHED */
}
2260