xref: /openbsd-src/sys/dev/pci/if_rge.c (revision 58fbf5d6aa35e3d66f2c32c61d2f38824a990e85)
1 /*	$OpenBSD: if_rge.c,v 1.12 2021/02/11 16:22:06 stsp Exp $	*/
2 
3 /*
4  * Copyright (c) 2019, 2020 Kevin Lo <kevlo@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
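
/*
 * Driver for Realtek RTL8125-based 2.5Gb Ethernet controllers.
 */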
18 
19 #include "bpfilter.h"
20 #include "vlan.h"
21 
22 #include <sys/param.h>
23 #include <sys/systm.h>
24 #include <sys/sockio.h>
25 #include <sys/mbuf.h>
26 #include <sys/malloc.h>
27 #include <sys/kernel.h>
28 #include <sys/socket.h>
29 #include <sys/device.h>
30 #include <sys/endian.h>
31 
32 #include <net/if.h>
33 #include <net/if_media.h>
34 
35 #include <netinet/in.h>
36 #include <netinet/if_ether.h>
37 
38 #if NBPFILTER > 0
39 #include <net/bpf.h>
40 #endif
41 
42 #include <machine/bus.h>
43 #include <machine/intr.h>
44 
45 #include <dev/mii/mii.h>
46 
47 #include <dev/pci/pcivar.h>
48 #include <dev/pci/pcireg.h>
49 #include <dev/pci/pcidevs.h>
50 
51 #include <dev/pci/if_rgereg.h>
52 
53 #ifdef RGE_DEBUG
54 #define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
55 int rge_debug = 0;
56 #else
57 #define DPRINTF(x)
58 #endif
59 
60 int		rge_match(struct device *, void *, void *);
61 void		rge_attach(struct device *, struct device *, void *);
62 int		rge_activate(struct device *, int);
63 int		rge_intr(void *);
64 int		rge_encap(struct rge_softc *, struct mbuf *, int);
65 int		rge_ioctl(struct ifnet *, u_long, caddr_t);
66 void		rge_start(struct ifqueue *);
67 void		rge_watchdog(struct ifnet *);
68 int		rge_init(struct ifnet *);
69 void		rge_stop(struct ifnet *);
70 int		rge_ifmedia_upd(struct ifnet *);
71 void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
72 int		rge_allocmem(struct rge_softc *);
73 int		rge_newbuf(struct rge_softc *);
74 void		rge_discard_rxbuf(struct rge_softc *, int);
75 void		rge_rx_list_init(struct rge_softc *);
76 void		rge_tx_list_init(struct rge_softc *);
77 void		rge_fill_rx_ring(struct rge_softc *);
78 int		rge_rxeof(struct rge_softc *);
79 int		rge_txeof(struct rge_softc *);
80 void		rge_reset(struct rge_softc *);
81 void		rge_iff(struct rge_softc *);
82 void		rge_set_phy_power(struct rge_softc *, int);
83 void		rge_phy_config(struct rge_softc *);
84 void		rge_phy_config_mac_cfg2(struct rge_softc *);
85 void		rge_phy_config_mac_cfg3(struct rge_softc *);
86 void		rge_phy_config_mac_cfg4(struct rge_softc *);
87 void		rge_phy_config_mac_cfg5(struct rge_softc *);
88 void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
89 void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
90 void		rge_get_macaddr(struct rge_softc *, uint8_t *);
91 void		rge_hw_init(struct rge_softc *);
92 void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
93 void		rge_patch_phy_mcu(struct rge_softc *, int);
94 void		rge_add_media_types(struct rge_softc *);
95 void		rge_config_imtype(struct rge_softc *, int);
96 void		rge_disable_hw_im(struct rge_softc *);
97 void		rge_disable_sim_im(struct rge_softc *);
98 void		rge_setup_sim_im(struct rge_softc *);
99 void		rge_setup_intr(struct rge_softc *, int);
100 void		rge_exit_oob(struct rge_softc *);
101 void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
102 uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
103 void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
104 uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
105 void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
106 uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
107 void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
108 uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
109 void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
110 uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
111 int		rge_get_link_status(struct rge_softc *);
112 void		rge_txstart(void *);
113 void		rge_tick(void *);
114 void		rge_link_state(struct rge_softc *);
115 #ifndef SMALL_KERNEL
116 int		rge_wol(struct ifnet *, int);
117 void		rge_wol_power(struct rge_softc *);
118 #endif
119 
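/*
 * Per-revision PHY MCU patch tables: register/value pairs that
 * rge_phy_config_mcu() writes to the PHY.  The contents are expanded
 * from macros defined in if_rgereg.h.
 */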
120 static const struct {
121 	uint16_t reg;
122 	uint16_t val;
123 } rtl8125_mac_cfg2_mcu[] = {
124 	RTL8125_MAC_CFG2_MCU
125 }, rtl8125_mac_cfg3_mcu[] = {
126 	RTL8125_MAC_CFG3_MCU
127 }, rtl8125_mac_cfg4_mcu[] = {
128 	RTL8125_MAC_CFG4_MCU
129 }, rtl8125_mac_cfg5_mcu[] = {
130 	RTL8125_MAC_CFG5_MCU
131 };
132 
133 struct cfattach rge_ca = {
134 	sizeof(struct rge_softc), rge_match, rge_attach, NULL, rge_activate
135 };
136 
137 struct cfdriver rge_cd = {
138 	NULL, "rge", DV_IFNET
139 };
140 
141 const struct pci_matchid rge_devices[] = {
142 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
143 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8125 }
144 };
145 
146 int
147 rge_match(struct device *parent, void *match, void *aux)
148 {
149 	return (pci_matchbyid((struct pci_attach_args *)aux, rge_devices,
150 	    nitems(rge_devices)));
151 }
152 
153 void
154 rge_attach(struct device *parent, struct device *self, void *aux)
155 {
156 	struct rge_softc *sc = (struct rge_softc *)self;
157 	struct pci_attach_args *pa = aux;
158 	pci_chipset_tag_t pc = pa->pa_pc;
159 	pci_intr_handle_t ih;
160 	const char *intrstr = NULL;
161 	struct ifnet *ifp;
162 	pcireg_t reg;
163 	uint32_t hwrev;
164 	uint8_t eaddr[ETHER_ADDR_LEN];
165 	int offset;
166 
167 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
168 
169 	/*
170 	 * Map control/status registers.
171 	 */
172 	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
173 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
174 	    NULL, &sc->rge_bsize, 0)) {
175 		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
176 		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
177 		    &sc->rge_bhandle, NULL, &sc->rge_bsize, 0)) {
178 			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
179 			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
180 			    &sc->rge_bsize, 0)) {
181 				printf(": can't map mem or i/o space\n");
182 				return;
183 			}
184 		}
185 	}
186 
187 	/*
188 	 * Allocate interrupt.
189 	 */
190 	if (pci_intr_map_msi(pa, &ih) == 0)
191 		sc->rge_flags |= RGE_FLAG_MSI;
192 	else if (pci_intr_map(pa, &ih) != 0) {
193 		printf(": couldn't map interrupt\n");
194 		return;
195 	}
196 	intrstr = pci_intr_string(pc, ih);
197 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE, rge_intr,
198 	    sc, sc->sc_dev.dv_xname);
199 	if (sc->sc_ih == NULL) {
200 		printf(": couldn't establish interrupt");
201 		if (intrstr != NULL)
202 			printf(" at %s", intrstr);
203 		printf("\n");
204 		return;
205 	}
206 	printf(": %s", intrstr);
207 
208 	sc->sc_dmat = pa->pa_dmat;
209 	sc->sc_pc = pa->pa_pc;
210 	sc->sc_tag = pa->pa_tag;
211 
212 	/* Determine hardware revision */
213 	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
214 	switch (hwrev) {
215 	case 0x60800000:
216 		sc->rge_type = MAC_CFG2;
217 		break;
218 	case 0x60900000:
219 		sc->rge_type = MAC_CFG3;
220 		break;
221 	case 0x64000000:
222 		sc->rge_type = MAC_CFG4;
223 		break;
224 	case 0x64100000:
225 		sc->rge_type = MAC_CFG5;
226 		break;
227 	default:
228 		printf(": unknown version 0x%08x\n", hwrev);
229 		return;
230 	}
231 
232 	rge_config_imtype(sc, RGE_IMTYPE_SIM);
233 
234 	/*
235 	 * PCI Express check.
236 	 */
237 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
238 	    &offset, NULL)) {
239 		/* Disable PCIe ASPM and ECPM. */
240 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
241 		    offset + PCI_PCIE_LCSR);
242 		reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1 |
243 		    PCI_PCIE_LCSR_ECPM);
244 		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCI_PCIE_LCSR,
245 		    reg);
246 	}
247 
248 	rge_exit_oob(sc);
249 	rge_hw_init(sc);
250 
251 	rge_get_macaddr(sc, eaddr);
252 	printf(", address %s\n", ether_sprintf(eaddr));
253 
254 	memcpy(sc->sc_arpcom.ac_enaddr, eaddr, ETHER_ADDR_LEN);
255 
256 	rge_set_phy_power(sc, 1);
257 	rge_phy_config(sc);
258 
259 	if (rge_allocmem(sc))
260 		return;
261 
262 	ifp = &sc->sc_arpcom.ac_if;
263 	ifp->if_softc = sc;
264 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
265 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
266 	ifp->if_xflags = IFXF_MPSAFE;
267 	ifp->if_ioctl = rge_ioctl;
268 	ifp->if_qstart = rge_start;
269 	ifp->if_watchdog = rge_watchdog;
270 	ifq_set_maxlen(&ifp->if_snd, RGE_TX_LIST_CNT - 1);
271 	ifp->if_hardmtu = RGE_JUMBO_MTU;
272 
273 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
274 	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
275 
276 #if NVLAN > 0
277 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
278 #endif
279 
280 #ifndef SMALL_KERNEL
281 	ifp->if_capabilities |= IFCAP_WOL;
282 	ifp->if_wol = rge_wol;
283 	rge_wol(ifp, 0);
284 #endif
285 	timeout_set(&sc->sc_timeout, rge_tick, sc);
286 	task_set(&sc->sc_task, rge_txstart, sc);
287 
288 	/* Initialize ifmedia structures. */
289 	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
290 	    rge_ifmedia_sts);
291 	rge_add_media_types(sc);
292 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
293 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
294 	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
295 
296 	if_attach(ifp);
297 	ether_ifattach(ifp);
298 }
299 
300 int
301 rge_activate(struct device *self, int act)
302 {
303 #ifndef SMALL_KERNEL
304 	struct rge_softc *sc = (struct rge_softc *)self;
305 #endif
306 	int rv = 0;
307 
308 	switch (act) {
309 	case DVACT_POWERDOWN:
310 		rv = config_activate_children(self, act);
311 #ifndef SMALL_KERNEL
312 		rge_wol_power(sc);
313 #endif
314 		break;
315 	default:
316 		rv = config_activate_children(self, act);
317 		break;
318 	}
319 	return (rv);
320 }
321 
322 int
323 rge_intr(void *arg)
324 {
325 	struct rge_softc *sc = arg;
326 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
327 	uint32_t status;
328 	int claimed = 0, rx, tx;
329 
330 	if (!(ifp->if_flags & IFF_RUNNING))
331 		return (0);
332 
333 	/* Disable interrupts. */
334 	RGE_WRITE_4(sc, RGE_IMR, 0);
335 
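	/*
	 * With legacy INTx the line may be shared with other devices,
	 * so only proceed if the ISR actually shows one of our events;
	 * an MSI vector is exclusive, so the check is skipped.
	 */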
336 	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
337 		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
338 			return (0);
339 	}
340 
341 	status = RGE_READ_4(sc, RGE_ISR);
342 	if (status)
343 		RGE_WRITE_4(sc, RGE_ISR, status);
344 
345 	if (status & RGE_ISR_PCS_TIMEOUT)
346 		claimed = 1;
347 
348 	rx = tx = 0;
349 	if (status & sc->rge_intrs) {
350 		if (status &
351 		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
352 			rx |= rge_rxeof(sc);
353 			claimed = 1;
354 		}
355 
356 		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
357 			tx |= rge_txeof(sc);
358 			claimed = 1;
359 		}
360 
361 		if (status & RGE_ISR_SYSTEM_ERR) {
362 			KERNEL_LOCK();
363 			rge_init(ifp);
364 			KERNEL_UNLOCK();
365 			claimed = 1;
366 		}
367 	}
368 
369 	if (sc->rge_timerintr) {
370 		if ((tx | rx) == 0) {
371 			/*
372 			 * Nothing needs to be processed; fall back
373 			 * to plain TX/RX interrupts.
374 			 */
375 			rge_setup_intr(sc, RGE_IMTYPE_NONE);
376 
377 			/*
378 			 * Collect completions once more, mainly to
379 			 * avoid the possible race introduced by
380 			 * changing interrupt masks.
381 			 */
382 			rge_rxeof(sc);
383 			rge_txeof(sc);
384 		} else
385 			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
386 	} else if (tx | rx) {
387 		/*
388 		 * Switch to simulated interrupt moderation (hardware
389 		 * timer based) on the assumption that it will reduce
390 		 * the interrupt rate.
391 		 */
392 		rge_setup_intr(sc, RGE_IMTYPE_SIM);
393 	}
394 
395 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
396 
397 	return (claimed);
398 }
399 
400 int
401 rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
402 {
403 	struct rge_tx_desc *d = NULL;
404 	struct rge_txq *txq;
405 	bus_dmamap_t txmap;
406 	uint32_t cmdsts, cflags = 0;
407 	int cur, error, i, last, nsegs;
408 
409 	/*
410 	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
411 	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
412 	 * take effect.
413 	 */
414 	if ((m->m_pkthdr.csum_flags &
415 	    (M_IPV4_CSUM_OUT | M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) != 0) {
416 		cflags |= RGE_TDEXTSTS_IPCSUM;
417 		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
418 			cflags |= RGE_TDEXTSTS_TCPCSUM;
419 		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
420 			cflags |= RGE_TDEXTSTS_UDPCSUM;
421 	}
422 
423 	txq = &sc->rge_ldata.rge_txq[idx];
424 	txmap = txq->txq_dmamap;
425 
426 	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
427 	switch (error) {
428 	case 0:
429 		break;
430 	case EFBIG: /* mbuf chain is too fragmented */
431 		if (m_defrag(m, M_DONTWAIT) == 0 &&
432 		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
433 		    BUS_DMA_NOWAIT) == 0)
434 			break;
435 
436 		/* FALLTHROUGH */
437 	default:
438 		return (0);
439 	}
440 
441 	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
442 	    BUS_DMASYNC_PREWRITE);
443 
444 	nsegs = txmap->dm_nsegs;
445 
446 	/* Set up hardware VLAN tagging. */
447 #if NVLAN > 0
448 	if (m->m_flags & M_VLANTAG)
449 		cflags |= swap16(m->m_pkthdr.ether_vtag) | RGE_TDEXTSTS_VTAG;
450 #endif
451 
452 	cur = idx;
453 	cmdsts = RGE_TDCMDSTS_SOF;
454 
455 	for (i = 0; i < txmap->dm_nsegs; i++) {
456 		d = &sc->rge_ldata.rge_tx_list[cur];
457 
458 		d->rge_extsts = htole32(cflags);
459 		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
460 		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));
461 
462 		cmdsts |= txmap->dm_segs[i].ds_len;
463 
464 		if (cur == RGE_TX_LIST_CNT - 1)
465 			cmdsts |= RGE_TDCMDSTS_EOR;
466 
467 		d->rge_cmdsts = htole32(cmdsts);
468 
469 		last = cur;
470 		cmdsts = RGE_TDCMDSTS_OWN;
471 		cur = RGE_NEXT_TX_DESC(cur);
472 	}
473 
474 	/* Set EOF on the last descriptor. */
475 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);
476 
477 	/* Transfer ownership of packet to the chip. */
478 	d = &sc->rge_ldata.rge_tx_list[idx];
479 
480 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);
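	/*
	 * Note the ordering: every descriptor after the first already
	 * carries RGE_TDCMDSTS_OWN, so setting OWN on the start-of-frame
	 * descriptor last ensures the chip never sees a partially built
	 * chain.
	 */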
481 
482 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
483 	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
484 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
485 
486 	/* Update info of TX queue and descriptors. */
487 	txq->txq_mbuf = m;
488 	txq->txq_descidx = last;
489 
490 	return (nsegs);
491 }
492 
493 int
494 rge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
495 {
496 	struct rge_softc *sc = ifp->if_softc;
497 	struct ifreq *ifr = (struct ifreq *)data;
498 	int s, error = 0;
499 
500 	s = splnet();
501 
502 	switch (cmd) {
503 	case SIOCSIFADDR:
504 		ifp->if_flags |= IFF_UP;
505 		if (!(ifp->if_flags & IFF_RUNNING))
506 			rge_init(ifp);
507 		break;
508 	case SIOCSIFFLAGS:
509 		if (ifp->if_flags & IFF_UP) {
510 			if (ifp->if_flags & IFF_RUNNING)
511 				error = ENETRESET;
512 			else
513 				rge_init(ifp);
514 		} else {
515 			if (ifp->if_flags & IFF_RUNNING)
516 				rge_stop(ifp);
517 		}
518 		break;
519 	case SIOCGIFMEDIA:
520 	case SIOCSIFMEDIA:
521 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
522 		break;
523 	case SIOCSIFMTU:
524 		if (ifr->ifr_mtu > ifp->if_hardmtu) {
525 			error = EINVAL;
526 			break;
527 		}
528 		ifp->if_mtu = ifr->ifr_mtu;
529 		break;
530 	case SIOCGIFRXR:
531 		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
532 		    NULL, RGE_JUMBO_FRAMELEN, &sc->rge_ldata.rge_rx_ring);
533 		break;
534 	default:
535 		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
536 	}
537 
538 	if (error == ENETRESET) {
539 		if (ifp->if_flags & IFF_RUNNING)
540 			rge_iff(sc);
541 		error = 0;
542 	}
543 
544 	splx(s);
545 	return (error);
546 }
547 
548 void
549 rge_start(struct ifqueue *ifq)
550 {
551 	struct ifnet *ifp = ifq->ifq_if;
552 	struct rge_softc *sc = ifp->if_softc;
553 	struct mbuf *m;
554 	int free, idx, used;
555 	int queued = 0;
556 
557 	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
558 		ifq_purge(ifq);
559 		return;
560 	}
561 
562 	/* Calculate free space. */
563 	idx = sc->rge_ldata.rge_txq_prodidx;
564 	free = sc->rge_ldata.rge_txq_considx;
565 	if (free <= idx)
566 		free += RGE_TX_LIST_CNT;
567 	free -= idx;
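	/*
	 * Worked example (hypothetical 8-entry ring): with prodidx 6
	 * and considx 2, free = 2 + 8 - 6 = 4 descriptors available.
	 */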
568 
569 	for (;;) {
570 		if (RGE_TX_NSEGS >= free + 2) {
571 			ifq_set_oactive(&ifp->if_snd);
572 			break;
573 		}
574 
575 		m = ifq_dequeue(ifq);
576 		if (m == NULL)
577 			break;
578 
579 		used = rge_encap(sc, m, idx);
580 		if (used == 0) {
581 			m_freem(m);
582 			continue;
583 		}
584 
585 		KASSERT(used <= free);
586 		free -= used;
587 
588 #if NBPFILTER > 0
589 		if (ifp->if_bpf)
590 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
591 #endif
592 
593 		idx += used;
594 		if (idx >= RGE_TX_LIST_CNT)
595 			idx -= RGE_TX_LIST_CNT;
596 
597 		queued++;
598 	}
599 
600 	if (queued == 0)
601 		return;
602 
603 	/* Set a timeout in case the chip goes out to lunch. */
604 	ifp->if_timer = 5;
605 
606 	sc->rge_ldata.rge_txq_prodidx = idx;
607 	ifq_serialize(ifq, &sc->sc_task);
608 }
609 
610 void
611 rge_watchdog(struct ifnet *ifp)
612 {
613 	struct rge_softc *sc = ifp->if_softc;
614 
615 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
616 	ifp->if_oerrors++;
617 
618 	rge_init(ifp);
619 }
620 
621 int
622 rge_init(struct ifnet *ifp)
623 {
624 	struct rge_softc *sc = ifp->if_softc;
625 	uint32_t val;
626 	int i;
627 
628 	rge_stop(ifp);
629 
630 	/* Set MAC address. */
631 	rge_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);
632 
633 	/* Set maximum frame size. */
634 	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);
635 
636 	/* Initialize RX and TX descriptor lists. */
637 	rge_rx_list_init(sc);
638 	rge_tx_list_init(sc);
639 
640 	/* Load the addresses of the RX and TX lists into the chip. */
641 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
642 	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
643 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
644 	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
645 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
646 	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
647 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
648 	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
649 
650 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
651 
652 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
653 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
654 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
655 	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);
656 
657 	/* Clear interrupt moderation timer. */
658 	for (i = 0; i < 64; i++)
659 		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);
660 
661 	/* Set the initial RX and TX configurations. */
662 	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
663 	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);
664 
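	/*
	 * The CSI and MAC OCP accesses below are undocumented magic
	 * numbers, apparently derived from Realtek reference code; the
	 * register meanings are not publicly documented.
	 */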
665 	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
666 	rge_write_csi(sc, 0x70c, val | 0x27000000);
667 
668 	/* Enable hardware optimization function. */
669 	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
670 	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);
671 
672 	RGE_WRITE_2(sc, 0x0382, 0x221b);
673 	RGE_WRITE_1(sc, 0x4500, 0);
674 	RGE_WRITE_2(sc, 0x4800, 0);
675 	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);
676 
677 	rge_write_mac_ocp(sc, 0xc140, 0xffff);
678 	rge_write_mac_ocp(sc, 0xc142, 0xffff);
679 
680 	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
681 	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);
682 
683 	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
684 	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
685 	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);
686 
687 	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
688 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
689 		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
690 	else
691 		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);
692 
693 	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);
694 
695 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
696 		val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
697 		rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
698 	} else
699 		RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);
700 
701 	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);
702 
703 	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
704 	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);
705 
706 	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
707 	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);
708 
709 	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
710 	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);
711 
712 	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);
713 
714 	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);
715 
716 	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);
717 
718 	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
719 	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);
720 
721 	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
722 	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);
723 
724 	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
725 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);
726 
727 	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
728 	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);
729 
730 	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
731 	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);
732 
733 	val = rge_read_mac_ocp(sc, 0xe84c) & ~0x0040;
734 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
735 		rge_write_mac_ocp(sc, 0xe84c, 0x00c0);
736 	else
737 		rge_write_mac_ocp(sc, 0xe84c, 0x0080);
738 
739 	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN);
740 
741 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
742 		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);
743 
744 	/* Disable EEE plus. */
745 	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);
746 
747 	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);
748 
749 	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
750 	DELAY(1);
751 	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);
752 
753 	RGE_CLRBIT_4(sc, 0x1880, 0x0030);
754 
755 	rge_write_mac_ocp(sc, 0xe098, 0xc302);
756 
757 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
758 		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
759 
760 	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);
761 
762 	for (i = 0; i < 10; i++) {
763 		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
764 			break;
765 		DELAY(1000);
766 	}
767 
768 	/* Disable RXDV gate. */
769 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
770 	DELAY(2000);
771 
772 	rge_ifmedia_upd(ifp);
773 
774 	/* Enable transmit and receive. */
775 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);
776 
777 	/* Program promiscuous mode and multicast filters. */
778 	rge_iff(sc);
779 
780 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
781 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
782 
783 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
784 
785 	/* Enable interrupts. */
786 	rge_setup_intr(sc, RGE_IMTYPE_SIM);
787 
788 	ifp->if_flags |= IFF_RUNNING;
789 	ifq_clr_oactive(&ifp->if_snd);
790 
791 	timeout_add_sec(&sc->sc_timeout, 1);
792 
793 	return (0);
794 }
795 
796 /*
797  * Stop the adapter and free any mbufs allocated to the RX and TX lists.
798  */
799 void
800 rge_stop(struct ifnet *ifp)
801 {
802 	struct rge_softc *sc = ifp->if_softc;
803 	int i;
804 
805 	timeout_del(&sc->sc_timeout);
806 
807 	ifp->if_timer = 0;
808 	ifp->if_flags &= ~IFF_RUNNING;
809 	sc->rge_timerintr = 0;
810 
811 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
812 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
813 	    RGE_RXCFG_ERRPKT);
814 
815 	RGE_WRITE_4(sc, RGE_IMR, 0);
816 
817 	/* Clear timer interrupts. */
818 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
819 	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
820 	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
821 	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);
822 
823 	rge_reset(sc);
824 
825 	intr_barrier(sc->sc_ih);
826 	ifq_barrier(&ifp->if_snd);
827 	ifq_clr_oactive(&ifp->if_snd);
828 
829 	if (sc->rge_head != NULL) {
830 		m_freem(sc->rge_head);
831 		sc->rge_head = sc->rge_tail = NULL;
832 	}
833 
834 	/* Free the TX list buffers. */
835 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
836 		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
837 			bus_dmamap_unload(sc->sc_dmat,
838 			    sc->rge_ldata.rge_txq[i].txq_dmamap);
839 			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
840 			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
841 		}
842 	}
843 
844 	/* Free the RX list buffers. */
845 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
846 		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
847 			bus_dmamap_unload(sc->sc_dmat,
848 			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
849 			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
850 			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
851 		}
852 	}
853 }
854 
855 /*
856  * Set media options.
857  */
858 int
859 rge_ifmedia_upd(struct ifnet *ifp)
860 {
861 	struct rge_softc *sc = ifp->if_softc;
862 	struct ifmedia *ifm = &sc->sc_media;
863 	int anar, gig, val;
864 
865 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
866 		return (EINVAL);
867 
868 	/* Disable Gigabit Lite. */
869 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
870 	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
871 
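	/*
	 * 2500baseT advertisement does not live in the standard
	 * ANAR/GTCR MII registers; it is controlled through PHY OCP
	 * register 0xa5d4 and written back separately below.
	 */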
872 	val = rge_read_phy_ocp(sc, 0xa5d4);
873 	val &= ~RGE_ADV_2500TFDX;
874 
875 	anar = gig = 0;
876 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
877 	case IFM_AUTO:
878 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
879 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
880 		val |= RGE_ADV_2500TFDX;
881 		break;
882 	case IFM_2500_T:
883 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
884 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
885 		val |= RGE_ADV_2500TFDX;
886 		ifp->if_baudrate = IF_Mbps(2500);
887 		break;
888 	case IFM_1000_T:
889 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
890 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
891 		ifp->if_baudrate = IF_Gbps(1);
892 		break;
893 	case IFM_100_TX:
894 		gig = rge_read_phy(sc, 0, MII_100T2CR) &
895 		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
896 		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
897 		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
898 		    ANAR_TX | ANAR_10_FD | ANAR_10;
899 		ifp->if_baudrate = IF_Mbps(100);
900 		break;
901 	case IFM_10_T:
902 		gig = rge_read_phy(sc, 0, MII_100T2CR) &
903 		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
904 		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
905 		    ANAR_10_FD | ANAR_10 : ANAR_10;
906 		ifp->if_baudrate = IF_Mbps(10);
907 		break;
908 	default:
909 		printf("%s: unsupported media type\n", sc->sc_dev.dv_xname);
910 		return (EINVAL);
911 	}
912 
913 	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
914 	rge_write_phy(sc, 0, MII_100T2CR, gig);
915 	rge_write_phy_ocp(sc, 0xa5d4, val);
916 	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
917 	    BMCR_STARTNEG);
918 
919 	return (0);
920 }
921 
922 /*
923  * Report current media status.
924  */
925 void
926 rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
927 {
928 	struct rge_softc *sc = ifp->if_softc;
929 	uint16_t status = 0;
930 
931 	ifmr->ifm_status = IFM_AVALID;
932 	ifmr->ifm_active = IFM_ETHER;
933 
934 	if (rge_get_link_status(sc)) {
935 		ifmr->ifm_status |= IFM_ACTIVE;
936 
937 		status = RGE_READ_2(sc, RGE_PHYSTAT);
938 		if ((status & RGE_PHYSTAT_FDX) ||
939 		    (status & RGE_PHYSTAT_2500MBPS))
940 			ifmr->ifm_active |= IFM_FDX;
941 		else
942 			ifmr->ifm_active |= IFM_HDX;
943 
944 		if (status & RGE_PHYSTAT_10MBPS)
945 			ifmr->ifm_active |= IFM_10_T;
946 		else if (status & RGE_PHYSTAT_100MBPS)
947 			ifmr->ifm_active |= IFM_100_TX;
948 		else if (status & RGE_PHYSTAT_1000MBPS)
949 			ifmr->ifm_active |= IFM_1000_T;
950 		else if (status & RGE_PHYSTAT_2500MBPS)
951 			ifmr->ifm_active |= IFM_2500_T;
952 	}
953 }
954 
955 /*
956  * Allocate memory for RX/TX rings.
957  */
958 int
959 rge_allocmem(struct rge_softc *sc)
960 {
961 	int error, i;
962 
963 	/* Allocate DMA'able memory for the TX ring. */
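	/*
	 * The standard bus_dma(9) sequence: create a map, allocate
	 * DMA-safe memory, map it into kernel virtual address space,
	 * then load the map to obtain the ring's bus address.
	 */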
964 	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
965 	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
966 	if (error) {
967 		printf("%s: can't create TX list map\n", sc->sc_dev.dv_xname);
968 		return (error);
969 	}
970 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
971 	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
972 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
973 	if (error) {
974 		printf("%s: can't alloc TX list\n", sc->sc_dev.dv_xname);
975 		return (error);
976 	}
977 
978 	/* Map the TX ring into kernel memory and load its DMA map. */
979 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
980 	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
981 	    (caddr_t *)&sc->rge_ldata.rge_tx_list,
982 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
983 	if (error) {
984 		printf("%s: can't map TX dma buffers\n", sc->sc_dev.dv_xname);
985 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
986 		    sc->rge_ldata.rge_tx_listnseg);
987 		return (error);
988 	}
989 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
990 	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
991 	if (error) {
992 		printf("%s: can't load TX dma map\n", sc->sc_dev.dv_xname);
993 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
994 		bus_dmamem_unmap(sc->sc_dmat,
995 		    (caddr_t)sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
996 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
997 		    sc->rge_ldata.rge_tx_listnseg);
998 		return (error);
999 	}
1000 
1001 	/* Create DMA maps for TX buffers. */
1002 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
1003 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
1004 		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
1005 		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
1006 		if (error) {
1007 			printf("%s: can't create DMA map for TX\n",
1008 			    sc->sc_dev.dv_xname);
1009 			return (error);
1010 		}
1011 	}
1012 
1013 	/* Allocate DMA'able memory for the RX ring. */
1014 	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
1015 	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
1016 	if (error) {
1017 		printf("%s: can't create RX list map\n", sc->sc_dev.dv_xname);
1018 		return (error);
1019 	}
1020 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
1021 	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
1022 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1023 	if (error) {
1024 		printf("%s: can't alloc RX list\n", sc->sc_dev.dv_xname);
1025 		return (error);
1026 	}
1027 
1028 	/* Map the RX ring into kernel memory and load its DMA map. */
1029 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
1030 	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
1031 	    (caddr_t *)&sc->rge_ldata.rge_rx_list,
1032 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1033 	if (error) {
1034 		printf("%s: can't map RX dma buffers\n", sc->sc_dev.dv_xname);
1035 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
1036 		    sc->rge_ldata.rge_rx_listnseg);
1037 		return (error);
1038 	}
1039 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1040 	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
1041 	if (error) {
1042 		printf("%s: can't load RX dma map\n", sc->sc_dev.dv_xname);
1043 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
1044 		bus_dmamem_unmap(sc->sc_dmat,
1045 		    (caddr_t)sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
1046 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
1047 		    sc->rge_ldata.rge_rx_listnseg);
1048 		return (error);
1049 	}
1050 
1051 	/* Create DMA maps for RX buffers. */
1052 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
1053 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
1054 		    RGE_JUMBO_FRAMELEN, 0, 0,
1055 		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
1056 		if (error) {
1057 			printf("%s: can't create DMA map for RX\n",
1058 			    sc->sc_dev.dv_xname);
1059 			return (error);
1060 		}
1061 	}
1062 
1063 	return (error);
1064 }
1065 
1066 /*
1067  * Initialize the RX descriptor and attach an mbuf cluster.
1068  */
1069 int
1070 rge_newbuf(struct rge_softc *sc)
1071 {
1072 	struct mbuf *m;
1073 	struct rge_rx_desc *r;
1074 	struct rge_rxq *rxq;
1075 	bus_dmamap_t rxmap;
1076 	int idx;
1077 
1078 	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
1079 	if (m == NULL)
1080 		return (ENOBUFS);
1081 
1082 	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;
1083 
1084 	idx = sc->rge_ldata.rge_rxq_prodidx;
1085 	rxq = &sc->rge_ldata.rge_rxq[idx];
1086 	rxmap = rxq->rxq_dmamap;
1087 
1088 	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT)) {
1089 		m_freem(m);
1090 		return (ENOBUFS);
1091 	}
1092 
1093 	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
1094 	    BUS_DMASYNC_PREREAD);
1095 
1096 	/* Map the segments into RX descriptors. */
1097 	r = &sc->rge_ldata.rge_rx_list[idx];
1098 
1099 	if (RGE_OWN(r)) {
1100 		printf("%s: tried to map busy RX descriptor\n",
1101 		    sc->sc_dev.dv_xname);
1102 		m_freem(m);
1103 		return (ENOBUFS);
1104 	}
1105 
1106 	rxq->rxq_mbuf = m;
1107 
1108 	r->rge_extsts = 0;
1109 	r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
1110 	r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));
1111 
1112 	r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
1113 	if (idx == RGE_RX_LIST_CNT - 1)
1114 		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
1115 
1116 	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
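	/* OWN is set last, handing the completed descriptor to the chip. */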
1117 
1118 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1119 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1120 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1121 
1122 	sc->rge_ldata.rge_rxq_prodidx = RGE_NEXT_RX_DESC(idx);
1123 
1124 	return (0);
1125 }
1126 
1127 void
1128 rge_discard_rxbuf(struct rge_softc *sc, int idx)
1129 {
1130 	struct rge_rx_desc *r;
1131 
1132 	r = &sc->rge_ldata.rge_rx_list[idx];
1133 
1134 	r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
1135 	r->rge_extsts = 0;
1136 	if (idx == RGE_RX_LIST_CNT - 1)
1137 		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
1138 	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
1139 
1140 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1141 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1142 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1143 }
1144 
1145 void
1146 rge_rx_list_init(struct rge_softc *sc)
1147 {
1148 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
1149 
1150 	sc->rge_ldata.rge_rxq_prodidx = sc->rge_ldata.rge_rxq_considx = 0;
1151 	sc->rge_head = sc->rge_tail = NULL;
1152 
1153 	if_rxr_init(&sc->rge_ldata.rge_rx_ring, 2, RGE_RX_LIST_CNT - 1);
1154 	rge_fill_rx_ring(sc);
1155 }
1156 
1157 void
1158 rge_fill_rx_ring(struct rge_softc *sc)
1159 {
1160 	struct if_rxring *rxr = &sc->rge_ldata.rge_rx_ring;
1161 	int slots;
1162 
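
	/*
	 * if_rxr_get() grants at most RGE_RX_LIST_CNT slots; whatever
	 * could not be filled is handed back via if_rxr_put() below.
	 */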
1163 	for (slots = if_rxr_get(rxr, RGE_RX_LIST_CNT); slots > 0; slots--) {
1164 		if (rge_newbuf(sc) == ENOBUFS)
1165 			break;
1166 	}
1167 	if_rxr_put(rxr, slots);
1168 }
1169 
1170 void
1171 rge_tx_list_init(struct rge_softc *sc)
1172 {
1173 	int i;
1174 
1175 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
1176 
1177 	for (i = 0; i < RGE_TX_LIST_CNT; i++)
1178 		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
1179 
1180 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
1181 	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
1182 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1183 
1184 	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
1185 }
1186 
1187 int
1188 rge_rxeof(struct rge_softc *sc)
1189 {
1190 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1191 	struct mbuf *m;
1192 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1193 	struct if_rxring *rxr = &sc->rge_ldata.rge_rx_ring;
1194 	struct rge_rx_desc *cur_rx;
1195 	struct rge_rxq *rxq;
1196 	uint32_t rxstat, extsts;
1197 	int i, total_len, rx = 0;
1198 
1199 	for (i = sc->rge_ldata.rge_rxq_considx; if_rxr_inuse(rxr) > 0;
1200 	    i = RGE_NEXT_RX_DESC(i)) {
1201 		/* Invalidate the descriptor memory. */
1202 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1203 		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1204 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1205 
1206 		cur_rx = &sc->rge_ldata.rge_rx_list[i];
1207 
1208 		if (RGE_OWN(cur_rx))
1209 			break;
1210 
1211 		rxstat = letoh32(cur_rx->rge_cmdsts);
1212 		extsts = letoh32(cur_rx->rge_extsts);
1213 
1214 		total_len = RGE_RXBYTES(cur_rx);
1215 		rxq = &sc->rge_ldata.rge_rxq[i];
1216 		m = rxq->rxq_mbuf;
1217 		rxq->rxq_mbuf = NULL;
1218 		if_rxr_put(rxr, 1);
1219 		rx = 1;
1220 
1221 		/* Invalidate the RX mbuf and unload its map. */
1222 		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
1223 		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1224 		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);
1225 
1226 		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
1227 		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
1228 			rge_discard_rxbuf(sc, i);
1229 			continue;
1230 		}
1231 
1232 		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
1233 			ifp->if_ierrors++;
1234 			/*
1235 			 * If this is part of a multi-fragment packet,
1236 			 * discard all the pieces.
1237 			 */
1238 			if (sc->rge_head != NULL) {
1239 				m_freem(sc->rge_head);
1240 				sc->rge_head = sc->rge_tail = NULL;
1241 			}
1242 			rge_discard_rxbuf(sc, i);
1243 			continue;
1244 		}
1245 
1246 		if (sc->rge_head != NULL) {
1247 			m->m_len = total_len;
1248 			/*
1249 			 * Special case: if there are 4 bytes or fewer
1250 			 * in this buffer, the mbuf can be discarded:
1251 			 * the last 4 bytes are the CRC, which we don't
1252 			 * care about anyway.
1253 			 */
1254 			if (m->m_len <= ETHER_CRC_LEN) {
1255 				sc->rge_tail->m_len -=
1256 				    (ETHER_CRC_LEN - m->m_len);
1257 				m_freem(m);
1258 			} else {
1259 				m->m_len -= ETHER_CRC_LEN;
1260 				m->m_flags &= ~M_PKTHDR;
1261 				sc->rge_tail->m_next = m;
1262 			}
1263 			m = sc->rge_head;
1264 			sc->rge_head = sc->rge_tail = NULL;
1265 			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1266 		} else
1267 			m->m_pkthdr.len = m->m_len =
1268 			    (total_len - ETHER_CRC_LEN);
1269 
1270 		/* Check IP header checksum. */
1271 		if (!(rxstat & RGE_RDCMDSTS_IPCSUMERR) &&
1272 		    (extsts & RGE_RDEXTSTS_IPV4))
1273 			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
1274 
1275 		/* Check TCP/UDP checksum. */
1276 		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
1277 		    (((rxstat & RGE_RDCMDSTS_TCPPKT) &&
1278 		    !(rxstat & RGE_RDCMDSTS_TCPCSUMERR)) ||
1279 		    ((rxstat & RGE_RDCMDSTS_UDPPKT) &&
1280 		    !(rxstat & RGE_RDCMDSTS_UDPCSUMERR))))
1281 			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
1282 			    M_UDP_CSUM_IN_OK;
1283 
1284 #if NVLAN > 0
1285 		if (extsts & RGE_RDEXTSTS_VTAG) {
1286 			m->m_pkthdr.ether_vtag =
1287 			    ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
1288 			m->m_flags |= M_VLANTAG;
1289 		}
1290 #endif
1291 
1292 		ml_enqueue(&ml, m);
1293 	}
1294 
1295 	if (ifiq_input(&ifp->if_rcv, &ml))
1296 		if_rxr_livelocked(rxr);
1297 
1298 	sc->rge_ldata.rge_rxq_considx = i;
1299 	rge_fill_rx_ring(sc);
1302 
1303 	return (rx);
1304 }
1305 
1306 int
1307 rge_txeof(struct rge_softc *sc)
1308 {
1309 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1310 	struct rge_txq *txq;
1311 	uint32_t txstat;
1312 	int cons, idx, prod;
1313 	int free = 0;
1314 
1315 	prod = sc->rge_ldata.rge_txq_prodidx;
1316 	cons = sc->rge_ldata.rge_txq_considx;
1317 
1318 	while (prod != cons) {
1319 		txq = &sc->rge_ldata.rge_txq[cons];
1320 		idx = txq->txq_descidx;
1321 
1322 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
1323 		    idx * sizeof(struct rge_tx_desc),
1324 		    sizeof(struct rge_tx_desc),
1325 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1326 
1327 		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);
1328 
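		/*
		 * Stop at the first descriptor the chip still owns.
		 * free == 2 makes the tail of this function reschedule
		 * the transmit task so the rest is reaped later.
		 */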
1329 		if (txstat & RGE_TDCMDSTS_OWN) {
1330 			free = 2;
1331 			break;
1332 		}
1333 
1334 		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
1335 		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1336 		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
1337 		m_freem(txq->txq_mbuf);
1338 		txq->txq_mbuf = NULL;
1339 
1340 		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
1341 			ifp->if_collisions++;
1342 		if (txstat & RGE_TDCMDSTS_TXERR)
1343 			ifp->if_oerrors++;
1344 
1345 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
1346 		    idx * sizeof(struct rge_tx_desc),
1347 		    sizeof(struct rge_tx_desc),
1348 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1349 
1350 		cons = RGE_NEXT_TX_DESC(idx);
1351 		free = 1;
1352 	}
1353 
1354 	if (free == 0)
1355 		return (0);
1356 
1357 	sc->rge_ldata.rge_txq_considx = cons;
1358 
1359 	if (ifq_is_oactive(&ifp->if_snd))
1360 		ifq_restart(&ifp->if_snd);
1361 	else if (free == 2)
1362 		ifq_serialize(&ifp->if_snd, &sc->sc_task);
1363 	else
1364 		ifp->if_timer = 0;
1365 
1366 	return (1);
1367 }
1368 
1369 void
1370 rge_reset(struct rge_softc *sc)
1371 {
1372 	int i;
1373 
1374 	/* Enable RXDV gate. */
1375 	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
1376 	DELAY(2000);
1377 
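	/* Poll up to 150ms (3000 * 50us) for the TX/RX FIFOs to drain. */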
1378 	for (i = 0; i < 3000; i++) {
1379 		DELAY(50);
1380 		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
1381 		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
1382 		    RGE_MCUCMD_TXFIFO_EMPTY))
1383 			break;
1384 	}
1385 	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
1386 		for (i = 0; i < 3000; i++) {
1387 			DELAY(50);
1388 			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
1389 				break;
1390 		}
1391 	}
1392 
1393 	DELAY(2000);
1394 
1395 	/* Soft reset. */
1396 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);
1397 
1398 	for (i = 0; i < RGE_TIMEOUT; i++) {
1399 		DELAY(100);
1400 		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
1401 			break;
1402 	}
1403 	if (i == RGE_TIMEOUT)
1404 		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
1405 }
1406 
1407 void
1408 rge_iff(struct rge_softc *sc)
1409 {
1410 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1411 	struct arpcom *ac = &sc->sc_arpcom;
1412 	struct ether_multi *enm;
1413 	struct ether_multistep step;
1414 	uint32_t hashes[2];
1415 	uint32_t rxfilt;
1416 	int h = 0;
1417 
1418 	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
1419 	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
1420 	ifp->if_flags &= ~IFF_ALLMULTI;
1421 
1422 	/*
1423 	 * Always accept frames destined to our station address.
1424 	 * Always accept broadcast frames.
1425 	 */
1426 	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;
1427 
1428 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1429 		ifp->if_flags |= IFF_ALLMULTI;
1430 		rxfilt |= RGE_RXCFG_MULTI;
1431 		if (ifp->if_flags & IFF_PROMISC)
1432 			rxfilt |= RGE_RXCFG_ALLPHYS;
1433 		hashes[0] = hashes[1] = 0xffffffff;
1434 	} else {
1435 		rxfilt |= RGE_RXCFG_MULTI;
1436 		/* Program new filter. */
1437 		memset(hashes, 0, sizeof(hashes));
1438 
1439 		ETHER_FIRST_MULTI(step, ac, enm);
1440 		while (enm != NULL) {
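			/*
			 * The top 6 bits of the big-endian CRC32 of the
			 * address select one of 64 hash bits, split
			 * across the RGE_MAR0/RGE_MAR4 registers below.
			 */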
1441 			h = ether_crc32_be(enm->enm_addrlo,
1442 			    ETHER_ADDR_LEN) >> 26;
1443 
1444 			if (h < 32)
1445 				hashes[0] |= (1 << h);
1446 			else
1447 				hashes[1] |= (1 << (h - 32));
1448 
1449 			ETHER_NEXT_MULTI(step, enm);
1450 		}
1451 	}
1452 
1453 	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
1454 	RGE_WRITE_4(sc, RGE_MAR0, swap32(hashes[1]));
1455 	RGE_WRITE_4(sc, RGE_MAR4, swap32(hashes[0]));
1456 }
1457 
1458 void
1459 rge_set_phy_power(struct rge_softc *sc, int on)
1460 {
1461 	int i;
1462 
1463 	if (on) {
1464 		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);
1465 
1466 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);
1467 
1468 		for (i = 0; i < RGE_TIMEOUT; i++) {
1469 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
1470 				break;
1471 			DELAY(1000);
1472 		}
1473 	} else {
1474 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
1475 		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
1476 		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
1477 	}
1478 }
1479 
1480 void
1481 rge_phy_config(struct rge_softc *sc)
1482 {
1483 	/* Read microcode version. */
1484 	rge_write_phy_ocp(sc, 0xa436, 0x801e);
1485 	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);
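	/*
	 * rge_phy_config_mcu() compares this version against the
	 * expected per-chip RGE_MAC_CFGx_MCODE_VER and only reloads
	 * the PHY microcode when they differ.
	 */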
1486 
1487 	switch (sc->rge_type) {
1488 	case MAC_CFG2:
1489 		rge_phy_config_mac_cfg2(sc);
1490 		break;
1491 	case MAC_CFG3:
1492 		rge_phy_config_mac_cfg3(sc);
1493 		break;
1494 	case MAC_CFG4:
1495 		rge_phy_config_mac_cfg4(sc);
1496 		break;
1497 	case MAC_CFG5:
1498 		rge_phy_config_mac_cfg5(sc);
1499 		break;
1500 	default:
1501 		break;	/* Can't happen. */
1502 	}
1503 
1504 	rge_write_phy(sc, 0x0a5b, 0x12,
1505 	    rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);
1506 
1507 	/* Disable EEE. */
1508 	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
1509 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
1510 		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
1511 		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
1512 	}
1513 	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
1514 	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
1515 	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
1516 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
1517 	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);
1518 
1519 	rge_patch_phy_mcu(sc, 1);
1520 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
1521 	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
1522 	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
1523 	rge_patch_phy_mcu(sc, 0);
1524 }
1525 
1526 void
1527 rge_phy_config_mac_cfg2(struct rge_softc *sc)
1528 {
1529 	uint16_t val;
1530 	int i;
1531 
1532 	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
1533 		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
1534 		    rtl8125_mac_cfg2_ephy[i].val);
1535 
1536 	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);
1537 
1538 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
1539 	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
1540 	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
1541 	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
1542 	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
1543 	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
1544 	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
1545 	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
1546 	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
1547 	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
1548 	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
1549 	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
1550 	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);
1551 
1552 	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
1553 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1554 	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
1555 	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
1556 	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
1557 	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
1558 	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
1559 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1560 	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
1561 	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
1562 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1563 	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
1564 	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
1565 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1566 	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
1567 	rge_write_phy_ocp(sc, 0xa436, 0x8102);
1568 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1569 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1570 	rge_write_phy_ocp(sc, 0xa436, 0x8105);
1571 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1572 	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
1573 	rge_write_phy_ocp(sc, 0xa436, 0x8100);
1574 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1575 	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
1576 	rge_write_phy_ocp(sc, 0xa436, 0x8104);
1577 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1578 	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
1579 	rge_write_phy_ocp(sc, 0xa436, 0x8106);
1580 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1581 	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
1582 	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
1583 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1584 	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
1585 	rge_write_phy_ocp(sc, 0xa436, 0x80df);
1586 	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
1587 	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
1588 	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
1589 	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
1590 	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
1591 	rge_write_phy_ocp(sc, 0xa436, 0x819f);
1592 	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
1593 	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
1594 	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
1595 	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
1596 	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
1597 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1598 }
1599 
1600 void
1601 rge_phy_config_mac_cfg3(struct rge_softc *sc)
1602 {
1603 	uint16_t val;
1604 	int i;
1605 	static const uint16_t mac_cfg3_a438_value[] =
1606 	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
1607 	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };
1608 
1609 	static const uint16_t mac_cfg3_b88e_value[] =
1610 	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
1611 	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
1612 	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
1613 	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
1614 	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
1615 	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };
1616 
1617 	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
1618 		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
1619 		    rtl8125_mac_cfg3_ephy[i].val);
1620 
1621 	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
1622 	rge_write_ephy(sc, 0x002a, val | 0x3000);
1623 	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
1624 	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
1625 	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
1626 	rge_write_ephy(sc, 0x0002, 0x6042);
1627 	rge_write_ephy(sc, 0x0006, 0x0014);
1628 	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
1629 	rge_write_ephy(sc, 0x006a, val | 0x3000);
1630 	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
1631 	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
1632 	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
1633 	rge_write_ephy(sc, 0x0042, 0x6042);
1634 	rge_write_ephy(sc, 0x0046, 0x0014);
1635 
1636 	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);
1637 
1638 	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
1639 	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
1640 	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
1641 	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
1642 	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
1643 	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
1644 	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
1645 	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
1646 	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
1647 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
1648 	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
1649 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
1650 	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
1651 	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
1652 	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
1653 	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
1654 	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
1655 	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
1656 	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
1657 	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
1658 	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
1659 	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
1660 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1661 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
1662 	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
1663 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1664 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
1665 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, RGE_JUMBO_MTU + ETHER_HDR_LEN +
1666 	    32);
1667 	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
1668 	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1669 	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
1670 	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1671 
1672 	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
1673 	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
1674 		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
1675 	for (i = 0; i < 26; i++)
1676 		rge_write_phy_ocp(sc, 0xa438, 0);
1677 	rge_write_phy_ocp(sc, 0xa436, 0x8257);
1678 	rge_write_phy_ocp(sc, 0xa438, 0x020f);
1679 	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
1680 	rge_write_phy_ocp(sc, 0xa438, 0x7843);
1681 
1682 	rge_patch_phy_mcu(sc, 1);
1683 	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1684 	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1685 	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
1686 		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
1687 		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
1688 	}
1689 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1690 	rge_patch_phy_mcu(sc, 0);
1691 
1692 	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
1693 	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
1694 	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
1695 	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
1696 	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
1697 	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
1698 	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
1699 	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
1700 	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
1701 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1702 }
1703 
1704 void
1705 rge_phy_config_mac_cfg4(struct rge_softc *sc)
1706 {
1707 	uint16_t val;
1708 	int i;
1709 	static const uint16_t mac_cfg4_b87c_value[] =
1710 	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
1711 	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
1712 	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
1713 	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
1714 	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
1715 	      0x80b0, 0x0f31 };
1716 
1717 	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
1718 		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
1719 		    rtl8125_mac_cfg4_ephy[i].val);
1720 
1721 	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
1722 	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
1723 	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
1724 	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
1725 	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
1726 	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
1727 	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
1728 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1729 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);
1730 
1731 	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);
1732 
1733 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1734 	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
1735 	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
1736 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1737 	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
1738 	for (i = 0; i < 6; i++) {
1739 		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
1740 		if (i < 3)
1741 			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
1742 		else
1743 			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
1744 	}
1745 	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
1746 	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
1747 	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
1748 	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
1749 	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
1750 	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
1751 	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
1752 	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
1753 	rge_write_phy_ocp(sc, 0xb87c, 0x80f2);
1754 	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
1755 	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
1756 	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
1757 	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
1758 	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
1759 	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
1760 	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
1761 	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
1762 	rge_write_phy_ocp(sc, 0xac4e, 0x0db4);
1763 	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
1764 	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
1765 	rge_write_phy_ocp(sc, 0xad08, 0x0007);
1766 	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
1767 		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
1768 		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
1769 	}
1770 	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
1771 	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
1772 	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
1773 	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
1774 	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
1775 	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
1776 	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
1777 	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
1778 	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
1779 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, RGE_JUMBO_MTU + ETHER_HDR_LEN +
1780 	    32);
1781 	rge_write_phy_ocp(sc, 0xa436, 0x816c);
1782 	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
1783 	rge_write_phy_ocp(sc, 0xa436, 0x8170);
1784 	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
1785 	rge_write_phy_ocp(sc, 0xa436, 0x8174);
1786 	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
1787 	rge_write_phy_ocp(sc, 0xa436, 0x8178);
1788 	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
1789 	rge_write_phy_ocp(sc, 0xa436, 0x817c);
1790 	rge_write_phy_ocp(sc, 0xa438, 0x0719);
1791 	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
1792 	rge_write_phy_ocp(sc, 0xa438, 0x0400);
1793 	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
1794 	rge_write_phy_ocp(sc, 0xa438, 0x0404);
1795 	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
1796 	for (i = 0; i < 6; i++) {
1797 		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
1798 		if (i == 2)
1799 			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
1800 		else
1801 			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
1802 	}
1803 	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
1804 	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
1805 	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
1806 	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
1807 	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
1808 	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
1809 	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
1810 	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
1811 	rge_write_phy_ocp(sc, 0xa436, 0x8217);
1812 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1813 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1814 	rge_write_phy_ocp(sc, 0xa436, 0x821a);
1815 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1816 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1817 	rge_write_phy_ocp(sc, 0xa436, 0x80da);
1818 	rge_write_phy_ocp(sc, 0xa438, 0x0403);
1819 	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
1820 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1821 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
1822 	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
1823 	rge_write_phy_ocp(sc, 0xa438, 0x0384);
1824 	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
1825 	rge_write_phy_ocp(sc, 0xa438, 0x2007);
1826 	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
1827 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1828 	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
1829 	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
1830 	rge_write_phy_ocp(sc, 0xa438, 0xf009);
1831 	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
1832 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1833 	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
1834 	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
1835 	rge_write_phy_ocp(sc, 0xa438, 0xf083);
1836 	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
1837 	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
1838 	rge_write_phy_ocp(sc, 0xa436, 0x80df);
1839 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1840 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
1841 	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
1842 	rge_write_phy_ocp(sc, 0xa438, 0x2007);
1843 	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
1844 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1845 	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
1846 	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
1847 	rge_write_phy_ocp(sc, 0xa438, 0x8009);
1848 	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
1849 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1850 	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
1851 	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
1852 	rge_write_phy_ocp(sc, 0xa438, 0x200a);
1853 	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
1854 	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
1855 	rge_write_phy_ocp(sc, 0xa436, 0x809f);
1856 	rge_write_phy_ocp(sc, 0xa438, 0x6073);
1857 	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
1858 	rge_write_phy_ocp(sc, 0xa438, 0x000b);
1859 	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
1860 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1861 	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
1862 	rge_patch_phy_mcu(sc, 1);
1863 	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1864 	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1865 	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
1866 	rge_write_phy_ocp(sc, 0xb890, 0x0000);
1867 	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
1868 	rge_write_phy_ocp(sc, 0xb890, 0x0103);
1869 	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
1870 	rge_write_phy_ocp(sc, 0xb890, 0x0507);
1871 	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
1872 	rge_write_phy_ocp(sc, 0xb890, 0x090b);
1873 	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
1874 	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
1875 	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
1876 	rge_write_phy_ocp(sc, 0xb890, 0x1012);
1877 	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
1878 	rge_write_phy_ocp(sc, 0xb890, 0x1416);
1879 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1880 	rge_patch_phy_mcu(sc, 0);
1881 	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
1882 	RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
1883 	rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
1884 	rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
1885 	rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
1886 	rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
1887 	rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
1888 	RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
1889 	rge_write_phy_ocp(sc, 0xa436, 0x817d);
1890 	RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
1891 }
1892 
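/*
 * PHY tuning for the MAC_CFG5 variant; same undocumented address/data
 * write pattern as above, with values again lifted from Realtek's
 * reference driver.
 */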
1893 void
1894 rge_phy_config_mac_cfg5(struct rge_softc *sc)
1895 {
1896 	uint16_t val;
1897 	int i;
1898 
1899 	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
1900 		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
1901 		    rtl8125_mac_cfg5_ephy[i].val);
1902 
1903 	val = rge_read_ephy(sc, 0x0022) & ~0x0030;
1904 	rge_write_ephy(sc, 0x0022, val | 0x0020);
1905 	val = rge_read_ephy(sc, 0x0062) & ~0x0030;
1906 	rge_write_ephy(sc, 0x0062, val | 0x0020);
1907 
1908 	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);
1909 
1910 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1911 	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
1912 	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
1913 	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
1914 	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
1915 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, RGE_JUMBO_MTU + ETHER_HDR_LEN +
1916 	    32);
1917 	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
1918 	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
1919 	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
1920 	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
1921 	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
1922 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1923 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
1924 	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
1925 	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
1926 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1927 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
1928 	for (i = 0; i < 10; i++) {
1929 		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
1930 		rge_write_phy_ocp(sc, 0xa438, 0x2417);
1931 	}
1932 	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
1933 	val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
1934 	rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
1935 }
1936 
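/*
 * Upload the PHY MCU firmware patch for the current chip variant.
 * The upload is skipped when the version already recorded in
 * sc->rge_mcodever matches; on completion the new version is stored
 * in PHY parameter 0x801e so a later check can skip it again.
 */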
1937 void
1938 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
1939 {
1940 	if (sc->rge_mcodever != mcode_version) {
1941 		int i;
1942 
1943 		rge_patch_phy_mcu(sc, 1);
1944 
1945 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
1946 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
1947 			if (sc->rge_type == MAC_CFG2)
1948 				rge_write_phy_ocp(sc, 0xa438, 0x8600);
1949 			else
1950 				rge_write_phy_ocp(sc, 0xa438, 0x8601);
1951 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
1952 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
1953 
1954 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
1955 		}
1956 
1957 		if (sc->rge_type == MAC_CFG2) {
1958 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
1959 				rge_write_phy_ocp(sc,
1960 				    rtl8125_mac_cfg2_mcu[i].reg,
1961 				    rtl8125_mac_cfg2_mcu[i].val);
1962 			}
1963 		} else if (sc->rge_type == MAC_CFG3) {
1964 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
1965 				rge_write_phy_ocp(sc,
1966 				    rtl8125_mac_cfg3_mcu[i].reg,
1967 				    rtl8125_mac_cfg3_mcu[i].val);
1968 			}
1969 		} else if (sc->rge_type == MAC_CFG4) {
1970 			for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
1971 				rge_write_phy_ocp(sc,
1972 				    rtl8125_mac_cfg4_mcu[i].reg,
1973 				    rtl8125_mac_cfg4_mcu[i].val);
1974 			}
1975 		} else if (sc->rge_type == MAC_CFG5) {
1976 			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
1977 				rge_write_phy_ocp(sc,
1978 				    rtl8125_mac_cfg5_mcu[i].reg,
1979 				    rtl8125_mac_cfg5_mcu[i].val);
1980 			}
1981 		}
1982 
1983 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
1984 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
1985 
1986 			rge_write_phy_ocp(sc, 0xa436, 0);
1987 			rge_write_phy_ocp(sc, 0xa438, 0);
1988 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
1989 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
1990 			rge_write_phy_ocp(sc, 0xa438, 0);
1991 		}
1992 
1993 		rge_patch_phy_mcu(sc, 0);
1994 
1995 		/* Write microcode version. */
1996 		rge_write_phy_ocp(sc, 0xa436, 0x801e);
1997 		rge_write_phy_ocp(sc, 0xa438, mcode_version);
1998 	}
1999 }
2000 
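/*
 * Program the station address.  The ID registers are write-protected,
 * so config-write access is enabled around the update; the address is
 * packed little-endian into the MAC0/MAC4 register pair.
 */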
2001 void
2002 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
2003 {
2004 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2005 	RGE_WRITE_4(sc, RGE_MAC0,
2006 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2007 	RGE_WRITE_4(sc, RGE_MAC4,
2008 	    addr[5] <<  8 | addr[4]);
2009 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2010 }
2011 
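/*
 * Read the station address the chip is currently using.  The 32/16
 * bit loads mirror rge_set_macaddr()'s packing and assume a
 * little-endian host.
 */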
2012 void
2013 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
2014 {
2015 	*(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
2016 	*(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
2017 }
2018 
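/*
 * One-time MAC initialization: quiesce the MAC MCU, apply the
 * per-variant fixup tables (presumably break points, going by the
 * *_bps naming) and disable power-saving features that would get in
 * the way of normal operation.
 */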
2019 void
2020 rge_hw_init(struct rge_softc *sc)
2021 {
2022 	int i;
2023 
2024 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2025 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
2026 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
2027 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2028 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
2029 
2030 	/* Disable UPS. */
2031 	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
2032 
2033 	/* Configure MAC MCU. */
2034 	rge_write_mac_ocp(sc, 0xfc38, 0);
2035 
2036 	for (i = 0xfc28; i < 0xfc38; i += 2)
2037 		rge_write_mac_ocp(sc, i, 0);
2038 
2039 	DELAY(3000);
2040 	rge_write_mac_ocp(sc, 0xfc26, 0);
2041 
2042 	if (sc->rge_type == MAC_CFG3) {
2043 		for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
2044 			rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
2045 			    rtl8125_mac_bps[i].val);
2046 		}
2047 	} else if (sc->rge_type == MAC_CFG5) {
2048 		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
2049 			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
2050 			    rtl8125b_mac_bps[i].val);
2051 		}
2052 	}
2053 
2054 	/* Disable PHY power saving. */
2055 	rge_disable_phy_ocp_pwrsave(sc);
2056 
	/*
	 * Set bit 20 in PCIe extended config space at offset 0x108;
	 * with the AER capability at its usual 0x100 location this is
	 * the uncorrectable error mask, presumably masking Unsupported
	 * Request errors.
	 */
2058 	rge_write_csi(sc, 0x108,
2059 	    rge_read_csi(sc, 0x108) | 0x00100000);
}
2062 
2063 void
2064 rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
2065 {
2066 	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
2067 		rge_patch_phy_mcu(sc, 1);
2068 		rge_write_phy_ocp(sc, 0xc416, 0);
2069 		rge_write_phy_ocp(sc, 0xc416, 0x0500);
2070 		rge_patch_phy_mcu(sc, 0);
2071 	}
2072 }
2073 
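/*
 * Request (set != 0) or withdraw (set == 0) permission to patch the
 * PHY MCU.  Bit 4 of PHY OCP register 0xb820 carries the request and
 * bit 6 of 0xb800 is the firmware's ready flag; both are undocumented
 * and their use here mirrors Realtek's reference driver.
 */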
2074 void
2075 rge_patch_phy_mcu(struct rge_softc *sc, int set)
2076 {
2077 	int i;
2078 
2079 	if (set)
2080 		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
2081 	else
2082 		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
2083 
	/*
	 * Wait for the PHY firmware to react: the ready flag is raised
	 * once a patch request is granted and drops again when the
	 * request is withdrawn, so the polarity polled for must follow
	 * `set' (behaviour inferred from Realtek's reference driver).
	 */
	for (i = 0; i < 1000; i++) {
		if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) ==
		    (set ? 0x0040 : 0))
			break;
		DELAY(100);
	}
	if (i == 1000)
		DPRINTF(("timeout waiting to patch phy mcu\n"));
2093 }
2094 
2095 void
2096 rge_add_media_types(struct rge_softc *sc)
2097 {
2098 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
2099 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
2100 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
2101 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
2102 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
2103 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2104 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
2105 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
2106 }
2107 
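/*
 * Select an interrupt mitigation strategy: RGE_IMTYPE_NONE fields an
 * interrupt per RX/TX event, while RGE_IMTYPE_SIM simulates
 * mitigation with the chip timer, acknowledging batched work on each
 * RGE_ISR_PCS_TIMEOUT tick.
 */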
2108 void
2109 rge_config_imtype(struct rge_softc *sc, int imtype)
2110 {
2111 	switch (imtype) {
2112 	case RGE_IMTYPE_NONE:
2113 		sc->rge_intrs = RGE_INTRS;
2114 		sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
2115 		    RGE_ISR_RX_FIFO_OFLOW;
2116 		sc->rge_tx_ack = RGE_ISR_TX_OK;
2117 		break;
2118 	case RGE_IMTYPE_SIM:
2119 		sc->rge_intrs = RGE_INTRS_TIMER;
2120 		sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
2121 		sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
2122 		break;
2123 	default:
2124 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
2125 	}
2126 }
2127 
2128 void
2129 rge_disable_hw_im(struct rge_softc *sc)
2130 {
2131 	RGE_WRITE_2(sc, RGE_IM, 0);
2132 }
2133 
2134 void
2135 rge_disable_sim_im(struct rge_softc *sc)
2136 {
2137 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
2138 	sc->rge_timerintr = 0;
2139 }
2140 
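/*
 * Arm the simulated-mitigation timer.  0x2600 is an undocumented
 * magic interval; writing RGE_TIMERCNT apparently restarts the count
 * so that RGE_ISR_PCS_TIMEOUT fires periodically from here on.
 */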
2141 void
2142 rge_setup_sim_im(struct rge_softc *sc)
2143 {
2144 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
2145 	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
2146 	sc->rge_timerintr = 1;
2147 }
2148 
2149 void
2150 rge_setup_intr(struct rge_softc *sc, int imtype)
2151 {
2152 	rge_config_imtype(sc, imtype);
2153 
2154 	/* Enable interrupts. */
2155 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
2156 
2157 	switch (imtype) {
2158 	case RGE_IMTYPE_NONE:
2159 		rge_disable_sim_im(sc);
2160 		rge_disable_hw_im(sc);
2161 		break;
2162 	case RGE_IMTYPE_SIM:
2163 		rge_disable_hw_im(sc);
2164 		rge_setup_sim_im(sc);
2165 		break;
2166 	default:
2167 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
2168 	}
2169 }
2170 
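/*
 * Bring the chip out of its out-of-band (management firmware) state
 * so the host driver owns the hardware.  The TWICMD polling loops
 * give the embedded firmware time to complete the hand-off; the bits
 * involved are undocumented.
 */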
2171 void
2172 rge_exit_oob(struct rge_softc *sc)
2173 {
2174 	int i;
2175 
2176 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
2177 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
2178 	    RGE_RXCFG_ERRPKT);
2179 
2180 	/* Disable RealWoW. */
2181 	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
2182 
2183 	rge_reset(sc);
2184 
2185 	/* Disable OOB. */
2186 	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
2187 
2188 	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
2189 
2190 	for (i = 0; i < 10; i++) {
2191 		DELAY(100);
2192 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2193 			break;
2194 	}
2195 
2196 	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
2197 	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
2198 	rge_write_mac_ocp(sc, 0xc01e, 0x5555);
2199 
2200 	for (i = 0; i < 10; i++) {
2201 		DELAY(100);
2202 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2203 			break;
2204 	}
2205 
2206 	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
		printf("%s: UPS resume detected in rge_exit_oob\n",
		    sc->sc_dev.dv_xname);
2209 		for (i = 0; i < RGE_TIMEOUT; i++) {
2210 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
2211 				break;
2212 			DELAY(1000);
2213 		}
2214 		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
2215 		if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
2216 			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
2217 		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
2218 	}
2219 }
2220 
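/*
 * CSI accessors: an indirect window into the device's PCIe config
 * space.  A write loads RGE_CSIDR first and then kicks the cycle by
 * writing the target address together with the busy bit.
 */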
2221 void
2222 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
2223 {
2224 	int i;
2225 
2226 	RGE_WRITE_4(sc, RGE_CSIDR, val);
2227 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2228 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
2229 
2230 	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
2233 			break;
2234 	}
2235 
2236 	DELAY(20);
2237 }
2238 
2239 uint32_t
2240 rge_read_csi(struct rge_softc *sc, uint32_t reg)
2241 {
2242 	int i;
2243 
2244 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2245 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
2246 
2247 	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
2250 			break;
2251 	}
2252 
2253 	DELAY(20);
2254 
2255 	return (RGE_READ_4(sc, RGE_CSIDR));
2256 }
2257 
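/*
 * MAC OCP accessors.  The 16-bit OCP registers are word-aligned, so
 * the offset is halved before being placed in the address field; for
 * writes the data simply occupies the low 16 bits of the same dword.
 */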
2258 void
2259 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2260 {
2261 	uint32_t tmp;
2262 
2263 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2264 	tmp += val;
2265 	tmp |= RGE_MACOCP_BUSY;
2266 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
2267 }
2268 
2269 uint16_t
2270 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
2271 {
2272 	uint32_t val;
2273 
2274 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2275 	RGE_WRITE_4(sc, RGE_MACOCP, val);
2276 
2277 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
2278 }
2279 
2280 void
2281 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
2282 {
2283 	uint32_t tmp;
2284 	int i;
2285 
2286 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2287 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
2288 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
2289 
2290 	for (i = 0; i < 10; i++) {
2291 		DELAY(100);
2292 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
2293 			break;
2294 	}
2295 
2296 	DELAY(20);
2297 }
2298 
2299 uint16_t
2300 rge_read_ephy(struct rge_softc *sc, uint16_t reg)
2301 {
2302 	uint32_t val;
2303 	int i;
2304 
2305 	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2306 	RGE_WRITE_4(sc, RGE_EPHYAR, val);
2307 
2308 	for (i = 0; i < 10; i++) {
2309 		DELAY(100);
2310 		val = RGE_READ_4(sc, RGE_EPHYAR);
2311 		if (val & RGE_EPHYAR_BUSY)
2312 			break;
2313 	}
2314 
2315 	DELAY(20);
2316 
2317 	return (val & RGE_EPHYAR_DATA_MASK);
2318 }
2319 
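/*
 * Access a PHY register through its OCP mapping.  For addr == 0 the
 * standard MII registers are used: they sit eight to a page starting
 * at OCP base RGE_PHYBASE, occupy page offsets 0x10-0x17, and each
 * 16-bit register takes two bytes (hence the shift by one).  As a
 * worked example, MII register 2 maps to OCP address
 * (RGE_PHYBASE << 4) + ((0x10 + 2) - 16) * 2 = (RGE_PHYBASE << 4) + 4.
 */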
2320 void
2321 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
2322 {
2323 	uint16_t off, phyaddr;
2324 
2325 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2326 	phyaddr <<= 4;
2327 
2328 	off = addr ? reg : 0x10 + (reg % 8);
2329 
2330 	phyaddr += (off - 16) << 1;
2331 
2332 	rge_write_phy_ocp(sc, phyaddr, val);
2333 }
2334 
2335 uint16_t
2336 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
2337 {
2338 	uint16_t off, phyaddr;
2339 
2340 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2341 	phyaddr <<= 4;
2342 
2343 	off = addr ? reg : 0x10 + (reg % 8);
2344 
2345 	phyaddr += (off - 16) << 1;
2346 
2347 	return (rge_read_phy_ocp(sc, phyaddr));
2348 }
2349 
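/*
 * Raw PHY OCP accessors.  Note the asymmetric flag polarity: a write
 * is complete once RGE_PHYOCP_BUSY clears, whereas a read's data is
 * valid once the flag becomes set, matching the EPHY and CSI
 * accessors above.
 */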
2350 void
2351 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2352 {
2353 	uint32_t tmp;
2354 	int i;
2355 
2356 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2357 	tmp |= RGE_PHYOCP_BUSY | val;
2358 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
2359 
2360 	for (i = 0; i < RGE_TIMEOUT; i++) {
2361 		DELAY(1);
2362 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
2363 			break;
2364 	}
2365 }
2366 
2367 uint16_t
2368 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
2369 {
2370 	uint32_t val;
2371 	int i;
2372 
2373 	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2374 	RGE_WRITE_4(sc, RGE_PHYOCP, val);
2375 
2376 	for (i = 0; i < RGE_TIMEOUT; i++) {
2377 		DELAY(1);
2378 		val = RGE_READ_4(sc, RGE_PHYOCP);
2379 		if (val & RGE_PHYOCP_BUSY)
2380 			break;
2381 	}
2382 
2383 	return (val & RGE_PHYOCP_DATA_MASK);
2384 }
2385 
2386 int
2387 rge_get_link_status(struct rge_softc *sc)
2388 {
2389 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
2390 }
2391 
2392 void
2393 rge_txstart(void *arg)
2394 {
2395 	struct rge_softc *sc = arg;
2396 
2397 	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
2398 }
2399 
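/*
 * Once-a-second timeout: refresh the link state and rearm.
 */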
2400 void
2401 rge_tick(void *arg)
2402 {
2403 	struct rge_softc *sc = arg;
2404 	int s;
2405 
2406 	s = splnet();
2407 	rge_link_state(sc);
2408 	splx(s);
2409 
2410 	timeout_add_sec(&sc->sc_timeout, 1);
2411 }
2412 
2413 void
2414 rge_link_state(struct rge_softc *sc)
2415 {
2416 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2417 	int link = LINK_STATE_DOWN;
2418 
2419 	if (rge_get_link_status(sc))
2420 		link = LINK_STATE_UP;
2421 
2422 	if (ifp->if_link_state != link) {
2423 		ifp->if_link_state = link;
2424 		if_link_state_change(ifp);
2425 	}
2426 }
2427 
2428 #ifndef SMALL_KERNEL
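/*
 * Configure Wake on LAN.  PME generation must already be enabled in
 * CFG1 (the driver only checks it here); the RX filter is then
 * re-applied and LANWAKE is left as the only armed wake source.
 */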
2429 int
2430 rge_wol(struct ifnet *ifp, int enable)
2431 {
2432 	struct rge_softc *sc = ifp->if_softc;
2433 
	if (enable && !(RGE_READ_1(sc, RGE_CFG1) & RGE_CFG1_PM_EN)) {
		printf("%s: power management is disabled, "
		    "cannot do WOL\n", sc->sc_dev.dv_xname);
		return (ENOTSUP);
	}
2442 
2443 	rge_iff(sc);
2444 
2445 	if (enable)
2446 		RGE_MAC_SETBIT(sc, 0xc0b6, 0x0001);
2447 	else
2448 		RGE_MAC_CLRBIT(sc, 0xc0b6, 0x0001);
2449 
2450 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2451 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_WOL_LANWAKE | RGE_CFG5_WOL_UCAST |
2452 	    RGE_CFG5_WOL_MCAST | RGE_CFG5_WOL_BCAST);
2453 	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_WOL_LINK | RGE_CFG3_WOL_MAGIC);
2454 	if (enable)
2455 		RGE_SETBIT_1(sc, RGE_CFG5, RGE_CFG5_WOL_LANWAKE);
2456 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2457 
2458 	return (0);
2459 }
2460 
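/*
 * Final power-down preparation for WOL: reopen the RX path by
 * clearing the (undocumented) RXDV gate bit, presumably so wake
 * frames can still be received, then set the power-management
 * enables.
 */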
2461 void
2462 rge_wol_power(struct rge_softc *sc)
2463 {
2464 	/* Disable RXDV gate. */
2465 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
2466 	DELAY(2000);
2467 
2468 	RGE_SETBIT_1(sc, RGE_CFG1, RGE_CFG1_PM_EN);
2469 	RGE_SETBIT_1(sc, RGE_CFG2, RGE_CFG2_PMSTS_EN);
2470 }
2471 #endif
2472