1 /*	$OpenBSD: if_rge.c,v 1.14 2021/07/12 12:24:41 daniel Exp $	*/
2 
3 /*
4  * Copyright (c) 2019, 2020 Kevin Lo <kevlo@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "bpfilter.h"
20 #include "vlan.h"
21 
22 #include <sys/param.h>
23 #include <sys/systm.h>
24 #include <sys/sockio.h>
25 #include <sys/mbuf.h>
26 #include <sys/malloc.h>
27 #include <sys/kernel.h>
28 #include <sys/socket.h>
29 #include <sys/device.h>
30 #include <sys/endian.h>
31 
32 #include <net/if.h>
33 #include <net/if_media.h>
34 
35 #include <netinet/in.h>
36 #include <netinet/if_ether.h>
37 
38 #if NBPFILTER > 0
39 #include <net/bpf.h>
40 #endif
41 
42 #include <machine/bus.h>
43 #include <machine/intr.h>
44 
45 #include <dev/mii/mii.h>
46 
47 #include <dev/pci/pcivar.h>
48 #include <dev/pci/pcireg.h>
49 #include <dev/pci/pcidevs.h>
50 
51 #include <dev/pci/if_rgereg.h>
52 
53 #ifdef RGE_DEBUG
54 #define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
55 int rge_debug = 0;
56 #else
57 #define DPRINTF(x)
58 #endif
59 
60 int		rge_match(struct device *, void *, void *);
61 void		rge_attach(struct device *, struct device *, void *);
62 int		rge_activate(struct device *, int);
63 int		rge_intr(void *);
64 int		rge_encap(struct rge_queues *, struct mbuf *, int);
65 int		rge_ioctl(struct ifnet *, u_long, caddr_t);
66 void		rge_start(struct ifqueue *);
67 void		rge_watchdog(struct ifnet *);
68 int		rge_init(struct ifnet *);
69 void		rge_stop(struct ifnet *);
70 int		rge_ifmedia_upd(struct ifnet *);
71 void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
72 int		rge_allocmem(struct rge_softc *);
73 int		rge_newbuf(struct rge_queues *);
74 void		rge_discard_rxbuf(struct rge_queues *, int);
75 void		rge_rx_list_init(struct rge_queues *);
76 void		rge_tx_list_init(struct rge_queues *);
77 void		rge_fill_rx_ring(struct rge_queues *);
78 int		rge_rxeof(struct rge_queues *);
79 int		rge_txeof(struct rge_queues *);
80 void		rge_reset(struct rge_softc *);
81 void		rge_iff(struct rge_softc *);
82 void		rge_set_phy_power(struct rge_softc *, int);
83 void		rge_phy_config(struct rge_softc *);
84 void		rge_phy_config_mac_cfg2(struct rge_softc *);
85 void		rge_phy_config_mac_cfg3(struct rge_softc *);
86 void		rge_phy_config_mac_cfg4(struct rge_softc *);
87 void		rge_phy_config_mac_cfg5(struct rge_softc *);
88 void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
89 void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
90 void		rge_get_macaddr(struct rge_softc *, uint8_t *);
91 void		rge_hw_init(struct rge_softc *);
92 void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
93 void		rge_patch_phy_mcu(struct rge_softc *, int);
94 void		rge_add_media_types(struct rge_softc *);
95 void		rge_config_imtype(struct rge_softc *, int);
96 void		rge_disable_hw_im(struct rge_softc *);
97 void		rge_disable_sim_im(struct rge_softc *);
98 void		rge_setup_sim_im(struct rge_softc *);
99 void		rge_setup_intr(struct rge_softc *, int);
100 void		rge_exit_oob(struct rge_softc *);
101 void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
102 uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
103 void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
104 uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
105 void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
106 uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
107 void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
108 uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
109 void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
110 uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
111 int		rge_get_link_status(struct rge_softc *);
112 void		rge_txstart(void *);
113 void		rge_tick(void *);
114 void		rge_link_state(struct rge_softc *);
115 #ifndef SMALL_KERNEL
116 int		rge_wol(struct ifnet *, int);
117 void		rge_wol_power(struct rge_softc *);
118 #endif
119 
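/*
 * Register/value pairs used to patch the PHY microcontroller for each
 * MAC revision.  The tables themselves are defined in if_rgereg.h and
 * are replayed by rge_phy_config_mcu().
 */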
120 static const struct {
121 	uint16_t reg;
122 	uint16_t val;
} rtl8125_mac_cfg2_mcu[] = {
124 	RTL8125_MAC_CFG2_MCU
125 }, rtl8125_mac_cfg3_mcu[] = {
126 	RTL8125_MAC_CFG3_MCU
127 }, rtl8125_mac_cfg4_mcu[] = {
128 	RTL8125_MAC_CFG4_MCU
129 }, rtl8125_mac_cfg5_mcu[] = {
130 	RTL8125_MAC_CFG5_MCU
131 };
132 
133 struct cfattach rge_ca = {
134 	sizeof(struct rge_softc), rge_match, rge_attach, NULL, rge_activate
135 };
136 
137 struct cfdriver rge_cd = {
138 	NULL, "rge", DV_IFNET
139 };
140 
141 const struct pci_matchid rge_devices[] = {
142 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
143 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8125 }
144 };
145 
146 int
147 rge_match(struct device *parent, void *match, void *aux)
148 {
149 	return (pci_matchbyid((struct pci_attach_args *)aux, rge_devices,
150 	    nitems(rge_devices)));
151 }
152 
153 void
154 rge_attach(struct device *parent, struct device *self, void *aux)
155 {
156 	struct rge_softc *sc = (struct rge_softc *)self;
157 	struct pci_attach_args *pa = aux;
158 	pci_chipset_tag_t pc = pa->pa_pc;
159 	pci_intr_handle_t ih;
160 	const char *intrstr = NULL;
161 	struct ifnet *ifp;
162 	struct rge_queues *q;
163 	pcireg_t reg;
164 	uint32_t hwrev;
165 	uint8_t eaddr[ETHER_ADDR_LEN];
166 	int offset;
167 
168 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
169 
	/*
	 * Map control/status registers.  Prefer the 64-bit memory BAR,
	 * then fall back to the 32-bit memory BAR and finally to I/O
	 * space.
	 */
173 	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
174 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
175 	    NULL, &sc->rge_bsize, 0)) {
176 		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
177 		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
178 		    &sc->rge_bhandle, NULL, &sc->rge_bsize, 0)) {
179 			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
180 			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
181 			    &sc->rge_bsize, 0)) {
182 				printf(": can't map mem or i/o space\n");
183 				return;
184 			}
185 		}
186 	}
187 
188 	q = malloc(sizeof(struct rge_queues), M_DEVBUF, M_NOWAIT | M_ZERO);
189 	if (q == NULL) {
190 		printf(": unable to allocate queue memory\n");
191 		return;
192 	}
193 	q->q_sc = sc;
194 	q->q_index = 0;
195 
196 	sc->sc_queues = q;
197 	sc->sc_nqueues = 1;
198 
199 	/*
200 	 * Allocate interrupt.
201 	 */
202 	if (pci_intr_map_msi(pa, &ih) == 0)
203 		sc->rge_flags |= RGE_FLAG_MSI;
204 	else if (pci_intr_map(pa, &ih) != 0) {
205 		printf(": couldn't map interrupt\n");
206 		return;
207 	}
208 	intrstr = pci_intr_string(pc, ih);
209 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE, rge_intr,
210 	    sc, sc->sc_dev.dv_xname);
211 	if (sc->sc_ih == NULL) {
212 		printf(": couldn't establish interrupt");
213 		if (intrstr != NULL)
214 			printf(" at %s", intrstr);
215 		printf("\n");
216 		return;
217 	}
218 	printf(": %s", intrstr);
219 
220 	sc->sc_dmat = pa->pa_dmat;
221 	sc->sc_pc = pa->pa_pc;
222 	sc->sc_tag = pa->pa_tag;
223 
	/*
	 * Determine the hardware revision.  The revision field of TXCFG
	 * identifies the RTL8125 variant; MAC_CFG4 and MAC_CFG5 appear
	 * to correspond to the RTL8125B parts.
	 */
225 	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
226 	switch (hwrev) {
227 	case 0x60800000:
228 		sc->rge_type = MAC_CFG2;
229 		break;
230 	case 0x60900000:
231 		sc->rge_type = MAC_CFG3;
232 		break;
233 	case 0x64000000:
234 		sc->rge_type = MAC_CFG4;
235 		break;
236 	case 0x64100000:
237 		sc->rge_type = MAC_CFG5;
238 		break;
239 	default:
240 		printf(": unknown version 0x%08x\n", hwrev);
241 		return;
242 	}
243 
244 	rge_config_imtype(sc, RGE_IMTYPE_SIM);
245 
246 	/*
247 	 * PCI Express check.
248 	 */
249 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
250 	    &offset, NULL)) {
251 		/* Disable PCIe ASPM and ECPM. */
252 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
253 		    offset + PCI_PCIE_LCSR);
254 		reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1 |
255 		    PCI_PCIE_LCSR_ECPM);
256 		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCI_PCIE_LCSR,
257 		    reg);
258 	}
259 
260 	rge_exit_oob(sc);
261 	rge_hw_init(sc);
262 
263 	rge_get_macaddr(sc, eaddr);
264 	printf(", address %s\n", ether_sprintf(eaddr));
265 
266 	memcpy(sc->sc_arpcom.ac_enaddr, eaddr, ETHER_ADDR_LEN);
267 
268 	rge_set_phy_power(sc, 1);
269 	rge_phy_config(sc);
270 
271 	if (rge_allocmem(sc))
272 		return;
273 
274 	ifp = &sc->sc_arpcom.ac_if;
275 	ifp->if_softc = sc;
276 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
277 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
278 	ifp->if_xflags = IFXF_MPSAFE;
279 	ifp->if_ioctl = rge_ioctl;
280 	ifp->if_qstart = rge_start;
281 	ifp->if_watchdog = rge_watchdog;
282 	ifq_set_maxlen(&ifp->if_snd, RGE_TX_LIST_CNT - 1);
283 	ifp->if_hardmtu = RGE_JUMBO_MTU;
284 
285 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
286 	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
287 
288 #if NVLAN > 0
289 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
290 #endif
291 
292 #ifndef SMALL_KERNEL
293 	ifp->if_capabilities |= IFCAP_WOL;
294 	ifp->if_wol = rge_wol;
295 	rge_wol(ifp, 0);
296 #endif
297 	timeout_set(&sc->sc_timeout, rge_tick, sc);
298 	task_set(&sc->sc_task, rge_txstart, sc);
299 
300 	/* Initialize ifmedia structures. */
301 	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
302 	    rge_ifmedia_sts);
303 	rge_add_media_types(sc);
304 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
305 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
306 	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
307 
308 	if_attach(ifp);
309 	ether_ifattach(ifp);
310 }
311 
312 int
313 rge_activate(struct device *self, int act)
314 {
315 #ifndef SMALL_KERNEL
316 	struct rge_softc *sc = (struct rge_softc *)self;
317 #endif
318 	int rv = 0;
319 
320 	switch (act) {
321 	case DVACT_POWERDOWN:
322 		rv = config_activate_children(self, act);
323 #ifndef SMALL_KERNEL
324 		rge_wol_power(sc);
325 #endif
326 		break;
327 	default:
328 		rv = config_activate_children(self, act);
329 		break;
330 	}
331 	return (rv);
332 }
333 
334 int
335 rge_intr(void *arg)
336 {
337 	struct rge_softc *sc = arg;
338 	struct rge_queues *q = sc->sc_queues;
339 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
340 	uint32_t status;
341 	int claimed = 0, rv;
342 
343 	if (!(ifp->if_flags & IFF_RUNNING))
344 		return (0);
345 
346 	/* Disable interrupts. */
347 	RGE_WRITE_4(sc, RGE_IMR, 0);
348 
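	/*
	 * Unlike MSI, a legacy INTx line can be shared with other
	 * devices, so check that the interrupt is ours before doing
	 * any work.
	 */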
349 	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
350 		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
351 			return (0);
352 	}
353 
354 	status = RGE_READ_4(sc, RGE_ISR);
355 	if (status)
356 		RGE_WRITE_4(sc, RGE_ISR, status);
357 
358 	if (status & RGE_ISR_PCS_TIMEOUT)
359 		claimed = 1;
360 
361 	rv = 0;
362 	if (status & sc->rge_intrs) {
363 		rv |= rge_rxeof(q);
364 		rv |= rge_txeof(q);
365 
366 		if (status & RGE_ISR_SYSTEM_ERR) {
367 			KERNEL_LOCK();
368 			rge_init(ifp);
369 			KERNEL_UNLOCK();
370 		}
371 		claimed = 1;
372 	}
373 
374 	if (sc->rge_timerintr) {
375 		if (!rv) {
			/*
			 * Nothing needs to be processed; fall back
			 * to using TX/RX interrupts.
			 */
380 			rge_setup_intr(sc, RGE_IMTYPE_NONE);
381 
			/*
			 * Re-collect, mainly to avoid a possible race
			 * introduced by changing the interrupt masks.
			 */
387 			rge_rxeof(q);
388 			rge_txeof(q);
389 		} else
390 			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
391 	} else if (rv) {
392 		/*
393 		 * Assume that using simulated interrupt moderation
394 		 * (hardware timer based) could reduce the interrupt
395 		 * rate.
396 		 */
397 		rge_setup_intr(sc, RGE_IMTYPE_SIM);
398 	}
399 
400 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
401 
402 	return (claimed);
403 }
404 
405 int
406 rge_encap(struct rge_queues *q, struct mbuf *m, int idx)
407 {
408 	struct rge_softc *sc = q->q_sc;
409 	struct rge_tx_desc *d = NULL;
410 	struct rge_txq *txq;
411 	bus_dmamap_t txmap;
412 	uint32_t cmdsts, cflags = 0;
413 	int cur, error, i, last, nsegs;
414 
	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM do not
	 * take effect.
	 */
420 	if ((m->m_pkthdr.csum_flags &
421 	    (M_IPV4_CSUM_OUT | M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) != 0) {
422 		cflags |= RGE_TDEXTSTS_IPCSUM;
423 		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
424 			cflags |= RGE_TDEXTSTS_TCPCSUM;
425 		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
426 			cflags |= RGE_TDEXTSTS_UDPCSUM;
427 	}
428 
429 	txq = &q->q_tx.rge_txq[idx];
430 	txmap = txq->txq_dmamap;
431 
432 	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
433 	switch (error) {
434 	case 0:
435 		break;
436 	case EFBIG: /* mbuf chain is too fragmented */
437 		if (m_defrag(m, M_DONTWAIT) == 0 &&
438 		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
439 		    BUS_DMA_NOWAIT) == 0)
440 			break;
441 
442 		/* FALLTHROUGH */
443 	default:
444 		return (0);
445 	}
446 
447 	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
448 	    BUS_DMASYNC_PREWRITE);
449 
450 	nsegs = txmap->dm_nsegs;
451 
452 	/* Set up hardware VLAN tagging. */
453 #if NVLAN > 0
454 	if (m->m_flags & M_VLANTAG)
455 		cflags |= swap16(m->m_pkthdr.ether_vtag | RGE_TDEXTSTS_VTAG);
456 #endif
457 
458 	cur = idx;
459 	cmdsts = RGE_TDCMDSTS_SOF;
460 
461 	for (i = 0; i < txmap->dm_nsegs; i++) {
462 		d = &q->q_tx.rge_tx_list[cur];
463 
464 		d->rge_extsts = htole32(cflags);
465 		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
466 		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));
467 
468 		cmdsts |= txmap->dm_segs[i].ds_len;
469 
470 		if (cur == RGE_TX_LIST_CNT - 1)
471 			cmdsts |= RGE_TDCMDSTS_EOR;
472 
473 		d->rge_cmdsts = htole32(cmdsts);
474 
475 		last = cur;
476 		cmdsts = RGE_TDCMDSTS_OWN;
477 		cur = RGE_NEXT_TX_DESC(cur);
478 	}
479 
480 	/* Set EOF on the last descriptor. */
481 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);
482 
	/*
	 * Transfer ownership of packet to the chip.  The first
	 * descriptor gets its OWN bit last so the chip never sees a
	 * partially built chain.
	 */
484 	d = &q->q_tx.rge_tx_list[idx];
485 
486 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);
487 
488 	bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
489 	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
490 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
491 
492 	/* Update info of TX queue and descriptors. */
493 	txq->txq_mbuf = m;
494 	txq->txq_descidx = last;
495 
496 	return (nsegs);
497 }
498 
499 int
500 rge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
501 {
502 	struct rge_softc *sc = ifp->if_softc;
503 	struct ifreq *ifr = (struct ifreq *)data;
504 	int s, error = 0;
505 
506 	s = splnet();
507 
508 	switch (cmd) {
509 	case SIOCSIFADDR:
510 		ifp->if_flags |= IFF_UP;
511 		if (!(ifp->if_flags & IFF_RUNNING))
512 			rge_init(ifp);
513 		break;
514 	case SIOCSIFFLAGS:
515 		if (ifp->if_flags & IFF_UP) {
516 			if (ifp->if_flags & IFF_RUNNING)
517 				error = ENETRESET;
518 			else
519 				rge_init(ifp);
520 		} else {
521 			if (ifp->if_flags & IFF_RUNNING)
522 				rge_stop(ifp);
523 		}
524 		break;
525 	case SIOCGIFMEDIA:
526 	case SIOCSIFMEDIA:
527 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
528 		break;
529 	case SIOCSIFMTU:
530 		if (ifr->ifr_mtu > ifp->if_hardmtu) {
531 			error = EINVAL;
532 			break;
533 		}
534 		ifp->if_mtu = ifr->ifr_mtu;
535 		break;
536 	case SIOCGIFRXR:
537 		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
538 		    NULL, RGE_JUMBO_FRAMELEN, &sc->sc_queues->q_rx.rge_rx_ring);
539 		break;
540 	default:
541 		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
542 	}
543 
544 	if (error == ENETRESET) {
545 		if (ifp->if_flags & IFF_RUNNING)
546 			rge_iff(sc);
547 		error = 0;
548 	}
549 
550 	splx(s);
551 	return (error);
552 }
553 
554 void
555 rge_start(struct ifqueue *ifq)
556 {
557 	struct ifnet *ifp = ifq->ifq_if;
558 	struct rge_softc *sc = ifp->if_softc;
559 	struct rge_queues *q = sc->sc_queues;
560 	struct mbuf *m;
561 	int free, idx, used;
562 	int queued = 0;
563 
564 	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
565 		ifq_purge(ifq);
566 		return;
567 	}
568 
569 	/* Calculate free space. */
570 	idx = q->q_tx.rge_txq_prodidx;
571 	free = q->q_tx.rge_txq_considx;
572 	if (free <= idx)
573 		free += RGE_TX_LIST_CNT;
574 	free -= idx;
575 
576 	for (;;) {
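		/*
		 * Stop while some slack remains so the producer index
		 * can never catch up with the consumer index; a full
		 * ring would otherwise be indistinguishable from an
		 * empty one.
		 */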
577 		if (RGE_TX_NSEGS >= free + 2) {
578 			ifq_set_oactive(&ifp->if_snd);
579 			break;
580 		}
581 
582 		m = ifq_dequeue(ifq);
583 		if (m == NULL)
584 			break;
585 
586 		used = rge_encap(q, m, idx);
587 		if (used == 0) {
588 			m_freem(m);
589 			continue;
590 		}
591 
592 		KASSERT(used <= free);
593 		free -= used;
594 
595 #if NBPFILTER > 0
596 		if (ifp->if_bpf)
597 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
598 #endif
599 
600 		idx += used;
601 		if (idx >= RGE_TX_LIST_CNT)
602 			idx -= RGE_TX_LIST_CNT;
603 
604 		queued++;
605 	}
606 
607 	if (queued == 0)
608 		return;
609 
610 	/* Set a timeout in case the chip goes out to lunch. */
611 	ifp->if_timer = 5;
612 
613 	q->q_tx.rge_txq_prodidx = idx;
614 	ifq_serialize(ifq, &sc->sc_task);
615 }
616 
617 void
618 rge_watchdog(struct ifnet *ifp)
619 {
620 	struct rge_softc *sc = ifp->if_softc;
621 
622 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
623 	ifp->if_oerrors++;
624 
625 	rge_init(ifp);
626 }
627 
628 int
629 rge_init(struct ifnet *ifp)
630 {
631 	struct rge_softc *sc = ifp->if_softc;
632 	struct rge_queues *q = sc->sc_queues;
633 	uint32_t val;
634 	int i;
635 
636 	rge_stop(ifp);
637 
638 	/* Set MAC address. */
639 	rge_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);
640 
	/* Set maximum frame size. */
642 	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);
643 
	/* Initialize the RX and TX descriptor lists. */
645 	rge_rx_list_init(q);
646 	rge_tx_list_init(q);
647 
648 	/* Load the addresses of the RX and TX lists into the chip. */
649 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
650 	    RGE_ADDR_LO(q->q_rx.rge_rx_list_map->dm_segs[0].ds_addr));
651 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
652 	    RGE_ADDR_HI(q->q_rx.rge_rx_list_map->dm_segs[0].ds_addr));
653 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
654 	    RGE_ADDR_LO(q->q_tx.rge_tx_list_map->dm_segs[0].ds_addr));
655 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
656 	    RGE_ADDR_HI(q->q_tx.rge_tx_list_map->dm_segs[0].ds_addr));
657 
658 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
659 
660 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
661 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
662 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
663 	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);
664 
665 	/* Clear interrupt moderation timer. */
666 	for (i = 0; i < 64; i++)
667 		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);
668 
669 	/* Set the initial RX and TX configurations. */
670 	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
671 	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);
672 
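	/*
	 * Most of the CSI and MAC OCP pokes below are undocumented;
	 * they presumably mirror Realtek's vendor driver.
	 */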
673 	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
674 	rge_write_csi(sc, 0x70c, val | 0x27000000);
675 
676 	/* Enable hardware optimization function. */
677 	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
678 	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);
679 
680 	RGE_WRITE_2(sc, 0x0382, 0x221b);
681 
682 	RGE_WRITE_1(sc, RGE_RSS_CTRL, 0);
683 
684 	val = RGE_READ_2(sc, RGE_RXQUEUE_CTRL) & ~0x001c;
685 	RGE_WRITE_2(sc, RGE_RXQUEUE_CTRL, val | (fls(sc->sc_nqueues) - 1) << 2);
686 
687 	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);
688 
689 	rge_write_mac_ocp(sc, 0xc140, 0xffff);
690 	rge_write_mac_ocp(sc, 0xc142, 0xffff);
691 
692 	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);
693 
694 	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
695 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
696 		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
697 	else
698 		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);
699 
700 	val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0c00;
701 	rge_write_mac_ocp(sc, 0xe63e, val |
702 	    ((fls(sc->sc_nqueues) - 1) & 0x03) << 10);
703 
704 	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);
705 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
706 		RGE_MAC_SETBIT(sc, 0xe63e, 0x0020);
707 
708 	RGE_MAC_CLRBIT(sc, 0xc0b4, 0x0001);
709 	RGE_MAC_SETBIT(sc, 0xc0b4, 0x0001);
710 	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);
711 
712 	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
713 	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);
714 
715 	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
716 	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);
717 
718 	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
719 	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);
720 
721 	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);
722 
723 	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);
724 
725 	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);
726 
727 	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
728 	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);
729 
730 	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
731 	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);
732 
733 	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
734 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);
735 
736 	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
737 	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);
738 
739 	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN | RGE_DLLPR_TX_10M_PS_EN);
740 
741 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
742 		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);
743 
744 	/* Disable EEE plus. */
745 	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);
746 
747 	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);
748 
749 	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
750 	DELAY(1);
751 	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);
752 
753 	RGE_CLRBIT_4(sc, 0x1880, 0x0030);
754 
755 	rge_write_mac_ocp(sc, 0xe098, 0xc302);
756 
757 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
758 		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
759 
760 	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);
761 
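	/* Wait for the (undocumented) MAC OCP flag at 0xe00e to clear. */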
762 	for (i = 0; i < 10; i++) {
763 		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
764 			break;
765 		DELAY(1000);
766 	}
767 
768 	/* Disable RXDV gate. */
769 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
770 	DELAY(2000);
771 
772 	rge_ifmedia_upd(ifp);
773 
774 	/* Enable transmit and receive. */
775 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);
776 
777 	/* Program promiscuous mode and multicast filters. */
778 	rge_iff(sc);
779 
780 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
781 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
782 
783 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
784 
785 	/* Enable interrupts. */
786 	rge_setup_intr(sc, RGE_IMTYPE_SIM);
787 
788 	ifp->if_flags |= IFF_RUNNING;
789 	ifq_clr_oactive(&ifp->if_snd);
790 
791 	timeout_add_sec(&sc->sc_timeout, 1);
792 
793 	return (0);
794 }
795 
796 /*
797  * Stop the adapter and free any mbufs allocated to the RX and TX lists.
798  */
799 void
800 rge_stop(struct ifnet *ifp)
801 {
802 	struct rge_softc *sc = ifp->if_softc;
803 	struct rge_queues *q = sc->sc_queues;
804 	int i;
805 
806 	timeout_del(&sc->sc_timeout);
807 
808 	ifp->if_timer = 0;
809 	ifp->if_flags &= ~IFF_RUNNING;
810 	sc->rge_timerintr = 0;
811 
812 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
813 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
814 	    RGE_RXCFG_ERRPKT);
815 
816 	RGE_WRITE_4(sc, RGE_IMR, 0);
817 	RGE_WRITE_4(sc, RGE_ISR, 0);
818 
819 	/* Clear timer interrupts. */
820 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
821 	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
822 	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
823 	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);
824 
825 	rge_reset(sc);
826 
827 	intr_barrier(sc->sc_ih);
828 	ifq_barrier(&ifp->if_snd);
829 	ifq_clr_oactive(&ifp->if_snd);
830 
831 	if (q->q_rx.rge_head != NULL) {
832 		m_freem(q->q_rx.rge_head);
833 		q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
834 	}
835 
836 	/* Free the TX list buffers. */
837 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
838 		if (q->q_tx.rge_txq[i].txq_mbuf != NULL) {
839 			bus_dmamap_unload(sc->sc_dmat,
840 			    q->q_tx.rge_txq[i].txq_dmamap);
841 			m_freem(q->q_tx.rge_txq[i].txq_mbuf);
842 			q->q_tx.rge_txq[i].txq_mbuf = NULL;
843 		}
844 	}
845 
846 	/* Free the RX list buffers. */
847 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
848 		if (q->q_rx.rge_rxq[i].rxq_mbuf != NULL) {
849 			bus_dmamap_unload(sc->sc_dmat,
850 			    q->q_rx.rge_rxq[i].rxq_dmamap);
851 			m_freem(q->q_rx.rge_rxq[i].rxq_mbuf);
852 			q->q_rx.rge_rxq[i].rxq_mbuf = NULL;
853 		}
854 	}
855 }
856 
857 /*
858  * Set media options.
859  */
860 int
861 rge_ifmedia_upd(struct ifnet *ifp)
862 {
863 	struct rge_softc *sc = ifp->if_softc;
864 	struct ifmedia *ifm = &sc->sc_media;
865 	int anar, gig, val;
866 
867 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
868 		return (EINVAL);
869 
870 	/* Disable Gigabit Lite. */
871 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
872 	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
873 
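	/* OCP 0xa5d4 carries the 2.5GBASE-T autonegotiation advertisement. */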
874 	val = rge_read_phy_ocp(sc, 0xa5d4);
875 	val &= ~RGE_ADV_2500TFDX;
876 
877 	anar = gig = 0;
878 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
879 	case IFM_AUTO:
880 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
881 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
882 		val |= RGE_ADV_2500TFDX;
883 		break;
884 	case IFM_2500_T:
885 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
886 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
887 		val |= RGE_ADV_2500TFDX;
888 		ifp->if_baudrate = IF_Mbps(2500);
889 		break;
890 	case IFM_1000_T:
891 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
892 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
893 		ifp->if_baudrate = IF_Gbps(1);
894 		break;
895 	case IFM_100_TX:
896 		gig = rge_read_phy(sc, 0, MII_100T2CR) &
897 		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
898 		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
899 		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
900 		    ANAR_TX | ANAR_10_FD | ANAR_10;
901 		ifp->if_baudrate = IF_Mbps(100);
902 		break;
903 	case IFM_10_T:
904 		gig = rge_read_phy(sc, 0, MII_100T2CR) &
905 		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
906 		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
907 		    ANAR_10_FD | ANAR_10 : ANAR_10;
908 		ifp->if_baudrate = IF_Mbps(10);
909 		break;
910 	default:
911 		printf("%s: unsupported media type\n", sc->sc_dev.dv_xname);
912 		return (EINVAL);
913 	}
914 
915 	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
916 	rge_write_phy(sc, 0, MII_100T2CR, gig);
917 	rge_write_phy_ocp(sc, 0xa5d4, val);
918 	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
919 	    BMCR_STARTNEG);
920 
921 	return (0);
922 }
923 
924 /*
925  * Report current media status.
926  */
927 void
928 rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
929 {
930 	struct rge_softc *sc = ifp->if_softc;
931 	uint16_t status = 0;
932 
933 	ifmr->ifm_status = IFM_AVALID;
934 	ifmr->ifm_active = IFM_ETHER;
935 
936 	if (rge_get_link_status(sc)) {
937 		ifmr->ifm_status |= IFM_ACTIVE;
938 
939 		status = RGE_READ_2(sc, RGE_PHYSTAT);
940 		if ((status & RGE_PHYSTAT_FDX) ||
941 		    (status & RGE_PHYSTAT_2500MBPS))
942 			ifmr->ifm_active |= IFM_FDX;
943 		else
944 			ifmr->ifm_active |= IFM_HDX;
945 
946 		if (status & RGE_PHYSTAT_10MBPS)
947 			ifmr->ifm_active |= IFM_10_T;
948 		else if (status & RGE_PHYSTAT_100MBPS)
949 			ifmr->ifm_active |= IFM_100_TX;
950 		else if (status & RGE_PHYSTAT_1000MBPS)
951 			ifmr->ifm_active |= IFM_1000_T;
952 		else if (status & RGE_PHYSTAT_2500MBPS)
953 			ifmr->ifm_active |= IFM_2500_T;
954 	}
955 }
956 
957 /*
958  * Allocate memory for RX/TX rings.
959  */
960 int
961 rge_allocmem(struct rge_softc *sc)
962 {
963 	struct rge_queues *q = sc->sc_queues;
964 	int error, i;
965 
966 	/* Allocate DMA'able memory for the TX ring. */
967 	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
968 	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &q->q_tx.rge_tx_list_map);
969 	if (error) {
970 		printf("%s: can't create TX list map\n", sc->sc_dev.dv_xname);
971 		return (error);
972 	}
973 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
974 	    &q->q_tx.rge_tx_listseg, 1, &q->q_tx.rge_tx_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
976 	if (error) {
977 		printf("%s: can't alloc TX list\n", sc->sc_dev.dv_xname);
978 		return (error);
979 	}
980 
981 	/* Load the map for the TX ring. */
982 	error = bus_dmamem_map(sc->sc_dmat, &q->q_tx.rge_tx_listseg,
983 	    q->q_tx.rge_tx_listnseg, RGE_TX_LIST_SZ,
984 	    (caddr_t *)&q->q_tx.rge_tx_list, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
985 	if (error) {
986 		printf("%s: can't map TX dma buffers\n", sc->sc_dev.dv_xname);
987 		bus_dmamem_free(sc->sc_dmat, &q->q_tx.rge_tx_listseg,
988 		    q->q_tx.rge_tx_listnseg);
989 		return (error);
990 	}
991 	error = bus_dmamap_load(sc->sc_dmat, q->q_tx.rge_tx_list_map,
992 	    q->q_tx.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
993 	if (error) {
994 		printf("%s: can't load TX dma map\n", sc->sc_dev.dv_xname);
995 		bus_dmamap_destroy(sc->sc_dmat, q->q_tx.rge_tx_list_map);
996 		bus_dmamem_unmap(sc->sc_dmat,
997 		    (caddr_t)q->q_tx.rge_tx_list, RGE_TX_LIST_SZ);
998 		bus_dmamem_free(sc->sc_dmat, &q->q_tx.rge_tx_listseg,
999 		    q->q_tx.rge_tx_listnseg);
1000 		return (error);
1001 	}
1002 
1003 	/* Create DMA maps for TX buffers. */
1004 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
1005 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
1006 		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, BUS_DMA_NOWAIT,
1007 		    &q->q_tx.rge_txq[i].txq_dmamap);
1008 		if (error) {
1009 			printf("%s: can't create DMA map for TX\n",
1010 			    sc->sc_dev.dv_xname);
1011 			return (error);
1012 		}
1013 	}
1014 
1015 	/* Allocate DMA'able memory for the RX ring. */
1016 	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
1017 	    RGE_RX_LIST_SZ, 0, BUS_DMA_NOWAIT, &q->q_rx.rge_rx_list_map);
1018 	if (error) {
1019 		printf("%s: can't create RX list map\n", sc->sc_dev.dv_xname);
1020 		return (error);
1021 	}
1022 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
1023 	    &q->q_rx.rge_rx_listseg, 1, &q->q_rx.rge_rx_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1025 	if (error) {
1026 		printf("%s: can't alloc RX list\n", sc->sc_dev.dv_xname);
1027 		return (error);
1028 	}
1029 
1030 	/* Load the map for the RX ring. */
1031 	error = bus_dmamem_map(sc->sc_dmat, &q->q_rx.rge_rx_listseg,
1032 	    q->q_rx.rge_rx_listnseg, RGE_RX_LIST_SZ,
1033 	    (caddr_t *)&q->q_rx.rge_rx_list, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1034 	if (error) {
1035 		printf("%s: can't map RX dma buffers\n", sc->sc_dev.dv_xname);
1036 		bus_dmamem_free(sc->sc_dmat, &q->q_rx.rge_rx_listseg,
1037 		    q->q_rx.rge_rx_listnseg);
1038 		return (error);
1039 	}
1040 	error = bus_dmamap_load(sc->sc_dmat, q->q_rx.rge_rx_list_map,
1041 	    q->q_rx.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
1042 	if (error) {
1043 		printf("%s: can't load RX dma map\n", sc->sc_dev.dv_xname);
1044 		bus_dmamap_destroy(sc->sc_dmat, q->q_rx.rge_rx_list_map);
1045 		bus_dmamem_unmap(sc->sc_dmat,
1046 		    (caddr_t)q->q_rx.rge_rx_list, RGE_RX_LIST_SZ);
1047 		bus_dmamem_free(sc->sc_dmat, &q->q_rx.rge_rx_listseg,
1048 		    q->q_rx.rge_rx_listnseg);
1049 		return (error);
1050 	}
1051 
1052 	/* Create DMA maps for RX buffers. */
1053 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
1054 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
1055 		    RGE_JUMBO_FRAMELEN, 0, BUS_DMA_NOWAIT,
1056 		    &q->q_rx.rge_rxq[i].rxq_dmamap);
1057 		if (error) {
1058 			printf("%s: can't create DMA map for RX\n",
1059 			    sc->sc_dev.dv_xname);
1060 			return (error);
1061 		}
1062 	}
1063 
1064 	return (error);
1065 }
1066 
1067 /*
1068  * Initialize the RX descriptor and attach an mbuf cluster.
1069  */
1070 int
1071 rge_newbuf(struct rge_queues *q)
1072 {
1073 	struct rge_softc *sc = q->q_sc;
1074 	struct mbuf *m;
1075 	struct rge_rx_desc *r;
1076 	struct rge_rxq *rxq;
1077 	bus_dmamap_t rxmap;
1078 	int idx;
1079 
1080 	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
1081 	if (m == NULL)
1082 		return (ENOBUFS);
1083 
1084 	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;
1085 
1086 	idx = q->q_rx.rge_rxq_prodidx;
1087 	rxq = &q->q_rx.rge_rxq[idx];
1088 	rxmap = rxq->rxq_dmamap;
1089 
1090 	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT)) {
1091 		m_freem(m);
1092 		return (ENOBUFS);
1093 	}
1094 
1095 	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
1096 	    BUS_DMASYNC_PREREAD);
1097 
1098 	/* Map the segments into RX descriptors. */
1099 	r = &q->q_rx.rge_rx_list[idx];
1100 
1101 	if (RGE_OWN(r)) {
1102 		printf("%s: tried to map busy RX descriptor\n",
1103 		    sc->sc_dev.dv_xname);
1104 		m_freem(m);
1105 		return (ENOBUFS);
1106 	}
1107 
1108 	rxq->rxq_mbuf = m;
1109 
1110 	r->rge_extsts = 0;
1111 	r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
1112 	r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));
1113 
1114 	r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
1115 	if (idx == RGE_RX_LIST_CNT - 1)
1116 		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
1117 
1118 	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
1119 
1120 	bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
1121 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1122 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1123 
1124 	q->q_rx.rge_rxq_prodidx = RGE_NEXT_RX_DESC(idx);
1125 
1126 	return (0);
1127 }
1128 
1129 void
1130 rge_discard_rxbuf(struct rge_queues *q, int idx)
1131 {
1132 	struct rge_softc *sc = q->q_sc;
1133 	struct rge_rx_desc *r;
1134 
1135 	r = &q->q_rx.rge_rx_list[idx];
1136 
1137 	r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
1138 	r->rge_extsts = 0;
1139 	if (idx == RGE_RX_LIST_CNT - 1)
1140 		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
1141 	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
1142 
1143 	bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
1144 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1145 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1146 }
1147 
1148 void
1149 rge_rx_list_init(struct rge_queues *q)
1150 {
1151 	memset(q->q_rx.rge_rx_list, 0, RGE_RX_LIST_SZ);
1152 
1153 	q->q_rx.rge_rxq_prodidx = q->q_rx.rge_rxq_considx = 0;
1154 	q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
1155 
1156 	if_rxr_init(&q->q_rx.rge_rx_ring, 2, RGE_RX_LIST_CNT - 1);
1157 	rge_fill_rx_ring(q);
1158 }
1159 
1160 void
1161 rge_fill_rx_ring(struct rge_queues *q)
1162 {
1163 	struct if_rxring *rxr = &q->q_rx.rge_rx_ring;
1164 	int slots;
1165 
1166 	for (slots = if_rxr_get(rxr, RGE_RX_LIST_CNT); slots > 0; slots--) {
1167 		if (rge_newbuf(q) == ENOBUFS)
1168 			break;
1169 	}
1170 	if_rxr_put(rxr, slots);
1171 }
1172 
1173 void
1174 rge_tx_list_init(struct rge_queues *q)
1175 {
1176 	struct rge_softc *sc = q->q_sc;
1177 	int i;
1178 
1179 	memset(q->q_tx.rge_tx_list, 0, RGE_TX_LIST_SZ);
1180 
1181 	for (i = 0; i < RGE_TX_LIST_CNT; i++)
1182 		q->q_tx.rge_txq[i].txq_mbuf = NULL;
1183 
1184 	bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map, 0,
1185 	    q->q_tx.rge_tx_list_map->dm_mapsize,
1186 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1187 
1188 	q->q_tx.rge_txq_prodidx = q->q_tx.rge_txq_considx = 0;
1189 }
1190 
1191 int
1192 rge_rxeof(struct rge_queues *q)
1193 {
1194 	struct rge_softc *sc = q->q_sc;
1195 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1196 	struct mbuf *m;
1197 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1198 	struct if_rxring *rxr = &q->q_rx.rge_rx_ring;
1199 	struct rge_rx_desc *cur_rx;
1200 	struct rge_rxq *rxq;
1201 	uint32_t rxstat, extsts;
1202 	int i, total_len, rx = 0;
1203 
1204 	for (i = q->q_rx.rge_rxq_considx; if_rxr_inuse(rxr) > 0;
1205 	    i = RGE_NEXT_RX_DESC(i)) {
1206 		/* Invalidate the descriptor memory. */
1207 		bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
1208 		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1209 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1210 
1211 		cur_rx = &q->q_rx.rge_rx_list[i];
1212 
1213 		if (RGE_OWN(cur_rx))
1214 			break;
1215 
1216 		rxstat = letoh32(cur_rx->rge_cmdsts);
1217 		extsts = letoh32(cur_rx->rge_extsts);
1218 
1219 		total_len = RGE_RXBYTES(cur_rx);
1220 		rxq = &q->q_rx.rge_rxq[i];
1221 		m = rxq->rxq_mbuf;
1222 		rxq->rxq_mbuf = NULL;
1223 		if_rxr_put(rxr, 1);
1224 		rx = 1;
1225 
1226 		/* Invalidate the RX mbuf and unload its map. */
1227 		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
1228 		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1229 		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);
1230 
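		/*
		 * A cluster is large enough to hold a complete frame,
		 * so a descriptor without both SOF and EOF set is
		 * discarded as a fragment.
		 */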
1231 		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
1232 		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
1233 			rge_discard_rxbuf(q, i);
1234 			continue;
1235 		}
1236 
1237 		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
1238 			ifp->if_ierrors++;
1239 			/*
1240 			 * If this is part of a multi-fragment packet,
1241 			 * discard all the pieces.
1242 			 */
			if (q->q_rx.rge_head != NULL) {
1244 				m_freem(q->q_rx.rge_head);
1245 				q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
1246 			}
1247 			rge_discard_rxbuf(q, i);
1248 			continue;
1249 		}
1250 
1251 		if (q->q_rx.rge_head != NULL) {
1252 			m->m_len = total_len;
1253 			/*
			 * Special case: if there are 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
1257 			 * care about anyway.
1258 			 */
1259 			if (m->m_len <= ETHER_CRC_LEN) {
1260 				q->q_rx.rge_tail->m_len -=
1261 				    (ETHER_CRC_LEN - m->m_len);
1262 				m_freem(m);
1263 			} else {
1264 				m->m_len -= ETHER_CRC_LEN;
1265 				m->m_flags &= ~M_PKTHDR;
1266 				q->q_rx.rge_tail->m_next = m;
1267 			}
1268 			m = q->q_rx.rge_head;
1269 			q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
1270 			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1271 		} else
1272 			m->m_pkthdr.len = m->m_len =
1273 			    (total_len - ETHER_CRC_LEN);
1274 
1275 		/* Check IP header checksum. */
1276 		if (!(rxstat & RGE_RDCMDSTS_IPCSUMERR) &&
1277 		    (extsts & RGE_RDEXTSTS_IPV4))
1278 			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
1279 
1280 		/* Check TCP/UDP checksum. */
1281 		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
1282 		    (((rxstat & RGE_RDCMDSTS_TCPPKT) &&
1283 		    !(rxstat & RGE_RDCMDSTS_TCPCSUMERR)) ||
1284 		    ((rxstat & RGE_RDCMDSTS_UDPPKT) &&
1285 		    !(rxstat & RGE_RDCMDSTS_UDPCSUMERR))))
1286 			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
1287 			    M_UDP_CSUM_IN_OK;
1288 
1289 #if NVLAN > 0
1290 		if (extsts & RGE_RDEXTSTS_VTAG) {
1291 			m->m_pkthdr.ether_vtag =
1292 			    ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
1293 			m->m_flags |= M_VLANTAG;
1294 		}
1295 #endif
1296 
1297 		ml_enqueue(&ml, m);
1298 	}
1299 
1300 	if (ifiq_input(&ifp->if_rcv, &ml))
1301 		if_rxr_livelocked(rxr);
1302 
1303 	q->q_rx.rge_rxq_considx = i;
1304 	rge_fill_rx_ring(q);
1305 
1306 	return (rx);
1307 }
1308 
1309 int
1310 rge_txeof(struct rge_queues *q)
1311 {
1312 	struct rge_softc *sc = q->q_sc;
1313 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1314 	struct rge_txq *txq;
1315 	uint32_t txstat;
1316 	int cons, idx, prod;
1317 	int free = 0;
1318 
1319 	prod = q->q_tx.rge_txq_prodidx;
1320 	cons = q->q_tx.rge_txq_considx;
1321 
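	/*
	 * free encodes the outcome of the scan: 0 if nothing was
	 * reclaimed, 1 if at least one descriptor completed, 2 if the
	 * scan stopped at a descriptor the chip still owns.
	 */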
1322 	while (prod != cons) {
1323 		txq = &q->q_tx.rge_txq[cons];
1324 		idx = txq->txq_descidx;
1325 
1326 		bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
1327 		    idx * sizeof(struct rge_tx_desc),
1328 		    sizeof(struct rge_tx_desc),
1329 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1330 
1331 		txstat = letoh32(q->q_tx.rge_tx_list[idx].rge_cmdsts);
1332 
1333 		if (txstat & RGE_TDCMDSTS_OWN) {
1334 			free = 2;
1335 			break;
1336 		}
1337 
1338 		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
1339 		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1340 		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
1341 		m_freem(txq->txq_mbuf);
1342 		txq->txq_mbuf = NULL;
1343 
1344 		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
1345 			ifp->if_collisions++;
1346 		if (txstat & RGE_TDCMDSTS_TXERR)
1347 			ifp->if_oerrors++;
1348 
1349 		bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
1350 		    idx * sizeof(struct rge_tx_desc),
1351 		    sizeof(struct rge_tx_desc),
1352 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1353 
1354 		cons = RGE_NEXT_TX_DESC(idx);
1355 		free = 1;
1356 	}
1357 
1358 	if (free == 0)
1359 		return (0);
1360 
1361 	q->q_tx.rge_txq_considx = cons;
1362 
1363 	if (ifq_is_oactive(&ifp->if_snd))
1364 		ifq_restart(&ifp->if_snd);
1365 	else if (free == 2)
1366 		ifq_serialize(&ifp->if_snd, &sc->sc_task);
1367 	else
1368 		ifp->if_timer = 0;
1369 
1370 	return (1);
1371 }
1372 
1373 void
1374 rge_reset(struct rge_softc *sc)
1375 {
1376 	int i;
1377 
1378 	/* Enable RXDV gate. */
1379 	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
1380 	DELAY(2000);
1381 
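	/* Wait for the RX and TX FIFOs to drain. */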
1382 	for (i = 0; i < 3000; i++) {
1383 		DELAY(50);
1384 		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
1385 		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
1386 		    RGE_MCUCMD_TXFIFO_EMPTY))
1387 			break;
1388 	}
1389 	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
1390 		for (i = 0; i < 3000; i++) {
1391 			DELAY(50);
1392 			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
1393 				break;
1394 		}
1395 	}
1396 
1397 	DELAY(2000);
1398 
1399 	/* Soft reset. */
1400 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);
1401 
1402 	for (i = 0; i < RGE_TIMEOUT; i++) {
1403 		DELAY(100);
1404 		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
1405 			break;
1406 	}
1407 	if (i == RGE_TIMEOUT)
1408 		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
1409 }
1410 
1411 void
1412 rge_iff(struct rge_softc *sc)
1413 {
1414 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1415 	struct arpcom *ac = &sc->sc_arpcom;
1416 	struct ether_multi *enm;
1417 	struct ether_multistep step;
1418 	uint32_t hashes[2];
1419 	uint32_t rxfilt;
1420 	int h = 0;
1421 
1422 	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
1423 	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
1424 	ifp->if_flags &= ~IFF_ALLMULTI;
1425 
1426 	/*
1427 	 * Always accept frames destined to our station address.
1428 	 * Always accept broadcast frames.
1429 	 */
1430 	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;
1431 
1432 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1433 		ifp->if_flags |= IFF_ALLMULTI;
1434 		rxfilt |= RGE_RXCFG_MULTI;
1435 		if (ifp->if_flags & IFF_PROMISC)
1436 			rxfilt |= RGE_RXCFG_ALLPHYS;
1437 		hashes[0] = hashes[1] = 0xffffffff;
1438 	} else {
1439 		rxfilt |= RGE_RXCFG_MULTI;
1440 		/* Program new filter. */
1441 		memset(hashes, 0, sizeof(hashes));
1442 
1443 		ETHER_FIRST_MULTI(step, ac, enm);
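		/*
		 * Each multicast address selects one bit of the 64-bit
		 * hash filter via the top six bits of its big-endian
		 * CRC.
		 */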
1444 		while (enm != NULL) {
1445 			h = ether_crc32_be(enm->enm_addrlo,
1446 			    ETHER_ADDR_LEN) >> 26;
1447 
1448 			if (h < 32)
1449 				hashes[0] |= (1 << h);
1450 			else
1451 				hashes[1] |= (1 << (h - 32));
1452 
1453 			ETHER_NEXT_MULTI(step, enm);
1454 		}
1455 	}
1456 
1457 	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
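	/*
	 * The hash words are written in reverse order and byte-swapped,
	 * which is how this chip's multicast filter expects them.
	 */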
1458 	RGE_WRITE_4(sc, RGE_MAR0, swap32(hashes[1]));
1459 	RGE_WRITE_4(sc, RGE_MAR4, swap32(hashes[0]));
1460 }
1461 
1462 void
1463 rge_set_phy_power(struct rge_softc *sc, int on)
1464 {
1465 	int i;
1466 
1467 	if (on) {
1468 		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);
1469 
1470 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);
1471 
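		/* Wait for the PHY to report ready (OCP 0xa420 bits 2:0 == 3). */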
1472 		for (i = 0; i < RGE_TIMEOUT; i++) {
1473 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
1474 				break;
1475 			DELAY(1000);
1476 		}
1477 	} else {
1478 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
1479 		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
1480 		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
1481 	}
1482 }
1483 
1484 void
1485 rge_phy_config(struct rge_softc *sc)
1486 {
1487 	/* Read microcode version. */
1488 	rge_write_phy_ocp(sc, 0xa436, 0x801e);
1489 	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);
1490 
1491 	switch (sc->rge_type) {
1492 	case MAC_CFG2:
1493 		rge_phy_config_mac_cfg2(sc);
1494 		break;
1495 	case MAC_CFG3:
1496 		rge_phy_config_mac_cfg3(sc);
1497 		break;
1498 	case MAC_CFG4:
1499 		rge_phy_config_mac_cfg4(sc);
1500 		break;
1501 	case MAC_CFG5:
1502 		rge_phy_config_mac_cfg5(sc);
1503 		break;
1504 	default:
1505 		break;	/* Can't happen. */
1506 	}
1507 
1508 	rge_write_phy(sc, 0x0a5b, 0x12,
1509 	    rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);
1510 
1511 	/* Disable EEE. */
1512 	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
1513 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
1514 		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
1515 		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
1516 	}
1517 	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
1518 	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
1519 	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
1520 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
1521 	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);
1522 
1523 	rge_patch_phy_mcu(sc, 1);
1524 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
1525 	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
1526 	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
1527 	rge_patch_phy_mcu(sc, 0);
1528 }
1529 
1530 void
1531 rge_phy_config_mac_cfg2(struct rge_softc *sc)
1532 {
1533 	uint16_t val;
1534 	int i;
1535 
1536 	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
1537 		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
1538 		    rtl8125_mac_cfg2_ephy[i].val);
1539 
1540 	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);
1541 
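	/*
	 * The PHY register tweaks below are undocumented magic,
	 * presumably lifted from Realtek's vendor driver.
	 */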
1542 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
1543 	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
1544 	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
1545 	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
1546 	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
1547 	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
1548 	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
1549 	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
1550 	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
1551 	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
1552 	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
1553 	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
1554 	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);
1555 
1556 	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
1557 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1558 	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
1559 	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
1560 	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
1561 	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
1562 	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
1563 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1564 	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
1565 	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
1566 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1567 	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
1568 	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
1569 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1570 	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
1571 	rge_write_phy_ocp(sc, 0xa436, 0x8102);
1572 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1573 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1574 	rge_write_phy_ocp(sc, 0xa436, 0x8105);
1575 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1576 	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
1577 	rge_write_phy_ocp(sc, 0xa436, 0x8100);
1578 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1579 	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
1580 	rge_write_phy_ocp(sc, 0xa436, 0x8104);
1581 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1582 	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
1583 	rge_write_phy_ocp(sc, 0xa436, 0x8106);
1584 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1585 	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
1586 	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
1587 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1588 	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
1589 	rge_write_phy_ocp(sc, 0xa436, 0x80df);
1590 	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
1591 	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
1592 	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
1593 	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
1594 	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
1595 	rge_write_phy_ocp(sc, 0xa436, 0x819f);
1596 	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
1597 	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
1598 	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
1599 	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
1600 	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
1601 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1602 }
1603 
1604 void
1605 rge_phy_config_mac_cfg3(struct rge_softc *sc)
1606 {
1607 	uint16_t val;
1608 	int i;
1609 	static const uint16_t mac_cfg3_a438_value[] =
1610 	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
1611 	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };
1612 
1613 	static const uint16_t mac_cfg3_b88e_value[] =
1614 	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
1615 	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
1616 	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
1617 	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
1618 	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
1619 	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };
1620 
1621 	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
1622 		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
1623 		    rtl8125_mac_cfg3_ephy[i].val);
1624 
1625 	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
1626 	rge_write_ephy(sc, 0x002a, val | 0x3000);
1627 	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
1628 	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
1629 	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
1630 	rge_write_ephy(sc, 0x0002, 0x6042);
1631 	rge_write_ephy(sc, 0x0006, 0x0014);
1632 	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
1633 	rge_write_ephy(sc, 0x006a, val | 0x3000);
1634 	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
1635 	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
1636 	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
1637 	rge_write_ephy(sc, 0x0042, 0x6042);
1638 	rge_write_ephy(sc, 0x0046, 0x0014);
1639 
1640 	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);
1641 
1642 	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
1643 	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
1644 	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
1645 	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
1646 	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
1647 	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
1648 	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
1649 	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
1650 	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
1651 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
1652 	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
1653 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
1654 	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
1655 	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
1656 	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
1657 	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
1658 	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
1659 	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
1660 	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
1661 	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
1662 	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
1663 	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
1664 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1665 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
1666 	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
1667 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1668 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
1669 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, RGE_JUMBO_MTU + ETHER_HDR_LEN +
1670 	    32);
1671 	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
1672 	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1673 	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
1674 	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1675 
1676 	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
1677 	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
1678 		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
1679 	for (i = 0; i < 26; i++)
1680 		rge_write_phy_ocp(sc, 0xa438, 0);
1681 	rge_write_phy_ocp(sc, 0xa436, 0x8257);
1682 	rge_write_phy_ocp(sc, 0xa438, 0x020f);
1683 	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
1684 	rge_write_phy_ocp(sc, 0xa438, 0x7843);
1685 
1686 	rge_patch_phy_mcu(sc, 1);
1687 	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1688 	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1689 	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
1690 		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
1691 		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
1692 	}
1693 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1694 	rge_patch_phy_mcu(sc, 0);
1695 
1696 	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
1697 	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
1698 	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
1699 	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
1700 	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
1701 	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
1702 	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
1703 	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
1704 	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
1705 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1706 }
1707 
1708 void
1709 rge_phy_config_mac_cfg4(struct rge_softc *sc)
1710 {
1711 	uint16_t val;
1712 	int i;
1713 	static const uint16_t mac_cfg4_b87c_value[] =
1714 	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
1715 	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
1716 	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
1717 	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
1718 	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
1719 	      0x80b0, 0x0f31 };
1720 
1721 	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
1722 		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
1723 		    rtl8125_mac_cfg4_ephy[i].val);
1724 
1725 	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
1726 	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
1727 	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
1728 	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
1729 	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
1730 	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
1731 	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
1732 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1733 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);
1734 
1735 	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);
1736 
1737 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1738 	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
1739 	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
1740 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1741 	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
1742 	for (i = 0; i < 6; i++) {
1743 		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
1744 		if (i < 3)
1745 			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
1746 		else
1747 			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
1748 	}
1749 	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
1750 	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
1751 	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
1752 	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
1753 	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
1754 	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
1755 	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
1756 	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f2);
1758 	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
1759 	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
1760 	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
1761 	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
1762 	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
1763 	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
1764 	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
1765 	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
	rge_write_phy_ocp(sc, 0xac4e, 0x0db4);
1767 	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
1768 	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
1769 	rge_write_phy_ocp(sc, 0xad08, 0x0007);
1770 	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
1771 		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
1772 		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
1773 	}
1774 	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
1775 	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
1776 	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
1777 	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
1778 	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
1779 	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
1780 	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
1781 	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
1782 	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
1783 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, RGE_JUMBO_MTU + ETHER_HDR_LEN +
1784 	    32);
1785 	rge_write_phy_ocp(sc, 0xa436, 0x816c);
1786 	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
1787 	rge_write_phy_ocp(sc, 0xa436, 0x8170);
1788 	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
1789 	rge_write_phy_ocp(sc, 0xa436, 0x8174);
1790 	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
1791 	rge_write_phy_ocp(sc, 0xa436, 0x8178);
1792 	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
1793 	rge_write_phy_ocp(sc, 0xa436, 0x817c);
1794 	rge_write_phy_ocp(sc, 0xa438, 0x0719);
1795 	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
1796 	rge_write_phy_ocp(sc, 0xa438, 0x0400);
1797 	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
1798 	rge_write_phy_ocp(sc, 0xa438, 0x0404);
1799 	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
1800 	for (i = 0; i < 6; i++) {
1801 		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
1802 		if (i == 2)
1803 			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
1804 		else
1805 			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
1806 	}
1807 	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
1808 	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
1809 	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
1810 	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
1811 	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
1812 	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
1813 	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
1814 	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
1815 	rge_write_phy_ocp(sc, 0xa436, 0x8217);
1816 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1817 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1818 	rge_write_phy_ocp(sc, 0xa436, 0x821a);
1819 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1820 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1821 	rge_write_phy_ocp(sc, 0xa436, 0x80da);
1822 	rge_write_phy_ocp(sc, 0xa438, 0x0403);
1823 	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
1824 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1825 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
1826 	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
1827 	rge_write_phy_ocp(sc, 0xa438, 0x0384);
1828 	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
1829 	rge_write_phy_ocp(sc, 0xa438, 0x2007);
1830 	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
1831 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1832 	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
1833 	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
1834 	rge_write_phy_ocp(sc, 0xa438, 0xf009);
1835 	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
1836 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1837 	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
1838 	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
1839 	rge_write_phy_ocp(sc, 0xa438, 0xf083);
1840 	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
1841 	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
1842 	rge_write_phy_ocp(sc, 0xa436, 0x80df);
1843 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1844 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
1845 	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
1846 	rge_write_phy_ocp(sc, 0xa438, 0x2007);
1847 	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
1848 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1849 	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
1850 	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
1851 	rge_write_phy_ocp(sc, 0xa438, 0x8009);
1852 	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
1853 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1854 	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
1855 	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
1856 	rge_write_phy_ocp(sc, 0xa438, 0x200a);
1857 	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
1858 	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
1859 	rge_write_phy_ocp(sc, 0xa436, 0x809f);
1860 	rge_write_phy_ocp(sc, 0xa438, 0x6073);
1861 	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
1862 	rge_write_phy_ocp(sc, 0xa438, 0x000b);
1863 	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
1864 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1865 	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
1866 	rge_patch_phy_mcu(sc, 1);
1867 	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1868 	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1869 	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
1870 	rge_write_phy_ocp(sc, 0xb890, 0x0000);
1871 	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
1872 	rge_write_phy_ocp(sc, 0xb890, 0x0103);
1873 	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
1874 	rge_write_phy_ocp(sc, 0xb890, 0x0507);
1875 	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
1876 	rge_write_phy_ocp(sc, 0xb890, 0x090b);
1877 	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
1878 	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
1879 	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
1880 	rge_write_phy_ocp(sc, 0xb890, 0x1012);
1881 	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
1882 	rge_write_phy_ocp(sc, 0xb890, 0x1416);
1883 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1884 	rge_patch_phy_mcu(sc, 0);
1885 	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
1886 	RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
1887 	rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
1888 	rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
1889 	rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
1890 	rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
1891 	rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
1892 	RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
1893 	rge_write_phy_ocp(sc, 0xa436, 0x817d);
1894 	RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
1895 }
1896 
1897 void
1898 rge_phy_config_mac_cfg5(struct rge_softc *sc)
1899 {
1900 	uint16_t val;
1901 	int i;
1902 
1903 	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
1904 		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
1905 		    rtl8125_mac_cfg5_ephy[i].val);
1906 
1907 	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);
1908 
1909 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1910 	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
1911 	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
1912 	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
1913 	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
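	/* As for MAC_CFG4: size the EEE TX idle timer for a full frame. */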
1914 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, RGE_JUMBO_MTU + ETHER_HDR_LEN +
1915 	    32);
1916 	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
1917 	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
1918 	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
1919 	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
1920 	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
1921 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1922 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
1923 	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
1924 	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
1925 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1926 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
1927 	for (i = 0; i < 10; i++) {
1928 		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
1929 		rge_write_phy_ocp(sc, 0xa438, 0x2417);
1930 	}
1931 	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
1932 	val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
1933 	rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
1934 	rge_write_phy_ocp(sc, 0xa436, 0x8170);
1935 	val = rge_read_phy_ocp(sc, 0xa438) & ~0x2700;
1936 	rge_write_phy_ocp(sc, 0xa438, val | 0xd800);
1937 }
1938 
1939 void
1940 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
1941 {
1942 	if (sc->rge_mcodever != mcode_version) {
1943 		int i;
1944 
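		/* Hold the PHY MCU in patch mode while loading microcode. */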
1945 		rge_patch_phy_mcu(sc, 1);
1946 
1947 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
1948 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
1949 			if (sc->rge_type == MAC_CFG2)
1950 				rge_write_phy_ocp(sc, 0xa438, 0x8600);
1951 			else
1952 				rge_write_phy_ocp(sc, 0xa438, 0x8601);
1953 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
1954 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
1955 
1956 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
1957 		}
1958 
1959 		if (sc->rge_type == MAC_CFG2) {
1960 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
1961 				rge_write_phy_ocp(sc,
1962 				    rtl8125_mac_cfg2_mcu[i].reg,
1963 				    rtl8125_mac_cfg2_mcu[i].val);
1964 			}
1965 		} else if (sc->rge_type == MAC_CFG3) {
1966 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
1967 				rge_write_phy_ocp(sc,
1968 				    rtl8125_mac_cfg3_mcu[i].reg,
1969 				    rtl8125_mac_cfg3_mcu[i].val);
1970 			}
1971 		} else if (sc->rge_type == MAC_CFG4) {
1972 			for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
1973 				rge_write_phy_ocp(sc,
1974 				    rtl8125_mac_cfg4_mcu[i].reg,
1975 				    rtl8125_mac_cfg4_mcu[i].val);
1976 			}
1977 		} else if (sc->rge_type == MAC_CFG5) {
1978 			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
1979 				rge_write_phy_ocp(sc,
1980 				    rtl8125_mac_cfg5_mcu[i].reg,
1981 				    rtl8125_mac_cfg5_mcu[i].val);
1982 			}
1983 		}
1984 
1985 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
1986 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
1987 
1988 			rge_write_phy_ocp(sc, 0xa436, 0);
1989 			rge_write_phy_ocp(sc, 0xa438, 0);
1990 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
1991 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
1992 			rge_write_phy_ocp(sc, 0xa438, 0);
1993 		}
1994 
1995 		rge_patch_phy_mcu(sc, 0);
1996 
1997 		/* Write microcode version. */
1998 		rge_write_phy_ocp(sc, 0xa436, 0x801e);
1999 		rge_write_phy_ocp(sc, 0xa438, mcode_version);
2000 	}
2001 }
2002 
2003 void
2004 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
2005 {
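	/* The MAC address registers are writable only in config-write mode. */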
2006 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2007 	RGE_WRITE_4(sc, RGE_MAC0,
2008 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2009 	RGE_WRITE_4(sc, RGE_MAC4,
2010 	    addr[5] <<  8 | addr[4]);
2011 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2012 }
2013 
2014 void
2015 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
2016 {
2017 	*(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
2018 	*(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
2019 }
2020 
2021 void
2022 rge_hw_init(struct rge_softc *sc)
2023 {
2024 	int i;
2025 
2026 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2027 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
2028 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
2029 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2030 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
2031 
2032 	/* Disable UPS. */
2033 	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
2034 
2035 	/* Configure MAC MCU. */
2036 	rge_write_mac_ocp(sc, 0xfc38, 0);
2037 
2038 	for (i = 0xfc28; i < 0xfc38; i += 2)
2039 		rge_write_mac_ocp(sc, i, 0);
2040 
2041 	DELAY(3000);
2042 	rge_write_mac_ocp(sc, 0xfc26, 0);
2043 
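	/* Install the per-chip MAC MCU patches (apparently MCU break points). */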
2044 	if (sc->rge_type == MAC_CFG3) {
2045 		for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
2046 			rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
2047 			    rtl8125_mac_bps[i].val);
2048 		}
2049 	} else if (sc->rge_type == MAC_CFG5) {
2050 		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
2051 			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
2052 			    rtl8125b_mac_bps[i].val);
2053 		}
2054 	}
2055 
2056 	/* Disable PHY power saving. */
2057 	rge_disable_phy_ocp_pwrsave(sc);
2058 
2059 	/* Set PCIe uncorrectable error status. */
2060 	rge_write_csi(sc, 0x108,
2061 	    rge_read_csi(sc, 0x108) | 0x00100000);
2063 }
2064 
2065 void
2066 rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
2067 {
2068 	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
2069 		rge_patch_phy_mcu(sc, 1);
2070 		rge_write_phy_ocp(sc, 0xc416, 0);
2071 		rge_write_phy_ocp(sc, 0xc416, 0x0500);
2072 		rge_patch_phy_mcu(sc, 0);
2073 	}
2074 }
2075 
2076 void
2077 rge_patch_phy_mcu(struct rge_softc *sc, int set)
2078 {
2079 	int i;
2080 
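	/*
	 * Raise or drop the patch request and wait for the PHY MCU to
	 * acknowledge it in bit 6 of OCP register 0xb800.
	 */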
2081 	if (set)
2082 		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
2083 	else
2084 		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
2085 
2086 	for (i = 0; i < 1000; i++) {
2087 		if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) ==
		    (set ? 0x0040 : 0x0000))
2088 			break;
2089 		DELAY(100);
2090 	}
2091 	if (i == 1000) {
2092 		DPRINTF(("timeout waiting to patch phy mcu\n"));
2093 		return;
2094 	}
2095 }
2096 
2097 void
2098 rge_add_media_types(struct rge_softc *sc)
2099 {
2100 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
2101 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
2102 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
2103 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
2104 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
2105 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2106 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
2107 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
2108 }
2109 
2110 void
2111 rge_config_imtype(struct rge_softc *sc, int imtype)
2112 {
2113 	switch (imtype) {
2114 	case RGE_IMTYPE_NONE:
2115 		sc->rge_intrs = RGE_INTRS;
2116 		break;
2117 	case RGE_IMTYPE_SIM:
2118 		sc->rge_intrs = RGE_INTRS_TIMER;
2119 		break;
2120 	default:
2121 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
2122 	}
2123 }
2124 
2125 void
2126 rge_disable_hw_im(struct rge_softc *sc)
2127 {
2128 	RGE_WRITE_2(sc, RGE_IM, 0);
2129 }
2130 
2131 void
2132 rge_disable_sim_im(struct rge_softc *sc)
2133 {
2134 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
2135 	sc->rge_timerintr = 0;
2136 }
2137 
2138 void
2139 rge_setup_sim_im(struct rge_softc *sc)
2140 {
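	/*
	 * Arm the timer interrupt used for simulated moderation; the
	 * 0x2600 interval appears to come from the vendor driver.
	 */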
2141 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
2142 	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
2143 	sc->rge_timerintr = 1;
2144 }
2145 
2146 void
2147 rge_setup_intr(struct rge_softc *sc, int imtype)
2148 {
2149 	rge_config_imtype(sc, imtype);
2150 
2151 	/* Enable interrupts. */
2152 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
2153 
2154 	switch (imtype) {
2155 	case RGE_IMTYPE_NONE:
2156 		rge_disable_sim_im(sc);
2157 		rge_disable_hw_im(sc);
2158 		break;
2159 	case RGE_IMTYPE_SIM:
2160 		rge_disable_hw_im(sc);
2161 		rge_setup_sim_im(sc);
2162 		break;
2163 	default:
2164 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
2165 	}
2166 }
2167 
2168 void
2169 rge_exit_oob(struct rge_softc *sc)
2170 {
2171 	int i;
2172 
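	/* Take the NIC out of out-of-band (firmware-managed) mode. */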
2173 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
2174 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
2175 	    RGE_RXCFG_ERRPKT);
2176 
2177 	/* Disable RealWoW. */
2178 	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
2179 
2180 	rge_reset(sc);
2181 
2182 	/* Disable OOB. */
2183 	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
2184 
2185 	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
2186 
2187 	for (i = 0; i < 10; i++) {
2188 		DELAY(100);
2189 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2190 			break;
2191 	}
2192 
2193 	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
2194 	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
2195 	rge_write_mac_ocp(sc, 0xc01e, 0x5555);
2196 
2197 	for (i = 0; i < 10; i++) {
2198 		DELAY(100);
2199 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2200 			break;
2201 	}
2202 
2203 	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
2204 		printf("%s: resuming from UPS power saving\n",
2205 		    sc->sc_dev.dv_xname);
2206 		for (i = 0; i < RGE_TIMEOUT; i++) {
2207 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
2208 				break;
2209 			DELAY(1000);
2210 		}
2211 		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
2212 		if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
2213 			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
2214 		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
2215 	}
2216 }
2217 
2218 void
2219 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
2220 {
2221 	int i;
2222 
2223 	RGE_WRITE_4(sc, RGE_CSIDR, val);
2224 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2225 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
2226 
2227 	for (i = 0; i < 10; i++) {
2228 		DELAY(100);
2229 		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
2230 			break;
2231 	}
2232 
2233 	DELAY(20);
2234 }
2235 
2236 uint32_t
2237 rge_read_csi(struct rge_softc *sc, uint32_t reg)
2238 {
2239 	int i;
2240 
2241 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2242 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
2243 
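	/* Unlike writes, a read is complete once the BUSY bit is set. */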
2244 	for (i = 0; i < 10; i++) {
2245 		DELAY(100);
2246 		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
2247 			break;
2248 	}
2249 
2250 	DELAY(20);
2251 
2252 	return (RGE_READ_4(sc, RGE_CSIDR));
2253 }
2254 
2255 void
2256 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2257 {
2258 	uint32_t tmp;
2259 
2260 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2261 	tmp |= val;
2262 	tmp |= RGE_MACOCP_BUSY;
2263 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
2264 }
2265 
2266 uint16_t
2267 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
2268 {
2269 	uint32_t val;
2270 
2271 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2272 	RGE_WRITE_4(sc, RGE_MACOCP, val);
2273 
2274 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
2275 }
2276 
2277 void
2278 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
2279 {
2280 	uint32_t tmp;
2281 	int i;
2282 
2283 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2284 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
2285 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
2286 
2287 	for (i = 0; i < 10; i++) {
2288 		DELAY(100);
2289 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
2290 			break;
2291 	}
2292 
2293 	DELAY(20);
2294 }
2295 
2296 uint16_t
2297 rge_read_ephy(struct rge_softc *sc, uint16_t reg)
2298 {
2299 	uint32_t val;
2300 	int i;
2301 
2302 	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2303 	RGE_WRITE_4(sc, RGE_EPHYAR, val);
2304 
2305 	for (i = 0; i < 10; i++) {
2306 		DELAY(100);
2307 		val = RGE_READ_4(sc, RGE_EPHYAR);
2308 		if (val & RGE_EPHYAR_BUSY)
2309 			break;
2310 	}
2311 
2312 	DELAY(20);
2313 
2314 	return (val & RGE_EPHYAR_DATA_MASK);
2315 }
2316 
2317 void
2318 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
2319 {
2320 	uint16_t off, phyaddr;
2321 
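	/*
	 * Map the MII (addr, reg) pair onto the PHY OCP address space;
	 * registers without an explicit address live at RGE_PHYBASE.
	 */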
2322 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2323 	phyaddr <<= 4;
2324 
2325 	off = addr ? reg : 0x10 + (reg % 8);
2326 
2327 	phyaddr += (off - 16) << 1;
2328 
2329 	rge_write_phy_ocp(sc, phyaddr, val);
2330 }
2331 
2332 uint16_t
2333 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
2334 {
2335 	uint16_t off, phyaddr;
2336 
2337 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2338 	phyaddr <<= 4;
2339 
2340 	off = addr ? reg : 0x10 + (reg % 8);
2341 
2342 	phyaddr += (off - 16) << 1;
2343 
2344 	return (rge_read_phy_ocp(sc, phyaddr));
2345 }
2346 
2347 void
2348 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2349 {
2350 	uint32_t tmp;
2351 	int i;
2352 
2353 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2354 	tmp |= RGE_PHYOCP_BUSY | val;
2355 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
2356 
2357 	for (i = 0; i < RGE_TIMEOUT; i++) {
2358 		DELAY(1);
2359 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
2360 			break;
2361 	}
2362 }
2363 
2364 uint16_t
2365 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
2366 {
2367 	uint32_t val;
2368 	int i;
2369 
2370 	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2371 	RGE_WRITE_4(sc, RGE_PHYOCP, val);
2372 
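	/* As with CSI and EPHY reads, BUSY set signals read completion. */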
2373 	for (i = 0; i < RGE_TIMEOUT; i++) {
2374 		DELAY(1);
2375 		val = RGE_READ_4(sc, RGE_PHYOCP);
2376 		if (val & RGE_PHYOCP_BUSY)
2377 			break;
2378 	}
2379 
2380 	return (val & RGE_PHYOCP_DATA_MASK);
2381 }
2382 
2383 int
2384 rge_get_link_status(struct rge_softc *sc)
2385 {
2386 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
2387 }
2388 
2389 void
2390 rge_txstart(void *arg)
2391 {
2392 	struct rge_softc *sc = arg;
2393 
2394 	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
2395 }
2396 
2397 void
2398 rge_tick(void *arg)
2399 {
2400 	struct rge_softc *sc = arg;
2401 	int s;
2402 
2403 	s = splnet();
2404 	rge_link_state(sc);
2405 	splx(s);
2406 
2407 	timeout_add_sec(&sc->sc_timeout, 1);
2408 }
2409 
2410 void
2411 rge_link_state(struct rge_softc *sc)
2412 {
2413 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2414 	int link = LINK_STATE_DOWN;
2415 
2416 	if (rge_get_link_status(sc))
2417 		link = LINK_STATE_UP;
2418 
2419 	if (ifp->if_link_state != link) {
2420 		ifp->if_link_state = link;
2421 		if_link_state_change(ifp);
2422 	}
2423 }
2424 
2425 #ifndef SMALL_KERNEL
2426 int
2427 rge_wol(struct ifnet *ifp, int enable)
2428 {
2429 	struct rge_softc *sc = ifp->if_softc;
2430 
2431 	if (enable) {
2432 		if (!(RGE_READ_1(sc, RGE_CFG1) & RGE_CFG1_PM_EN)) {
2433 			printf("%s: power management is disabled, "
2434 			    "cannot do WOL\n", sc->sc_dev.dv_xname);
2435 			return (ENOTSUP);
2436 		}
2438 	}
2439 
2440 	rge_iff(sc);
2441 
2442 	if (enable)
2443 		RGE_MAC_SETBIT(sc, 0xc0b6, 0x0001);
2444 	else
2445 		RGE_MAC_CLRBIT(sc, 0xc0b6, 0x0001);
2446 
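	/* Arm only LANWAKE; frame-match and link-change wakeups stay off. */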
2447 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2448 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_WOL_LANWAKE | RGE_CFG5_WOL_UCAST |
2449 	    RGE_CFG5_WOL_MCAST | RGE_CFG5_WOL_BCAST);
2450 	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_WOL_LINK | RGE_CFG3_WOL_MAGIC);
2451 	if (enable)
2452 		RGE_SETBIT_1(sc, RGE_CFG5, RGE_CFG5_WOL_LANWAKE);
2453 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2454 
2455 	return (0);
2456 }
2457 
2458 void
2459 rge_wol_power(struct rge_softc *sc)
2460 {
2461 	/* Disable RXDV gate. */
2462 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
2463 	DELAY(2000);
2464 
2465 	RGE_SETBIT_1(sc, RGE_CFG1, RGE_CFG1_PM_EN);
2466 	RGE_SETBIT_1(sc, RGE_CFG2, RGE_CFG2_PMSTS_EN);
2467 }
2468 #endif
2469