/*	$OpenBSD: if_rge.c,v 1.15 2021/08/16 01:30:27 kevlo Exp $	*/

/*
 * Copyright (c) 2019, 2020 Kevin Lo <kevlo@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/endian.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_rgereg.h>

#ifdef RGE_DEBUG
#define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
int rge_debug = 0;
#else
#define DPRINTF(x)
#endif
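/*
 * Note that DPRINTF takes a parenthesized argument list, e.g.
 * DPRINTF(("%s: debug %d\n", sc->sc_dev.dv_xname, val)); the extra
 * parentheses let the whole list pass through to printf.
 */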

int		rge_match(struct device *, void *, void *);
void		rge_attach(struct device *, struct device *, void *);
int		rge_activate(struct device *, int);
int		rge_intr(void *);
int		rge_encap(struct rge_queues *, struct mbuf *, int);
int		rge_ioctl(struct ifnet *, u_long, caddr_t);
void		rge_start(struct ifqueue *);
void		rge_watchdog(struct ifnet *);
int		rge_init(struct ifnet *);
void		rge_stop(struct ifnet *);
int		rge_ifmedia_upd(struct ifnet *);
void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int		rge_allocmem(struct rge_softc *);
int		rge_newbuf(struct rge_queues *);
void		rge_discard_rxbuf(struct rge_queues *, int);
void		rge_rx_list_init(struct rge_queues *);
void		rge_tx_list_init(struct rge_queues *);
void		rge_fill_rx_ring(struct rge_queues *);
int		rge_rxeof(struct rge_queues *);
int		rge_txeof(struct rge_queues *);
void		rge_reset(struct rge_softc *);
void		rge_iff(struct rge_softc *);
void		rge_set_phy_power(struct rge_softc *, int);
void		rge_phy_config(struct rge_softc *);
void		rge_phy_config_mac_cfg2(struct rge_softc *);
void		rge_phy_config_mac_cfg3(struct rge_softc *);
void		rge_phy_config_mac_cfg4(struct rge_softc *);
void		rge_phy_config_mac_cfg5(struct rge_softc *);
void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
void		rge_get_macaddr(struct rge_softc *, uint8_t *);
void		rge_hw_init(struct rge_softc *);
void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
void		rge_patch_phy_mcu(struct rge_softc *, int);
void		rge_add_media_types(struct rge_softc *);
void		rge_config_imtype(struct rge_softc *, int);
void		rge_disable_hw_im(struct rge_softc *);
void		rge_disable_sim_im(struct rge_softc *);
void		rge_setup_sim_im(struct rge_softc *);
void		rge_setup_intr(struct rge_softc *, int);
void		rge_exit_oob(struct rge_softc *);
void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
int		rge_get_link_status(struct rge_softc *);
void		rge_txstart(void *);
void		rge_tick(void *);
void		rge_link_state(struct rge_softc *);
#ifndef SMALL_KERNEL
int		rge_wol(struct ifnet *, int);
void		rge_wol_power(struct rge_softc *);
#endif

static const struct {
	uint16_t reg;
	uint16_t val;
} rtl8125_mac_cfg2_mcu[] = {
	RTL8125_MAC_CFG2_MCU
}, rtl8125_mac_cfg3_mcu[] = {
	RTL8125_MAC_CFG3_MCU
}, rtl8125_mac_cfg4_mcu[] = {
	RTL8125_MAC_CFG4_MCU
}, rtl8125_mac_cfg5_mcu[] = {
	RTL8125_MAC_CFG5_MCU
};
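/*
 * The rtl8125_mac_cfg*_mcu tables above expand, via macros from
 * if_rgereg.h, into register/value pairs that rge_phy_config_mcu()
 * writes out to patch the PHY microcode when the version found on
 * the chip does not match the expected one.
 */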

struct cfattach rge_ca = {
	sizeof(struct rge_softc), rge_match, rge_attach, NULL, rge_activate
};

struct cfdriver rge_cd = {
	NULL, "rge", DV_IFNET
};

const struct pci_matchid rge_devices[] = {
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8125 }
};

int
rge_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, rge_devices,
	    nitems(rge_devices)));
}

void
rge_attach(struct device *parent, struct device *self, void *aux)
{
	struct rge_softc *sc = (struct rge_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp;
	struct rge_queues *q;
	pcireg_t reg;
	uint32_t hwrev;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int offset;

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Map control/status registers.
	 */
	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
	    NULL, &sc->rge_bsize, 0)) {
		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
		    &sc->rge_bhandle, NULL, &sc->rge_bsize, 0)) {
			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
			    &sc->rge_bsize, 0)) {
				printf(": can't map mem or i/o space\n");
				return;
			}
		}
	}

	q = malloc(sizeof(struct rge_queues), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (q == NULL) {
		printf(": unable to allocate queue memory\n");
		return;
	}
	q->q_sc = sc;
	q->q_index = 0;

	sc->sc_queues = q;
	sc->sc_nqueues = 1;

	/*
	 * Allocate interrupt.
	 */
	if (pci_intr_map_msi(pa, &ih) == 0)
		sc->rge_flags |= RGE_FLAG_MSI;
	else if (pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE, rge_intr,
	    sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	/* Determine hardware revision */
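	/*
	 * The names below follow the Realtek vendor driver: MAC_CFG2
	 * and MAC_CFG3 appear to correspond to RTL8125A silicon
	 * revisions, MAC_CFG4 and MAC_CFG5 to RTL8125B revisions.
	 */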
	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
	switch (hwrev) {
	case 0x60800000:
		sc->rge_type = MAC_CFG2;
		break;
	case 0x60900000:
		sc->rge_type = MAC_CFG3;
		break;
	case 0x64000000:
		sc->rge_type = MAC_CFG4;
		break;
	case 0x64100000:
		sc->rge_type = MAC_CFG5;
		break;
	default:
		printf(": unknown version 0x%08x\n", hwrev);
		return;
	}

	rge_config_imtype(sc, RGE_IMTYPE_SIM);

	/*
	 * PCI Express check.
	 */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		/* Disable PCIe ASPM and ECPM. */
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    offset + PCI_PCIE_LCSR);
		reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1 |
		    PCI_PCIE_LCSR_ECPM);
		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCI_PCIE_LCSR,
		    reg);
	}

	rge_exit_oob(sc);
	rge_hw_init(sc);

	rge_get_macaddr(sc, eaddr);
	printf(", address %s\n", ether_sprintf(eaddr));

	memcpy(sc->sc_arpcom.ac_enaddr, eaddr, ETHER_ADDR_LEN);

	rge_set_phy_power(sc, 1);
	rge_phy_config(sc);

	if (rge_allocmem(sc))
		return;

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = rge_ioctl;
	ifp->if_qstart = rge_start;
	ifp->if_watchdog = rge_watchdog;
	ifq_set_maxlen(&ifp->if_snd, RGE_TX_LIST_CNT - 1);
	ifp->if_hardmtu = RGE_JUMBO_MTU;

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

#ifndef SMALL_KERNEL
	ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_wol = rge_wol;
	rge_wol(ifp, 0);
#endif
	timeout_set(&sc->sc_timeout, rge_tick, sc);
	task_set(&sc->sc_task, rge_txstart, sc);

	/* Initialize ifmedia structures. */
	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
	    rge_ifmedia_sts);
	rge_add_media_types(sc);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;

	if_attach(ifp);
	ether_ifattach(ifp);
}

int
rge_activate(struct device *self, int act)
{
#ifndef SMALL_KERNEL
	struct rge_softc *sc = (struct rge_softc *)self;
#endif
	int rv = 0;

	switch (act) {
	case DVACT_POWERDOWN:
		rv = config_activate_children(self, act);
#ifndef SMALL_KERNEL
		rge_wol_power(sc);
#endif
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}

int
rge_intr(void *arg)
{
	struct rge_softc *sc = arg;
	struct rge_queues *q = sc->sc_queues;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t status;
	int claimed = 0, rv;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
			return (0);
	}

	status = RGE_READ_4(sc, RGE_ISR);
	if (status)
		RGE_WRITE_4(sc, RGE_ISR, status);

	if (status & RGE_ISR_PCS_TIMEOUT)
		claimed = 1;

	rv = 0;
	if (status & sc->rge_intrs) {
		rv |= rge_rxeof(q);
		rv |= rge_txeof(q);

		if (status & RGE_ISR_SYSTEM_ERR) {
			KERNEL_LOCK();
			rge_init(ifp);
			KERNEL_UNLOCK();
		}
		claimed = 1;
	}

	if (sc->rge_timerintr) {
		if (!rv) {
			/*
			 * Nothing needs to be processed; fall back
			 * to using TX/RX interrupts.
			 */
			rge_setup_intr(sc, RGE_IMTYPE_NONE);

			/*
			 * Recollect, mainly to avoid the possible
			 * race introduced by changing interrupt
			 * masks.
			 */
			rge_rxeof(q);
			rge_txeof(q);
		} else
			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	} else if (rv) {
		/*
		 * Assume that using simulated interrupt moderation
		 * (hardware timer based) could reduce the interrupt
		 * rate.
		 */
		rge_setup_intr(sc, RGE_IMTYPE_SIM);
	}

	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	return (claimed);
}

int
rge_encap(struct rge_queues *q, struct mbuf *m, int idx)
{
	struct rge_softc *sc = q->q_sc;
	struct rge_tx_desc *d = NULL;
	struct rge_txq *txq;
	bus_dmamap_t txmap;
	uint32_t cmdsts, cflags = 0;
	int cur, error, i, last, nsegs;

	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM have no
	 * effect.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT | M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
	}

	txq = &q->q_tx.rge_txq[idx];
	txmap = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = txmap->dm_nsegs;

	/* Set up hardware VLAN tagging. */
#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		cflags |= swap16(m->m_pkthdr.ether_vtag | RGE_TDEXTSTS_VTAG);
#endif

	cur = idx;
	cmdsts = RGE_TDCMDSTS_SOF;

	for (i = 0; i < txmap->dm_nsegs; i++) {
		d = &q->q_tx.rge_tx_list[cur];

		d->rge_extsts = htole32(cflags);
		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));

		cmdsts |= txmap->dm_segs[i].ds_len;

		if (cur == RGE_TX_LIST_CNT - 1)
			cmdsts |= RGE_TDCMDSTS_EOR;

		d->rge_cmdsts = htole32(cmdsts);

		last = cur;
		cmdsts = RGE_TDCMDSTS_OWN;
		cur = RGE_NEXT_TX_DESC(cur);
	}

	/* Set EOF on the last descriptor. */
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);

	/* Transfer ownership of packet to the chip. */
	d = &q->q_tx.rge_tx_list[idx];

	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);
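	/*
	 * Note that the OWN bit of this first (SOF) descriptor is set
	 * only now, after all later descriptors in the chain have been
	 * filled in, so the chip never sees a partially built chain.
	 */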

	bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update info of TX queue and descriptors. */
	txq->txq_mbuf = m;
	txq->txq_descidx = last;

	return (nsegs);
}

int
rge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			rge_init(ifp);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				rge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				rge_stop(ifp);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, RGE_JUMBO_FRAMELEN, &sc->sc_queues->q_rx.rge_rx_ring);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			rge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
rge_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct rge_softc *sc = ifp->if_softc;
	struct rge_queues *q = sc->sc_queues;
	struct mbuf *m;
	int free, idx, used;
	int queued = 0;

	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		ifq_purge(ifq);
		return;
	}

	/* Calculate free space. */
	idx = q->q_tx.rge_txq_prodidx;
	free = q->q_tx.rge_txq_considx;
	if (free <= idx)
		free += RGE_TX_LIST_CNT;
	free -= idx;
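	/*
	 * Illustration: on a 16-slot ring with prodidx 12 and considx 2,
	 * free = 2 + 16 - 12 = 6 descriptors are still available.
	 */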

	for (;;) {
		if (RGE_TX_NSEGS >= free + 2) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		used = rge_encap(q, m, idx);
		if (used == 0) {
			m_freem(m);
			continue;
		}

		KASSERT(used <= free);
		free -= used;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		idx += used;
		if (idx >= RGE_TX_LIST_CNT)
			idx -= RGE_TX_LIST_CNT;

		queued++;
	}

	if (queued == 0)
		return;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	q->q_tx.rge_txq_prodidx = idx;
	ifq_serialize(ifq, &sc->sc_task);
}

void
rge_watchdog(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	rge_init(ifp);
}

int
rge_init(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct rge_queues *q = sc->sc_queues;
	uint32_t val;
	int i;

	rge_stop(ifp);

	/* Set MAC address. */
	rge_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* Set maximum frame size. */
	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);

	/* Initialize the RX and TX descriptor lists. */
	rge_rx_list_init(q);
	rge_tx_list_init(q);

	/* Load the addresses of the RX and TX lists into the chip. */
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
	    RGE_ADDR_LO(q->q_rx.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
	    RGE_ADDR_HI(q->q_rx.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
	    RGE_ADDR_LO(q->q_tx.rge_tx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
	    RGE_ADDR_HI(q->q_tx.rge_tx_list_map->dm_segs[0].ds_addr));

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	RGE_CLRBIT_1(sc, 0xf1, 0x80);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);

	/* Clear interrupt moderation timer. */
	for (i = 0; i < 64; i++)
		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);

	/* Set the initial RX and TX configurations. */
	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);

	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
	rge_write_csi(sc, 0x70c, val | 0x27000000);

	/* Enable hardware optimization function. */
	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);

	RGE_WRITE_2(sc, 0x0382, 0x221b);

	RGE_WRITE_1(sc, RGE_RSS_CTRL, 0);

	val = RGE_READ_2(sc, RGE_RXQUEUE_CTRL) & ~0x001c;
	RGE_WRITE_2(sc, RGE_RXQUEUE_CTRL, val | (fls(sc->sc_nqueues) - 1) << 2);

	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);

	rge_write_mac_ocp(sc, 0xc140, 0xffff);
	rge_write_mac_ocp(sc, 0xc142, 0xffff);

	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);

	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
	else
		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);

	val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0c00;
	rge_write_mac_ocp(sc, 0xe63e, val |
	    ((fls(sc->sc_nqueues) - 1) & 0x03) << 10);

	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		RGE_MAC_SETBIT(sc, 0xe63e, 0x0020);

	RGE_MAC_CLRBIT(sc, 0xc0b4, 0x0001);
	RGE_MAC_SETBIT(sc, 0xc0b4, 0x0001);
	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);

	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);

	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);

	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);

	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);

	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);

	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);

	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);

	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);

	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN | RGE_DLLPR_TX_10M_PS_EN);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);

	/* Disable EEE plus. */
	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);

	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);

	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
	DELAY(1);
	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);

	RGE_CLRBIT_4(sc, 0x1880, 0x0030);

	rge_write_mac_ocp(sc, 0xe098, 0xc302);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);

	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);

	for (i = 0; i < 10; i++) {
		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
			break;
		DELAY(1000);
	}

	/* Disable RXDV gate. */
	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	rge_ifmedia_upd(ifp);

	/* Enable transmit and receive. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);

	/* Program promiscuous mode and multicast filters. */
	rge_iff(sc);

	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);

	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	/* Enable interrupts. */
	rge_setup_intr(sc, RGE_IMTYPE_SIM);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	timeout_add_sec(&sc->sc_timeout, 1);

	return (0);
}

/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */
void
rge_stop(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct rge_queues *q = sc->sc_queues;
	int i;

	timeout_del(&sc->sc_timeout);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	sc->rge_timerintr = 0;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	RGE_WRITE_4(sc, RGE_IMR, 0);
	RGE_WRITE_4(sc, RGE_ISR, 0);

	/* Clear timer interrupts. */
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);

	rge_reset(sc);

	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);
	ifq_clr_oactive(&ifp->if_snd);

	if (q->q_rx.rge_head != NULL) {
		m_freem(q->q_rx.rge_head);
		q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		if (q->q_tx.rge_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    q->q_tx.rge_txq[i].txq_dmamap);
			m_freem(q->q_tx.rge_txq[i].txq_mbuf);
			q->q_tx.rge_txq[i].txq_mbuf = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		if (q->q_rx.rge_rxq[i].rxq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    q->q_rx.rge_rxq[i].rxq_dmamap);
			m_freem(q->q_rx.rge_rxq[i].rxq_mbuf);
			q->q_rx.rge_rxq[i].rxq_mbuf = NULL;
		}
	}
}

/*
 * Set media options.
 */
int
rge_ifmedia_upd(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	int anar, gig, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Disable Gigabit Lite. */
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);

	val = rge_read_phy_ocp(sc, 0xa5d4);
	val &= ~RGE_ADV_2500TFDX;

	anar = gig = 0;
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		break;
	case IFM_2500_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		ifp->if_baudrate = IF_Mbps(2500);
		break;
	case IFM_1000_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		ifp->if_baudrate = IF_Gbps(1);
		break;
	case IFM_100_TX:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
		    ANAR_TX | ANAR_10_FD | ANAR_10;
		ifp->if_baudrate = IF_Mbps(100);
		break;
	case IFM_10_T:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_10_FD | ANAR_10 : ANAR_10;
		ifp->if_baudrate = IF_Mbps(10);
		break;
	default:
		printf("%s: unsupported media type\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
	rge_write_phy(sc, 0, MII_100T2CR, gig);
	rge_write_phy_ocp(sc, 0xa5d4, val);
	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
	    BMCR_STARTNEG);

	return (0);
}

/*
 * Report current media status.
 */
void
rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rge_softc *sc = ifp->if_softc;
	uint16_t status = 0;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (rge_get_link_status(sc)) {
		ifmr->ifm_status |= IFM_ACTIVE;

		status = RGE_READ_2(sc, RGE_PHYSTAT);
		if ((status & RGE_PHYSTAT_FDX) ||
		    (status & RGE_PHYSTAT_2500MBPS))
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		if (status & RGE_PHYSTAT_10MBPS)
			ifmr->ifm_active |= IFM_10_T;
		else if (status & RGE_PHYSTAT_100MBPS)
			ifmr->ifm_active |= IFM_100_TX;
		else if (status & RGE_PHYSTAT_1000MBPS)
			ifmr->ifm_active |= IFM_1000_T;
		else if (status & RGE_PHYSTAT_2500MBPS)
			ifmr->ifm_active |= IFM_2500_T;
	}
}

/*
 * Allocate memory for RX/TX rings.
 */
int
rge_allocmem(struct rge_softc *sc)
{
	struct rge_queues *q = sc->sc_queues;
	int error, i;

	/* Allocate DMA'able memory for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &q->q_tx.rge_tx_list_map);
	if (error) {
		printf("%s: can't create TX list map\n", sc->sc_dev.dv_xname);
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
	    &q->q_tx.rge_tx_listseg, 1, &q->q_tx.rge_tx_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: can't alloc TX list\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/* Load the map for the TX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &q->q_tx.rge_tx_listseg,
	    q->q_tx.rge_tx_listnseg, RGE_TX_LIST_SZ,
	    (caddr_t *)&q->q_tx.rge_tx_list, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: can't map TX dma buffers\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, &q->q_tx.rge_tx_listseg,
		    q->q_tx.rge_tx_listnseg);
		return (error);
	}
	error = bus_dmamap_load(sc->sc_dmat, q->q_tx.rge_tx_list_map,
	    q->q_tx.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load TX dma map\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, q->q_tx.rge_tx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)q->q_tx.rge_tx_list, RGE_TX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &q->q_tx.rge_tx_listseg,
		    q->q_tx.rge_tx_listnseg);
		return (error);
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, BUS_DMA_NOWAIT,
		    &q->q_tx.rge_txq[i].txq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for TX\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	/* Allocate DMA'able memory for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
	    RGE_RX_LIST_SZ, 0, BUS_DMA_NOWAIT, &q->q_rx.rge_rx_list_map);
	if (error) {
		printf("%s: can't create RX list map\n", sc->sc_dev.dv_xname);
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
	    &q->q_rx.rge_rx_listseg, 1, &q->q_rx.rge_rx_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: can't alloc RX list\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/* Load the map for the RX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &q->q_rx.rge_rx_listseg,
	    q->q_rx.rge_rx_listnseg, RGE_RX_LIST_SZ,
	    (caddr_t *)&q->q_rx.rge_rx_list, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: can't map RX dma buffers\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, &q->q_rx.rge_rx_listseg,
		    q->q_rx.rge_rx_listnseg);
		return (error);
	}
	error = bus_dmamap_load(sc->sc_dmat, q->q_rx.rge_rx_list_map,
	    q->q_rx.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load RX dma map\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, q->q_rx.rge_rx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)q->q_rx.rge_rx_list, RGE_RX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &q->q_rx.rge_rx_listseg,
		    q->q_rx.rge_rx_listnseg);
		return (error);
	}

	/* Create DMA maps for RX buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
		    RGE_JUMBO_FRAMELEN, 0, BUS_DMA_NOWAIT,
		    &q->q_rx.rge_rxq[i].rxq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for RX\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	return (error);
}

/*
 * Initialize the RX descriptor and attach an mbuf cluster.
 */
int
rge_newbuf(struct rge_queues *q)
{
	struct rge_softc *sc = q->q_sc;
	struct mbuf *m;
	struct rge_rx_desc *r;
	struct rge_rxq *rxq;
	bus_dmamap_t rxmap;
	int idx;

	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;

	idx = q->q_rx.rge_rxq_prodidx;
	rxq = &q->q_rx.rge_rxq[idx];
	rxmap = rxq->rxq_dmamap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT)) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Map the segments into RX descriptors. */
	r = &q->q_rx.rge_rx_list[idx];

	if (RGE_OWN(r)) {
		printf("%s: tried to map busy RX descriptor\n",
		    sc->sc_dev.dv_xname);
		m_freem(m);
		return (ENOBUFS);
	}

	rxq->rxq_mbuf = m;

	r->rge_extsts = 0;
	r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
	r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));

	r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
	if (idx == RGE_RX_LIST_CNT - 1)
		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);

	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	q->q_rx.rge_rxq_prodidx = RGE_NEXT_RX_DESC(idx);

	return (0);
}

void
rge_discard_rxbuf(struct rge_queues *q, int idx)
{
	struct rge_softc *sc = q->q_sc;
	struct rge_rx_desc *r;

	r = &q->q_rx.rge_rx_list[idx];

	r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
	r->rge_extsts = 0;
	if (idx == RGE_RX_LIST_CNT - 1)
		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
rge_rx_list_init(struct rge_queues *q)
{
	memset(q->q_rx.rge_rx_list, 0, RGE_RX_LIST_SZ);

	q->q_rx.rge_rxq_prodidx = q->q_rx.rge_rxq_considx = 0;
	q->q_rx.rge_head = q->q_rx.rge_tail = NULL;

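	/*
	 * Ring accounting: hand out at most RGE_RX_LIST_CNT - 1 slots,
	 * with a low watermark of 2 buffers.
	 */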
	if_rxr_init(&q->q_rx.rge_rx_ring, 2, RGE_RX_LIST_CNT - 1);
	rge_fill_rx_ring(q);
}

void
rge_fill_rx_ring(struct rge_queues *q)
{
	struct if_rxring *rxr = &q->q_rx.rge_rx_ring;
	int slots;

	for (slots = if_rxr_get(rxr, RGE_RX_LIST_CNT); slots > 0; slots--) {
		if (rge_newbuf(q) == ENOBUFS)
			break;
	}
	if_rxr_put(rxr, slots);
}

void
rge_tx_list_init(struct rge_queues *q)
{
	struct rge_softc *sc = q->q_sc;
	int i;

	memset(q->q_tx.rge_tx_list, 0, RGE_TX_LIST_SZ);

	for (i = 0; i < RGE_TX_LIST_CNT; i++)
		q->q_tx.rge_txq[i].txq_mbuf = NULL;

	bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map, 0,
	    q->q_tx.rge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	q->q_tx.rge_txq_prodidx = q->q_tx.rge_txq_considx = 0;
}

int
rge_rxeof(struct rge_queues *q)
{
	struct rge_softc *sc = q->q_sc;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct if_rxring *rxr = &q->q_rx.rge_rx_ring;
	struct rge_rx_desc *cur_rx;
	struct rge_rxq *rxq;
	uint32_t rxstat, extsts;
	int i, total_len, rx = 0;

	for (i = q->q_rx.rge_rxq_considx; if_rxr_inuse(rxr) > 0;
	    i = RGE_NEXT_RX_DESC(i)) {
		/* Invalidate the descriptor memory. */
		bus_dmamap_sync(sc->sc_dmat, q->q_rx.rge_rx_list_map,
		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &q->q_rx.rge_rx_list[i];

		if (RGE_OWN(cur_rx))
			break;

		rxstat = letoh32(cur_rx->rge_cmdsts);
		extsts = letoh32(cur_rx->rge_extsts);

		total_len = RGE_RXBYTES(cur_rx);
		rxq = &q->q_rx.rge_rxq[i];
		m = rxq->rxq_mbuf;
		rxq->rxq_mbuf = NULL;
		if_rxr_put(rxr, 1);
		rx = 1;

		/* Invalidate the RX mbuf and unload its map. */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);

		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
			rge_discard_rxbuf(q, i);
			continue;
		}

		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (q->q_rx.rge_head != NULL) {
				m_freem(q->q_rx.rge_head);
				q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
			}
			rge_discard_rxbuf(q, i);
			continue;
		}

		if (q->q_rx.rge_head != NULL) {
			m->m_len = total_len;
			/*
			 * Special case: if there are 4 bytes or fewer
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				q->q_rx.rge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				q->q_rx.rge_tail->m_next = m;
			}
			m = q->q_rx.rge_head;
			q->q_rx.rge_head = q->q_rx.rge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		/* Check IP header checksum. */
		if (!(rxstat & RGE_RDCMDSTS_IPCSUMERR) &&
		    (extsts & RGE_RDEXTSTS_IPV4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum. */
		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
		    (((rxstat & RGE_RDCMDSTS_TCPPKT) &&
		    !(rxstat & RGE_RDCMDSTS_TCPCSUMERR)) ||
		    ((rxstat & RGE_RDCMDSTS_UDPPKT) &&
		    !(rxstat & RGE_RDCMDSTS_UDPCSUMERR))))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;

#if NVLAN > 0
		if (extsts & RGE_RDEXTSTS_VTAG) {
			m->m_pkthdr.ether_vtag =
			    ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);
	}

	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(rxr);

	q->q_rx.rge_rxq_considx = i;
	rge_fill_rx_ring(q);

	return (rx);
}

int
rge_txeof(struct rge_queues *q)
{
	struct rge_softc *sc = q->q_sc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct rge_txq *txq;
	uint32_t txstat;
	int cons, idx, prod;
	int free = 0;
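	/*
	 * In the loop below, free == 1 means at least one descriptor
	 * was reclaimed; free == 2 means the chip still owns a
	 * descriptor, i.e. a transmit is still in flight.
	 */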

	prod = q->q_tx.rge_txq_prodidx;
	cons = q->q_tx.rge_txq_considx;

	while (prod != cons) {
		txq = &q->q_tx.rge_txq[cons];
		idx = txq->txq_descidx;

		bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = letoh32(q->q_tx.rge_tx_list[idx].rge_cmdsts);

		if (txstat & RGE_TDCMDSTS_OWN) {
			free = 2;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & RGE_TDCMDSTS_TXERR)
			ifp->if_oerrors++;

		bus_dmamap_sync(sc->sc_dmat, q->q_tx.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		cons = RGE_NEXT_TX_DESC(idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	q->q_tx.rge_txq_considx = cons;

	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
	else if (free == 2)
		ifq_serialize(&ifp->if_snd, &sc->sc_task);
	else
		ifp->if_timer = 0;

	return (1);
}

void
rge_reset(struct rge_softc *sc)
{
	int i;

	/* Enable RXDV gate. */
	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

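	/*
	 * Give the MCU up to 150ms (3000 * 50us) to report that both
	 * FIFOs have drained before issuing the soft reset.
	 */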
	for (i = 0; i < 3000; i++) {
		DELAY(50);
		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY))
			break;
	}
	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
		for (i = 0; i < 3000; i++) {
			DELAY(50);
			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
				break;
		}
	}

	DELAY(2000);

	/* Soft reset. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(100);
		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
			break;
	}
	if (i == RGE_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
}

void
rge_iff(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2];
	uint32_t rxfilt;
	int h = 0;

	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RGE_RXCFG_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RGE_RXCFG_ALLPHYS;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= RGE_RXCFG_MULTI;
		/* Program new filter. */
		memset(hashes, 0, sizeof(hashes));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
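			/*
			 * The top 6 bits of the big-endian CRC32 of
			 * the address select one of the 64 bits in
			 * the multicast hash filter.
			 */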
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
	RGE_WRITE_4(sc, RGE_MAR0, swap32(hashes[1]));
	RGE_WRITE_4(sc, RGE_MAR4, swap32(hashes[0]));
}

void
rge_set_phy_power(struct rge_softc *sc, int on)
{
	int i;

	if (on) {
		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);

		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);

		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
				break;
			DELAY(1000);
		}
	} else {
		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
	}
}

void
rge_phy_config(struct rge_softc *sc)
{
	/* Read microcode version. */
	rge_write_phy_ocp(sc, 0xa436, 0x801e);
	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);

	switch (sc->rge_type) {
	case MAC_CFG2:
		rge_phy_config_mac_cfg2(sc);
		break;
	case MAC_CFG3:
		rge_phy_config_mac_cfg3(sc);
		break;
	case MAC_CFG4:
		rge_phy_config_mac_cfg4(sc);
		break;
	case MAC_CFG5:
		rge_phy_config_mac_cfg5(sc);
		break;
	default:
		break;	/* Can't happen. */
	}

	rge_write_phy(sc, 0x0a5b, 0x12,
	    rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);

	/* Disable EEE. */
	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
	}
	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);

	rge_patch_phy_mcu(sc, 1);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
	rge_patch_phy_mcu(sc, 0);
}

void
rge_phy_config_mac_cfg2(struct rge_softc *sc)
{
	uint16_t val;
	int i;

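	/*
	 * The OCP register pokes in this and the other
	 * rge_phy_config_mac_cfg* routines follow Realtek's reference
	 * driver; the magic values are not documented any further.
	 */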
	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
		    rtl8125_mac_cfg2_ephy[i].val);

	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);

	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);

	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
	rge_write_phy_ocp(sc, 0xa436, 0x8102);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x8105);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
	rge_write_phy_ocp(sc, 0xa436, 0x8100);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
	rge_write_phy_ocp(sc, 0xa436, 0x8104);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
	rge_write_phy_ocp(sc, 0xa436, 0x8106);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
	rge_write_phy_ocp(sc, 0xa436, 0x819f);
	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}

void
rge_phy_config_mac_cfg3(struct rge_softc *sc)
{
	uint16_t val;
	int i;
	static const uint16_t mac_cfg3_a438_value[] =
	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };

	static const uint16_t mac_cfg3_b88e_value[] =
	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };

	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
		    rtl8125_mac_cfg3_ephy[i].val);

	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
	rge_write_ephy(sc, 0x002a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
	rge_write_ephy(sc, 0x0002, 0x6042);
	rge_write_ephy(sc, 0x0006, 0x0014);
	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
	rge_write_ephy(sc, 0x006a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
	rge_write_ephy(sc, 0x0042, 0x6042);
	rge_write_ephy(sc, 0x0046, 0x0014);

	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, RGE_JUMBO_MTU + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);

	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
	for (i = 0; i < 26; i++)
		rge_write_phy_ocp(sc, 0xa438, 0);
	rge_write_phy_ocp(sc, 0xa436, 0x8257);
	rge_write_phy_ocp(sc, 0xa438, 0x020f);
	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	rge_write_phy_ocp(sc, 0xa438, 0x7843);

	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);

	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}

void
rge_phy_config_mac_cfg4(struct rge_softc *sc)
{
	uint16_t val;
	int i;
	static const uint16_t mac_cfg4_b87c_value[] =
	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
	      0x80b0, 0x0f31 };

	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
		    rtl8125_mac_cfg4_ephy[i].val);

	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);

	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
		if (i < 3)
			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f2);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
	rge_write_phy_ocp(sc, 0xac4e, 0x0db4);
	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
	rge_write_phy_ocp(sc, 0xad08, 0x0007);
	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, RGE_JUMBO_MTU + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xa436, 0x816c);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8170);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8174);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8178);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x817c);
	rge_write_phy_ocp(sc, 0xa438, 0x0719);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
	rge_write_phy_ocp(sc, 0xa438, 0x0400);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
	rge_write_phy_ocp(sc, 0xa438, 0x0404);
	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
		if (i == 2)
			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
	rge_write_phy_ocp(sc, 0xa436, 0x8217);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x821a);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x80da);
	rge_write_phy_ocp(sc, 0xa438, 0x0403);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
	rge_write_phy_ocp(sc, 0xa438, 0x0384);
	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
	rge_write_phy_ocp(sc, 0xa438, 0xf009);
	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
	rge_write_phy_ocp(sc, 0xa438, 0xf083);
	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
	rge_write_phy_ocp(sc, 0xa438, 0x8009);
	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
	rge_write_phy_ocp(sc, 0xa438, 0x200a);
	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
	rge_write_phy_ocp(sc, 0xa436, 0x809f);
	rge_write_phy_ocp(sc, 0xa438, 0x6073);
	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
	rge_write_phy_ocp(sc, 0xa438, 0x000b);
	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
	rge_write_phy_ocp(sc, 0xb890, 0x0000);
	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
	rge_write_phy_ocp(sc, 0xb890, 0x0103);
	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
1867 	rge_write_phy_ocp(sc, 0xb890, 0x0507);
1868 	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
1869 	rge_write_phy_ocp(sc, 0xb890, 0x090b);
1870 	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
1871 	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
1872 	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
1873 	rge_write_phy_ocp(sc, 0xb890, 0x1012);
1874 	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
1875 	rge_write_phy_ocp(sc, 0xb890, 0x1416);
1876 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1877 	rge_patch_phy_mcu(sc, 0);
1878 	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
1879 	RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
1880 	rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
1881 	rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
1882 	rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
1883 	rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
1884 	rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
1885 	RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
1886 	rge_write_phy_ocp(sc, 0xa436, 0x817d);
1887 	RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
1888 }
1889 
1890 void
1891 rge_phy_config_mac_cfg5(struct rge_softc *sc)
1892 {
1893 	uint16_t val;
1894 	int i;
1895 
1896 	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
1897 		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
1898 		    rtl8125_mac_cfg5_ephy[i].val);
1899 
1900 	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);
1901 
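	/* PHY tuning values ported from the vendor driver. */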
1902 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1903 	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
1904 	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
1905 	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
1906 	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
1907 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, RGE_JUMBO_MTU + ETHER_HDR_LEN +
1908 	    32);
1909 	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
1910 	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
1911 	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
1912 	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
1913 	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
1914 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1915 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
1916 	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
1917 	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
1918 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1919 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
1920 	for (i = 0; i < 10; i++) {
1921 		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
1922 		rge_write_phy_ocp(sc, 0xa438, 0x2417);
1923 	}
1924 	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
1925 	val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
1926 	rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
1927 	rge_write_phy_ocp(sc, 0xa436, 0x8170);
1928 	val = rge_read_phy_ocp(sc, 0xa438) & ~0x2700;
1929 	rge_write_phy_ocp(sc, 0xa438, val | 0xd800);
1930 }
1931 
1932 void
1933 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
1934 {
1935 	if (sc->rge_mcodever != mcode_version) {
1936 		int i;
1937 
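		/* Enter PHY MCU patch mode before loading the microcode. */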
1938 		rge_patch_phy_mcu(sc, 1);
1939 
1940 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
1941 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
1942 			if (sc->rge_type == MAC_CFG2)
1943 				rge_write_phy_ocp(sc, 0xa438, 0x8600);
1944 			else
1945 				rge_write_phy_ocp(sc, 0xa438, 0x8601);
1946 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
1947 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
1948 
1949 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
1950 		}
1951 
1952 		if (sc->rge_type == MAC_CFG2) {
1953 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
1954 				rge_write_phy_ocp(sc,
1955 				    rtl8125_mac_cfg2_mcu[i].reg,
1956 				    rtl8125_mac_cfg2_mcu[i].val);
1957 			}
1958 		} else if (sc->rge_type == MAC_CFG3) {
1959 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
1960 				rge_write_phy_ocp(sc,
1961 				    rtl8125_mac_cfg3_mcu[i].reg,
1962 				    rtl8125_mac_cfg3_mcu[i].val);
1963 			}
1964 		} else if (sc->rge_type == MAC_CFG4) {
1965 			for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
1966 				rge_write_phy_ocp(sc,
1967 				    rtl8125_mac_cfg4_mcu[i].reg,
1968 				    rtl8125_mac_cfg4_mcu[i].val);
1969 			}
1970 		} else if (sc->rge_type == MAC_CFG5) {
1971 			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
1972 				rge_write_phy_ocp(sc,
1973 				    rtl8125_mac_cfg5_mcu[i].reg,
1974 				    rtl8125_mac_cfg5_mcu[i].val);
1975 			}
1976 		}
1977 
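		/*
		 * Undo the patch-request setup performed above for
		 * MAC_CFG2/3 parts.
		 */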
1978 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
1979 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
1980 
1981 			rge_write_phy_ocp(sc, 0xa436, 0);
1982 			rge_write_phy_ocp(sc, 0xa438, 0);
1983 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
1984 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
1985 			rge_write_phy_ocp(sc, 0xa438, 0);
1986 		}
1987 
1988 		rge_patch_phy_mcu(sc, 0);
1989 
1990 		/* Write microcode version. */
1991 		rge_write_phy_ocp(sc, 0xa436, 0x801e);
1992 		rge_write_phy_ocp(sc, 0xa438, mcode_version);
1993 	}
1994 }
1995 
1996 void
1997 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
1998 {
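	/* Unlock the config registers while updating the station address. */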
1999 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2000 	RGE_WRITE_4(sc, RGE_MAC0,
2001 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2002 	RGE_WRITE_4(sc, RGE_MAC4,
2003 	    addr[5] <<  8 | addr[4]);
2004 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2005 }
2006 
2007 void
2008 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
2009 {
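	/*
	 * Note: this appears to assume a little-endian host, as the
	 * address bytes are stored through integer pointer casts.
	 */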
2010 	*(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
2011 	*(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
2012 }
2013 
2014 void
2015 rge_hw_init(struct rge_softc *sc)
2016 {
2017 	int i;
2018 
2019 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2020 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
2021 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
2022 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2023 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
2024 
2025 	/* Disable UPS. */
2026 	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
2027 
2028 	/* Configure MAC MCU: clear any previously loaded patch. */
2029 	rge_write_mac_ocp(sc, 0xfc38, 0);
2030 
2031 	for (i = 0xfc28; i < 0xfc38; i += 2)
2032 		rge_write_mac_ocp(sc, i, 0);
2033 
2034 	DELAY(3000);
2035 	rge_write_mac_ocp(sc, 0xfc26, 0);
2036 
2037 	if (sc->rge_type == MAC_CFG3) {
2038 		for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
2039 			rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
2040 			    rtl8125_mac_bps[i].val);
2041 		}
2042 	} else if (sc->rge_type == MAC_CFG5) {
2043 		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
2044 			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
2045 			    rtl8125b_mac_bps[i].val);
2046 		}
2047 	}
2048 
2049 	/* Disable PHY power saving. */
2050 	rge_disable_phy_ocp_pwrsave(sc);
2051 
2052 	/* Set PCIe uncorrectable error status. */
2053 	rge_write_csi(sc, 0x108,
2054 	    rge_read_csi(sc, 0x108) | 0x00100000);
2056 }
2057 
2058 void
2059 rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
2060 {
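	/*
	 * Writing 0x0500 to OCP 0xc416 disables PHY power saving;
	 * the value comes from the vendor driver.
	 */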
2061 	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
2062 		rge_patch_phy_mcu(sc, 1);
2063 		rge_write_phy_ocp(sc, 0xc416, 0);
2064 		rge_write_phy_ocp(sc, 0xc416, 0x0500);
2065 		rge_patch_phy_mcu(sc, 0);
2066 	}
2067 }
2068 
2069 void
2070 rge_patch_phy_mcu(struct rge_softc *sc, int set)
2071 {
2072 	int i;
2073 
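	/* Request (or release) PHY MCU patch mode and poll the ready flag. */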
2074 	if (set)
2075 		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
2076 	else
2077 		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
2078 
2079 	for (i = 0; i < 1000; i++) {
2080 		if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) == 0x0040)
2081 			break;
2082 		DELAY(100);
2083 	}
2084 	if (i == 1000) {
2085 		DPRINTF(("timeout waiting to patch phy mcu\n"));
2086 		return;
2087 	}
2088 }
2089 
2090 void
2091 rge_add_media_types(struct rge_softc *sc)
2092 {
2093 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
2094 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
2095 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
2096 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
2097 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
2098 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2099 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
2100 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
2101 }
2102 
2103 void
2104 rge_config_imtype(struct rge_softc *sc, int imtype)
2105 {
2106 	switch (imtype) {
2107 	case RGE_IMTYPE_NONE:
2108 		sc->rge_intrs = RGE_INTRS;
2109 		break;
2110 	case RGE_IMTYPE_SIM:
2111 		sc->rge_intrs = RGE_INTRS_TIMER;
2112 		break;
2113 	default:
2114 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
2115 	}
2116 }
2117 
2118 void
2119 rge_disable_hw_im(struct rge_softc *sc)
2120 {
2121 	RGE_WRITE_2(sc, RGE_IM, 0);
2122 }
2123 
2124 void
2125 rge_disable_sim_im(struct rge_softc *sc)
2126 {
2127 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
2128 	sc->rge_timerintr = 0;
2129 }
2130 
2131 void
2132 rge_setup_sim_im(struct rge_softc *sc)
2133 {
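	/*
	 * Arm the simulated interrupt moderation timer; the period is
	 * in undocumented chip ticks.
	 */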
2134 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
2135 	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
2136 	sc->rge_timerintr = 1;
2137 }
2138 
2139 void
2140 rge_setup_intr(struct rge_softc *sc, int imtype)
2141 {
2142 	rge_config_imtype(sc, imtype);
2143 
2144 	/* Enable interrupts. */
2145 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
2146 
2147 	switch (imtype) {
2148 	case RGE_IMTYPE_NONE:
2149 		rge_disable_sim_im(sc);
2150 		rge_disable_hw_im(sc);
2151 		break;
2152 	case RGE_IMTYPE_SIM:
2153 		rge_disable_hw_im(sc);
2154 		rge_setup_sim_im(sc);
2155 		break;
2156 	default:
2157 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
2158 	}
2159 }
2160 
2161 void
2162 rge_exit_oob(struct rge_softc *sc)
2163 {
2164 	int i;
2165 
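	/* Bring the chip out of OOB (out-of-band management) mode. */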
2166 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
2167 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
2168 	    RGE_RXCFG_ERRPKT);
2169 
2170 	/* Disable RealWoW. */
2171 	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
2172 
2173 	rge_reset(sc);
2174 
2175 	/* Disable OOB. */
2176 	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
2177 
2178 	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
2179 
2180 	for (i = 0; i < 10; i++) {
2181 		DELAY(100);
2182 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2183 			break;
2184 	}
2185 
2186 	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
2187 	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
2188 	rge_write_mac_ocp(sc, 0xc01e, 0x5555);
2189 
2190 	for (i = 0; i < 10; i++) {
2191 		DELAY(100);
2192 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2193 			break;
2194 	}
2195 
2196 	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
2197 		printf("%s: waiting for UPS resume\n",
2198 		    sc->sc_dev.dv_xname);
2199 		for (i = 0; i < RGE_TIMEOUT; i++) {
2200 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
2201 				break;
2202 			DELAY(1000);
2203 		}
2204 		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
2205 		if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
2206 			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
2207 		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
2208 	}
2209 }
2210 
2211 void
2212 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
2213 {
2214 	int i;
2215 
2216 	RGE_WRITE_4(sc, RGE_CSIDR, val);
2217 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2218 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
2219 
2220 	for (i = 0; i < 10; i++) {
2221 		DELAY(100);
2222 		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
2223 			break;
2224 	}
2225 
2226 	DELAY(20);
2227 }
2228 
2229 uint32_t
2230 rge_read_csi(struct rge_softc *sc, uint32_t reg)
2231 {
2232 	int i;
2233 
2234 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2235 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
2236 
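	/* For reads, hardware sets BUSY once the data is ready. */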
2237 	for (i = 0; i < 10; i++) {
2238 		DELAY(100);
2239 		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
2240 			break;
2241 	}
2242 
2243 	DELAY(20);
2244 
2245 	return (RGE_READ_4(sc, RGE_CSIDR));
2246 }
2247 
2248 void
2249 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2250 {
2251 	uint32_t tmp;
2252 
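	/*
	 * A MAC OCP access packs the word-aligned register address into
	 * the high bits and the 16-bit data into the low bits; BUSY
	 * kicks off the write.
	 */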
2253 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2254 	tmp += val;
2255 	tmp |= RGE_MACOCP_BUSY;
2256 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
2257 }
2258 
2259 uint16_t
2260 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
2261 {
2262 	uint32_t val;
2263 
2264 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2265 	RGE_WRITE_4(sc, RGE_MACOCP, val);
2266 
2267 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
2268 }
2269 
2270 void
2271 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
2272 {
2273 	uint32_t tmp;
2274 	int i;
2275 
2276 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2277 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
2278 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
2279 
2280 	for (i = 0; i < 10; i++) {
2281 		DELAY(100);
2282 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
2283 			break;
2284 	}
2285 
2286 	DELAY(20);
2287 }
2288 
2289 uint16_t
2290 rge_read_ephy(struct rge_softc *sc, uint16_t reg)
2291 {
2292 	uint32_t val;
2293 	int i;
2294 
2295 	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2296 	RGE_WRITE_4(sc, RGE_EPHYAR, val);
2297 
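	/* BUSY becoming set signals completion (inverse of writes). */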
2298 	for (i = 0; i < 10; i++) {
2299 		DELAY(100);
2300 		val = RGE_READ_4(sc, RGE_EPHYAR);
2301 		if (val & RGE_EPHYAR_BUSY)
2302 			break;
2303 	}
2304 
2305 	DELAY(20);
2306 
2307 	return (val & RGE_EPHYAR_DATA_MASK);
2308 }
2309 
2310 void
2311 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
2312 {
2313 	uint16_t off, phyaddr;
2314 
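	/*
	 * Map a plain MII register onto the PHY OCP address space:
	 * page RGE_PHYBASE + reg / 8, register 0x10 + reg % 8, as in
	 * the vendor driver.
	 */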
2315 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2316 	phyaddr <<= 4;
2317 
2318 	off = addr ? reg : 0x10 + (reg % 8);
2319 
2320 	phyaddr += (off - 16) << 1;
2321 
2322 	rge_write_phy_ocp(sc, phyaddr, val);
2323 }
2324 
2325 uint16_t
2326 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
2327 {
2328 	uint16_t off, phyaddr;
2329 
2330 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2331 	phyaddr <<= 4;
2332 
2333 	off = addr ? reg : 0x10 + (reg % 8);
2334 
2335 	phyaddr += (off - 16) << 1;
2336 
2337 	return (rge_read_phy_ocp(sc, phyaddr));
2338 }
2339 
2340 void
2341 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2342 {
2343 	uint32_t tmp;
2344 	int i;
2345 
2346 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2347 	tmp |= RGE_PHYOCP_BUSY | val;
2348 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
2349 
2350 	for (i = 0; i < RGE_TIMEOUT; i++) {
2351 		DELAY(1);
2352 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
2353 			break;
2354 	}
2355 }
2356 
2357 uint16_t
2358 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
2359 {
2360 	uint32_t val;
2361 	int i;
2362 
2363 	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2364 	RGE_WRITE_4(sc, RGE_PHYOCP, val);
2365 
2366 	for (i = 0; i < RGE_TIMEOUT; i++) {
2367 		DELAY(1);
2368 		val = RGE_READ_4(sc, RGE_PHYOCP);
2369 		if (val & RGE_PHYOCP_BUSY)
2370 			break;
2371 	}
2372 
2373 	return (val & RGE_PHYOCP_DATA_MASK);
2374 }
2375 
2376 int
2377 rge_get_link_status(struct rge_softc *sc)
2378 {
2379 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
2380 }
2381 
2382 void
2383 rge_txstart(void *arg)
2384 {
2385 	struct rge_softc *sc = arg;
2386 
2387 	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
2388 }
2389 
2390 void
2391 rge_tick(void *arg)
2392 {
2393 	struct rge_softc *sc = arg;
2394 	int s;
2395 
2396 	s = splnet();
2397 	rge_link_state(sc);
2398 	splx(s);
2399 
2400 	timeout_add_sec(&sc->sc_timeout, 1);
2401 }
2402 
2403 void
2404 rge_link_state(struct rge_softc *sc)
2405 {
2406 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2407 	int link = LINK_STATE_DOWN;
2408 
2409 	if (rge_get_link_status(sc))
2410 		link = LINK_STATE_UP;
2411 
2412 	if (ifp->if_link_state != link) {
2413 		ifp->if_link_state = link;
2414 		if_link_state_change(ifp);
2415 	}
2416 }
2417 
2418 #ifndef SMALL_KERNEL
2419 int
2420 rge_wol(struct ifnet *ifp, int enable)
2421 {
2422 	struct rge_softc *sc = ifp->if_softc;
2423 
2424 	if (enable) {
2425 		if (!(RGE_READ_1(sc, RGE_CFG1) & RGE_CFG1_PM_EN)) {
2426 			printf("%s: power management is disabled, "
2427 			    "cannot do WOL\n", sc->sc_dev.dv_xname);
2428 			return (ENOTSUP);
2429 		}
2431 	}
2432 
2433 	rge_iff(sc);
2434 
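	/* MAC OCP 0xc0b6 bit 0 appears to gate wake-on-LAN. */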
2435 	if (enable)
2436 		RGE_MAC_SETBIT(sc, 0xc0b6, 0x0001);
2437 	else
2438 		RGE_MAC_CLRBIT(sc, 0xc0b6, 0x0001);
2439 
2440 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2441 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_WOL_LANWAKE | RGE_CFG5_WOL_UCAST |
2442 	    RGE_CFG5_WOL_MCAST | RGE_CFG5_WOL_BCAST);
2443 	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_WOL_LINK | RGE_CFG3_WOL_MAGIC);
2444 	if (enable)
2445 		RGE_SETBIT_1(sc, RGE_CFG5, RGE_CFG5_WOL_LANWAKE);
2446 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2447 
2448 	return (0);
2449 }
2450 
2451 void
2452 rge_wol_power(struct rge_softc *sc)
2453 {
2454 	/* Disable RXDV gate. */
2455 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
2456 	DELAY(2000);
2457 
2458 	RGE_SETBIT_1(sc, RGE_CFG1, RGE_CFG1_PM_EN);
2459 	RGE_SETBIT_1(sc, RGE_CFG2, RGE_CFG2_PMSTS_EN);
2460 }
2461 #endif
2462