xref: /openbsd-src/sys/dev/pci/if_rge.c (revision 1a8dbaac879b9f3335ad7fb25429ce63ac1d6bac)
1 /*	$OpenBSD: if_rge.c,v 1.7 2020/10/12 02:06:25 kevlo Exp $	*/
2 
3 /*
4  * Copyright (c) 2019, 2020 Kevin Lo <kevlo@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "bpfilter.h"
20 #include "vlan.h"
21 
22 #include <sys/param.h>
23 #include <sys/systm.h>
24 #include <sys/sockio.h>
25 #include <sys/mbuf.h>
26 #include <sys/malloc.h>
27 #include <sys/kernel.h>
28 #include <sys/socket.h>
29 #include <sys/device.h>
30 #include <sys/endian.h>
31 
32 #include <net/if.h>
33 #include <net/if_media.h>
34 
35 #include <netinet/in.h>
36 #include <netinet/if_ether.h>
37 
38 #if NBPFILTER > 0
39 #include <net/bpf.h>
40 #endif
41 
42 #include <machine/bus.h>
43 #include <machine/intr.h>
44 
45 #include <dev/mii/mii.h>
46 
47 #include <dev/pci/pcivar.h>
48 #include <dev/pci/pcireg.h>
49 #include <dev/pci/pcidevs.h>
50 
51 #include <dev/pci/if_rgereg.h>
52 
53 #ifdef RGE_DEBUG
54 #define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
55 int rge_debug = 0;
56 #else
57 #define DPRINTF(x)
58 #endif
59 
60 int		rge_match(struct device *, void *, void *);
61 void		rge_attach(struct device *, struct device *, void *);
62 int		rge_intr(void *);
63 int		rge_encap(struct rge_softc *, struct mbuf *, int);
64 int		rge_ioctl(struct ifnet *, u_long, caddr_t);
65 void		rge_start(struct ifqueue *);
66 void		rge_watchdog(struct ifnet *);
67 int		rge_init(struct ifnet *);
68 void		rge_stop(struct ifnet *);
69 int		rge_ifmedia_upd(struct ifnet *);
70 void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
71 int		rge_allocmem(struct rge_softc *);
72 int		rge_newbuf(struct rge_softc *);
73 void		rge_discard_rxbuf(struct rge_softc *, int);
74 void		rge_rx_list_init(struct rge_softc *);
75 void		rge_tx_list_init(struct rge_softc *);
76 void		rge_fill_rx_ring(struct rge_softc *);
77 int		rge_rxeof(struct rge_softc *);
78 int		rge_txeof(struct rge_softc *);
79 void		rge_reset(struct rge_softc *);
80 void		rge_iff(struct rge_softc *);
81 void		rge_set_phy_power(struct rge_softc *, int);
82 void		rge_phy_config(struct rge_softc *);
83 void		rge_phy_config_mac_cfg2(struct rge_softc *);
84 void		rge_phy_config_mac_cfg3(struct rge_softc *);
85 void		rge_phy_config_mac_cfg4(struct rge_softc *);
86 void		rge_phy_config_mac_cfg5(struct rge_softc *);
87 void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
88 void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
89 void		rge_get_macaddr(struct rge_softc *, uint8_t *);
90 void		rge_hw_init(struct rge_softc *);
91 void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
92 void		rge_patch_phy_mcu(struct rge_softc *, int);
93 void		rge_add_media_types(struct rge_softc *);
94 void		rge_config_imtype(struct rge_softc *, int);
95 void		rge_disable_hw_im(struct rge_softc *);
96 void		rge_disable_sim_im(struct rge_softc *);
97 void		rge_setup_sim_im(struct rge_softc *);
98 void		rge_setup_intr(struct rge_softc *, int);
99 void		rge_exit_oob(struct rge_softc *);
100 void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
101 uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
102 void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
103 uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
104 void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
105 uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
106 void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
107 uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
108 void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
109 uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
110 int		rge_get_link_status(struct rge_softc *);
111 void		rge_txstart(void *);
112 void		rge_tick(void *);
113 void		rge_link_state(struct rge_softc *);
114 
/*
 * PHY MCU patch tables, one per supported MAC revision.  Each entry
 * is a (register, value) pair; the actual data is supplied by the
 * RTL8125_MAC_CFG*_MCU macros in if_rgereg.h.  Presumably consumed
 * by rge_phy_config_mcu() — the consumer is outside this view.
 */
static const struct {
	uint16_t reg;
	uint16_t val;
}  rtl8125_mac_cfg2_mcu[] = {
	RTL8125_MAC_CFG2_MCU
}, rtl8125_mac_cfg3_mcu[] = {
	RTL8125_MAC_CFG3_MCU
}, rtl8125_mac_cfg4_mcu[] = {
	RTL8125_MAC_CFG4_MCU
}, rtl8125_mac_cfg5_mcu[] = {
	RTL8125_MAC_CFG5_MCU
};
127 
/* Autoconf attachment glue: softc size, probe and attach entry points. */
struct cfattach rge_ca = {
	sizeof(struct rge_softc), rge_match, rge_attach
};
131 
/* Autoconf driver definition: device name "rge", network interface class. */
struct cfdriver rge_cd = {
	NULL, "rge", DV_IFNET
};
135 
/* PCI vendor/product pairs this driver attaches to. */
const struct pci_matchid rge_devices[] = {
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8125 }
};
140 
141 int
142 rge_match(struct device *parent, void *match, void *aux)
143 {
144 	return (pci_matchbyid((struct pci_attach_args *)aux, rge_devices,
145 	    nitems(rge_devices)));
146 }
147 
/*
 * Attach: map the chip registers, establish the (MSI if available)
 * interrupt, identify the MAC revision, take the chip out of OOB
 * mode, initialize it, read the station address and register the
 * interface with the network stack.
 */
void
rge_attach(struct device *parent, struct device *self, void *aux)
{
	struct rge_softc *sc = (struct rge_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp;
	pcireg_t reg;
	uint32_t hwrev;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int offset;

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Map control/status registers.  Prefer the 64-bit memory BAR,
	 * then the 32-bit memory BAR, and finally fall back to I/O space.
	 */
	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
	    NULL, &sc->rge_bsize, 0)) {
		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
		    &sc->rge_bhandle, NULL, &sc->rge_bsize, 0)) {
			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
			    &sc->rge_bsize, 0)) {
				printf(": can't map mem or i/o space\n");
				return;
			}
		}
	}

	/*
	 * Allocate interrupt.  MSI is preferred; RGE_FLAG_MSI is also
	 * consulted in rge_intr() to decide whether the ISR must be
	 * checked before claiming a shared (INTx) interrupt.
	 */
	if (pci_intr_map_msi(pa, &ih) == 0)
		sc->rge_flags |= RGE_FLAG_MSI;
	else if (pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	/* MP-safe handler: rge_intr() runs without the kernel lock. */
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE, rge_intr,
	    sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	/* Determine hardware revision from the TX config register. */
	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
	switch (hwrev) {
	case 0x60800000:
		sc->rge_type = MAC_CFG2;
		break;
	case 0x60900000:
		sc->rge_type = MAC_CFG3;
		break;
	case 0x64000000:
		sc->rge_type = MAC_CFG4;
		break;
	case 0x64100000:
		sc->rge_type = MAC_CFG5;
		break;
	default:
		printf(": unknown version 0x%08x\n", hwrev);
		return;
	}

	/* Default to simulated (timer-based) interrupt moderation. */
	rge_config_imtype(sc, RGE_IMTYPE_SIM);

	/*
	 * PCI Express check.
	 */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		/* Disable PCIe ASPM and ECPM. */
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    offset + PCI_PCIE_LCSR);
		reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1 |
		    PCI_PCIE_LCSR_ECPM);
		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCI_PCIE_LCSR,
		    reg);
	}

	/* Leave firmware out-of-band management mode, then reset/init. */
	rge_exit_oob(sc);
	rge_hw_init(sc);

	rge_get_macaddr(sc, eaddr);
	printf(", address %s\n", ether_sprintf(eaddr));

	memcpy(sc->sc_arpcom.ac_enaddr, eaddr, ETHER_ADDR_LEN);

	/* Power up and configure the PHY before touching media settings. */
	rge_set_phy_power(sc, 1);
	rge_phy_config(sc);

	if (rge_allocmem(sc))
		return;

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = rge_ioctl;
	ifp->if_qstart = rge_start;
	ifp->if_watchdog = rge_watchdog;
	ifq_set_maxlen(&ifp->if_snd, RGE_TX_LIST_CNT - 1);
	ifp->if_hardmtu = RGE_JUMBO_MTU;

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	timeout_set(&sc->sc_timeout, rge_tick, sc);
	task_set(&sc->sc_task, rge_txstart, sc);

	/* Initialize ifmedia structures. */
	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
	    rge_ifmedia_sts);
	rge_add_media_types(sc);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;

	if_attach(ifp);
	ether_ifattach(ifp);
}
289 
/*
 * Interrupt handler.  Returns nonzero if the interrupt was claimed.
 * Established with IPL_NET | IPL_MPSAFE, so this runs without the
 * kernel lock; it is taken explicitly only around rge_init() below.
 */
int
rge_intr(void *arg)
{
	struct rge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t status;
	int claimed = 0, rx, tx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

	/*
	 * With INTx the line may be shared: peek at the ISR first and
	 * bail if none of our enabled sources are pending.  With MSI
	 * the interrupt is necessarily ours.
	 */
	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
			return (0);
	}

	/* Read and acknowledge pending interrupt sources. */
	status = RGE_READ_4(sc, RGE_ISR);
	if (status)
		RGE_WRITE_4(sc, RGE_ISR, status);

	if (status & RGE_ISR_PCS_TIMEOUT)
		claimed = 1;

	rx = tx = 0;
	if (status & sc->rge_intrs) {
		if (status &
		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
			rx |= rge_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
			tx |= rge_txeof(sc);
			claimed = 1;
		}

		/* Fatal error: reinitialize the whole chip. */
		if (status & RGE_ISR_SYSTEM_ERR) {
			KERNEL_LOCK();
			rge_init(ifp);
			KERNEL_UNLOCK();
			claimed = 1;
		}
	}

	if (sc->rge_timerintr) {
		if ((tx | rx) == 0) {
			/*
			 * Nothing needs to be processed, fallback
			 * to use TX/RX interrupts.
			 */
			rge_setup_intr(sc, RGE_IMTYPE_NONE);

			/*
			 * Recollect, mainly to avoid the possible
			 * race introduced by changing interrupt
			 * masks.
			 */
			rge_rxeof(sc);
			rge_txeof(sc);
		} else
			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	} else if (tx | rx) {
		/*
		 * Assume that using simulated interrupt moderation
		 * (hardware timer based) could reduce the interrupt
		 * rate.
		 */
		rge_setup_intr(sc, RGE_IMTYPE_SIM);
	}

	/* Re-enable our interrupt sources. */
	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	return (claimed);
}
367 
/*
 * Map mbuf `m' for DMA and fill TX descriptors starting at ring
 * index `idx'.  Returns the number of descriptors consumed, or 0 on
 * failure (the caller, rge_start(), frees the mbuf in that case).
 */
int
rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
{
	struct rge_tx_desc *d = NULL;
	struct rge_txq *txq;
	bus_dmamap_t txmap;
	uint32_t cmdsts, cflags = 0;
	int cur, error, i, last, nsegs;

	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
	 * take effect.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT | M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
	}

	txq = &sc->rge_ldata.rge_txq[idx];
	txmap = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG: /* mbuf chain is too fragmented */
		/* Compact the chain and retry the load once. */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = txmap->dm_nsegs;

	/* Set up hardware VLAN tagging. */
#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		cflags |= swap16(m->m_pkthdr.ether_vtag | RGE_TDEXTSTS_VTAG);
#endif

	cur = idx;
	cmdsts = RGE_TDCMDSTS_SOF;

	/*
	 * One descriptor per DMA segment.  The first descriptor gets
	 * SOF but not OWN yet; every subsequent one is handed to the
	 * chip (OWN) as it is written.
	 */
	for (i = 0; i < txmap->dm_nsegs; i++) {
		d = &sc->rge_ldata.rge_tx_list[cur];

		d->rge_extsts = htole32(cflags);
		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));

		cmdsts |= txmap->dm_segs[i].ds_len;

		/* Wrap bit on the last descriptor of the ring. */
		if (cur == RGE_TX_LIST_CNT - 1)
			cmdsts |= RGE_TDCMDSTS_EOR;

		d->rge_cmdsts = htole32(cmdsts);

		last = cur;
		cmdsts = RGE_TDCMDSTS_OWN;
		cur = RGE_NEXT_TX_DESC(cur);
	}

	/* Set EOF on the last descriptor. */
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);

	/*
	 * Transfer ownership of packet to the chip.  Done last, on the
	 * first descriptor, so the chip never sees a partial chain.
	 */
	d = &sc->rge_ldata.rge_tx_list[idx];

	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);

	/*
	 * NOTE(review): this syncs a single descriptor at index `cur',
	 * which at this point is one past the last descriptor written
	 * (idx..last).  Presumably the sync should cover the written
	 * range instead — verify against bus_dmamap_sync(9) semantics
	 * and later revisions of this driver.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update info of TX queue and descriptors. */
	txq->txq_mbuf = m;
	txq->txq_descidx = last;

	return (nsegs);
}
460 
461 int
462 rge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
463 {
464 	struct rge_softc *sc = ifp->if_softc;
465 	struct ifreq *ifr = (struct ifreq *)data;
466 	int s, error = 0;
467 
468 	s = splnet();
469 
470 	switch (cmd) {
471 	case SIOCSIFADDR:
472 		ifp->if_flags |= IFF_UP;
473 		if (!(ifp->if_flags & IFF_RUNNING))
474 			rge_init(ifp);
475 		break;
476 	case SIOCSIFFLAGS:
477 		if (ifp->if_flags & IFF_UP) {
478 			if (ifp->if_flags & IFF_RUNNING)
479 				error = ENETRESET;
480 			else
481 				rge_init(ifp);
482 		} else {
483 			if (ifp->if_flags & IFF_RUNNING)
484 				rge_stop(ifp);
485 		}
486 		break;
487 	case SIOCGIFMEDIA:
488 	case SIOCSIFMEDIA:
489 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
490 		break;
491 	case SIOCSIFMTU:
492 		if (ifr->ifr_mtu > ifp->if_hardmtu) {
493 			error = EINVAL;
494 			break;
495 		}
496 		ifp->if_mtu = ifr->ifr_mtu;
497 		break;
498 	case SIOCGIFRXR:
499 		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
500 		    NULL, RGE_JUMBO_FRAMELEN, &sc->rge_ldata.rge_rx_ring);
501 		break;
502 	default:
503 		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
504 	}
505 
506 	if (error == ENETRESET) {
507 		if (ifp->if_flags & IFF_RUNNING)
508 			rge_iff(sc);
509 		error = 0;
510 	}
511 
512 	splx(s);
513 	return (error);
514 }
515 
/*
 * Transmit start routine (if_qstart).  Dequeues packets, encapsulates
 * them into the TX ring, and kicks the chip via the serialized
 * rge_txstart task.  MP-safe: called through the ifq framework.
 */
void
rge_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct rge_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int free, idx, used;
	int queued = 0;

	/* No link: drop everything rather than queueing indefinitely. */
	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		ifq_purge(ifq);
		return;
	}

	/*
	 * Calculate free space: number of descriptors between the
	 * producer and consumer indices, modulo the ring size.
	 */
	idx = sc->rge_ldata.rge_txq_prodidx;
	free = sc->rge_ldata.rge_txq_considx;
	if (free <= idx)
		free += RGE_TX_LIST_CNT;
	free -= idx;

	for (;;) {
		/*
		 * Stop when a maximally fragmented packet (RGE_TX_NSEGS
		 * descriptors) might no longer fit, keeping slack.
		 */
		if (RGE_TX_NSEGS >= free + 2) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		used = rge_encap(sc, m, idx);
		/* Encapsulation failure: the mbuf is ours to free. */
		if (used == 0) {
			m_freem(m);
			continue;
		}

		KASSERT(used <= free);
		free -= used;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		idx += used;
		if (idx >= RGE_TX_LIST_CNT)
			idx -= RGE_TX_LIST_CNT;

		queued++;
	}

	if (queued == 0)
		return;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	sc->rge_ldata.rge_txq_prodidx = idx;
	/* Poke the TX doorbell from the serialized task context. */
	ifq_serialize(ifq, &sc->sc_task);
}
577 
578 void
579 rge_watchdog(struct ifnet *ifp)
580 {
581 	struct rge_softc *sc = ifp->if_softc;
582 
583 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
584 	ifp->if_oerrors++;
585 
586 	rge_init(ifp);
587 }
588 
/*
 * (Re)initialize the hardware and bring the interface up.  Called
 * from ioctl paths and from the interrupt handler on a system error.
 * Always returns 0.
 *
 * Most of the raw register/OCP writes below use undocumented,
 * vendor-specific registers; the constants appear to follow Realtek's
 * reference driver — TODO: confirm against vendor sources.
 */
int
rge_init(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	uint32_t val;
	int i;

	rge_stop(ifp);

	/* Set MAC address. */
	rge_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* Set Maximum frame size. */
	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);

	/* Initialize RX and TX descriptors lists. */
	rge_rx_list_init(sc);
	rge_tx_list_init(sc);

	/* Load the addresses of the RX and TX lists into the chip. */
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));

	/* Unlock config registers for writing (relocked at the end). */
	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	RGE_CLRBIT_1(sc, 0xf1, 0x80);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);

	/* Clear interrupt moderation timer. */
	for (i = 0; i < 64; i++)
		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);

	/* Set the initial RX and TX configurations. */
	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);

	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
	rge_write_csi(sc, 0x70c, val | 0x27000000);

	/* Enable hardware optimization function. */
	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);

	RGE_WRITE_2(sc, 0x0382, 0x221b);
	RGE_WRITE_1(sc, 0x4500, 0);
	RGE_WRITE_2(sc, 0x4800, 0);
	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);

	rge_write_mac_ocp(sc, 0xc140, 0xffff);
	rge_write_mac_ocp(sc, 0xc142, 0xffff);

	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);

	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);

	/* Several of the OCP values below differ by MAC revision. */
	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
	else
		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);

	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
		rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
	} else
		RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);

	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);

	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);

	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);

	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);

	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);

	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);

	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);

	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);

	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);

	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);

	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);

	/*
	 * NOTE(review): `val' is computed here but the writes below use
	 * fixed constants, not `val' — the read-modify is effectively
	 * dead.  Looks intentional in the vendor sequence; verify.
	 */
	val = rge_read_mac_ocp(sc, 0xe84c) & ~0x0040;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe84c, 0x00c0);
	else
		rge_write_mac_ocp(sc, 0xe84c, 0x0080);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);

	/* Disable EEE plus. */
	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);

	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);

	/* Pulse bit 0 of OCP 0xeb54. */
	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
	DELAY(1);
	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);

	RGE_CLRBIT_4(sc, 0x1880, 0x0030);

	rge_write_mac_ocp(sc, 0xe098, 0xc302);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);

	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);

	/* Wait (up to ~10ms) for OCP 0xe00e bit 0x2000 to clear. */
	for (i = 0; i < 10; i++) {
		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
			break;
		DELAY(1000);
	}

	/* Disable RXDV gate. */
	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	rge_ifmedia_upd(ifp);

	/* Enable transmit and receive. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);

	/* Program promiscuous mode and multicast filters. */
	rge_iff(sc);

	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);

	/* Relock config registers. */
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	/* Enable interrupts. */
	rge_setup_intr(sc, RGE_IMTYPE_SIM);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Start the once-a-second tick (link state polling etc.). */
	timeout_add_sec(&sc->sc_timeout, 1);

	return (0);
}
763 
/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 * Interrupts are masked and the chip reset before the intr/ifq
 * barriers guarantee no handler or start routine is still running.
 */
void
rge_stop(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	int i;

	timeout_del(&sc->sc_timeout);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	sc->rge_timerintr = 0;

	/* Stop accepting any frames. */
	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	/* Mask all interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

	/* Clear timer interrupts. */
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);

	rge_reset(sc);

	/* Wait out any interrupt handler / transmit path still running. */
	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);
	ifq_clr_oactive(&ifp->if_snd);

	/* Drop any partially reassembled jumbo frame. */
	if (sc->rge_head != NULL) {
		m_freem(sc->rge_head);
		sc->rge_head = sc->rge_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_txq[i].txq_dmamap);
			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
		}
	}
}
822 
/*
 * Set media options.  Programs the PHY's autonegotiation
 * advertisement registers (ANAR, 1000T, and the vendor 2.5G OCP
 * register 0xa5d4) and restarts autonegotiation.  Note that even
 * "fixed" media selections are implemented by restricting what is
 * advertised, not by forcing BMCR speed bits.
 */
int
rge_ifmedia_upd(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	int anar, gig, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Disable Gigabit Lite. */
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);

	/* Start from the current 2.5G advertisement, minus 2500TFDX. */
	val = rge_read_phy_ocp(sc, 0xa5d4);
	val &= ~RGE_ADV_2500TFDX;

	anar = gig = 0;
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Advertise everything: 10/100/1000 plus 2.5G. */
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		break;
	case IFM_2500_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		ifp->if_baudrate = IF_Mbps(2500);
		break;
	case IFM_1000_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		ifp->if_baudrate = IF_Gbps(1);
		break;
	case IFM_100_TX:
		/* Preserve other 100T2CR bits, drop 1000 advertisement. */
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
		    ANAR_TX | ANAR_10_FD | ANAR_10;
		ifp->if_baudrate = IF_Mbps(100);
		break;
	case IFM_10_T:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_10_FD | ANAR_10 : ANAR_10;
		ifp->if_baudrate = IF_Mbps(10);
		break;
	default:
		printf("%s: unsupported media type\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	/* Always advertise flow control, then restart autonegotiation. */
	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
	rge_write_phy(sc, 0, MII_100T2CR, gig);
	rge_write_phy_ocp(sc, 0xa5d4, val);
	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
	    BMCR_STARTNEG);

	return (0);
}
889 
890 /*
891  * Report current media status.
892  */
893 void
894 rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
895 {
896 	struct rge_softc *sc = ifp->if_softc;
897 	uint16_t status = 0;
898 
899 	ifmr->ifm_status = IFM_AVALID;
900 	ifmr->ifm_active = IFM_ETHER;
901 
902 	if (rge_get_link_status(sc)) {
903 		ifmr->ifm_status |= IFM_ACTIVE;
904 
905 		status = RGE_READ_2(sc, RGE_PHYSTAT);
906 		if ((status & RGE_PHYSTAT_FDX) ||
907 		    (status & RGE_PHYSTAT_2500MBPS))
908 			ifmr->ifm_active |= IFM_FDX;
909 		else
910 			ifmr->ifm_active |= IFM_HDX;
911 
912 		if (status & RGE_PHYSTAT_10MBPS)
913 			ifmr->ifm_active |= IFM_10_T;
914 		else if (status & RGE_PHYSTAT_100MBPS)
915 			ifmr->ifm_active |= IFM_100_TX;
916 		else if (status & RGE_PHYSTAT_1000MBPS)
917 			ifmr->ifm_active |= IFM_1000_T;
918 		else if (status & RGE_PHYSTAT_2500MBPS)
919 			ifmr->ifm_active |= IFM_2500_T;
920 	}
921 }
922 
/*
 * Allocate memory for RX/TX rings.
 * Returns 0 on success or a bus_dma error.  NOTE(review): on failure
 * after the first few steps, earlier allocations are only partially
 * unwound (the load-failure paths clean up, the alloc/map ones do
 * not); the caller, rge_attach(), simply aborts attach in that case.
 */
int
rge_allocmem(struct rge_softc *sc)
{
	int error, i;

	/* Allocate DMA'able memory for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
	if (error) {
		printf("%s: can't create TX list map\n", sc->sc_dev.dv_xname);
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
	    BUS_DMA_NOWAIT| BUS_DMA_ZERO);
	if (error) {
		printf("%s: can't alloc TX list\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/* Load the map for the TX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
	    (caddr_t *)&sc->rge_ldata.rge_tx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: can't map TX dma buffers\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load TX dma map\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}

	/* Create DMA maps for TX buffers (up to RGE_TX_NSEGS segments). */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for TX\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	/* Allocate DMA'able memory for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
	if (error) {
		printf("%s: can't create RX list map\n", sc->sc_dev.dv_xname);
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
	    BUS_DMA_NOWAIT| BUS_DMA_ZERO);
	if (error) {
		printf("%s: can't alloc RX list\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/* Load the map for the RX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
	    (caddr_t *)&sc->rge_ldata.rge_rx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: can't map RX dma buffers\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load RX dma map\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}

	/* Create DMA maps for RX buffers (single segment each). */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
		    RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for RX\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	return (error);
}
1033 
/*
 * Initialize the RX descriptor and attach an mbuf cluster.
 * Fills the descriptor at the current producer index with a freshly
 * allocated cluster and hands it to the chip (OWN bit set last).
 * Returns 0 on success, ENOBUFS if allocation or mapping fails.
 */
int
rge_newbuf(struct rge_softc *sc)
{
	struct mbuf *m;
	struct rge_rx_desc *r;
	struct rge_rxq *rxq;
	bus_dmamap_t rxmap;
	int idx;

	m = MCLGETI(NULL, M_DONTWAIT, NULL, RGE_JUMBO_FRAMELEN);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;

	idx = sc->rge_ldata.rge_rxq_prodidx;
	rxq = &sc->rge_ldata.rge_rxq[idx];
	rxmap = rxq->rxq_dmamap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT)) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Map the segments into RX descriptors. */
	r = &sc->rge_ldata.rge_rx_list[idx];

	/* Sanity check: the chip must have given this descriptor back. */
	if (RGE_OWN(r)) {
		printf("%s: tried to map busy RX descriptor\n",
		    sc->sc_dev.dv_xname);
		m_freem(m);
		return (ENOBUFS);
	}

	rxq->rxq_mbuf = m;

	r->rge_extsts = 0;
	r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
	r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));

	r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
	/* Mark the end-of-ring descriptor so the chip wraps. */
	if (idx == RGE_RX_LIST_CNT - 1)
		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);

	/* Hand the descriptor to the chip last. */
	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->rge_ldata.rge_rxq_prodidx = RGE_NEXT_RX_DESC(idx);

	return (0);
}
1094 
1095 void
1096 rge_discard_rxbuf(struct rge_softc *sc, int idx)
1097 {
1098 	struct rge_rx_desc *r;
1099 
1100 	r = &sc->rge_ldata.rge_rx_list[idx];
1101 
1102 	r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
1103 	r->rge_extsts = 0;
1104 	if (idx == RGE_RX_LIST_CNT - 1)
1105 		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
1106 	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
1107 
1108 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1109 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1110 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1111 }
1112 
1113 void
1114 rge_rx_list_init(struct rge_softc *sc)
1115 {
1116 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
1117 
1118 	sc->rge_ldata.rge_rxq_prodidx = sc->rge_ldata.rge_rxq_considx = 0;
1119 	sc->rge_head = sc->rge_tail = NULL;
1120 
1121 	if_rxr_init(&sc->rge_ldata.rge_rx_ring, 2, RGE_RX_LIST_CNT - 1);
1122 	rge_fill_rx_ring(sc);
1123 }
1124 
1125 void
1126 rge_fill_rx_ring(struct rge_softc *sc)
1127 {
1128 	struct if_rxring *rxr = &sc->rge_ldata.rge_rx_ring;
1129 	int slots;
1130 
1131 	for (slots = if_rxr_get(rxr, RGE_RX_LIST_CNT); slots > 0; slots--) {
1132 		if (rge_newbuf(sc) == ENOBUFS)
1133 			break;
1134 	}
1135 	if_rxr_put(rxr, slots);
1136 }
1137 
1138 void
1139 rge_tx_list_init(struct rge_softc *sc)
1140 {
1141 	int i;
1142 
1143 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
1144 
1145 	for (i = 0; i < RGE_TX_LIST_CNT; i++)
1146 		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
1147 
1148 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
1149 	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
1150 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1151 
1152 	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
1153 }
1154 
/*
 * RX completion: reclaim descriptors the chip has finished with,
 * set checksum/VLAN offload flags and pass the frames to the stack.
 * Returns nonzero if at least one descriptor was consumed.
 */
int
rge_rxeof(struct rge_softc *sc)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct if_rxring *rxr = &sc->rge_ldata.rge_rx_ring;
	struct rge_rx_desc *cur_rx;
	struct rge_rxq *rxq;
	uint32_t rxstat, extsts;
	int i, total_len, rx = 0;

	for (i = sc->rge_ldata.rge_rxq_considx; if_rxr_inuse(rxr) > 0;
	    i = RGE_NEXT_RX_DESC(i)) {
		/* Invalidate the descriptor memory. */
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->rge_ldata.rge_rx_list[i];

		/* Stop at the first descriptor the chip still owns. */
		if (RGE_OWN(cur_rx))
			break;

		rxstat = letoh32(cur_rx->rge_cmdsts);
		extsts = letoh32(cur_rx->rge_extsts);

		total_len = RGE_RXBYTES(cur_rx);
		rxq = &sc->rge_ldata.rge_rxq[i];
		m = rxq->rxq_mbuf;
		rxq->rxq_mbuf = NULL;
		if_rxr_put(rxr, 1);
		rx = 1;

		/* Invalidate the RX mbuf and unload its map. */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);

		/*
		 * Frames missing either the start- or end-of-frame bit
		 * are not complete single-descriptor packets; recycle
		 * the buffer.
		 */
		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
			rge_discard_rxbuf(sc, i);
			continue;
		}

		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			 if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_discard_rxbuf(sc, i);
			continue;
		}

		/* Append to a pending chain, or deliver as-is. */
		if (sc->rge_head != NULL) {
			m->m_len = total_len;
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rge_tail->m_next = m;
			}
			m = sc->rge_head;
			sc->rge_head = sc->rge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		/* Check IP header checksum. */
		if (!(rxstat & RGE_RDCMDSTS_IPCSUMERR) &&
		    (extsts & RGE_RDEXTSTS_IPV4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum. */
		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
		    (((rxstat & RGE_RDCMDSTS_TCPPKT) &&
		    !(rxstat & RGE_RDCMDSTS_TCPCSUMERR)) ||
		    ((rxstat & RGE_RDCMDSTS_UDPPKT) &&
		    !(rxstat & RGE_RDCMDSTS_UDPCSUMERR))))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;

#if NVLAN > 0
		/* Hardware-stripped VLAN tag, if present. */
		if (extsts & RGE_RDEXTSTS_VTAG) {
			m->m_pkthdr.ether_vtag =
			    ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);
	}

	sc->rge_ldata.rge_rxq_considx = i;
	/* Replace the buffers we consumed. */
	rge_fill_rx_ring(sc);

	if_input(ifp, &ml);

	return (rx);
}
1270 
/*
 * TX completion: reclaim descriptors the chip has finished sending.
 *
 * `free' tracks progress: 0 = nothing reclaimed, 1 = drained up to the
 * producer index, 2 = stopped early at a descriptor the chip still owns
 * (in which case the send task is rescheduled to come back later).
 * Returns nonzero if any descriptor was reclaimed or is still pending.
 */
int
rge_txeof(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct rge_txq *txq;
	uint32_t txstat;
	int cons, idx, prod;
	int free = 0;

	prod = sc->rge_ldata.rge_txq_prodidx;
	cons = sc->rge_ldata.rge_txq_considx;

	while (prod != cons) {
		txq = &sc->rge_ldata.rge_txq[cons];
		idx = txq->txq_descidx;

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);

		/* Chip is still transmitting this one; stop here. */
		if (txstat & RGE_TDCMDSTS_OWN) {
			free = 2;
			break;
		}

		/* Release the DMA map and the transmitted mbuf. */
		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & RGE_TDCMDSTS_TXERR)
			ifp->if_oerrors++;

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		cons = RGE_NEXT_TX_DESC(idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	sc->rge_ldata.rge_txq_considx = cons;

	/* Restart a stalled queue, or re-poll later if the chip is busy. */
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
	else if (free == 2)
		ifq_serialize(&ifp->if_snd, &sc->sc_task)
	else
		ifp->if_timer = 0;

	return (1);
}
1333 
/*
 * Reset the chip: gate the receiver, drain the FIFOs, then issue a
 * software reset and wait for the reset bit to self-clear.
 */
void
rge_reset(struct rge_softc *sc)
{
	int i;

	/* Enable RXDV gate. */
	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	/* Wait (up to ~150ms) for both FIFOs to report empty. */
	for (i = 0; i < 3000; i++) {
		DELAY(50);
		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY))
			break;
	}
	/* NOTE(review): extra poll on newer MACs; 0x0103 is undocumented. */
	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
		for (i = 0; i < 3000; i++) {
			DELAY(50);
			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
				break;
		}
	}

	DELAY(2000);

	/* Soft reset. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);

	/* The reset bit self-clears when the chip is done. */
	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(100);
		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
			break;
	}
	if (i == RGE_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
}
1371 
1372 void
1373 rge_iff(struct rge_softc *sc)
1374 {
1375 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1376 	struct arpcom *ac = &sc->sc_arpcom;
1377 	struct ether_multi *enm;
1378 	struct ether_multistep step;
1379 	uint32_t hashes[2];
1380 	uint32_t rxfilt;
1381 	int h = 0;
1382 
1383 	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
1384 	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
1385 	ifp->if_flags &= ~IFF_ALLMULTI;
1386 
1387 	/*
1388 	 * Always accept frames destined to our station address.
1389 	 * Always accept broadcast frames.
1390 	 */
1391 	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;
1392 
1393 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1394 		ifp->if_flags |= IFF_ALLMULTI;
1395 		rxfilt |= RGE_RXCFG_MULTI;
1396 		if (ifp->if_flags & IFF_PROMISC)
1397 			rxfilt |= RGE_RXCFG_ALLPHYS;
1398 		hashes[0] = hashes[1] = 0xffffffff;
1399 	} else {
1400 		rxfilt |= RGE_RXCFG_MULTI;
1401 		/* Program new filter. */
1402 		memset(hashes, 0, sizeof(hashes));
1403 
1404 		ETHER_FIRST_MULTI(step, ac, enm);
1405 		while (enm != NULL) {
1406 			h = ether_crc32_be(enm->enm_addrlo,
1407 			    ETHER_ADDR_LEN) >> 26;
1408 
1409 			if (h < 32)
1410 				hashes[0] |= (1 << h);
1411 			else
1412 				hashes[1] |= (1 << (h - 32));
1413 
1414 			ETHER_NEXT_MULTI(step, enm);
1415 		}
1416 	}
1417 
1418 	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
1419 	RGE_WRITE_4(sc, RGE_MAR0, swap32(hashes[1]));
1420 	RGE_WRITE_4(sc, RGE_MAR4, swap32(hashes[0]));
1421 }
1422 
/*
 * Power the internal PHY up or down.
 */
void
rge_set_phy_power(struct rge_softc *sc, int on)
{
	int i;

	if (on) {
		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);

		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);

		/*
		 * Poll the PHY state field (low 3 bits of OCP 0xa420)
		 * until it reads 3 — presumably "ready"; vendor-defined,
		 * TODO confirm.
		 */
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
				break;
			DELAY(1000);
		}
	} else {
		/* Power down: assert PDOWN and gate the PHY power rails. */
		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
	}
}
1444 
/*
 * PHY initialization: dispatch to the per-MAC-revision setup routine,
 * then apply settings common to all supported revisions (notably
 * disabling EEE).  Register values are vendor-prescribed and have no
 * public documentation.
 */
void
rge_phy_config(struct rge_softc *sc)
{
	/* Read microcode version. */
	rge_write_phy_ocp(sc, 0xa436, 0x801e);
	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);

	switch (sc->rge_type) {
	case MAC_CFG2:
		rge_phy_config_mac_cfg2(sc);
		break;
	case MAC_CFG3:
		rge_phy_config_mac_cfg3(sc);
		break;
	case MAC_CFG4:
		rge_phy_config_mac_cfg4(sc);
		break;
	case MAC_CFG5:
		rge_phy_config_mac_cfg5(sc);
		break;
	default:
		break;	/* Can't happen. */
	}

	rge_write_phy(sc, 0x0a5b, 0x12,
	    rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);

	/* Disable EEE. */
	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
	}
	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);

	/* These bits must be changed under the MCU patch lock. */
	rge_patch_phy_mcu(sc, 1);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
	rge_patch_phy_mcu(sc, 0);
}
1490 
/*
 * PHY setup for MAC_CFG2 revisions.  The OCP register addresses and
 * values below follow Realtek's reference initialization; they are not
 * publicly documented, so they are reproduced verbatim.
 */
void
rge_phy_config_mac_cfg2(struct rge_softc *sc)
{
	uint16_t val;
	int i;

	/* Load the per-revision EPHY fixup table. */
	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
		    rtl8125_mac_cfg2_ephy[i].val);

	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);

	/* Read-modify-write tuning of various OCP registers. */
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);

	/* Indirect writes: 0xa436 selects the address, 0xa438 the data. */
	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
	rge_write_phy_ocp(sc, 0xa436, 0x8102);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x8105);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
	rge_write_phy_ocp(sc, 0xa436, 0x8100);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
	rge_write_phy_ocp(sc, 0xa436, 0x8104);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
	rge_write_phy_ocp(sc, 0xa436, 0x8106);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
	rge_write_phy_ocp(sc, 0xa436, 0x819f);
	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}
1564 
/*
 * PHY setup for MAC_CFG3 revisions.  All register values follow
 * Realtek's reference initialization; they are not publicly documented.
 */
void
rge_phy_config_mac_cfg3(struct rge_softc *sc)
{
	uint16_t val;
	int i;
	/* Data poured into indirect register 0xa438 starting at 0x81b3. */
	static const uint16_t mac_cfg3_a438_value[] =
	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };

	/* (address, data) pairs for the 0xb88e/0xb890 indirect window. */
	static const uint16_t mac_cfg3_b88e_value[] =
	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };

	/* Load the per-revision EPHY fixup table. */
	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
		    rtl8125_mac_cfg3_ephy[i].val);

	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
	rge_write_ephy(sc, 0x002a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
	rge_write_ephy(sc, 0x0002, 0x6042);
	rge_write_ephy(sc, 0x0006, 0x0014);
	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
	rge_write_ephy(sc, 0x006a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
	rge_write_ephy(sc, 0x0042, 0x6042);
	rge_write_ephy(sc, 0x0046, 0x0014);

	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
	/* EEE TX idle timer sized for a full jumbo frame. */
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, RGE_JUMBO_MTU + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);

	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
	for (i = 0; i < 26; i++)
		rge_write_phy_ocp(sc, 0xa438, 0);
	rge_write_phy_ocp(sc, 0xa436, 0x8257);
	rge_write_phy_ocp(sc, 0xa438, 0x020f);
	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	rge_write_phy_ocp(sc, 0xa438, 0x7843);

	/* The 0xb88e/0xb890 table must be loaded under the patch lock. */
	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);

	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}
1668 
/*
 * PHY setup for MAC_CFG4 revisions.  All register values follow
 * Realtek's reference initialization; they are not publicly documented.
 */
void
rge_phy_config_mac_cfg4(struct rge_softc *sc)
{
	uint16_t val;
	int i;
	/* (address, data) pairs for the 0xb87c/0xb87e indirect window. */
	static const uint16_t mac_cfg4_b87c_value[] =
	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
	      0x80b0, 0x0f31 };

	/* Load the per-revision EPHY fixup table. */
	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
		    rtl8125_mac_cfg4_ephy[i].val);

	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);

	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
		if (i < 3)
			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80F2);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
	rge_write_phy_ocp(sc, 0xac4E, 0x0db4);
	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
	rge_write_phy_ocp(sc, 0xad08, 0x0007);
	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
	/* EEE TX idle timer sized for a full jumbo frame. */
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, RGE_JUMBO_MTU + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xa436, 0x816c);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8170);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8174);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8178);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x817c);
	rge_write_phy_ocp(sc, 0xa438, 0x0719);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
	rge_write_phy_ocp(sc, 0xa438, 0x0400);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
	rge_write_phy_ocp(sc, 0xa438, 0x0404);
	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
		if (i == 2)
			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
	rge_write_phy_ocp(sc, 0xa436, 0x8217);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x821a);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x80da);
	rge_write_phy_ocp(sc, 0xa438, 0x0403);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
	rge_write_phy_ocp(sc, 0xa438, 0x0384);
	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
	rge_write_phy_ocp(sc, 0xa438, 0xf009);
	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
	rge_write_phy_ocp(sc, 0xa438, 0xf083);
	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
	rge_write_phy_ocp(sc, 0xa438, 0x8009);
	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
	rge_write_phy_ocp(sc, 0xa438, 0x200a);
	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
	rge_write_phy_ocp(sc, 0xa436, 0x809f);
	rge_write_phy_ocp(sc, 0xa438, 0x6073);
	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
	rge_write_phy_ocp(sc, 0xa438, 0x000b);
	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
	/* The 0xb88e/0xb890 table must be loaded under the patch lock. */
	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
	rge_write_phy_ocp(sc, 0xb890, 0x0000);
	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
	rge_write_phy_ocp(sc, 0xb890, 0x0103);
	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
	rge_write_phy_ocp(sc, 0xb890, 0x0507);
	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
	rge_write_phy_ocp(sc, 0xb890, 0x090b);
	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
	rge_write_phy_ocp(sc, 0xb890, 0x1012);
	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
	rge_write_phy_ocp(sc, 0xb890, 0x1416);
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);
	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
	rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
	rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
	rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
	rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
	rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
	RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
	rge_write_phy_ocp(sc, 0xa436, 0x817d);
	RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
}
1857 
/*
 * PHY setup for MAC_CFG5 revisions.  All register values follow
 * Realtek's reference initialization; they are not publicly documented.
 */
void
rge_phy_config_mac_cfg5(struct rge_softc *sc)
{
	uint16_t val;
	int i;

	/* Load the per-revision EPHY fixup table. */
	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
		    rtl8125_mac_cfg5_ephy[i].val);

	val = rge_read_ephy(sc, 0x0022) & ~0x0030;
	rge_write_ephy(sc, 0x0022, val | 0x0020);
	val = rge_read_ephy(sc, 0x0062) & ~0x0030;
	rge_write_ephy(sc, 0x0062, val | 0x0020);

	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);

	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
	/* EEE TX idle timer sized for a full jumbo frame. */
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, RGE_JUMBO_MTU + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
	/* Write 0x2417 to indirect registers 0x8044,0x804a,...,0x809a. */
	for (i = 0; i < 10; i++) {
		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
		rge_write_phy_ocp(sc, 0xa438, 0x2417);
	}
	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
}
1909 
1910 void
1911 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
1912 {
1913 	if (sc->rge_mcodever != mcode_version) {
1914 		int i;
1915 
1916 		rge_patch_phy_mcu(sc, 1);
1917 
1918 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
1919 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
1920 			if (sc->rge_type == MAC_CFG2)
1921 				rge_write_phy_ocp(sc, 0xa438, 0x8600);
1922 			else
1923 				rge_write_phy_ocp(sc, 0xa438, 0x8601);
1924 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
1925 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
1926 
1927 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
1928 		}
1929 
1930 		if (sc->rge_type == MAC_CFG2) {
1931 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
1932 				rge_write_phy_ocp(sc,
1933 				    rtl8125_mac_cfg2_mcu[i].reg,
1934 				    rtl8125_mac_cfg2_mcu[i].val);
1935 			}
1936 		} else if (sc->rge_type == MAC_CFG3) {
1937 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
1938 				rge_write_phy_ocp(sc,
1939 				    rtl8125_mac_cfg3_mcu[i].reg,
1940 				    rtl8125_mac_cfg3_mcu[i].val);
1941 			}
1942 		} else if (sc->rge_type == MAC_CFG4) {
1943 			for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
1944 				rge_write_phy_ocp(sc,
1945 				    rtl8125_mac_cfg4_mcu[i].reg,
1946 				    rtl8125_mac_cfg4_mcu[i].val);
1947 			}
1948 		} else if (sc->rge_type == MAC_CFG5) {
1949 			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
1950 				rge_write_phy_ocp(sc,
1951 				    rtl8125_mac_cfg5_mcu[i].reg,
1952 				    rtl8125_mac_cfg5_mcu[i].val);
1953 			}
1954 		}
1955 
1956 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
1957 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
1958 
1959 			rge_write_phy_ocp(sc, 0xa436, 0);
1960 			rge_write_phy_ocp(sc, 0xa438, 0);
1961 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
1962 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
1963 			rge_write_phy_ocp(sc, 0xa438, 0);
1964 		}
1965 
1966 		rge_patch_phy_mcu(sc, 0);
1967 
1968 		/* Write microcode version. */
1969 		rge_write_phy_ocp(sc, 0xa436, 0x801e);
1970 		rge_write_phy_ocp(sc, 0xa438, mcode_version);
1971 	}
1972 }
1973 
1974 void
1975 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
1976 {
1977 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1978 	RGE_WRITE_4(sc, RGE_MAC0,
1979 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1980 	RGE_WRITE_4(sc, RGE_MAC4,
1981 	    addr[5] <<  8 | addr[4]);
1982 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1983 }
1984 
1985 void
1986 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
1987 {
1988 	*(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
1989 	*(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
1990 }
1991 
/*
 * One-time MAC initialization: clear PME/CLKREQ state, disable UPS,
 * halt and clear the MAC MCU, load the per-chip MCU breakpoint table
 * and disable PHY power saving.  The statement order follows the
 * Realtek reference driver and should not be rearranged.
 */
void
rge_hw_init(struct rge_softc *sc)
{
	int i;

	/* Config registers are only writable between the EECMD bracket. */
	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	/* Undocumented register/bit; taken from the vendor driver. */
	RGE_CLRBIT_1(sc, 0xf1, 0x80);

	/* Disable UPS. */
	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);

	/* Configure MAC MCU. */
	rge_write_mac_ocp(sc, 0xfc38, 0);

	/* Zero the MAC OCP range 0xfc28-0xfc36 (MCU breakpoints). */
	for (i = 0xfc28; i < 0xfc38; i += 2)
		rge_write_mac_ocp(sc, i, 0);

	DELAY(3000);
	rge_write_mac_ocp(sc, 0xfc26, 0);

	/* Load the chip-specific breakpoint table, if there is one. */
	if (sc->rge_type == MAC_CFG3) {
		for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
			rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
			    rtl8125_mac_bps[i].val);
		}
	} else if (sc->rge_type == MAC_CFG5) {
		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
			    rtl8125b_mac_bps[i].val);
		}
	}

	/* Disable PHY power saving. */
	rge_disable_phy_ocp_pwrsave(sc);

	/* Set PCIe uncorrectable error status. */
	rge_write_csi(sc, 0x108,
	    rge_read_csi(sc, 0x108) | 0x00100000);
}
2034 
/*
 * Force the PHY OCP power-save control (reg 0xc416) to 0x0500,
 * rewriting it under MCU-patch mode.  Nothing to do if the register
 * already holds that value.
 */
void
rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
{
	if (rge_read_phy_ocp(sc, 0xc416) == 0x0500)
		return;

	rge_patch_phy_mcu(sc, 1);
	rge_write_phy_ocp(sc, 0xc416, 0);
	rge_write_phy_ocp(sc, 0xc416, 0x0500);
	rge_patch_phy_mcu(sc, 0);
}
2045 
/*
 * Enter (set != 0) or leave (set == 0) PHY microcode patch mode, then
 * wait for the PHY to signal readiness via bit 0x0040 of OCP register
 * 0xb800.  Times out after roughly 100ms with only a debug message.
 */
void
rge_patch_phy_mcu(struct rge_softc *sc, int set)
{
	int n;

	if (set)
		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
	else
		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);

	for (n = 0; n < 1000; n++) {
		if (rge_read_phy_ocp(sc, 0xb800) & 0x0040)
			break;
		DELAY(100);
	}
	if (n == 1000)
		DPRINTF(("timeout waiting to patch phy mcu\n"));
}
2066 
2067 void
2068 rge_add_media_types(struct rge_softc *sc)
2069 {
2070 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
2071 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
2072 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
2073 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
2074 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
2075 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2076 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
2077 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
2078 }
2079 
2080 void
2081 rge_config_imtype(struct rge_softc *sc, int imtype)
2082 {
2083 	switch (imtype) {
2084 	case RGE_IMTYPE_NONE:
2085 		sc->rge_intrs = RGE_INTRS;
2086 		sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
2087 		    RGE_ISR_RX_FIFO_OFLOW;
2088 		sc->rge_tx_ack = RGE_ISR_TX_OK;
2089 		break;
2090 	case RGE_IMTYPE_SIM:
2091 		sc->rge_intrs = RGE_INTRS_TIMER;
2092 		sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
2093 		sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
2094 		break;
2095 	default:
2096 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
2097 	}
2098 }
2099 
/*
 * Disable hardware interrupt moderation by zeroing the IM register.
 */
void
rge_disable_hw_im(struct rge_softc *sc)
{
	RGE_WRITE_2(sc, RGE_IM, 0);
}
2105 
/*
 * Disable simulated (timer-driven) interrupt moderation: stop timer 0
 * and note in the softc that timer interrupts are off.
 */
void
rge_disable_sim_im(struct rge_softc *sc)
{
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	sc->rge_timerintr = 0;
}
2112 
/*
 * Enable simulated interrupt moderation driven by timer 0.
 */
void
rge_setup_sim_im(struct rge_softc *sc)
{
	/* 0x2600 is the vendor-chosen interval; exact tick unconfirmed. */
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
	/* NOTE(review): writing TIMERCNT appears to arm/reload the timer. */
	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	sc->rge_timerintr = 1;
}
2120 
2121 void
2122 rge_setup_intr(struct rge_softc *sc, int imtype)
2123 {
2124 	rge_config_imtype(sc, imtype);
2125 
2126 	/* Enable interrupts. */
2127 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
2128 
2129 	switch (imtype) {
2130 	case RGE_IMTYPE_NONE:
2131 		rge_disable_sim_im(sc);
2132 		rge_disable_hw_im(sc);
2133 		break;
2134 	case RGE_IMTYPE_SIM:
2135 		rge_disable_hw_im(sc);
2136 		rge_setup_sim_im(sc);
2137 		break;
2138 	default:
2139 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
2140 	}
2141 }
2142 
/*
 * Take the chip out of OOB (out-of-band/management) mode so the driver
 * owns the hardware.  Sequence and magic values follow the Realtek
 * reference driver; do not reorder.
 */
void
rge_exit_oob(struct rge_softc *sc)
{
	int i;

	/* Stop accepting any frame class while reconfiguring. */
	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	/* Disable RealWoW. */
	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);

	rge_reset(sc);

	/* Disable OOB. */
	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);

	/* Undocumented MAC OCP bit; from the vendor driver. */
	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);

	/* Best-effort wait (~1ms) for TWICMD bit 0x0200 to assert. */
	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	/* Magic MAC OCP values taken from the vendor driver. */
	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
	rge_write_mac_ocp(sc, 0xc01e, 0x5555);

	/* Wait for the same handshake bit again. */
	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	/* If resuming from UPS, wait for the PHY status field to reach 2. */
	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
		printf("%s: rge_exit_oob(): rtl8125_is_ups_resume!!\n",
		    sc->sc_dev.dv_xname);
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
				break;
			DELAY(1000);
		}
		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
		if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
	}
}
2192 
2193 void
2194 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
2195 {
2196 	int i;
2197 
2198 	RGE_WRITE_4(sc, RGE_CSIDR, val);
2199 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2200 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
2201 
2202 	for (i = 0; i < 10; i++) {
2203 		 DELAY(100);
2204 		 if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
2205 			break;
2206 	}
2207 
2208 	DELAY(20);
2209 }
2210 
2211 uint32_t
2212 rge_read_csi(struct rge_softc *sc, uint32_t reg)
2213 {
2214 	int i;
2215 
2216 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2217 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
2218 
2219 	for (i = 0; i < 10; i++) {
2220 		 DELAY(100);
2221 		 if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
2222 			break;
2223 	}
2224 
2225 	DELAY(20);
2226 
2227 	return (RGE_READ_4(sc, RGE_CSIDR));
2228 }
2229 
2230 void
2231 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2232 {
2233 	uint32_t tmp;
2234 
2235 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2236 	tmp += val;
2237 	tmp |= RGE_MACOCP_BUSY;
2238 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
2239 }
2240 
2241 uint16_t
2242 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
2243 {
2244 	uint32_t val;
2245 
2246 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2247 	RGE_WRITE_4(sc, RGE_MACOCP, val);
2248 
2249 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
2250 }
2251 
2252 void
2253 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
2254 {
2255 	uint32_t tmp;
2256 	int i;
2257 
2258 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2259 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
2260 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
2261 
2262 	for (i = 0; i < 10; i++) {
2263 		DELAY(100);
2264 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
2265 			break;
2266 	}
2267 
2268 	DELAY(20);
2269 }
2270 
2271 uint16_t
2272 rge_read_ephy(struct rge_softc *sc, uint16_t reg)
2273 {
2274 	uint32_t val;
2275 	int i;
2276 
2277 	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2278 	RGE_WRITE_4(sc, RGE_EPHYAR, val);
2279 
2280 	for (i = 0; i < 10; i++) {
2281 		DELAY(100);
2282 		val = RGE_READ_4(sc, RGE_EPHYAR);
2283 		if (val & RGE_EPHYAR_BUSY)
2284 			break;
2285 	}
2286 
2287 	DELAY(20);
2288 
2289 	return (val & RGE_EPHYAR_DATA_MASK);
2290 }
2291 
2292 void
2293 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
2294 {
2295 	uint16_t off, phyaddr;
2296 
2297 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2298 	phyaddr <<= 4;
2299 
2300 	off = addr ? reg : 0x10 + (reg % 8);
2301 
2302 	phyaddr += (off - 16) << 1;
2303 
2304 	rge_write_phy_ocp(sc, phyaddr, val);
2305 }
2306 
2307 uint16_t
2308 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
2309 {
2310 	uint16_t off, phyaddr;
2311 
2312 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2313 	phyaddr <<= 4;
2314 
2315 	off = addr ? reg : 0x10 + (reg % 8);
2316 
2317 	phyaddr += (off - 16) << 1;
2318 
2319 	return (rge_read_phy_ocp(sc, phyaddr));
2320 }
2321 
2322 void
2323 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2324 {
2325 	uint32_t tmp;
2326 	int i;
2327 
2328 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2329 	tmp |= RGE_PHYOCP_BUSY | val;
2330 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
2331 
2332 	for (i = 0; i < RGE_TIMEOUT; i++) {
2333 		DELAY(1);
2334 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
2335 			break;
2336 	}
2337 }
2338 
2339 uint16_t
2340 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
2341 {
2342 	uint32_t val;
2343 	int i;
2344 
2345 	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2346 	RGE_WRITE_4(sc, RGE_PHYOCP, val);
2347 
2348 	for (i = 0; i < RGE_TIMEOUT; i++) {
2349 		DELAY(1);
2350 		val = RGE_READ_4(sc, RGE_PHYOCP);
2351 		if (val & RGE_PHYOCP_BUSY)
2352 			break;
2353 	}
2354 
2355 	return (val & RGE_PHYOCP_DATA_MASK);
2356 }
2357 
2358 int
2359 rge_get_link_status(struct rge_softc *sc)
2360 {
2361 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
2362 }
2363 
/*
 * Kick the transmitter.  Callback taking the softc as its opaque
 * argument.
 */
void
rge_txstart(void *arg)
{
	struct rge_softc *sc = arg;

	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
}
2371 
/*
 * Periodic (1 Hz) timeout: refresh the link state and reschedule
 * ourselves.
 */
void
rge_tick(void *arg)
{
	struct rge_softc *sc = arg;
	int s;

	/* Block network interrupts while touching interface state. */
	s = splnet();
	rge_link_state(sc);
	splx(s);

	timeout_add_sec(&sc->sc_timeout, 1);
}
2384 
2385 void
2386 rge_link_state(struct rge_softc *sc)
2387 {
2388 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2389 	int link = LINK_STATE_DOWN;
2390 
2391 	if (rge_get_link_status(sc))
2392 		link = LINK_STATE_UP;
2393 
2394 	if (ifp->if_link_state != link) {
2395 		ifp->if_link_state = link;
2396 		if_link_state_change(ifp);
2397 	}
2398 }
2399