/*	$OpenBSD: if_rge.c,v 1.6 2020/08/07 13:53:58 kevlo Exp $	*/

/*
 * Copyright (c) 2019, 2020 Kevin Lo <kevlo@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/endian.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_rgereg.h>

#ifdef RGE_DEBUG
#define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
int rge_debug = 0;
#else
#define DPRINTF(x)
#endif

int		rge_match(struct device *, void *, void *);
void		rge_attach(struct device *, struct device *, void *);
int		rge_intr(void *);
int		rge_encap(struct rge_softc *, struct mbuf *, int);
int		rge_ioctl(struct ifnet *, u_long, caddr_t);
void		rge_start(struct ifqueue *);
void		rge_watchdog(struct ifnet *);
int		rge_init(struct ifnet *);
void		rge_stop(struct ifnet *);
int		rge_ifmedia_upd(struct ifnet *);
void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int		rge_allocmem(struct rge_softc *);
int		rge_newbuf(struct rge_softc *);
void		rge_discard_rxbuf(struct rge_softc *, int);
void		rge_rx_list_init(struct rge_softc *);
void		rge_tx_list_init(struct rge_softc *);
void		rge_fill_rx_ring(struct rge_softc *);
int		rge_rxeof(struct rge_softc *);
int		rge_txeof(struct rge_softc *);
void		rge_reset(struct rge_softc *);
void		rge_iff(struct rge_softc *);
void		rge_set_phy_power(struct rge_softc *, int);
void		rge_phy_config(struct rge_softc *);
void		rge_phy_config_mac_cfg2(struct rge_softc *);
void		rge_phy_config_mac_cfg3(struct rge_softc *);
void		rge_phy_config_mac_cfg4(struct rge_softc *);
void		rge_phy_config_mac_cfg5(struct rge_softc *);
void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
void		rge_get_macaddr(struct rge_softc *, uint8_t *);
void		rge_hw_init(struct rge_softc *);
void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
void		rge_patch_phy_mcu(struct rge_softc *, int);
void		rge_add_media_types(struct rge_softc *);
void		rge_config_imtype(struct rge_softc *, int);
void		rge_disable_hw_im(struct rge_softc *);
void		rge_disable_sim_im(struct rge_softc *);
void		rge_setup_sim_im(struct rge_softc *);
void		rge_setup_intr(struct rge_softc *, int);
void		rge_exit_oob(struct rge_softc *);
void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
int		rge_get_link_status(struct rge_softc *);
void		rge_txstart(void *);
void		rge_tick(void *);
void		rge_link_state(struct rge_softc *);

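/*
 * Register/value pairs used to patch the chip's MCU for each supported
 * MAC revision; the tables themselves come from if_rgereg.h.
 */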
static const struct {
	uint16_t reg;
	uint16_t val;
} rtl8125_mac_cfg2_mcu[] = {
	RTL8125_MAC_CFG2_MCU
}, rtl8125_mac_cfg3_mcu[] = {
	RTL8125_MAC_CFG3_MCU
}, rtl8125_mac_cfg4_mcu[] = {
	RTL8125_MAC_CFG4_MCU
}, rtl8125_mac_cfg5_mcu[] = {
	RTL8125_MAC_CFG5_MCU
};

struct cfattach rge_ca = {
	sizeof(struct rge_softc), rge_match, rge_attach
};

struct cfdriver rge_cd = {
	NULL, "rge", DV_IFNET
};

const struct pci_matchid rge_devices[] = {
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8125 }
};

int
rge_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, rge_devices,
	    nitems(rge_devices)));
}

void
rge_attach(struct device *parent, struct device *self, void *aux)
{
	struct rge_softc *sc = (struct rge_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp;
	pcireg_t reg;
	uint32_t hwrev;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int offset;

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Map control/status registers.
	 */
	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
	    NULL, &sc->rge_bsize, 0)) {
		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
		    &sc->rge_bhandle, NULL, &sc->rge_bsize, 0)) {
			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
			    &sc->rge_bsize, 0)) {
				printf(": can't map mem or i/o space\n");
				return;
			}
		}
	}

	/*
	 * Allocate interrupt.
	 */
	if (pci_intr_map_msi(pa, &ih) == 0)
		sc->rge_flags |= RGE_FLAG_MSI;
	else if (pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE, rge_intr,
	    sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	/* Determine hardware revision */
	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
	switch (hwrev) {
	case 0x60800000:
		sc->rge_type = MAC_CFG2;
		break;
	case 0x60900000:
		sc->rge_type = MAC_CFG3;
		break;
	case 0x64000000:
		sc->rge_type = MAC_CFG4;
		break;
	case 0x64100000:
		sc->rge_type = MAC_CFG5;
		break;
	default:
		printf(": unknown version 0x%08x\n", hwrev);
		return;
	}

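	/* Default to simulated (timer-based) interrupt moderation. */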
	rge_config_imtype(sc, RGE_IMTYPE_SIM);

	/*
	 * PCI Express check.
	 */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		/* Disable PCIe ASPM and ECPM. */
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    offset + PCI_PCIE_LCSR);
		reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1 |
		    PCI_PCIE_LCSR_ECPM);
		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCI_PCIE_LCSR,
		    reg);
	}

	rge_exit_oob(sc);
	rge_hw_init(sc);

	rge_get_macaddr(sc, eaddr);
	printf(", address %s\n", ether_sprintf(eaddr));

	memcpy(sc->sc_arpcom.ac_enaddr, eaddr, ETHER_ADDR_LEN);

	rge_set_phy_power(sc, 1);
	rge_phy_config(sc);

	if (rge_allocmem(sc))
		return;

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = rge_ioctl;
	ifp->if_qstart = rge_start;
	ifp->if_watchdog = rge_watchdog;
	ifq_set_maxlen(&ifp->if_snd, RGE_TX_LIST_CNT - 1);
	ifp->if_hardmtu = RGE_JUMBO_MTU;

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	timeout_set(&sc->sc_timeout, rge_tick, sc);
	task_set(&sc->sc_task, rge_txstart, sc);

	/* Initialize ifmedia structures. */
	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
	    rge_ifmedia_sts);
	rge_add_media_types(sc);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;

	if_attach(ifp);
	ether_ifattach(ifp);
}

int
rge_intr(void *arg)
{
	struct rge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t status;
	int claimed = 0, rx, tx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

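	/*
	 * With legacy INTx the interrupt line may be shared, so make
	 * sure the interrupt actually belongs to us before doing any
	 * work.
	 */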
	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
			return (0);
	}

	status = RGE_READ_4(sc, RGE_ISR);
	if (status)
		RGE_WRITE_4(sc, RGE_ISR, status);

	if (status & RGE_ISR_PCS_TIMEOUT)
		claimed = 1;

	rx = tx = 0;
	if (status & sc->rge_intrs) {
		if (status &
		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
			rx |= rge_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
			tx |= rge_txeof(sc);
			claimed = 1;
		}

		if (status & RGE_ISR_SYSTEM_ERR) {
			KERNEL_LOCK();
			rge_init(ifp);
			KERNEL_UNLOCK();
			claimed = 1;
		}
	}

	if (sc->rge_timerintr) {
		if ((tx | rx) == 0) {
			/*
			 * Nothing needs to be processed; fall back to
			 * using TX/RX interrupts.
			 */
			rge_setup_intr(sc, RGE_IMTYPE_NONE);

			/*
			 * Re-collect, mainly to avoid the possible race
			 * introduced by changing interrupt masks.
			 */
			rge_rxeof(sc);
			rge_txeof(sc);
		} else
			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	} else if (tx | rx) {
		/*
		 * Assume that using simulated interrupt moderation
		 * (hardware timer based) could reduce the interrupt
		 * rate.
		 */
		rge_setup_intr(sc, RGE_IMTYPE_SIM);
	}

	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	return (claimed);
}

int
rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
{
	struct rge_tx_desc *d = NULL;
	struct rge_txq *txq;
	bus_dmamap_t txmap;
	uint32_t cmdsts, cflags = 0;
	int cur, error, i, last, nsegs;

	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM do not
	 * take effect.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT | M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
	}

	txq = &sc->rge_ldata.rge_txq[idx];
	txmap = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = txmap->dm_nsegs;

	/* Set up hardware VLAN tagging. */
#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		cflags |= swap16(m->m_pkthdr.ether_vtag | RGE_TDEXTSTS_VTAG);
#endif

	cur = idx;
	cmdsts = RGE_TDCMDSTS_SOF;

	for (i = 0; i < txmap->dm_nsegs; i++) {
		d = &sc->rge_ldata.rge_tx_list[cur];

		d->rge_extsts = htole32(cflags);
		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));

		cmdsts |= txmap->dm_segs[i].ds_len;

		if (cur == RGE_TX_LIST_CNT - 1)
			cmdsts |= RGE_TDCMDSTS_EOR;

		d->rge_cmdsts = htole32(cmdsts);

		last = cur;
		cmdsts = RGE_TDCMDSTS_OWN;
		cur = RGE_NEXT_TX_DESC(cur);
	}

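	/*
	 * Note that the OWN bit of the first descriptor is deliberately
	 * set last (below), so the chip cannot see a half-built chain.
	 */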
	/* Set EOF on the last descriptor. */
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);

	/* Transfer ownership of packet to the chip. */
	d = &sc->rge_ldata.rge_tx_list[idx];

	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update info of TX queue and descriptors. */
	txq->txq_mbuf = m;
	txq->txq_descidx = last;

	return (nsegs);
}

int
rge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			rge_init(ifp);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				rge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				rge_stop(ifp);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ifp->if_hardmtu) {
			error = EINVAL;
			break;
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, RGE_JUMBO_FRAMELEN, &sc->rge_ldata.rge_rx_ring);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			rge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
rge_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct rge_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int free, idx, used;
	int queued = 0;

	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		ifq_purge(ifq);
		return;
	}

	/* Calculate free space. */
	idx = sc->rge_ldata.rge_txq_prodidx;
	free = sc->rge_ldata.rge_txq_considx;
	if (free <= idx)
		free += RGE_TX_LIST_CNT;
	free -= idx;

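	/*
	 * Stop dequeueing packets once the ring may no longer have
	 * room for a maximally fragmented (RGE_TX_NSEGS) packet.
	 */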
	for (;;) {
		if (RGE_TX_NSEGS >= free + 2) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		used = rge_encap(sc, m, idx);
		if (used == 0) {
			m_freem(m);
			continue;
		}

		KASSERT(used <= free);
		free -= used;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		idx += used;
		if (idx >= RGE_TX_LIST_CNT)
			idx -= RGE_TX_LIST_CNT;

		queued++;
	}

	if (queued == 0)
		return;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	sc->rge_ldata.rge_txq_prodidx = idx;
	ifq_serialize(ifq, &sc->sc_task);
}

void
rge_watchdog(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	rge_init(ifp);
}

int
rge_init(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	uint32_t val;
	int i;

	rge_stop(ifp);

	/* Set MAC address. */
	rge_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* Set max frame size; don't size RX buffers below ETHERMTU. */
	if (ifp->if_mtu < ETHERMTU)
		sc->rge_rxbufsz = ETHERMTU;
	else
		sc->rge_rxbufsz = ifp->if_mtu;

	sc->rge_rxbufsz += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
	    ETHER_CRC_LEN + 1;

	if (sc->rge_rxbufsz > RGE_JUMBO_FRAMELEN)
		sc->rge_rxbufsz -= 1;

	RGE_WRITE_2(sc, RGE_RXMAXSIZE, sc->rge_rxbufsz);

	/* Initialize the RX and TX descriptor lists. */
	rge_rx_list_init(sc);
	rge_tx_list_init(sc);

	/* Load the addresses of the RX and TX lists into the chip. */
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	RGE_CLRBIT_1(sc, 0xf1, 0x80);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);

	/* Clear interrupt moderation timer. */
	for (i = 0; i < 64; i++)
		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);

	/* Set the initial RX and TX configurations. */
	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);

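	/*
	 * What follows is a long sequence of undocumented CSI, PCI
	 * config space and MAC OCP register pokes, apparently taken
	 * from Realtek's reference driver; the meaning of the magic
	 * values is not publicly documented.
	 */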
	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
	rge_write_csi(sc, 0x70c, val | 0x27000000);

	/* Enable hardware optimization function. */
	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);

	RGE_WRITE_2(sc, 0x0382, 0x221b);
	RGE_WRITE_1(sc, 0x4500, 0);
	RGE_WRITE_2(sc, 0x4800, 0);
	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);

	rge_write_mac_ocp(sc, 0xc140, 0xffff);
	rge_write_mac_ocp(sc, 0xc142, 0xffff);

	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);

	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);

	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
	else
		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);

	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
		rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
	} else
		RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);

	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);

	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);

	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);

	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);

	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);

	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);

	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);

	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);

	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);

	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);

	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);

	val = rge_read_mac_ocp(sc, 0xe84c) & ~0x0040;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe84c, 0x00c0);
	else
		rge_write_mac_ocp(sc, 0xe84c, 0x0080);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);

	/* Disable EEE plus. */
	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);

	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);

	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
	DELAY(1);
	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);

	RGE_CLRBIT_4(sc, 0x1880, 0x0030);

	rge_write_mac_ocp(sc, 0xe098, 0xc302);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);

	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);

	for (i = 0; i < 10; i++) {
		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
			break;
		DELAY(1000);
	}

	/* Disable RXDV gate. */
	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	rge_ifmedia_upd(ifp);

	/* Enable transmit and receive. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);

	/* Program promiscuous mode and multicast filters. */
	rge_iff(sc);

	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);

	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	/* Enable interrupts. */
	rge_setup_intr(sc, RGE_IMTYPE_SIM);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	timeout_add_sec(&sc->sc_timeout, 1);

	return (0);
}

/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */
void
rge_stop(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	int i;

	timeout_del(&sc->sc_timeout);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	sc->rge_timerintr = 0;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	RGE_WRITE_4(sc, RGE_IMR, 0);

	/* Clear timer interrupts. */
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);

	rge_reset(sc);

	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);
	ifq_clr_oactive(&ifp->if_snd);

	if (sc->rge_head != NULL) {
		m_freem(sc->rge_head);
		sc->rge_head = sc->rge_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_txq[i].txq_dmamap);
			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
		}
	}
}

/*
 * Set media options.
 */
int
rge_ifmedia_upd(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	int anar, gig, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Disable Gigabit Lite. */
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);

	val = rge_read_phy_ocp(sc, 0xa5d4);
	val &= ~RGE_ADV_2500TFDX;

	anar = gig = 0;
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		break;
	case IFM_2500_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		ifp->if_baudrate = IF_Mbps(2500);
		break;
	case IFM_1000_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		ifp->if_baudrate = IF_Gbps(1);
		break;
	case IFM_100_TX:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
		    ANAR_TX | ANAR_10_FD | ANAR_10;
		ifp->if_baudrate = IF_Mbps(100);
		break;
	case IFM_10_T:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_10_FD | ANAR_10 : ANAR_10;
		ifp->if_baudrate = IF_Mbps(10);
		break;
	default:
		printf("%s: unsupported media type\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
	rge_write_phy(sc, 0, MII_100T2CR, gig);
	rge_write_phy_ocp(sc, 0xa5d4, val);
	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
	    BMCR_STARTNEG);

	return (0);
}

/*
 * Report current media status.
 */
void
rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rge_softc *sc = ifp->if_softc;
	uint16_t status = 0;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (rge_get_link_status(sc)) {
		ifmr->ifm_status |= IFM_ACTIVE;

		status = RGE_READ_2(sc, RGE_PHYSTAT);
		if ((status & RGE_PHYSTAT_FDX) ||
		    (status & RGE_PHYSTAT_2500MBPS))
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		if (status & RGE_PHYSTAT_10MBPS)
			ifmr->ifm_active |= IFM_10_T;
		else if (status & RGE_PHYSTAT_100MBPS)
			ifmr->ifm_active |= IFM_100_TX;
		else if (status & RGE_PHYSTAT_1000MBPS)
			ifmr->ifm_active |= IFM_1000_T;
		else if (status & RGE_PHYSTAT_2500MBPS)
			ifmr->ifm_active |= IFM_2500_T;
	}
}

/*
 * Allocate memory for RX/TX rings.
 */
int
rge_allocmem(struct rge_softc *sc)
{
	int error, i;

	/* Allocate DMA'able memory for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
	if (error) {
		printf("%s: can't create TX list map\n", sc->sc_dev.dv_xname);
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: can't alloc TX list\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/* Load the map for the TX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
	    (caddr_t *)&sc->rge_ldata.rge_tx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: can't map TX dma buffers\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load TX dma map\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for TX\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	/* Allocate DMA'able memory for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
	if (error) {
		printf("%s: can't create RX list map\n", sc->sc_dev.dv_xname);
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: can't alloc RX list\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/* Load the map for the RX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
	    (caddr_t *)&sc->rge_ldata.rge_rx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: can't map RX dma buffers\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load RX dma map\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}

	/* Create DMA maps for RX buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
		    RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for RX\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	return (error);
}

/*
 * Initialize the RX descriptor and attach an mbuf cluster.
 */
int
rge_newbuf(struct rge_softc *sc)
{
	struct mbuf *m;
	struct rge_rx_desc *r;
	struct rge_rxq *rxq;
	bus_dmamap_t rxmap;
	int idx;

	m = MCLGETI(NULL, M_DONTWAIT, NULL, sc->rge_rxbufsz);
	if (m == NULL)
		return (ENOBUFS);

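	/* Keep exactly rge_rxbufsz bytes at the end of the cluster. */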
	m->m_data += (m->m_ext.ext_size - sc->rge_rxbufsz);
	m->m_len = m->m_pkthdr.len = sc->rge_rxbufsz;

	idx = sc->rge_ldata.rge_rxq_prodidx;
	rxq = &sc->rge_ldata.rge_rxq[idx];
	rxmap = rxq->rxq_dmamap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT)) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Map the segments into RX descriptors. */
	r = &sc->rge_ldata.rge_rx_list[idx];

	if (RGE_OWN(r)) {
		printf("%s: tried to map busy RX descriptor\n",
		    sc->sc_dev.dv_xname);
		m_freem(m);
		return (ENOBUFS);
	}

	rxq->rxq_mbuf = m;

	r->rge_extsts = 0;
	r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
	r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));

	r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
	if (idx == RGE_RX_LIST_CNT - 1)
		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);

	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->rge_ldata.rge_rxq_prodidx = RGE_NEXT_RX_DESC(idx);

	return (0);
}

void
rge_discard_rxbuf(struct rge_softc *sc, int idx)
{
	struct rge_rx_desc *r;

	r = &sc->rge_ldata.rge_rx_list[idx];

	r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
	r->rge_extsts = 0;
	if (idx == RGE_RX_LIST_CNT - 1)
		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
rge_rx_list_init(struct rge_softc *sc)
{
	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);

	sc->rge_ldata.rge_rxq_prodidx = sc->rge_ldata.rge_rxq_considx = 0;
	sc->rge_head = sc->rge_tail = NULL;

	if_rxr_init(&sc->rge_ldata.rge_rx_ring, 2, RGE_RX_LIST_CNT - 1);
	rge_fill_rx_ring(sc);
}

void
rge_fill_rx_ring(struct rge_softc *sc)
{
	struct if_rxring *rxr = &sc->rge_ldata.rge_rx_ring;
	int slots;

	for (slots = if_rxr_get(rxr, RGE_RX_LIST_CNT); slots > 0; slots--) {
		if (rge_newbuf(sc) == ENOBUFS)
			break;
	}
	if_rxr_put(rxr, slots);
}

void
rge_tx_list_init(struct rge_softc *sc)
{
	int i;

	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);

	for (i = 0; i < RGE_TX_LIST_CNT; i++)
		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
}

int
rge_rxeof(struct rge_softc *sc)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct if_rxring *rxr = &sc->rge_ldata.rge_rx_ring;
	struct rge_rx_desc *cur_rx;
	struct rge_rxq *rxq;
	uint32_t rxstat, extsts;
	int i, total_len, rx = 0;

	for (i = sc->rge_ldata.rge_rxq_considx; if_rxr_inuse(rxr) > 0;
	    i = RGE_NEXT_RX_DESC(i)) {
		/* Invalidate the descriptor memory. */
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->rge_ldata.rge_rx_list[i];

		if (RGE_OWN(cur_rx))
			break;

		rxstat = letoh32(cur_rx->rge_cmdsts);
		extsts = letoh32(cur_rx->rge_extsts);

		total_len = RGE_RXBYTES(cur_rx);
		rxq = &sc->rge_ldata.rge_rxq[i];
		m = rxq->rxq_mbuf;
		rxq->rxq_mbuf = NULL;
		if_rxr_put(rxr, 1);
		rx = 1;

		/* Invalidate the RX mbuf and unload its map. */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);

		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
			rge_discard_rxbuf(sc, i);
			continue;
		}

		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_discard_rxbuf(sc, i);
			continue;
		}

		if (sc->rge_head != NULL) {
			m->m_len = total_len;
			/*
			 * Special case: if there are 4 bytes or fewer
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rge_tail->m_next = m;
			}
			m = sc->rge_head;
			sc->rge_head = sc->rge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		/* Check IP header checksum. */
		if (!(rxstat & RGE_RDCMDSTS_IPCSUMERR) &&
		    (extsts & RGE_RDEXTSTS_IPV4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum. */
		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
		    (((rxstat & RGE_RDCMDSTS_TCPPKT) &&
		    !(rxstat & RGE_RDCMDSTS_TCPCSUMERR)) ||
		    ((rxstat & RGE_RDCMDSTS_UDPPKT) &&
		    !(rxstat & RGE_RDCMDSTS_UDPCSUMERR))))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;

#if NVLAN > 0
		if (extsts & RGE_RDEXTSTS_VTAG) {
			m->m_pkthdr.ether_vtag =
			    ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);
	}

	sc->rge_ldata.rge_rxq_considx = i;
	rge_fill_rx_ring(sc);

	if_input(ifp, &ml);

	return (rx);
}

int
rge_txeof(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct rge_txq *txq;
	uint32_t txstat;
	int cons, idx, prod;
	int free = 0;

	prod = sc->rge_ldata.rge_txq_prodidx;
	cons = sc->rge_ldata.rge_txq_considx;

	while (prod != cons) {
		txq = &sc->rge_ldata.rge_txq[cons];
		idx = txq->txq_descidx;

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);

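		/*
		 * The chip still owns this descriptor, i.e. the packet
		 * is still in flight; free == 2 reschedules the TX task
		 * below.
		 */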
		if (txstat & RGE_TDCMDSTS_OWN) {
			free = 2;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & RGE_TDCMDSTS_TXERR)
			ifp->if_oerrors++;

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		cons = RGE_NEXT_TX_DESC(idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	sc->rge_ldata.rge_txq_considx = cons;

	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
	else if (free == 2)
		ifq_serialize(&ifp->if_snd, &sc->sc_task);
	else
		ifp->if_timer = 0;

	return (1);
}

void
rge_reset(struct rge_softc *sc)
{
	int i;

	/* Enable RXDV gate. */
	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	for (i = 0; i < 3000; i++) {
		DELAY(50);
		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY))
			break;
	}
	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
		for (i = 0; i < 3000; i++) {
			DELAY(50);
			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
				break;
		}
	}

	DELAY(2000);

	/* Soft reset. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(100);
		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
			break;
	}
	if (i == RGE_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
}

void
rge_iff(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2];
	uint32_t rxfilt;
	int h = 0;

	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RGE_RXCFG_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RGE_RXCFG_ALLPHYS;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= RGE_RXCFG_MULTI;
		/* Program new filter. */
		memset(hashes, 0, sizeof(hashes));

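		/*
		 * The upper 6 bits of the big-endian CRC-32 of each
		 * multicast address select one of 64 hash filter bits.
		 */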
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
	RGE_WRITE_4(sc, RGE_MAR0, swap32(hashes[1]));
	RGE_WRITE_4(sc, RGE_MAR4, swap32(hashes[0]));
}

void
rge_set_phy_power(struct rge_softc *sc, int on)
{
	int i;

	if (on) {
		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);

		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);

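		/*
		 * Wait for the PHY to signal readiness; 0xa420 appears
		 * to be an undocumented PHY OCP status register.
		 */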
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
				break;
			DELAY(1000);
		}
	} else {
		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
	}
}

void
rge_phy_config(struct rge_softc *sc)
{
	/* Read microcode version. */
	rge_write_phy_ocp(sc, 0xa436, 0x801e);
	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);

	switch (sc->rge_type) {
	case MAC_CFG2:
		rge_phy_config_mac_cfg2(sc);
		break;
	case MAC_CFG3:
		rge_phy_config_mac_cfg3(sc);
		break;
	case MAC_CFG4:
		rge_phy_config_mac_cfg4(sc);
		break;
	case MAC_CFG5:
		rge_phy_config_mac_cfg5(sc);
		break;
	default:
		break;	/* Can't happen. */
	}

	rge_write_phy(sc, 0x0a5b, 0x12,
	    rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);

	/* Disable EEE. */
	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
	}
	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);

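	/* The adjustments below are made with the PHY MCU in patch mode. */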
	rge_patch_phy_mcu(sc, 1);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
	rge_patch_phy_mcu(sc, 0);
}

void
rge_phy_config_mac_cfg2(struct rge_softc *sc)
{
	uint16_t val;
	int i;

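	/*
	 * PHY initialization magic from the vendor driver; the OCP
	 * register semantics are not publicly documented.
	 */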
	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
		    rtl8125_mac_cfg2_ephy[i].val);

	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);

	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);

	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
	rge_write_phy_ocp(sc, 0xa436, 0x8102);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x8105);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
	rge_write_phy_ocp(sc, 0xa436, 0x8100);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
	rge_write_phy_ocp(sc, 0xa436, 0x8104);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
	rge_write_phy_ocp(sc, 0xa436, 0x8106);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
	rge_write_phy_ocp(sc, 0xa436, 0x819f);
	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}

void
rge_phy_config_mac_cfg3(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint16_t val;
	int i;
	static const uint16_t mac_cfg3_a438_value[] =
	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };

	static const uint16_t mac_cfg3_b88e_value[] =
	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };

	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
		    rtl8125_mac_cfg3_ephy[i].val);

	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
	rge_write_ephy(sc, 0x002a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
	rge_write_ephy(sc, 0x0002, 0x6042);
	rge_write_ephy(sc, 0x0006, 0x0014);
	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
	rge_write_ephy(sc, 0x006a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
	rge_write_ephy(sc, 0x0042, 0x6042);
	rge_write_ephy(sc, 0x0046, 0x0014);

	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN + 32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);

	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
	for (i = 0; i < 26; i++)
		rge_write_phy_ocp(sc, 0xa438, 0);
	rge_write_phy_ocp(sc, 0xa436, 0x8257);
	rge_write_phy_ocp(sc, 0xa438, 0x020f);
	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	rge_write_phy_ocp(sc, 0xa438, 0x7843);

	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);

	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}

void
rge_phy_config_mac_cfg4(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint16_t val;
	int i;
	static const uint16_t mac_cfg4_b87c_value[] =
	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
	      0x80b0, 0x0f31 };

	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
		    rtl8125_mac_cfg4_ephy[i].val);

	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);

	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
		if (i < 3)
			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f2);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
	rge_write_phy_ocp(sc, 0xac4e, 0x0db4);
	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
	rge_write_phy_ocp(sc, 0xad08, 0x0007);
	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN + 32);
	rge_write_phy_ocp(sc, 0xa436, 0x816c);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8170);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8174);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8178);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x817c);
	rge_write_phy_ocp(sc, 0xa438, 0x0719);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
	rge_write_phy_ocp(sc, 0xa438, 0x0400);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
	rge_write_phy_ocp(sc, 0xa438, 0x0404);
	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
		if (i == 2)
			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
	rge_write_phy_ocp(sc, 0xa436, 0x8217);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x821a);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x80da);
	rge_write_phy_ocp(sc, 0xa438, 0x0403);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
	rge_write_phy_ocp(sc, 0xa438, 0x0384);
	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
	rge_write_phy_ocp(sc, 0xa438, 0xf009);
	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
	rge_write_phy_ocp(sc, 0xa438, 0xf083);
	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
	rge_write_phy_ocp(sc, 0xa438, 0x8009);
	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
	rge_write_phy_ocp(sc, 0xa438, 0x200a);
	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
	rge_write_phy_ocp(sc, 0xa436, 0x809f);
	rge_write_phy_ocp(sc, 0xa438, 0x6073);
	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
	rge_write_phy_ocp(sc, 0xa438, 0x000b);
	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
	rge_write_phy_ocp(sc, 0xb890, 0x0000);
	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
	rge_write_phy_ocp(sc, 0xb890, 0x0103);
	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
	rge_write_phy_ocp(sc, 0xb890, 0x0507);
	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
1849 	rge_write_phy_ocp(sc, 0xb890, 0x090b);
1850 	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
1851 	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
1852 	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
1853 	rge_write_phy_ocp(sc, 0xb890, 0x1012);
1854 	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
1855 	rge_write_phy_ocp(sc, 0xb890, 0x1416);
1856 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1857 	rge_patch_phy_mcu(sc, 0);
1858 	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
1859 	RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
1860 	rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
1861 	rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
1862 	rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
1863 	rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
1864 	rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
1865 	RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
1866 	rge_write_phy_ocp(sc, 0xa436, 0x817d);
1867 	RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
1868 }
1869 
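/*
 * PHY setup for the MAC_CFG5 (RTL8125B) variant: EPHY fixups first,
 * then the per-revision PHY MCU patch, then assorted OCP register
 * tweaks carried over from the vendor driver (largely undocumented).
 */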
1870 void
1871 rge_phy_config_mac_cfg5(struct rge_softc *sc)
1872 {
1873 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1874 	uint16_t val;
1875 	int i;
1876 
1877 	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
1878 		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
1879 		    rtl8125_mac_cfg5_ephy[i].val);
1880 
1881 	val = rge_read_ephy(sc, 0x0022) & ~0x0030;
1882 	rge_write_ephy(sc, 0x0022, val | 0x0020);
1883 	val = rge_read_ephy(sc, 0x0062) & ~0x0030;
1884 	rge_write_ephy(sc, 0x0062, val | 0x0020);
1885 
1886 	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
1887 	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
1888 	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
1889 	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
1890 	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
1891 	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
1892 	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
1893 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1894 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);
1895 
1896 	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);
1897 
1898 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1899 	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
1900 	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
1901 	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
1902 	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
1903 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN + 32);
1904 	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
1905 	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
1906 	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
1907 	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
1908 	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
1909 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1910 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
1911 	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
1912 	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
1913 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1914 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
1915 	for (i = 0; i < 10; i++) {
1916 		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
1917 		rge_write_phy_ocp(sc, 0xa438, 0x2417);
1918 	}
1919 	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
1920 }
1921 
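/*
 * Load the PHY MCU patch for this chip revision, skipped when the
 * version already present in the PHY (sc->rge_mcodever) matches the
 * one we carry.  The version word is stored at PHY OCP 0x801e once
 * the patch has been written, which is what makes the check stick
 * across re-initialization.
 */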
1922 void
1923 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
1924 {
1925 	if (sc->rge_mcodever != mcode_version) {
1926 		int i;
1927 
1928 		rge_patch_phy_mcu(sc, 1);
1929 
1930 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
1931 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
1932 			if (sc->rge_type == MAC_CFG2)
1933 				rge_write_phy_ocp(sc, 0xa438, 0x8600);
1934 			else
1935 				rge_write_phy_ocp(sc, 0xa438, 0x8601);
1936 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
1937 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
1938 
1939 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
1940 		}
1941 
1942 		if (sc->rge_type == MAC_CFG2) {
1943 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
1944 				rge_write_phy_ocp(sc,
1945 				    rtl8125_mac_cfg2_mcu[i].reg,
1946 				    rtl8125_mac_cfg2_mcu[i].val);
1947 			}
1948 		} else if (sc->rge_type == MAC_CFG3) {
1949 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
1950 				rge_write_phy_ocp(sc,
1951 				    rtl8125_mac_cfg3_mcu[i].reg,
1952 				    rtl8125_mac_cfg3_mcu[i].val);
1953 			}
1954 		} else if (sc->rge_type == MAC_CFG4) {
1955 			for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
1956 				rge_write_phy_ocp(sc,
1957 				    rtl8125_mac_cfg4_mcu[i].reg,
1958 				    rtl8125_mac_cfg4_mcu[i].val);
1959 			}
1960 		} else if (sc->rge_type == MAC_CFG5) {
1961 			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
1962 				rge_write_phy_ocp(sc,
1963 				    rtl8125_mac_cfg5_mcu[i].reg,
1964 				    rtl8125_mac_cfg5_mcu[i].val);
1965 			}
1966 		}
1967 
1968 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
1969 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
1970 
1971 			rge_write_phy_ocp(sc, 0xa436, 0);
1972 			rge_write_phy_ocp(sc, 0xa438, 0);
1973 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
1974 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
1975 			rge_write_phy_ocp(sc, 0xa438, 0);
1976 		}
1977 
1978 		rge_patch_phy_mcu(sc, 0);
1979 
1980 		/* Write microcode version. */
1981 		rge_write_phy_ocp(sc, 0xa436, 0x801e);
1982 		rge_write_phy_ocp(sc, 0xa438, mcode_version);
1983 	}
1984 }
1985 
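/*
 * Program the station address.  The RGE_MAC0/RGE_MAC4 registers are
 * only writable while EECMD is in WRITECFG (config-unlock) mode,
 * hence the bracketing bit flips.
 */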
1986 void
1987 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
1988 {
1989 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1990 	RGE_WRITE_4(sc, RGE_MAC0,
1991 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1992 	RGE_WRITE_4(sc, RGE_MAC4,
1993 	    addr[5] <<  8 | addr[4]);
1994 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1995 }
1996 
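/*
 * Read back the station address.  The raw 4+2 byte copy assumes the
 * bus_space reads return the bytes in register order and that addr
 * is suitably aligned for a uint32_t store.
 */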
1997 void
1998 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
1999 {
2000 	*(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
2001 	*(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
2002 }
2003 
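/*
 * One-time hardware bring-up: clear power-management state, park the
 * MAC MCU, load the per-revision MAC breakpoint fixups and turn off
 * the PHY's OCP power-save mode before the PHY gets configured.
 */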
2004 void
2005 rge_hw_init(struct rge_softc *sc)
2006 {
2007 	int i;
2008 
2009 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2010 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
2011 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
2012 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2013 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
2014 
2015 	/* Disable UPS. */
2016 	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
2017 
2018 	/* Configure MAC MCU. */
2019 	rge_write_mac_ocp(sc, 0xfc38, 0);
2020 
2021 	for (i = 0xfc28; i < 0xfc38; i += 2)
2022 		rge_write_mac_ocp(sc, i, 0);
2023 
2024 	DELAY(3000);
2025 	rge_write_mac_ocp(sc, 0xfc26, 0);
2026 
2027 	if (sc->rge_type == MAC_CFG3) {
2028 		for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
2029 			rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
2030 			    rtl8125_mac_bps[i].val);
2031 		}
2032 	} else if (sc->rge_type == MAC_CFG5) {
2033 		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
2034 			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
2035 			    rtl8125b_mac_bps[i].val);
2036 		}
2037 	}
2038 
2039 	/* Disable PHY power saving. */
2040 	rge_disable_phy_ocp_pwrsave(sc);
2041 
2042 	/* Mask PCIe unsupported request errors (AER, config reg 0x108). */
2043 	rge_write_csi(sc, 0x108,
2044 	    rge_read_csi(sc, 0x108) | 0x00100000);
2045 }
2046 
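/*
 * PHY OCP register 0xc416 apparently selects the PHY power-save
 * mode; writing 0x0500 disables it.  The write is bracketed by a
 * patch request since the register belongs to the PHY MCU.
 */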
2047 void
2048 rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
2049 {
2050 	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
2051 		rge_patch_phy_mcu(sc, 1);
2052 		rge_write_phy_ocp(sc, 0xc416, 0);
2053 		rge_write_phy_ocp(sc, 0xc416, 0x0500);
2054 		rge_patch_phy_mcu(sc, 0);
2055 	}
2056 }
2057 
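/*
 * Assert or deassert the PHY MCU patch request (0xb820 bit 4) and
 * wait for the ready flag (0xb800 bit 6) to follow: it should read
 * as set once a patch request has been granted and as clear once
 * the request has been withdrawn.
 */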
2058 void
2059 rge_patch_phy_mcu(struct rge_softc *sc, int set)
2060 {
2061 	int i;
2062 
2063 	if (set)
2064 		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
2065 	else
2066 		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
2067 
2068 	for (i = 0; i < 1000; i++) {
2069 		if (!(rge_read_phy_ocp(sc, 0xb800) & 0x0040) == !set)
2070 			break;
2071 		DELAY(100);
2072 	}
2073 	if (i == 1000) {
2074 		DPRINTF(("timeout waiting to patch phy mcu\n"));
2075 		return;
2076 	}
2077 }
2078 
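/*
 * Register every fixed speed/duplex combination the RTL8125
 * supports, up to and including 2500baseT; autoselect is added
 * separately at attach time.
 */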
2079 void
2080 rge_add_media_types(struct rge_softc *sc)
2081 {
2082 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
2083 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
2084 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
2085 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
2086 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
2087 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2088 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
2089 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
2090 }
2091 
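/*
 * Select the interrupt moderation scheme.  RGE_IMTYPE_NONE takes an
 * interrupt per RX/TX event, while RGE_IMTYPE_SIM batches events
 * behind the chip's timer interrupt and acknowledges them on
 * RGE_ISR_PCS_TIMEOUT instead.
 */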
2092 void
2093 rge_config_imtype(struct rge_softc *sc, int imtype)
2094 {
2095 	switch (imtype) {
2096 	case RGE_IMTYPE_NONE:
2097 		sc->rge_intrs = RGE_INTRS;
2098 		sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
2099 		    RGE_ISR_RX_FIFO_OFLOW;
2100 		sc->rge_tx_ack = RGE_ISR_TX_OK;
2101 		break;
2102 	case RGE_IMTYPE_SIM:
2103 		sc->rge_intrs = RGE_INTRS_TIMER;
2104 		sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
2105 		sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
2106 		break;
2107 	default:
2108 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
2109 	}
2110 }
2111 
2112 void
2113 rge_disable_hw_im(struct rge_softc *sc)
2114 {
2115 	RGE_WRITE_2(sc, RGE_IM, 0);
2116 }
2117 
2118 void
2119 rge_disable_sim_im(struct rge_softc *sc)
2120 {
2121 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
2122 	sc->rge_timerintr = 0;
2123 }
2124 
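/*
 * Arm the simulated interrupt moderation timer.  The initial count
 * of 0x2600 is inherited from the vendor driver; its time base is
 * undocumented.
 */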
2125 void
2126 rge_setup_sim_im(struct rge_softc *sc)
2127 {
2128 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
2129 	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
2130 	sc->rge_timerintr = 1;
2131 }
2132 
2133 void
2134 rge_setup_intr(struct rge_softc *sc, int imtype)
2135 {
2136 	rge_config_imtype(sc, imtype);
2137 
2138 	/* Enable interrupts. */
2139 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
2140 
2141 	switch (imtype) {
2142 	case RGE_IMTYPE_NONE:
2143 		rge_disable_sim_im(sc);
2144 		rge_disable_hw_im(sc);
2145 		break;
2146 	case RGE_IMTYPE_SIM:
2147 		rge_disable_hw_im(sc);
2148 		rge_setup_sim_im(sc);
2149 		break;
2150 	default:
2151 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
2152 	}
2153 }
2154 
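/*
 * Take the chip out of out-of-band (OOB/wakeup firmware) mode so
 * the host driver owns the MAC: stop address filtering, reset, and
 * poll RGE_TWICMD for the firmware handshake to complete.  If the
 * chip reports a UPS resume, additionally wait for the PHY to come
 * ready and clear the resume state.
 */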
2155 void
2156 rge_exit_oob(struct rge_softc *sc)
2157 {
2158 	int i;
2159 
2160 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
2161 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
2162 	    RGE_RXCFG_ERRPKT);
2163 
2164 	/* Disable RealWoW. */
2165 	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
2166 
2167 	rge_reset(sc);
2168 
2169 	/* Disable OOB. */
2170 	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
2171 
2172 	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
2173 
2174 	for (i = 0; i < 10; i++) {
2175 		DELAY(100);
2176 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2177 			break;
2178 	}
2179 
2180 	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
2181 	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
2182 	rge_write_mac_ocp(sc, 0xc01e, 0x5555);
2183 
2184 	for (i = 0; i < 10; i++) {
2185 		DELAY(100);
2186 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2187 			break;
2188 	}
2189 
2190 	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
2191 		printf("%s: detected UPS resume, waiting for PHY ready\n",
2192 		    sc->sc_dev.dv_xname);
2193 		for (i = 0; i < RGE_TIMEOUT; i++) {
2194 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
2195 				break;
2196 			DELAY(1000);
2197 		}
2198 		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
2199 		if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
2200 			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
2201 		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
2202 	}
2203 }
2204 
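/*
 * CSI access: an indirect window into the device's PCIe config
 * space.  RGE_CSIAR carries the dword address, byte enables and a
 * busy flag, RGE_CSIDR the data; on writes the flag clears when the
 * cycle completes, on reads it is set once RGE_CSIDR holds valid
 * data.
 */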
2205 void
2206 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
2207 {
2208 	int i;
2209 
2210 	RGE_WRITE_4(sc, RGE_CSIDR, val);
2211 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2212 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
2213 
2214 	for (i = 0; i < 10; i++) {
2215 		DELAY(100);
2216 		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
2217 			break;
2218 	}
2219 
2220 	DELAY(20);
2221 }
2222 
2223 uint32_t
2224 rge_read_csi(struct rge_softc *sc, uint32_t reg)
2225 {
2226 	int i;
2227 
2228 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2229 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
2230 
2231 	for (i = 0; i < 10; i++) {
2232 		DELAY(100);
2233 		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
2234 			break;
2235 	}
2236 
2237 	DELAY(20);
2238 
2239 	return (RGE_READ_4(sc, RGE_CSIDR));
2240 }
2241 
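/*
 * MAC OCP registers are reached through the single RGE_MACOCP
 * window: the word-aligned register address goes into the upper
 * half and 16 bits of data into the lower, e.g. writing 0x5555 to
 * OCP register 0xc01e posts
 * ((0xc01e >> 1) << RGE_MACOCP_ADDR_SHIFT) | RGE_MACOCP_BUSY | 0x5555.
 * Reads simply post the address and fetch the window back.
 */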
2242 void
2243 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2244 {
2245 	uint32_t tmp;
2246 
2247 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2248 	tmp |= val;
2249 	tmp |= RGE_MACOCP_BUSY;
2250 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
2251 }
2252 
2253 uint16_t
2254 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
2255 {
2256 	uint32_t val;
2257 
2258 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2259 	RGE_WRITE_4(sc, RGE_MACOCP, val);
2260 
2261 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
2262 }
2263 
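/*
 * EPHY (PCIe PHY) register access.  Note the asymmetric busy-flag
 * handling: writes poll for RGE_EPHYAR_BUSY to clear, while reads
 * poll for it to become set, which is how the hardware signals
 * valid read data.
 */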
2264 void
2265 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
2266 {
2267 	uint32_t tmp;
2268 	int i;
2269 
2270 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2271 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
2272 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
2273 
2274 	for (i = 0; i < 10; i++) {
2275 		DELAY(100);
2276 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
2277 			break;
2278 	}
2279 
2280 	DELAY(20);
2281 }
2282 
2283 uint16_t
2284 rge_read_ephy(struct rge_softc *sc, uint16_t reg)
2285 {
2286 	uint32_t val;
2287 	int i;
2288 
2289 	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2290 	RGE_WRITE_4(sc, RGE_EPHYAR, val);
2291 
2292 	for (i = 0; i < 10; i++) {
2293 		DELAY(100);
2294 		val = RGE_READ_4(sc, RGE_EPHYAR);
2295 		if (val & RGE_EPHYAR_BUSY)
2296 			break;
2297 	}
2298 
2299 	DELAY(20);
2300 
2301 	return (val & RGE_EPHYAR_DATA_MASK);
2302 }
2303 
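/*
 * Map MII-style (addr, reg) pairs onto PHY OCP addresses.  With
 * addr == 0, eight 16-bit registers fit per OCP page, so register
 * reg lands at ((RGE_PHYBASE + reg / 8) << 4) + (reg % 8) * 2; MII
 * register 0 (BMCR), for instance, maps to RGE_PHYBASE << 4.
 */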
2304 void
2305 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
2306 {
2307 	uint16_t off, phyaddr;
2308 
2309 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2310 	phyaddr <<= 4;
2311 
2312 	off = addr ? reg : 0x10 + (reg % 8);
2313 
2314 	phyaddr += (off - 16) << 1;
2315 
2316 	rge_write_phy_ocp(sc, phyaddr, val);
2317 }
2318 
2319 uint16_t
2320 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
2321 {
2322 	uint16_t off, phyaddr;
2323 
2324 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2325 	phyaddr <<= 4;
2326 
2327 	off = addr ? reg : 0x10 + (reg % 8);
2328 
2329 	phyaddr += (off - 16) << 1;
2330 
2331 	return (rge_read_phy_ocp(sc, phyaddr));
2332 }
2333 
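/*
 * Raw PHY OCP window access.  As with the EPHY window, writes wait
 * for the busy flag to clear while reads treat a set flag as data
 * valid.
 */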
2334 void
2335 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2336 {
2337 	uint32_t tmp;
2338 	int i;
2339 
2340 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2341 	tmp |= RGE_PHYOCP_BUSY | val;
2342 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
2343 
2344 	for (i = 0; i < RGE_TIMEOUT; i++) {
2345 		DELAY(1);
2346 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
2347 			break;
2348 	}
2349 }
2350 
2351 uint16_t
2352 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
2353 {
2354 	uint32_t val;
2355 	int i;
2356 
2357 	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2358 	RGE_WRITE_4(sc, RGE_PHYOCP, val);
2359 
2360 	for (i = 0; i < RGE_TIMEOUT; i++) {
2361 		DELAY(1);
2362 		val = RGE_READ_4(sc, RGE_PHYOCP);
2363 		if (val & RGE_PHYOCP_BUSY)
2364 			break;
2365 	}
2366 
2367 	return (val & RGE_PHYOCP_DATA_MASK);
2368 }
2369 
2370 int
2371 rge_get_link_status(struct rge_softc *sc)
2372 {
2373 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
2374 }
2375 
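/* Ring the TX doorbell; the chip rescans the TX ring for work. */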
2376 void
2377 rge_txstart(void *arg)
2378 {
2379 	struct rge_softc *sc = arg;
2380 
2381 	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
2382 }
2383 
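/* Once-a-second tick: poll the link state and reschedule. */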
2384 void
2385 rge_tick(void *arg)
2386 {
2387 	struct rge_softc *sc = arg;
2388 	int s;
2389 
2390 	s = splnet();
2391 	rge_link_state(sc);
2392 	splx(s);
2393 
2394 	timeout_add_sec(&sc->sc_timeout, 1);
2395 }
2396 
2397 void
2398 rge_link_state(struct rge_softc *sc)
2399 {
2400 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2401 	int link = LINK_STATE_DOWN;
2402 
2403 	if (rge_get_link_status(sc))
2404 		link = LINK_STATE_UP;
2405 
2406 	if (ifp->if_link_state != link) {
2407 		ifp->if_link_state = link;
2408 		if_link_state_change(ifp);
2409 	}
2410 }
2411