1 /*	$NetBSD: if_rge.c,v 1.19 2021/05/08 00:27:02 thorpej Exp $	*/
2 /*	$OpenBSD: if_rge.c,v 1.9 2020/12/12 11:48:53 jan Exp $	*/
3 
4 /*
5  * Copyright (c) 2019, 2020 Kevin Lo <kevlo@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <sys/cdefs.h>
21 __KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.19 2021/05/08 00:27:02 thorpej Exp $");
22 
23 #include <sys/types.h>
24 
25 #include <sys/param.h>
26 #include <sys/systm.h>
27 #include <sys/sockio.h>
28 #include <sys/mbuf.h>
29 #include <sys/malloc.h>
30 #include <sys/kernel.h>
31 #include <sys/socket.h>
32 #include <sys/device.h>
33 #include <sys/endian.h>
34 #include <sys/callout.h>
35 #include <sys/workqueue.h>
36 
37 #include <net/if.h>
38 
39 #include <net/if_dl.h>
40 #include <net/if_ether.h>
41 
42 #include <net/if_media.h>
43 
44 #include <netinet/in.h>
46 
47 #include <net/bpf.h>
48 
49 #include <sys/bus.h>
50 #include <machine/intr.h>
51 
52 #include <dev/mii/mii.h>
53 
54 #include <dev/pci/pcivar.h>
55 #include <dev/pci/pcireg.h>
56 #include <dev/pci/pcidevs.h>
57 
58 #include <dev/pci/if_rgereg.h>
59 
60 #ifdef __NetBSD__
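/*
 * OpenBSD compatibility: byte-swapping is an involution, so
 * htole32 doubles as letoh32; nitems() maps to __arraycount().
 */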
61 #define letoh32 	htole32
62 #define nitems(x) 	__arraycount(x)
63 
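/*
 * Compatibility shim for OpenBSD's MCLGETL(): allocate a packet
 * header mbuf with external storage of the given size, or return
 * NULL if either allocation fails.
 */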
64 static struct mbuf *
65 MCLGETL(struct rge_softc *sc __unused, int how,
66     u_int size)
67 {
68 	struct mbuf *m;
69 
70 	MGETHDR(m, how, MT_DATA);
71 	if (m == NULL)
72 		return NULL;
73 
74 	MEXTMALLOC(m, size, how);
75 	if ((m->m_flags & M_EXT) == 0) {
76 		m_freem(m);
77 		return NULL;
78 	}
79 	return m;
80 }
81 
82 #ifdef NET_MPSAFE
83 #define 	RGE_MPSAFE	1
84 #define 	CALLOUT_FLAGS	CALLOUT_MPSAFE
85 #else
86 #define 	CALLOUT_FLAGS	0
87 #endif
88 #endif
89 
90 #ifdef RGE_DEBUG
91 #define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
92 int rge_debug = 0;
93 #else
94 #define DPRINTF(x)
95 #endif
96 
97 static int		rge_match(device_t, cfdata_t, void *);
98 static void		rge_attach(device_t, device_t, void *);
99 int		rge_intr(void *);
100 int		rge_encap(struct rge_softc *, struct mbuf *, int);
101 int		rge_ioctl(struct ifnet *, u_long, void *);
102 void		rge_start(struct ifnet *);
103 void		rge_watchdog(struct ifnet *);
104 int		rge_init(struct ifnet *);
105 void		rge_stop(struct ifnet *, int);
106 int		rge_ifmedia_upd(struct ifnet *);
107 void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
108 int		rge_allocmem(struct rge_softc *);
109 int		rge_newbuf(struct rge_softc *, int);
110 void		rge_discard_rxbuf(struct rge_softc *, int);
111 int		rge_rx_list_init(struct rge_softc *);
112 void		rge_tx_list_init(struct rge_softc *);
113 int		rge_rxeof(struct rge_softc *);
114 int		rge_txeof(struct rge_softc *);
115 void		rge_reset(struct rge_softc *);
116 void		rge_iff(struct rge_softc *);
117 void		rge_set_phy_power(struct rge_softc *, int);
118 void		rge_phy_config(struct rge_softc *);
119 void		rge_phy_config_mac_cfg2(struct rge_softc *);
120 void		rge_phy_config_mac_cfg3(struct rge_softc *);
121 void		rge_phy_config_mac_cfg4(struct rge_softc *);
122 void		rge_phy_config_mac_cfg5(struct rge_softc *);
123 void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
124 void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
125 void		rge_get_macaddr(struct rge_softc *, uint8_t *);
126 void		rge_hw_init(struct rge_softc *);
127 void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
128 void		rge_patch_phy_mcu(struct rge_softc *, int);
129 void		rge_add_media_types(struct rge_softc *);
130 void		rge_config_imtype(struct rge_softc *, int);
131 void		rge_disable_hw_im(struct rge_softc *);
132 void		rge_disable_sim_im(struct rge_softc *);
133 void		rge_setup_sim_im(struct rge_softc *);
134 void		rge_setup_intr(struct rge_softc *, int);
135 void		rge_exit_oob(struct rge_softc *);
136 void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
137 uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
138 void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
139 uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
140 void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
141 uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
142 void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
143 uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
144 void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
145 uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
146 int		rge_get_link_status(struct rge_softc *);
147 void		rge_txstart(struct work *, void *);
148 void		rge_tick(void *);
149 void		rge_link_state(struct rge_softc *);
150 
151 static const struct {
152 	uint16_t reg;
153 	uint16_t val;
154 }  rtl8125_mac_cfg2_mcu[] = {
155 	RTL8125_MAC_CFG2_MCU
156 }, rtl8125_mac_cfg3_mcu[] = {
157 	RTL8125_MAC_CFG3_MCU
158 }, rtl8125_mac_cfg4_mcu[] = {
159 	RTL8125_MAC_CFG4_MCU
160 }, rtl8125_mac_cfg5_mcu[] = {
161 	RTL8125_MAC_CFG5_MCU
162 };
163 
164 CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
165 		NULL, NULL); /* XXX (Sevan): no detach function yet */
166 
167 static const struct device_compatible_entry compat_data[] = {
168 	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000) },
169 	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125) },
170 
171 	PCI_COMPAT_EOL
172 };
173 
174 static int
175 rge_match(device_t parent, cfdata_t match, void *aux)
176 {
177 	struct pci_attach_args *pa = aux;
178 
179 	return pci_compatible_match(pa, compat_data);
180 }
181 
182 static void
183 rge_attach(device_t parent, device_t self, void *aux)
184 {
185 	struct rge_softc *sc = device_private(self);
186 	struct pci_attach_args *pa = aux;
187 	pci_chipset_tag_t pc = pa->pa_pc;
188 	pci_intr_handle_t *ihp;
189 	char intrbuf[PCI_INTRSTR_LEN];
190 	const char *intrstr = NULL;
191 	struct ifnet *ifp;
192 	pcireg_t reg;
193 	uint32_t hwrev;
194 	uint8_t eaddr[ETHER_ADDR_LEN];
195 	int offset;
196 	pcireg_t command;
197 
198 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
199 
200 	sc->sc_dev = self;
201 
202 	pci_aprint_devinfo(pa, "Ethernet controller");
203 
204 	/*
205 	 * Map control/status registers.
206 	 */
207 	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
208 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
209 	    NULL, &sc->rge_bsize)) {
210 		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
211 		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
212 		    &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
213 			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
214 			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
215 			    &sc->rge_bsize)) {
216 				aprint_error(": can't map mem or i/o space\n");
217 				return;
218 			}
219 		}
220 	}
221 
222 	int counts[PCI_INTR_TYPE_SIZE] = {
223 		[PCI_INTR_TYPE_INTX] = 1,
224 		[PCI_INTR_TYPE_MSI] = 1,
225 		[PCI_INTR_TYPE_MSIX] = 1,
226 	};
227 	int max_type = PCI_INTR_TYPE_MSIX;
228 	/*
229 	 * Allocate interrupt.
230 	 */
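	/*
	 * One vector of each type was requested above; pci_intr_alloc()
	 * returns the most capable type available (MSI-X, then MSI,
	 * then INTx).
	 */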
231 	if (pci_intr_alloc(pa, &ihp, counts, max_type) != 0) {
232 		aprint_error(": couldn't map interrupt\n");
233 		return;
234 	}
235 	switch (pci_intr_type(pc, ihp[0])) {
236 	case PCI_INTR_TYPE_MSIX:
237 	case PCI_INTR_TYPE_MSI:
238 		sc->rge_flags |= RGE_FLAG_MSI;
239 		break;
240 	default:
241 		break;
242 	}
243 	intrstr = pci_intr_string(pc, ihp[0], intrbuf, sizeof(intrbuf));
244 	sc->sc_ih = pci_intr_establish_xname(pc, ihp[0], IPL_NET, rge_intr,
245 	    sc, device_xname(sc->sc_dev));
246 	if (sc->sc_ih == NULL) {
247 		aprint_error_dev(sc->sc_dev, "couldn't establish interrupt");
248 		if (intrstr != NULL)
249 			aprint_error(" at %s", intrstr);
250 		aprint_error("\n");
251 		return;
252 	}
253 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
254 
255 	if (pci_dma64_available(pa))
256 		sc->sc_dmat = pa->pa_dmat64;
257 	else
258 		sc->sc_dmat = pa->pa_dmat;
259 
260 	sc->sc_pc = pa->pa_pc;
261 	sc->sc_tag = pa->pa_tag;
262 
263 	/* Determine hardware revision */
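	/*
	 * The TXCFG revision field distinguishes RTL8125 variants:
	 * MAC_CFG2/3 are apparently early RTL8125 silicon, while
	 * MAC_CFG4/5 correspond to the RTL8125B.
	 */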
264 	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
265 	switch (hwrev) {
266 	case 0x60800000:
267 		sc->rge_type = MAC_CFG2;
268 		break;
269 	case 0x60900000:
270 		sc->rge_type = MAC_CFG3;
271 		break;
272 	case 0x64000000:
273 		sc->rge_type = MAC_CFG4;
274 		break;
275 	case 0x64100000:
276 		sc->rge_type = MAC_CFG5;
277 		break;
278 	default:
279 		aprint_error(": unknown version 0x%08x\n", hwrev);
280 		return;
281 	}
282 
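	/* Default to simulated (timer-based) interrupt moderation. */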
283 	rge_config_imtype(sc, RGE_IMTYPE_SIM);
284 
285 	/*
286 	 * PCI Express check.
287 	 */
288 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
289 	    &offset, NULL)) {
290 		/* Disable PCIe ASPM and ECPM. */
291 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
292 		    offset + PCIE_LCSR);
293 		reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1 |
294 		    PCIE_LCSR_ENCLKPM);
295 		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
296 		    reg);
297 	}
298 
299 	rge_exit_oob(sc);
300 	rge_hw_init(sc);
301 
302 	rge_get_macaddr(sc, eaddr);
303 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
304 	    ether_sprintf(eaddr));
305 
306 	memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);
307 
308 	rge_set_phy_power(sc, 1);
309 	rge_phy_config(sc);
310 
311 	if (rge_allocmem(sc))
312 		return;
313 
314 	ifp = &sc->sc_ec.ec_if;
315 	ifp->if_softc = sc;
316 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
317 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
318 #ifdef RGE_MPSAFE
319 	ifp->if_extflags = IFEF_MPSAFE;
320 #endif
321 	ifp->if_ioctl = rge_ioctl;
322 	ifp->if_stop = rge_stop;
323 	ifp->if_start = rge_start;
324 	ifp->if_init = rge_init;
325 	ifp->if_watchdog = rge_watchdog;
326 	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT - 1);
327 
328 #if notyet
329 	ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx |
330 	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
331 	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;
332 #endif
333 
334 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
335 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
336 
337 	callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
338 	callout_setfunc(&sc->sc_timeout, rge_tick, sc);
339 
340 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
341 	command |= PCI_COMMAND_MASTER_ENABLE;
342 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
343 
344 	/* Initialize ifmedia structures. */
345 	sc->sc_ec.ec_ifmedia = &sc->sc_media;
346 	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
347 	    rge_ifmedia_sts);
348 	rge_add_media_types(sc);
349 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
350 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
351 	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
352 
353 	if_attach(ifp);
354 	ether_ifattach(ifp, eaddr);
355 }
356 
357 int
358 rge_intr(void *arg)
359 {
360 	struct rge_softc *sc = arg;
361 	struct ifnet *ifp = &sc->sc_ec.ec_if;
362 	uint32_t status;
363 	int claimed = 0, rx, tx;
364 
365 	if (!(ifp->if_flags & IFF_RUNNING))
366 		return (0);
367 
368 	/* Disable interrupts. */
369 	RGE_WRITE_4(sc, RGE_IMR, 0);
370 
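	/*
	 * A legacy INTx line may be shared with other devices, so
	 * bail out unless one of our status bits is set; MSI/MSI-X
	 * vectors are exclusive to this device.
	 */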
371 	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
372 		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
373 			return (0);
374 	}
375 
376 	status = RGE_READ_4(sc, RGE_ISR);
377 	if (status)
378 		RGE_WRITE_4(sc, RGE_ISR, status);
379 
380 	if (status & RGE_ISR_PCS_TIMEOUT)
381 		claimed = 1;
382 
383 	rx = tx = 0;
384 	if (status & sc->rge_intrs) {
385 		if (status &
386 		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
387 			rx |= rge_rxeof(sc);
388 			claimed = 1;
389 		}
390 
391 		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
392 			tx |= rge_txeof(sc);
393 			claimed = 1;
394 		}
395 
396 		if (status & RGE_ISR_SYSTEM_ERR) {
397 			KERNEL_LOCK(1, NULL);
398 			rge_init(ifp);
399 			KERNEL_UNLOCK_ONE(NULL);
400 			claimed = 1;
401 		}
402 	}
403 
404 	if (sc->rge_timerintr) {
405 		if ((tx | rx) == 0) {
406 			/*
407 			 * Nothing needs to be processed; fall back
408 			 * to using TX/RX interrupts.
409 			 */
410 			rge_setup_intr(sc, RGE_IMTYPE_NONE);
411 
412 			/*
413 			 * Collect again, mainly to avoid the possible
414 			 * race introduced by changing interrupt
415 			 * masks.
416 			 */
417 			rge_rxeof(sc);
418 			rge_txeof(sc);
419 		} else
420 			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
421 	} else if (tx | rx) {
422 		/*
423 		 * Assume that using simulated interrupt moderation
424 		 * (hardware timer based) could reduce the interrupt
425 		 * rate.
426 		 */
427 		rge_setup_intr(sc, RGE_IMTYPE_SIM);
428 	}
429 
430 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
431 
432 	return (claimed);
433 }
434 
435 int
436 rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
437 {
438 	struct rge_tx_desc *d = NULL;
439 	struct rge_txq *txq;
440 	bus_dmamap_t txmap;
441 	uint32_t cmdsts, cflags = 0;
442 	int cur, error, i, last, nsegs;
443 
444 #if notyet
445 	/*
446 	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
447 	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
448 	 * take effect.
449 	 */
450 	if ((m->m_pkthdr.csum_flags &
451 	    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
452 		cflags |= RGE_TDEXTSTS_IPCSUM;
453 		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
454 			cflags |= RGE_TDEXTSTS_TCPCSUM;
455 		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
456 			cflags |= RGE_TDEXTSTS_UDPCSUM;
457 	}
458 #endif
459 
460 	txq = &sc->rge_ldata.rge_txq[idx];
461 	txmap = txq->txq_dmamap;
462 
463 	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
464 	switch (error) {
465 	case 0:
466 		break;
467 	case EFBIG: /* mbuf chain is too fragmented */
468 		if (m_defrag(m, M_DONTWAIT) == 0 &&
469 		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
470 		    BUS_DMA_NOWAIT) == 0)
471 			break;
472 
473 		/* FALLTHROUGH */
474 	default:
475 		return (0);
476 	}
477 
478 	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
479 	    BUS_DMASYNC_PREWRITE);
480 
481 	nsegs = txmap->dm_nsegs;
482 
483 	/* Set up hardware VLAN tagging. */
484 	if (vlan_has_tag(m))
485 		cflags |= bswap16(vlan_get_tag(m)) | RGE_TDEXTSTS_VTAG;
486 
487 	last = cur = idx;
488 	cmdsts = RGE_TDCMDSTS_SOF;
489 
490 	for (i = 0; i < txmap->dm_nsegs; i++) {
491 		d = &sc->rge_ldata.rge_tx_list[cur];
492 
493 		d->rge_extsts = htole32(cflags);
494 		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
495 		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));
496 
497 		cmdsts |= txmap->dm_segs[i].ds_len;
498 
499 		if (cur == RGE_TX_LIST_CNT - 1)
500 			cmdsts |= RGE_TDCMDSTS_EOR;
501 
502 		d->rge_cmdsts = htole32(cmdsts);
503 
504 		last = cur;
505 		cmdsts = RGE_TDCMDSTS_OWN;
506 		cur = RGE_NEXT_TX_DESC(cur);
507 	}
508 
509 	/* Set EOF on the last descriptor. */
510 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);
511 
512 	/* Transfer ownership of packet to the chip. */
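	/*
	 * The loop above deliberately left OWN clear on this first
	 * (SOF) descriptor, so the chip never sees a partially built
	 * chain; setting OWN here publishes the whole packet at once.
	 */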
513 	d = &sc->rge_ldata.rge_tx_list[idx];
514 
515 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);
516 
517 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
518 	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
519 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
520 
521 	/* Update info of TX queue and descriptors. */
522 	txq->txq_mbuf = m;
523 	txq->txq_descidx = last;
524 
525 	return (nsegs);
526 }
527 
528 int
529 rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
530 {
531 	struct rge_softc *sc = ifp->if_softc;
532 	//struct ifreq *ifr = (struct ifreq *)data;
533 	int s, error = 0;
534 
535 	s = splnet();
536 
537 	switch (cmd) {
538 	case SIOCSIFFLAGS:
539 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
540 			break;
541 		/*
		 * XXX Set an ifflags callback and let ether_ioctl()
542 		 * handle all of this.
543 		 */
544 		if (ifp->if_flags & IFF_UP) {
545 			if (ifp->if_flags & IFF_RUNNING)
546 				error = ENETRESET;
547 			else
548 				rge_init(ifp);
549 		} else {
550 			if (ifp->if_flags & IFF_RUNNING)
551 				rge_stop(ifp, 1);
552 		}
553 		break;
554 	default:
555 		error = ether_ioctl(ifp, cmd, data);
556 	}
557 
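	/*
	 * ENETRESET from the handlers above means only the RX filter
	 * state needs reprogramming, not a full reinit.
	 */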
558 	if (error == ENETRESET) {
559 		if (ifp->if_flags & IFF_RUNNING)
560 			rge_iff(sc);
561 		error = 0;
562 	}
563 
564 	splx(s);
565 	return (error);
566 }
567 
568 void
569 rge_start(struct ifnet *ifp)
570 {
571 	struct rge_softc *sc = ifp->if_softc;
572 	struct mbuf *m;
573 	int free, idx, used;
574 	int queued = 0;
575 
576 #define LINK_STATE_IS_UP(_s)    \
577 	((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)
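	/*
	 * LINK_STATE_UNKNOWN counts as up so output is not blocked
	 * before the first link-state update arrives.
	 */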
578 
579 	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
580 		IFQ_PURGE(&ifp->if_snd);
581 		return;
582 	}
583 
584 	/*
	 * Calculate free space: (considx - prodidx) mod
	 * RGE_TX_LIST_CNT, the number of unused TX descriptors.
	 */
585 	idx = sc->rge_ldata.rge_txq_prodidx;
586 	free = sc->rge_ldata.rge_txq_considx;
587 	if (free <= idx)
588 		free += RGE_TX_LIST_CNT;
589 	free -= idx;
590 
591 	for (;;) {
592 		if (RGE_TX_NSEGS >= free + 2) {
593 			SET(ifp->if_flags, IFF_OACTIVE);
594 			break;
595 		}
596 
597 		IFQ_DEQUEUE(&ifp->if_snd, m);
598 		if (m == NULL)
599 			break;
600 
601 		used = rge_encap(sc, m, idx);
602 		if (used == 0) {
603 			m_freem(m);
604 			continue;
605 		}
606 
607 		KASSERT(used <= free);
608 		free -= used;
609 
610 		bpf_mtap(ifp, m, BPF_D_OUT);
611 
612 		idx += used;
613 		if (idx >= RGE_TX_LIST_CNT)
614 			idx -= RGE_TX_LIST_CNT;
615 
616 		queued++;
617 	}
618 
619 	if (queued == 0)
620 		return;
621 
622 	/* Set a timeout in case the chip goes out to lunch. */
623 	ifp->if_timer = 5;
624 
625 	sc->rge_ldata.rge_txq_prodidx = idx;
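	/*
	 * OpenBSD serializes the doorbell through ifq_serialize();
	 * here the kick is issued directly instead.
	 */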
626 #if 0
627 	ifq_serialize(ifq, &sc->sc_task);
628 #else
629 	rge_txstart(&sc->sc_task, sc);
630 #endif
631 }
632 
633 void
634 rge_watchdog(struct ifnet *ifp)
635 {
636 	struct rge_softc *sc = ifp->if_softc;
637 
638 	device_printf(sc->sc_dev, "watchdog timeout\n");
639 	if_statinc(ifp, if_oerrors);
640 
641 	rge_init(ifp);
642 }
643 
644 int
645 rge_init(struct ifnet *ifp)
646 {
647 	struct rge_softc *sc = ifp->if_softc;
648 	uint32_t val;
649 	int i;
650 
651 	rge_stop(ifp, 0);
652 
653 	/* Set MAC address. */
654 	rge_set_macaddr(sc, CLLADDR(ifp->if_sadl));
655 
656 	/* Set the maximum frame size. */
657 	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);
658 
659 	/* Initialize the RX descriptor list. */
660 	if (rge_rx_list_init(sc) == ENOBUFS) {
661 		device_printf(sc->sc_dev,
662 		    "init failed: no memory for RX buffers\n");
663 		rge_stop(ifp, 1);
664 		return (ENOBUFS);
665 	}
666 
667 	/* Initialize TX descriptors. */
668 	rge_tx_list_init(sc);
669 
670 	/* Load the addresses of the RX and TX lists into the chip. */
671 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
672 	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
673 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
674 	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
675 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
676 	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
677 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
678 	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
679 
680 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
681 
682 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
683 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
684 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
685 	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);
686 
687 	/* Clear interrupt moderation timer. */
688 	for (i = 0; i < 64; i++)
689 		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);
690 
691 	/* Set the initial RX and TX configurations. */
692 	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
693 	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);
694 
695 	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
696 	rge_write_csi(sc, 0x70c, val | 0x27000000);
697 
698 	/* Enable hardware optimization function. */
699 	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
700 	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);
701 
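	/*
	 * The register writes below appear to follow Realtek's
	 * reference (r8125) driver; most of these MAC OCP registers
	 * are not publicly documented.
	 */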
702 	RGE_WRITE_2(sc, 0x0382, 0x221b);
703 	RGE_WRITE_1(sc, 0x4500, 0);
704 	RGE_WRITE_2(sc, 0x4800, 0);
705 	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);
706 
707 	rge_write_mac_ocp(sc, 0xc140, 0xffff);
708 	rge_write_mac_ocp(sc, 0xc142, 0xffff);
709 
710 	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
711 	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);
712 
713 	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
714 	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
715 	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);
716 
717 	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
718 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
719 		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
720 	else
721 		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);
722 
723 	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);
724 
725 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
726 		val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
727 		rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
728 	} else
729 		RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);
730 
731 	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);
732 
733 	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
734 	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);
735 
736 	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
737 	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);
738 
739 	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
740 	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);
741 
742 	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);
743 
744 	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);
745 
746 	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);
747 
748 	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
749 	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);
750 
751 	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
752 	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);
753 
754 	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
755 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);
756 
757 	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
758 	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);
759 
760 	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
761 	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);
762 
763 	val = rge_read_mac_ocp(sc, 0xe84c) & ~0x0040;
764 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
765 		rge_write_mac_ocp(sc, 0xe84c, 0x00c0);
766 	else
767 		rge_write_mac_ocp(sc, 0xe84c, 0x0080);
768 
769 	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN);
770 
771 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
772 		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);
773 
774 	/* Disable EEE plus. */
775 	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);
776 
777 	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);
778 
779 	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
780 	DELAY(1);
781 	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);
782 
783 	RGE_CLRBIT_4(sc, 0x1880, 0x0030);
784 
785 	rge_write_mac_ocp(sc, 0xe098, 0xc302);
786 
787 	if ((sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0)
788 		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
789 	else
790 		RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
791 
792 	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);
793 
794 	for (i = 0; i < 10; i++) {
795 		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
796 			break;
797 		DELAY(1000);
798 	}
799 
800 	/* Disable RXDV gate. */
801 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
802 	DELAY(2000);
803 
804 	rge_ifmedia_upd(ifp);
805 
806 	/* Enable transmit and receive. */
807 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);
808 
809 	/* Program promiscuous mode and multicast filters. */
810 	rge_iff(sc);
811 
812 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
813 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
814 
815 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
816 
817 	/* Enable interrupts. */
818 	rge_setup_intr(sc, RGE_IMTYPE_SIM);
819 
820 	ifp->if_flags |= IFF_RUNNING;
821 	CLR(ifp->if_flags, IFF_OACTIVE);
822 
823 	callout_schedule(&sc->sc_timeout, 1);
824 
825 	return (0);
826 }
827 
828 /*
829  * Stop the adapter and free any mbufs allocated to the RX and TX lists.
830  */
831 void
832 rge_stop(struct ifnet *ifp, int disable)
833 {
834 	struct rge_softc *sc = ifp->if_softc;
835 	int i;
836 
837 	if (disable) {
838 		callout_halt(&sc->sc_timeout, NULL);
839 	} else
840 		callout_stop(&sc->sc_timeout);
841 
842 	ifp->if_timer = 0;
843 	ifp->if_flags &= ~IFF_RUNNING;
844 	sc->rge_timerintr = 0;
845 
846 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
847 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
848 	    RGE_RXCFG_ERRPKT);
849 
850 	RGE_WRITE_4(sc, RGE_IMR, 0);
851 
852 	/* Clear timer interrupts. */
853 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
854 	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
855 	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
856 	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);
857 
858 	rge_reset(sc);
859 
860 //	intr_barrier(sc->sc_ih);
861 //	ifq_barrier(&ifp->if_snd);
862 /*	ifq_clr_oactive(&ifp->if_snd); Sevan - OpenBSD queue API */
863 
864 	if (sc->rge_head != NULL) {
865 		m_freem(sc->rge_head);
866 		sc->rge_head = sc->rge_tail = NULL;
867 	}
868 
869 	/* Free the TX list buffers. */
870 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
871 		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
872 			bus_dmamap_unload(sc->sc_dmat,
873 			    sc->rge_ldata.rge_txq[i].txq_dmamap);
874 			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
875 			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
876 		}
877 	}
878 
879 	/* Free the RX list buffers. */
880 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
881 		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
882 			bus_dmamap_unload(sc->sc_dmat,
883 			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
884 			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
885 			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
886 		}
887 	}
888 }
889 
890 /*
891  * Set media options.
892  */
893 int
894 rge_ifmedia_upd(struct ifnet *ifp)
895 {
896 	struct rge_softc *sc = ifp->if_softc;
897 	struct ifmedia *ifm = &sc->sc_media;
898 	int anar, gig, val;
899 
900 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
901 		return (EINVAL);
902 
903 	/* Disable Gigabit Lite. */
904 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
905 	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
906 
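	/*
	 * 2.5GbE advertisement lives in PHY OCP register 0xa5d4
	 * rather than in the standard MII registers; RGE_ADV_2500TFDX
	 * is set below only for autoselect and 2500baseT.
	 */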
907 	val = rge_read_phy_ocp(sc, 0xa5d4);
908 	val &= ~RGE_ADV_2500TFDX;
909 
910 	anar = gig = 0;
911 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
912 	case IFM_AUTO:
913 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
914 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
915 		val |= RGE_ADV_2500TFDX;
916 		break;
917 	case IFM_2500_T:
918 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
919 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
920 		val |= RGE_ADV_2500TFDX;
921 		ifp->if_baudrate = IF_Mbps(2500);
922 		break;
923 	case IFM_1000_T:
924 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
925 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
926 		ifp->if_baudrate = IF_Gbps(1);
927 		break;
928 	case IFM_100_TX:
929 		gig = rge_read_phy(sc, 0, MII_100T2CR) &
930 		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
931 		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
932 		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
933 		    ANAR_TX | ANAR_10_FD | ANAR_10;
934 		ifp->if_baudrate = IF_Mbps(100);
935 		break;
936 	case IFM_10_T:
937 		gig = rge_read_phy(sc, 0, MII_100T2CR) &
938 		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
939 		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
940 		    ANAR_10_FD | ANAR_10 : ANAR_10;
941 		ifp->if_baudrate = IF_Mbps(10);
942 		break;
943 	default:
944 		device_printf(sc->sc_dev,
945 		    "unsupported media type\n");
946 		return (EINVAL);
947 	}
948 
949 	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
950 	rge_write_phy(sc, 0, MII_100T2CR, gig);
951 	rge_write_phy_ocp(sc, 0xa5d4, val);
952 	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
953 	    BMCR_STARTNEG);
954 
955 	return (0);
956 }
957 
958 /*
959  * Report current media status.
960  */
961 void
962 rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
963 {
964 	struct rge_softc *sc = ifp->if_softc;
965 	uint16_t status = 0;
966 
967 	ifmr->ifm_status = IFM_AVALID;
968 	ifmr->ifm_active = IFM_ETHER;
969 
970 	if (rge_get_link_status(sc)) {
971 		ifmr->ifm_status |= IFM_ACTIVE;
972 
973 		status = RGE_READ_2(sc, RGE_PHYSTAT);
974 		if ((status & RGE_PHYSTAT_FDX) ||
975 		    (status & RGE_PHYSTAT_2500MBPS))
976 			ifmr->ifm_active |= IFM_FDX;
977 		else
978 			ifmr->ifm_active |= IFM_HDX;
979 
980 		if (status & RGE_PHYSTAT_10MBPS)
981 			ifmr->ifm_active |= IFM_10_T;
982 		else if (status & RGE_PHYSTAT_100MBPS)
983 			ifmr->ifm_active |= IFM_100_TX;
984 		else if (status & RGE_PHYSTAT_1000MBPS)
985 			ifmr->ifm_active |= IFM_1000_T;
986 		else if (status & RGE_PHYSTAT_2500MBPS)
987 			ifmr->ifm_active |= IFM_2500_T;
988 	}
989 }
990 
991 /*
992  * Allocate memory for RX/TX rings.
993  */
994 int
995 rge_allocmem(struct rge_softc *sc)
996 {
997 	int error, i;
998 
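	/*
	 * Each ring takes the usual bus_dma(9) four-step: create a
	 * map, allocate DMA-safe memory, map it into kernel virtual
	 * address space, then load its physical address into the map.
	 */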
999 	/* Allocate DMA'able memory for the TX ring. */
1000 	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
1001 	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
1002 	if (error) {
1003 		aprint_error_dev(sc->sc_dev, "can't create TX list map\n");
1004 		return (error);
1005 	}
1006 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
1007 	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
1008 	    BUS_DMA_NOWAIT);
1009 	if (error) {
1010 		aprint_error_dev(sc->sc_dev, "can't alloc TX list\n");
1011 		return (error);
1012 	}
1013 
1014 	/* Map the TX ring into kernel virtual address space. */
1015 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
1016 	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
1017 	    (void **) &sc->rge_ldata.rge_tx_list,
1018 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1019 	if (error) {
1020 		aprint_error_dev(sc->sc_dev, "can't map TX dma buffers\n");
1021 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
1022 		    sc->rge_ldata.rge_tx_listnseg);
1023 		return (error);
1024 	}
1025 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
1026 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
1027 	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
1028 	if (error) {
1029 		aprint_error_dev(sc->sc_dev, "can't load TX dma map\n");
1030 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
1031 		bus_dmamem_unmap(sc->sc_dmat,
1032 		    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
1033 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
1034 		    sc->rge_ldata.rge_tx_listnseg);
1035 		return (error);
1036 	}
1037 
1038 	/* Create DMA maps for TX buffers. */
1039 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
1040 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
1041 		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
1042 		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
1043 		if (error) {
1044 			aprint_error_dev(sc->sc_dev, "can't create DMA map for TX\n");
1045 			return (error);
1046 		}
1047 	}
1048 
1049 	/* Allocate DMA'able memory for the RX ring. */
1050 	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
1051 	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
1052 	if (error) {
1053 		aprint_error_dev(sc->sc_dev, "can't create RX list map\n");
1054 		return (error);
1055 	}
1056 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
1057 	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
1058 	    BUS_DMA_NOWAIT);
1059 	if (error) {
1060 		aprint_error_dev(sc->sc_dev, "can't alloc RX list\n");
1061 		return (error);
1062 	}
1063 
1064 	/* Map the RX ring into kernel virtual address space. */
1065 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
1066 	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
1067 	    (void **) &sc->rge_ldata.rge_rx_list,
1068 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1069 	if (error) {
1070 		aprint_error_dev(sc->sc_dev, "can't map RX dma buffers\n");
1071 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
1072 		    sc->rge_ldata.rge_rx_listnseg);
1073 		return (error);
1074 	}
1075 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
1076 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1077 	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
1078 	if (error) {
1079 		aprint_error_dev(sc->sc_dev, "can't load RX dma map\n");
1080 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
1081 		bus_dmamem_unmap(sc->sc_dmat,
1082 		    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
1083 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
1084 		    sc->rge_ldata.rge_rx_listnseg);
1085 		return (error);
1086 	}
1087 
1088 	/* Create DMA maps for RX buffers. */
1089 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
1090 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
1091 		    RGE_JUMBO_FRAMELEN, 0, 0,
1092 		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
1093 		if (error) {
1094 			aprint_error_dev(sc->sc_dev, "can't create DMA map for RX\n");
1095 			return (error);
1096 		}
1097 	}
1098 
1099 	return (error);
1100 }
1101 
1102 /*
1103  * Initialize the RX descriptor and attach an mbuf cluster.
1104  */
1105 int
1106 rge_newbuf(struct rge_softc *sc, int idx)
1107 {
1108 	struct mbuf *m;
1109 	struct rge_rx_desc *r;
1110 	struct rge_rxq *rxq;
1111 	bus_dmamap_t rxmap;
1112 
1113 	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
1114 	if (m == NULL)
1115 		return (ENOBUFS);
1116 
1117 	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;
1118 
1119 	rxq = &sc->rge_ldata.rge_rxq[idx];
1120 	rxmap = rxq->rxq_dmamap;
1121 
1122 	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT))
1123 		goto out;
1124 
1125 	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
1126 	    BUS_DMASYNC_PREREAD);
1127 
1128 	/* Map the segments into RX descriptors. */
1129 	r = &sc->rge_ldata.rge_rx_list[idx];
1130 
1131 	if (RGE_OWN(r)) {
1132 		device_printf(sc->sc_dev, "tried to map busy RX descriptor\n");
1133 		goto out;
1134 	}
1135 
1136 	rxq->rxq_mbuf = m;
1137 
1138 	r->rge_extsts = 0;
1139 	r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
1140 	r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));
1141 
1142 	r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
1143 	if (idx == RGE_RX_LIST_CNT - 1)
1144 		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
1145 
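	/* Hand the descriptor to the chip only once it is fully set up. */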
1146 	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
1147 
1148 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1149 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1150 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1151 
1152 	return (0);
1153 out:
1154 	if (m != NULL)
1155 		m_freem(m);
1156 	return (ENOMEM);
1157 }
1158 
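/*
 * Give a descriptor back to the chip unchanged, reusing the buffer
 * already loaded, after a receive error or an allocation failure.
 */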
1159 void
1160 rge_discard_rxbuf(struct rge_softc *sc, int idx)
1161 {
1162 	struct rge_rx_desc *r;
1163 
1164 	r = &sc->rge_ldata.rge_rx_list[idx];
1165 
1166 	r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
1167 	r->rge_extsts = 0;
1168 	if (idx == RGE_RX_LIST_CNT - 1)
1169 		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
1170 	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
1171 
1172 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1173 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1174 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1175 }
1176 
1177 int
1178 rge_rx_list_init(struct rge_softc *sc)
1179 {
1180 	int i;
1181 
1182 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
1183 
1184 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
1185 		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
1186 		if (rge_newbuf(sc, i) == ENOBUFS)
1187 			return (ENOBUFS);
1188 	}
1189 
1190 	sc->rge_ldata.rge_rxq_prodidx = sc->rge_ldata.rge_rxq_considx = 0;
1191 	sc->rge_head = sc->rge_tail = NULL;
1192 
1193 	return (0);
1194 }
1195 
1196 void
1197 rge_tx_list_init(struct rge_softc *sc)
1198 {
1199 	int i;
1200 
1201 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
1202 
1203 	for (i = 0; i < RGE_TX_LIST_CNT; i++)
1204 		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
1205 
1206 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
1207 	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
1208 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1209 
1210 	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
1211 }
1212 
1213 int
1214 rge_rxeof(struct rge_softc *sc)
1215 {
1216 	struct mbuf *m;
1217 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1218 	struct rge_rx_desc *cur_rx;
1219 	struct rge_rxq *rxq;
1220 	uint32_t rxstat, extsts;
1221 	int i, total_len, rx = 0;
1222 
1223 	for (i = sc->rge_ldata.rge_rxq_considx; ; i = RGE_NEXT_RX_DESC(i)) {
1224 		/* Invalidate the descriptor memory. */
1225 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1226 		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1227 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1228 
1229 		cur_rx = &sc->rge_ldata.rge_rx_list[i];
1230 
1231 		if (RGE_OWN(cur_rx))
1232 			break;
1233 
1234 		rxstat = letoh32(cur_rx->rge_cmdsts);
1235 		extsts = letoh32(cur_rx->rge_extsts);
1236 
1237 		total_len = RGE_RXBYTES(cur_rx);
1238 		rxq = &sc->rge_ldata.rge_rxq[i];
1239 		m = rxq->rxq_mbuf;
1240 		rxq->rxq_mbuf = NULL;
1241 		rx = 1;
1242 
1243 		/* Invalidate the RX mbuf and unload its map. */
1244 		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
1245 		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1246 		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);
1247 
1248 		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
1249 		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
1250 			rge_discard_rxbuf(sc, i);
1251 			continue;
1252 		}
1253 
1254 		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
1255 			if_statinc(ifp, if_ierrors);
1256 			/*
1257 			 * If this is part of a multi-fragment packet,
1258 			 * discard all the pieces.
1259 			 */
1260 			if (sc->rge_head != NULL) {
1261 				m_freem(sc->rge_head);
1262 				sc->rge_head = sc->rge_tail = NULL;
1263 			}
1264 			rge_discard_rxbuf(sc, i);
1265 			continue;
1266 		}
1267 
1268 		/*
1269 		 * If allocating a replacement mbuf fails,
1270 		 * reload the current one.
1271 		 */
1272 
1273 		if (rge_newbuf(sc, i) == ENOBUFS) {
1274 			if (sc->rge_head != NULL) {
1275 				m_freem(sc->rge_head);
1276 				sc->rge_head = sc->rge_tail = NULL;
1277 			}
1278 			rge_discard_rxbuf(sc, i);
1279 			continue;
1280 		}
1281 
1282 		m_set_rcvif(m, ifp);
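		/*
		 * Defensive multi-fragment handling: chain this buffer
		 * onto rge_head/rge_tail if a packet is in progress.
		 * Given the SOF|EOF check above, single-buffer packets
		 * are the normal case here.
		 */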
1283 		if (sc->rge_head != NULL) {
1284 			m->m_len = total_len;
1285 			/*
1286 			 * Special case: if there are 4 or fewer bytes
1287 			 * in this buffer, the mbuf can be discarded:
1288 			 * the last 4 bytes are the CRC, which we don't
1289 			 * care about anyway.
1290 			 */
1291 			if (m->m_len <= ETHER_CRC_LEN) {
1292 				sc->rge_tail->m_len -=
1293 				    (ETHER_CRC_LEN - m->m_len);
1294 				m_freem(m);
1295 			} else {
1296 				m->m_len -= ETHER_CRC_LEN;
1297 				m->m_flags &= ~M_PKTHDR;
1298 				sc->rge_tail->m_next = m;
1299 			}
1300 			m = sc->rge_head;
1301 			sc->rge_head = sc->rge_tail = NULL;
1302 			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1303 		} else
1304 	#if 0
1305 			m->m_pkthdr.len = m->m_len =
1306 			    (total_len - ETHER_CRC_LEN);
1307 	#else
1308 		{
1309 			m->m_pkthdr.len = m->m_len = total_len;
1310 			m->m_flags |= M_HASFCS;
1311 		}
1312 	#endif
1313 
1314 #if notyet
1315 		/* Check IP header checksum. */
1316 		if (!(rxstat & RGE_RDCMDSTS_IPCSUMERR) &&
1317 		    (extsts & RGE_RDEXTSTS_IPV4))
1318 			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
1319 
1320 		/* Check TCP/UDP checksum. */
1321 		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
1322 		    (((rxstat & RGE_RDCMDSTS_TCPPKT) &&
1323 		    !(rxstat & RGE_RDCMDSTS_TCPCSUMERR)) ||
1324 		    ((rxstat & RGE_RDCMDSTS_UDPPKT) &&
1325 		    !(rxstat & RGE_RDCMDSTS_UDPCSUMERR))))
1326 			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
1327 			    M_UDP_CSUM_IN_OK;
1328 #endif
1329 
1330 		if (extsts & RGE_RDEXTSTS_VTAG) {
1331 			vlan_set_tag(m,
1332 			    bswap16(extsts & RGE_RDEXTSTS_VLAN_MASK));
1333 		}
1334 
1335 		if_percpuq_enqueue(ifp->if_percpuq, m);
1336 	}
1337 
1338 	sc->rge_ldata.rge_rxq_considx = i;
1339 
1340 	return (rx);
1341 }
1342 
1343 int
1344 rge_txeof(struct rge_softc *sc)
1345 {
1346 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1347 	struct rge_txq *txq;
1348 	uint32_t txstat;
1349 	int cons, idx, prod;
1350 	int free = 0;
1351 
1352 	prod = sc->rge_ldata.rge_txq_prodidx;
1353 	cons = sc->rge_ldata.rge_txq_considx;
1354 
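	/*
	 * free encodes the result: 0 = nothing reclaimed, 1 = some
	 * descriptors reclaimed, 2 = stopped on a descriptor the chip
	 * still owns (transmission in progress).
	 */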
1355 	while (prod != cons) {
1356 		txq = &sc->rge_ldata.rge_txq[cons];
1357 		idx = txq->txq_descidx;
1358 
1359 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
1360 		    idx * sizeof(struct rge_tx_desc),
1361 		    sizeof(struct rge_tx_desc),
1362 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1363 
1364 		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);
1365 
1366 		if (txstat & RGE_TDCMDSTS_OWN) {
1367 			free = 2;
1368 			break;
1369 		}
1370 
1371 		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
1372 		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1373 		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
1374 		m_freem(txq->txq_mbuf);
1375 		txq->txq_mbuf = NULL;
1376 
1377 		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
1378 			if_statinc(ifp, if_collisions);
1379 		if (txstat & RGE_TDCMDSTS_TXERR)
1380 			if_statinc(ifp, if_oerrors);
1381 
1382 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
1383 		    idx * sizeof(struct rge_tx_desc),
1384 		    sizeof(struct rge_tx_desc),
1385 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1386 
1387 		cons = RGE_NEXT_TX_DESC(idx);
1388 		free = 1;
1389 	}
1390 
1391 	if (free == 0)
1392 		return (0);
1393 
1394 	sc->rge_ldata.rge_txq_considx = cons;
1395 
1396 #if 0
1397 	if (ifq_is_oactive(&ifp->if_snd))
1398 		ifq_restart(&ifp->if_snd);
1399 	else if (free == 2)
1400 		ifq_serialize(&ifp->if_snd, &sc->sc_task);
1401 	else
1402 		ifp->if_timer = 0;
1403 #else
1404 #if 0
1405 	if (!IF_IS_EMPTY(&ifp->if_snd))
1406 		rge_start(ifp);
1407 	else
1408 	if (free == 2)
1409 		if (0) { rge_txstart(&sc->sc_task, sc); }
1410 	else
1411 #endif
1412 		ifp->if_timer = 0;
1413 #endif
1414 
1415 	return (1);
1416 }
1417 
1418 void
1419 rge_reset(struct rge_softc *sc)
1420 {
1421 	int i;
1422 
1423 	/* Enable RXDV gate. */
1424 	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
1425 	DELAY(2000);
1426 
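	/*
	 * Wait for the RX and TX FIFOs to drain so no DMA is in
	 * flight when the soft reset below takes effect.
	 */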
1427 	for (i = 0; i < 3000; i++) {
1428 		DELAY(50);
1429 		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
1430 		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
1431 		    RGE_MCUCMD_TXFIFO_EMPTY))
1432 			break;
1433 	}
1434 	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
1435 		for (i = 0; i < 3000; i++) {
1436 			DELAY(50);
1437 			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
1438 				break;
1439 		}
1440 	}
1441 
1442 	DELAY(2000);
1443 
1444 	/* Soft reset. */
1445 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);
1446 
1447 	for (i = 0; i < RGE_TIMEOUT; i++) {
1448 		DELAY(100);
1449 		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
1450 			break;
1451 	}
1452 	if (i == RGE_TIMEOUT)
1453 		device_printf(sc->sc_dev, "reset never completed!\n");
1454 }
1455 
1456 void
1457 rge_iff(struct rge_softc *sc)
1458 {
1459 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1460 	struct ethercom *ec = &sc->sc_ec;
1461 	struct ether_multi *enm;
1462 	struct ether_multistep step;
1463 	uint32_t hashes[2];
1464 	uint32_t rxfilt;
1465 	int h = 0;
1466 
1467 	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
1468 	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
1469 	ifp->if_flags &= ~IFF_ALLMULTI;
1470 
1471 	/*
1472 	 * Always accept frames destined to our station address.
1473 	 * Always accept broadcast frames.
1474 	 */
1475 	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;
1476 
1477 	if (ifp->if_flags & IFF_PROMISC) {
1478  allmulti:
1479 		ifp->if_flags |= IFF_ALLMULTI;
1480 		rxfilt |= RGE_RXCFG_MULTI;
1481 		if (ifp->if_flags & IFF_PROMISC)
1482 			rxfilt |= RGE_RXCFG_ALLPHYS;
1483 		hashes[0] = hashes[1] = 0xffffffff;
1484 	} else {
1485 		rxfilt |= RGE_RXCFG_MULTI;
1486 		/* Program new filter. */
1487 		memset(hashes, 0, sizeof(hashes));
1488 
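		/*
		 * Hash each address with big-endian CRC32 and use the
		 * top 6 bits to pick one of 64 bits across the two
		 * 32-bit multicast filter registers.
		 */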
1489 		ETHER_LOCK(ec);
1490 		ETHER_FIRST_MULTI(step, ec, enm);
1491 		while (enm != NULL) {
1492 			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1493 			    ETHER_ADDR_LEN) != 0) {
1494 				ETHER_UNLOCK(ec);
1495 				goto allmulti;
1496 			}
1497 			h = ether_crc32_be(enm->enm_addrlo,
1498 			    ETHER_ADDR_LEN) >> 26;
1499 
1500 			if (h < 32)
1501 				hashes[0] |= (1 << h);
1502 			else
1503 				hashes[1] |= (1 << (h - 32));
1504 
1505 			ETHER_NEXT_MULTI(step, enm);
1506 		}
1507 		ETHER_UNLOCK(ec);
1508 	}
1509 
1510 	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
1511 	RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
1512 	RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
1513 }
1514 
1515 void
1516 rge_set_phy_power(struct rge_softc *sc, int on)
1517 {
1518 	int i;
1519 
1520 	if (on) {
1521 		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);
1522 
1523 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);
1524 
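		/*
		 * Poll the PHY power state (OCP 0xa420 low bits) until
		 * it reports ready (3), as Realtek's reference driver
		 * appears to do.
		 */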
1525 		for (i = 0; i < RGE_TIMEOUT; i++) {
1526 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
1527 				break;
1528 			DELAY(1000);
1529 		}
1530 	} else {
1531 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
1532 		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
1533 		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
1534 	}
1535 }
1536 
1537 void
1538 rge_phy_config(struct rge_softc *sc)
1539 {
1540 	/* Read microcode version. */
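	/*
	 * PHY OCP registers 0xa436/0xa438 form an address/data pair
	 * providing an indirect window into PHY parameter space.
	 */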
1541 	rge_write_phy_ocp(sc, 0xa436, 0x801e);
1542 	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);
1543 
1544 	switch (sc->rge_type) {
1545 	case MAC_CFG2:
1546 		rge_phy_config_mac_cfg2(sc);
1547 		break;
1548 	case MAC_CFG3:
1549 		rge_phy_config_mac_cfg3(sc);
1550 		break;
1551 	case MAC_CFG4:
1552 		rge_phy_config_mac_cfg4(sc);
1553 		break;
1554 	case MAC_CFG5:
1555 		rge_phy_config_mac_cfg5(sc);
1556 		break;
1557 	default:
1558 		break;	/* Can't happen. */
1559 	}
1560 
1561 	rge_write_phy(sc, 0x0a5b, 0x12,
1562 	    rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);
1563 
1564 	/* Disable EEE. */
1565 	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
1566 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
1567 		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
1568 		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
1569 	}
1570 	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
1571 	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
1572 	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
1573 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
1574 	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);
1575 
1576 	rge_patch_phy_mcu(sc, 1);
1577 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
1578 	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
1579 	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
1580 	rge_patch_phy_mcu(sc, 0);
1581 }
1582 
1583 void
1584 rge_phy_config_mac_cfg2(struct rge_softc *sc)
1585 {
1586 	uint16_t val;
1587 	int i;
1588 
1589 	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
1590 		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
1591 		    rtl8125_mac_cfg2_ephy[i].val);
1592 
1593 	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);
1594 
1595 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
1596 	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
1597 	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
1598 	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
1599 	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
1600 	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
1601 	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
1602 	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
1603 	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
1604 	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
1605 	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
1606 	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
1607 	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);
1608 
1609 	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
1610 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1611 	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
1612 	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
1613 	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
1614 	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
1615 	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
1616 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1617 	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
1618 	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
1619 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1620 	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
1621 	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
1622 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1623 	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
1624 	rge_write_phy_ocp(sc, 0xa436, 0x8102);
1625 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1626 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1627 	rge_write_phy_ocp(sc, 0xa436, 0x8105);
1628 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1629 	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
1630 	rge_write_phy_ocp(sc, 0xa436, 0x8100);
1631 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1632 	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
1633 	rge_write_phy_ocp(sc, 0xa436, 0x8104);
1634 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1635 	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
1636 	rge_write_phy_ocp(sc, 0xa436, 0x8106);
1637 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1638 	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
1639 	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
1640 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1641 	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
1642 	rge_write_phy_ocp(sc, 0xa436, 0x80df);
1643 	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
1644 	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
1645 	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
1646 	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
1647 	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
1648 	rge_write_phy_ocp(sc, 0xa436, 0x819f);
1649 	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
1650 	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
1651 	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
1652 	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
1653 	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
1654 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1655 }
1656 
1657 void
1658 rge_phy_config_mac_cfg3(struct rge_softc *sc)
1659 {
1660 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1661 	uint16_t val;
1662 	int i;
1663 	static const uint16_t mac_cfg3_a438_value[] =
1664 	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
1665 	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };
1666 
1667 	static const uint16_t mac_cfg3_b88e_value[] =
1668 	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
1669 	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
1670 	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
1671 	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
1672 	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
1673 	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };
1674 
1675 	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
1676 		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
1677 		    rtl8125_mac_cfg3_ephy[i].val);
1678 
1679 	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
1680 	rge_write_ephy(sc, 0x002a, val | 0x3000);
1681 	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
1682 	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
1683 	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
1684 	rge_write_ephy(sc, 0x0002, 0x6042);
1685 	rge_write_ephy(sc, 0x0006, 0x0014);
1686 	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
1687 	rge_write_ephy(sc, 0x006a, val | 0x3000);
1688 	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
1689 	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
1690 	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
1691 	rge_write_ephy(sc, 0x0042, 0x6042);
1692 	rge_write_ephy(sc, 0x0046, 0x0014);
1693 
1694 	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);
1695 
1696 	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
1697 	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
1698 	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
1699 	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
1700 	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
1701 	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
1702 	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
1703 	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
1704 	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
1705 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
1706 	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
1707 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
1708 	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
1709 	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
1710 	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
1711 	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
1712 	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
1713 	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
1714 	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
1715 	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
1716 	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
1717 	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
1718 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1719 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
1720 	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
1721 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1722 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
1723 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
1724 	    32);
1725 	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
1726 	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1727 	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
1728 	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1729 
1730 	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
1731 	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
1732 		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
1733 	for (i = 0; i < 26; i++)
1734 		rge_write_phy_ocp(sc, 0xa438, 0);
1735 	rge_write_phy_ocp(sc, 0xa436, 0x8257);
1736 	rge_write_phy_ocp(sc, 0xa438, 0x020f);
1737 	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
1738 	rge_write_phy_ocp(sc, 0xa438, 0x7843);
1739 
1740 	rge_patch_phy_mcu(sc, 1);
1741 	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1742 	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1743 	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
1744 		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
1745 		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
1746 	}
1747 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1748 	rge_patch_phy_mcu(sc, 0);
1749 
1750 	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
1751 	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
1752 	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
1753 	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
1754 	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
1755 	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
1756 	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
1757 	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
1758 	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
1759 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1760 }
1761 
1762 void
1763 rge_phy_config_mac_cfg4(struct rge_softc *sc)
1764 {
1765 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1766 	uint16_t val;
1767 	int i;
1768 	static const uint16_t mac_cfg4_b87c_value[] =
1769 	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
1770 	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
1771 	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
1772 	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
1773 	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
1774 	      0x80b0, 0x0f31 };
1775 
1776 	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
1777 		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
1778 		    rtl8125_mac_cfg4_ephy[i].val);
1779 
1780 	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
1781 	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
1782 	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
1783 	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
1784 	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
1785 	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
1786 	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
1787 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1788 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);
1789 
1790 	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);
1791 
1792 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1793 	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
1794 	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
1795 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1796 	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
1797 	for (i = 0; i < 6; i++) {
1798 		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
1799 		if (i < 3)
1800 			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
1801 		else
1802 			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
1803 	}
1804 	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
1805 	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
1806 	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
1807 	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
1808 	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
1809 	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
1810 	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
1811 	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
1812 	rge_write_phy_ocp(sc, 0xb87c, 0x80f2);
1813 	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
1814 	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
1815 	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
1816 	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
1817 	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
1818 	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
1819 	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
1820 	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
1821 	rge_write_phy_ocp(sc, 0xac4e, 0x0db4);
1822 	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
1823 	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
1824 	rge_write_phy_ocp(sc, 0xad08, 0x0007);
1825 	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
1826 		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
1827 		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
1828 	}
1829 	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
1830 	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
1831 	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
1832 	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
1833 	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
1834 	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
1835 	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
1836 	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
1837 	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
1838 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
1839 	    32);
1840 	rge_write_phy_ocp(sc, 0xa436, 0x816c);
1841 	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
1842 	rge_write_phy_ocp(sc, 0xa436, 0x8170);
1843 	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
1844 	rge_write_phy_ocp(sc, 0xa436, 0x8174);
1845 	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
1846 	rge_write_phy_ocp(sc, 0xa436, 0x8178);
1847 	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
1848 	rge_write_phy_ocp(sc, 0xa436, 0x817c);
1849 	rge_write_phy_ocp(sc, 0xa438, 0x0719);
1850 	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
1851 	rge_write_phy_ocp(sc, 0xa438, 0x0400);
1852 	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
1853 	rge_write_phy_ocp(sc, 0xa438, 0x0404);
1854 	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
1855 	for (i = 0; i < 6; i++) {
1856 		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
1857 		if (i == 2)
1858 			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
1859 		else
1860 			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
1861 	}
1862 	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
1863 	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
1864 	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
1865 	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
1866 	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
1867 	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
1868 	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
1869 	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
1870 	rge_write_phy_ocp(sc, 0xa436, 0x8217);
1871 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1872 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1873 	rge_write_phy_ocp(sc, 0xa436, 0x821a);
1874 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1875 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1876 	rge_write_phy_ocp(sc, 0xa436, 0x80da);
1877 	rge_write_phy_ocp(sc, 0xa438, 0x0403);
1878 	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
1879 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1880 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
1881 	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
1882 	rge_write_phy_ocp(sc, 0xa438, 0x0384);
1883 	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
1884 	rge_write_phy_ocp(sc, 0xa438, 0x2007);
1885 	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
1886 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1887 	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
1888 	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
1889 	rge_write_phy_ocp(sc, 0xa438, 0xf009);
1890 	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
1891 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1892 	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
1893 	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
1894 	rge_write_phy_ocp(sc, 0xa438, 0xf083);
1895 	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
1896 	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
1897 	rge_write_phy_ocp(sc, 0xa436, 0x80df);
1898 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1899 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
1900 	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
1901 	rge_write_phy_ocp(sc, 0xa438, 0x2007);
1902 	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
1903 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1904 	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
1905 	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
1906 	rge_write_phy_ocp(sc, 0xa438, 0x8009);
1907 	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
1908 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1909 	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
1910 	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
1911 	rge_write_phy_ocp(sc, 0xa438, 0x200a);
1912 	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
1913 	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
1914 	rge_write_phy_ocp(sc, 0xa436, 0x809f);
1915 	rge_write_phy_ocp(sc, 0xa438, 0x6073);
1916 	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
1917 	rge_write_phy_ocp(sc, 0xa438, 0x000b);
1918 	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
1919 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1920 	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
1921 	rge_patch_phy_mcu(sc, 1);
1922 	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1923 	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1924 	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
1925 	rge_write_phy_ocp(sc, 0xb890, 0x0000);
1926 	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
1927 	rge_write_phy_ocp(sc, 0xb890, 0x0103);
1928 	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
1929 	rge_write_phy_ocp(sc, 0xb890, 0x0507);
1930 	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
1931 	rge_write_phy_ocp(sc, 0xb890, 0x090b);
1932 	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
1933 	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
1934 	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
1935 	rge_write_phy_ocp(sc, 0xb890, 0x1012);
1936 	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
1937 	rge_write_phy_ocp(sc, 0xb890, 0x1416);
1938 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1939 	rge_patch_phy_mcu(sc, 0);
1940 	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
1941 	RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
1942 	rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
1943 	rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
1944 	rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
1945 	rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
1946 	rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
1947 	RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
1948 	rge_write_phy_ocp(sc, 0xa436, 0x817d);
1949 	RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
1950 }
1951 
1952 void
1953 rge_phy_config_mac_cfg5(struct rge_softc *sc)
1954 {
1955 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1956 	uint16_t val;
1957 	int i;
1958 
1959 	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
1960 		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
1961 		    rtl8125_mac_cfg5_ephy[i].val);
1962 
1963 	val = rge_read_ephy(sc, 0x0022) & ~0x0030;
1964 	rge_write_ephy(sc, 0x0022, val | 0x0020);
1965 	val = rge_read_ephy(sc, 0x0062) & ~0x0030;
1966 	rge_write_ephy(sc, 0x0062, val | 0x0020);
1967 
1968 	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);
1969 
1970 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1971 	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
1972 	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
1973 	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
1974 	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
1975 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
1976 	    32);
1977 	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
1978 	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
1979 	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
1980 	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
1981 	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
1982 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1983 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
1984 	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
1985 	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
1986 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1987 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
1988 	for (i = 0; i < 10; i++) {
1989 		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
1990 		rge_write_phy_ocp(sc, 0xa438, 0x2417);
1991 	}
1992 	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
1993 	val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
1994 	rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
1995 }
1996 
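/*
 * Load the PHY MCU microcode patch for this chip revision, unless the
 * version already programmed into the PHY matches mcode_version.
 */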
1997 void
1998 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
1999 {
2000 	if (sc->rge_mcodever != mcode_version) {
2001 		int i;
2002 
2003 		rge_patch_phy_mcu(sc, 1);
2004 
2005 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
2006 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
2007 			if (sc->rge_type == MAC_CFG2)
2008 				rge_write_phy_ocp(sc, 0xa438, 0x8600);
2009 			else
2010 				rge_write_phy_ocp(sc, 0xa438, 0x8601);
2011 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
2012 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
2013 
2014 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
2015 		}
2016 
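		/*
		 * Download the microcode patch table matching the MAC
		 * revision; each table is a list of register/value
		 * pairs supplied by Realtek.
		 */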
2017 		if (sc->rge_type == MAC_CFG2) {
2018 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
2019 				rge_write_phy_ocp(sc,
2020 				    rtl8125_mac_cfg2_mcu[i].reg,
2021 				    rtl8125_mac_cfg2_mcu[i].val);
2022 			}
2023 		} else if (sc->rge_type == MAC_CFG3) {
2024 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
2025 				rge_write_phy_ocp(sc,
2026 				    rtl8125_mac_cfg3_mcu[i].reg,
2027 				    rtl8125_mac_cfg3_mcu[i].val);
2028 			}
2029 		} else if (sc->rge_type == MAC_CFG4) {
2030 			for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
2031 				rge_write_phy_ocp(sc,
2032 				    rtl8125_mac_cfg4_mcu[i].reg,
2033 				    rtl8125_mac_cfg4_mcu[i].val);
2034 			}
2035 		} else if (sc->rge_type == MAC_CFG5) {
2036 			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
2037 				rge_write_phy_ocp(sc,
2038 				    rtl8125_mac_cfg5_mcu[i].reg,
2039 				    rtl8125_mac_cfg5_mcu[i].val);
2040 			}
2041 		}
2042 
2043 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
2044 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
2045 
2046 			rge_write_phy_ocp(sc, 0xa436, 0);
2047 			rge_write_phy_ocp(sc, 0xa438, 0);
2048 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
2049 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
2050 			rge_write_phy_ocp(sc, 0xa438, 0);
2051 		}
2052 
2053 		rge_patch_phy_mcu(sc, 0);
2054 
2055 		/* Write microcode version. */
2056 		rge_write_phy_ocp(sc, 0xa436, 0x801e);
2057 		rge_write_phy_ocp(sc, 0xa438, mcode_version);
2058 	}
2059 }
2060 
2061 void
2062 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
2063 {
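	/*
	 * The station address registers only accept writes while the
	 * config-write unlock (RGE_EECMD_WRITECFG) is held.
	 */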
2064 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2065 	RGE_WRITE_4(sc, RGE_MAC0,
2066 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2067 	RGE_WRITE_4(sc, RGE_MAC4,
2068 	    addr[5] <<  8 | addr[4]);
2069 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2070 }
2071 
2072 void
2073 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
2074 {
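	/*
	 * addr is written with 32-bit and 16-bit stores, so callers
	 * must pass suitably aligned storage.
	 */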
2075 	*(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
2076 	*(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
2077 }
2078 
2079 void
2080 rge_hw_init(struct rge_softc *sc)
2081 {
2082 	int i;
2083 
2084 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2085 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
2086 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
2087 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2088 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
2089 
2090 	/* Disable UPS. */
2091 	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
2092 
2093 	/* Configure MAC MCU. */
2094 	rge_write_mac_ocp(sc, 0xfc38, 0);
2095 
2096 	for (i = 0xfc28; i < 0xfc38; i += 2)
2097 		rge_write_mac_ocp(sc, i, 0);
2098 
2099 	DELAY(3000);
2100 	rge_write_mac_ocp(sc, 0xfc26, 0);
2101 
2102 	if (sc->rge_type == MAC_CFG3) {
2103 		for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
2104 			rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
2105 			    rtl8125_mac_bps[i].val);
2106 		}
2107 	} else if (sc->rge_type == MAC_CFG5) {
2108 		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
2109 			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
2110 			    rtl8125b_mac_bps[i].val);
2111 		}
2112 	}
2113 
2114 	/* Disable PHY power saving. */
2115 	rge_disable_phy_ocp_pwrsave(sc);
2116 
2117 	/* Likely masks PCIe Unsupported Request errors (AER mask at 0x108). */
2118 	rge_write_csi(sc, 0x108,
2119 	    rge_read_csi(sc, 0x108) | 0x00100000);
2120 }
2121 
2122 void
2123 rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
2124 {
2125 	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
2126 		rge_patch_phy_mcu(sc, 1);
2127 		rge_write_phy_ocp(sc, 0xc416, 0);
2128 		rge_write_phy_ocp(sc, 0xc416, 0x0500);
2129 		rge_patch_phy_mcu(sc, 0);
2130 	}
2131 }
2132 
2133 void
2134 rge_patch_phy_mcu(struct rge_softc *sc, int set)
2135 {
2136 	int i;
2137 
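	/*
	 * Bit 0x0010 of 0xb820 toggles PHY MCU patch mode; the ready
	 * flag (bit 0x0040 of 0xb800) is then polled.  Both registers
	 * are documented only in Realtek's driver sources.
	 */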
2138 	if (set)
2139 		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
2140 	else
2141 		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
2142 
2143 	for (i = 0; i < 1000; i++) {
2144 		if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) == 0x0040)
2145 			break;
2146 		DELAY(100);
2147 	}
2148 	if (i == 1000) {
2149 		DPRINTF(("timeout waiting to patch phy mcu\n"));
2150 		return;
2151 	}
2152 }
2153 
2154 void
2155 rge_add_media_types(struct rge_softc *sc)
2156 {
2157 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
2158 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
2159 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
2160 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
2161 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
2162 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2163 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
2164 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
2165 }
2166 
2167 void
2168 rge_config_imtype(struct rge_softc *sc, int imtype)
2169 {
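	/*
	 * RGE_IMTYPE_NONE acks RX/TX interrupts individually, while
	 * RGE_IMTYPE_SIM moderates them through the chip timer
	 * (RGE_ISR_PCS_TIMEOUT); see rge_setup_sim_im().
	 */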
2170 	switch (imtype) {
2171 	case RGE_IMTYPE_NONE:
2172 		sc->rge_intrs = RGE_INTRS;
2173 		sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
2174 		    RGE_ISR_RX_FIFO_OFLOW;
2175 		sc->rge_tx_ack = RGE_ISR_TX_OK;
2176 		break;
2177 	case RGE_IMTYPE_SIM:
2178 		sc->rge_intrs = RGE_INTRS_TIMER;
2179 		sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
2180 		sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
2181 		break;
2182 	default:
2183 		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
2184 	}
2185 }
2186 
2187 void
2188 rge_disable_hw_im(struct rge_softc *sc)
2189 {
2190 	RGE_WRITE_2(sc, RGE_IM, 0);
2191 }
2192 
2193 void
2194 rge_disable_sim_im(struct rge_softc *sc)
2195 {
2196 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
2197 	sc->rge_timerintr = 0;
2198 }
2199 
2200 void
2201 rge_setup_sim_im(struct rge_softc *sc)
2202 {
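	/*
	 * 0x2600 is the moderation interval inherited from the vendor
	 * driver; writing RGE_TIMERCNT restarts the timer.
	 */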
2203 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
2204 	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
2205 	sc->rge_timerintr = 1;
2206 }
2207 
2208 void
2209 rge_setup_intr(struct rge_softc *sc, int imtype)
2210 {
2211 	rge_config_imtype(sc, imtype);
2212 
2213 	/* Enable interrupts. */
2214 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
2215 
2216 	switch (imtype) {
2217 	case RGE_IMTYPE_NONE:
2218 		rge_disable_sim_im(sc);
2219 		rge_disable_hw_im(sc);
2220 		break;
2221 	case RGE_IMTYPE_SIM:
2222 		rge_disable_hw_im(sc);
2223 		rge_setup_sim_im(sc);
2224 		break;
2225 	default:
2226 		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
2227 	}
2228 }
2229 
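/*
 * Take the chip out of its out-of-band (management firmware) state so
 * that the host driver owns the hardware.
 */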
2230 void
2231 rge_exit_oob(struct rge_softc *sc)
2232 {
2233 	int i;
2234 
2235 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
2236 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
2237 	    RGE_RXCFG_ERRPKT);
2238 
2239 	/* Disable RealWoW. */
2240 	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
2241 
2242 	rge_reset(sc);
2243 
2244 	/* Disable OOB. */
2245 	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
2246 
2247 	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
2248 
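	/*
	 * Bit 0x0200 of RGE_TWICMD appears to signal completion of the
	 * firmware handshake; the wait is bounded and best-effort.
	 */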
2249 	for (i = 0; i < 10; i++) {
2250 		DELAY(100);
2251 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2252 			break;
2253 	}
2254 
2255 	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
2256 	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
2257 	rge_write_mac_ocp(sc, 0xc01e, 0x5555);
2258 
2259 	for (i = 0; i < 10; i++) {
2260 		DELAY(100);
2261 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2262 			break;
2263 	}
2264 
2265 	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
2266 		printf("%s: rge_exit_oob(): rtl8125_is_ups_resume!!\n",
2267 		    device_xname(sc->sc_dev));
2268 		for (i = 0; i < RGE_TIMEOUT; i++) {
2269 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
2270 				break;
2271 			DELAY(1000);
2272 		}
2273 		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
2274 		if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
2275 			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
2276 		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
2277 	}
2278 }
2279 
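/*
 * The CSI window provides indirect access to the chip's PCIe
 * configuration space beyond the standard header.
 */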
2280 void
2281 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
2282 {
2283 	int i;
2284 
2285 	RGE_WRITE_4(sc, RGE_CSIDR, val);
2286 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2287 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
2288 
2289 	for (i = 0; i < 10; i++) {
2290 		DELAY(100);
2291 		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
2292 			break;
2293 	}
2294 
2295 	DELAY(20);
2296 }
2297 
2298 uint32_t
2299 rge_read_csi(struct rge_softc *sc, uint32_t reg)
2300 {
2301 	int i;
2302 
2303 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2304 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
2305 
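	/*
	 * For reads, RGE_CSIAR_BUSY has inverted polarity: the chip
	 * sets it once the data in RGE_CSIDR is valid.
	 */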
2306 	for (i = 0; i < 10; i++) {
2307 		DELAY(100);
2308 		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
2309 			break;
2310 	}
2311 
2312 	DELAY(20);
2313 
2314 	return (RGE_READ_4(sc, RGE_CSIDR));
2315 }
2316 
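/*
 * The MAC OCP window exposes undocumented MAC registers: the word
 * address is packed above RGE_MACOCP_ADDR_SHIFT, the data in the low
 * 16 bits.
 */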
2317 void
2318 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2319 {
2320 	uint32_t tmp;
2321 
2322 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2323 	tmp |= val;
2324 	tmp |= RGE_MACOCP_BUSY;
2325 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
2326 }
2327 
2328 uint16_t
2329 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
2330 {
2331 	uint32_t val;
2332 
2333 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2334 	RGE_WRITE_4(sc, RGE_MACOCP, val);
2335 
2336 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
2337 }
2338 
2339 void
2340 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
2341 {
2342 	uint32_t tmp;
2343 	int i;
2344 
2345 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2346 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
2347 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
2348 
2349 	for (i = 0; i < 10; i++) {
2350 		DELAY(100);
2351 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
2352 			break;
2353 	}
2354 
2355 	DELAY(20);
2356 }
2357 
2358 uint16_t
2359 rge_read_ephy(struct rge_softc *sc, uint16_t reg)
2360 {
2361 	uint32_t val;
2362 	int i;
2363 
2364 	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2365 	RGE_WRITE_4(sc, RGE_EPHYAR, val);
2366 
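	/*
	 * As with CSI, reads invert the flag polarity: BUSY becomes
	 * set once the read data is valid.
	 */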
2367 	for (i = 0; i < 10; i++) {
2368 		DELAY(100);
2369 		val = RGE_READ_4(sc, RGE_EPHYAR);
2370 		if (val & RGE_EPHYAR_BUSY)
2371 			break;
2372 	}
2373 
2374 	DELAY(20);
2375 
2376 	return (val & RGE_EPHYAR_DATA_MASK);
2377 }
2378 
2379 void
2380 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
2381 {
2382 	uint16_t off, phyaddr;
2383 
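	/*
	 * With addr == 0, standard MII registers are mapped into OCP
	 * space in pages of eight 16-bit registers starting at
	 * RGE_PHYBASE; the arithmetic below computes the OCP address.
	 */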
2384 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2385 	phyaddr <<= 4;
2386 
2387 	off = addr ? reg : 0x10 + (reg % 8);
2388 
2389 	phyaddr += (off - 16) << 1;
2390 
2391 	rge_write_phy_ocp(sc, phyaddr, val);
2392 }
2393 
2394 uint16_t
2395 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
2396 {
2397 	uint16_t off, phyaddr;
2398 
2399 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2400 	phyaddr <<= 4;
2401 
2402 	off = addr ? reg : 0x10 + (reg % 8);
2403 
2404 	phyaddr += (off - 16) << 1;
2405 
2406 	return (rge_read_phy_ocp(sc, phyaddr));
2407 }
2408 
2409 void
2410 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2411 {
2412 	uint32_t tmp;
2413 	int i;
2414 
2415 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2416 	tmp |= RGE_PHYOCP_BUSY | val;
2417 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
2418 
2419 	for (i = 0; i < RGE_TIMEOUT; i++) {
2420 		DELAY(1);
2421 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
2422 			break;
2423 	}
2424 }
2425 
2426 uint16_t
2427 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
2428 {
2429 	uint32_t val;
2430 	int i;
2431 
2432 	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2433 	RGE_WRITE_4(sc, RGE_PHYOCP, val);
2434 
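	/*
	 * The flag is set by hardware once the read has completed and
	 * RGE_PHYOCP holds valid data, hence the inverted test.
	 */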
2435 	for (i = 0; i < RGE_TIMEOUT; i++) {
2436 		DELAY(1);
2437 		val = RGE_READ_4(sc, RGE_PHYOCP);
2438 		if (val & RGE_PHYOCP_BUSY)
2439 			break;
2440 	}
2441 
2442 	return (val & RGE_PHYOCP_DATA_MASK);
2443 }
2444 
2445 int
2446 rge_get_link_status(struct rge_softc *sc)
2447 {
2448 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
2449 }
2450 
2451 void
2452 rge_txstart(struct work *wk, void *arg)
2453 {
2454 	struct rge_softc *sc = arg;
2455 
2456 	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
2457 }
2458 
2459 void
2460 rge_tick(void *arg)
2461 {
2462 	struct rge_softc *sc = arg;
2463 	int s;
2464 
2465 	s = splnet();
2466 	rge_link_state(sc);
2467 	splx(s);
2468 
2469 	callout_schedule(&sc->sc_timeout, hz);
2470 }
2471 
2472 void
2473 rge_link_state(struct rge_softc *sc)
2474 {
2475 	struct ifnet *ifp = &sc->sc_ec.ec_if;
2476 	int link = LINK_STATE_DOWN;
2477 
2478 	if (rge_get_link_status(sc))
2479 		link = LINK_STATE_UP;
2480 
2481 	if (ifp->if_link_state != link) { /* XXX not safe to access */
2482 		if_link_state_change(ifp, link);
2483 	}
2484 }
2485