/*	$NetBSD: if_rge.c,v 1.24 2022/09/24 18:12:42 thorpej Exp $	*/
/*	$OpenBSD: if_rge.c,v 1.9 2020/12/12 11:48:53 jan Exp $	*/

/*
 * Copyright (c) 2019, 2020 Kevin Lo <kevlo@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.24 2022/09/24 18:12:42 thorpej Exp $");

#include <sys/types.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/callout.h>
#include <sys/workqueue.h>

#include <net/if.h>

#include <net/if_dl.h>
#include <net/if_ether.h>

#include <net/if_media.h>

#include <netinet/in.h>

#include <net/bpf.h>

#include <sys/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_rgereg.h>

#ifdef __NetBSD__
#define letoh32		htole32
#define nitems(x)	__arraycount(x)

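/*
 * Compatibility shims for code shared with OpenBSD: letoh32 is
 * OpenBSD's name for 32-bit little-endian-to-host conversion (the
 * same operation as htole32) and nitems() counts array elements.
 * MCLGETL() below approximates OpenBSD's MCLGETL(), returning an
 * mbuf header backed by external storage of the requested size.
 */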
static struct mbuf *
MCLGETL(struct rge_softc *sc __unused, int how,
    u_int size)
{
	struct mbuf *m;

	MGETHDR(m, how, MT_DATA);
	if (m == NULL)
		return NULL;

	MEXTMALLOC(m, size, how);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return NULL;
	}
	return m;
}

#ifdef NET_MPSAFE
#define	RGE_MPSAFE	1
#define	CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define	CALLOUT_FLAGS	0
#endif
#endif

#ifdef RGE_DEBUG
#define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
int rge_debug = 0;
#else
#define DPRINTF(x)
#endif

static int		rge_match(device_t, cfdata_t, void *);
static void		rge_attach(device_t, device_t, void *);
int		rge_intr(void *);
int		rge_encap(struct rge_softc *, struct mbuf *, int);
int		rge_ioctl(struct ifnet *, u_long, void *);
void		rge_start(struct ifnet *);
void		rge_watchdog(struct ifnet *);
int		rge_init(struct ifnet *);
void		rge_stop(struct ifnet *, int);
int		rge_ifmedia_upd(struct ifnet *);
void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int		rge_allocmem(struct rge_softc *);
int		rge_newbuf(struct rge_softc *, int);
void		rge_discard_rxbuf(struct rge_softc *, int);
int		rge_rx_list_init(struct rge_softc *);
void		rge_tx_list_init(struct rge_softc *);
int		rge_rxeof(struct rge_softc *);
int		rge_txeof(struct rge_softc *);
void		rge_reset(struct rge_softc *);
void		rge_iff(struct rge_softc *);
void		rge_set_phy_power(struct rge_softc *, int);
void		rge_phy_config(struct rge_softc *);
void		rge_phy_config_mac_cfg2(struct rge_softc *);
void		rge_phy_config_mac_cfg3(struct rge_softc *);
void		rge_phy_config_mac_cfg4(struct rge_softc *);
void		rge_phy_config_mac_cfg5(struct rge_softc *);
void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
void		rge_get_macaddr(struct rge_softc *, uint8_t *);
void		rge_hw_init(struct rge_softc *);
void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
void		rge_patch_phy_mcu(struct rge_softc *, int);
void		rge_add_media_types(struct rge_softc *);
void		rge_config_imtype(struct rge_softc *, int);
void		rge_disable_hw_im(struct rge_softc *);
void		rge_disable_sim_im(struct rge_softc *);
void		rge_setup_sim_im(struct rge_softc *);
void		rge_setup_intr(struct rge_softc *, int);
void		rge_exit_oob(struct rge_softc *);
void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
int		rge_get_link_status(struct rge_softc *);
void		rge_txstart(struct work *, void *);
void		rge_tick(void *);
void		rge_link_state(struct rge_softc *);

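/*
 * Tables of { register, value } pairs, expanded from macros in
 * if_rgereg.h and presumably consumed by rge_phy_config_mcu() to load
 * Realtek's PHY microcode patch for the matching MAC revision.
 */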
static const struct {
	uint16_t reg;
	uint16_t val;
} rtl8125_mac_cfg2_mcu[] = {
	RTL8125_MAC_CFG2_MCU
}, rtl8125_mac_cfg3_mcu[] = {
	RTL8125_MAC_CFG3_MCU
}, rtl8125_mac_cfg4_mcu[] = {
	RTL8125_MAC_CFG4_MCU
}, rtl8125_mac_cfg5_mcu[] = {
	RTL8125_MAC_CFG5_MCU
};

CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
		NULL, NULL); /* Sevan - detach function? */

static const struct device_compatible_entry compat_data[] = {
	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000) },
	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125) },

	PCI_COMPAT_EOL
};

static int
rge_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	return pci_compatible_match(pa, compat_data);
}

static void
rge_attach(device_t parent, device_t self, void *aux)
{
	struct rge_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t *ihp;
	char intrbuf[PCI_INTRSTR_LEN];
	const char *intrstr = NULL;
	struct ifnet *ifp;
	pcireg_t reg;
	uint32_t hwrev;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int offset;
	pcireg_t command;

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	sc->sc_dev = self;

	pci_aprint_devinfo(pa, "Ethernet controller");

	/*
	 * Map control/status registers.
	 */
	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
	    NULL, &sc->rge_bsize)) {
		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
		    &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
			    &sc->rge_bsize)) {
				aprint_error(": can't map mem or i/o space\n");
				return;
			}
		}
	}

	int counts[PCI_INTR_TYPE_SIZE] = {
		[PCI_INTR_TYPE_INTX] = 1,
		[PCI_INTR_TYPE_MSI] = 1,
		[PCI_INTR_TYPE_MSIX] = 1,
	};
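	/*
	 * pci_intr_alloc() tries interrupt types in descending order of
	 * preference up to max_type, so this requests one MSI-X vector
	 * and falls back to MSI and then to a legacy INTx line.
	 */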
	int max_type = PCI_INTR_TYPE_MSIX;
	/*
	 * Allocate interrupt.
	 */
	if (pci_intr_alloc(pa, &ihp, counts, max_type) != 0) {
		aprint_error(": couldn't map interrupt\n");
		return;
	}
	switch (pci_intr_type(pc, ihp[0])) {
	case PCI_INTR_TYPE_MSIX:
	case PCI_INTR_TYPE_MSI:
		sc->rge_flags |= RGE_FLAG_MSI;
		break;
	default:
		break;
	}
	intrstr = pci_intr_string(pc, ihp[0], intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(pc, ihp[0], IPL_NET, rge_intr,
	    sc, device_xname(sc->sc_dev));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	/* Determine hardware revision */
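	/*
	 * By Realtek's numbering these appear to map to the RTL8125
	 * (MAC_CFG2/MAC_CFG3) and RTL8125B (MAC_CFG4/MAC_CFG5); the
	 * driver only keys register quirks off the symbolic type.
	 */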
	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
	switch (hwrev) {
	case 0x60800000:
		sc->rge_type = MAC_CFG2;
		break;
	case 0x60900000:
		sc->rge_type = MAC_CFG3;
		break;
	case 0x64000000:
		sc->rge_type = MAC_CFG4;
		break;
	case 0x64100000:
		sc->rge_type = MAC_CFG5;
		break;
	default:
		aprint_error(": unknown version 0x%08x\n", hwrev);
		return;
	}

	rge_config_imtype(sc, RGE_IMTYPE_SIM);

	/*
	 * PCI Express check.
	 */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		/* Disable PCIe ASPM and ECPM. */
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    offset + PCIE_LCSR);
		reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1 |
		    PCIE_LCSR_ENCLKPM);
		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
		    reg);
	}

	rge_exit_oob(sc);
	rge_hw_init(sc);

	rge_get_macaddr(sc, eaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(eaddr));

	memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);

	rge_set_phy_power(sc, 1);
	rge_phy_config(sc);

	if (rge_allocmem(sc))
		return;

	ifp = &sc->sc_ec.ec_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef RGE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = rge_ioctl;
	ifp->if_stop = rge_stop;
	ifp->if_start = rge_start;
	ifp->if_init = rge_init;
	ifp->if_watchdog = rge_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT - 1);

#if notyet
	ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;
#endif

	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;

	callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
	callout_setfunc(&sc->sc_timeout, rge_tick, sc);

	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);

	/* Initialize ifmedia structures. */
	sc->sc_ec.ec_ifmedia = &sc->sc_media;
	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
	    rge_ifmedia_sts);
	rge_add_media_types(sc);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;

	if_attach(ifp);
	ether_ifattach(ifp, eaddr);

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");
}

int
rge_intr(void *arg)
{
	struct rge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t status;
	int claimed = 0, rx, tx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
			return (0);
	}

	status = RGE_READ_4(sc, RGE_ISR);
	if (status)
		RGE_WRITE_4(sc, RGE_ISR, status);

	if (status & RGE_ISR_PCS_TIMEOUT)
		claimed = 1;

	rx = tx = 0;
	if (status & sc->rge_intrs) {
		if (status &
		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
			rx |= rge_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
			tx |= rge_txeof(sc);
			claimed = 1;
		}

		if (status & RGE_ISR_SYSTEM_ERR) {
			KERNEL_LOCK(1, NULL);
			rge_init(ifp);
			KERNEL_UNLOCK_ONE(NULL);
			claimed = 1;
		}
	}

	if (sc->rge_timerintr) {
		if ((tx | rx) == 0) {
			/*
			 * Nothing needs to be processed; fall back
			 * to using TX/RX interrupts.
			 */
			rge_setup_intr(sc, RGE_IMTYPE_NONE);

			/*
			 * Recollect, mainly to avoid the possible
			 * race introduced by changing interrupt
			 * masks.
			 */
			rge_rxeof(sc);
			rge_txeof(sc);
		} else
			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	} else if (tx | rx) {
		/*
		 * Assume that using simulated interrupt moderation
		 * (hardware timer based) could reduce the interrupt
		 * rate.
		 */
		rge_setup_intr(sc, RGE_IMTYPE_SIM);
	}

	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	return (claimed);
}

int
rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
{
	struct rge_tx_desc *d = NULL;
	struct rge_txq *txq;
	bus_dmamap_t txmap;
	uint32_t cmdsts, cflags = 0;
	int cur, error, i, last, nsegs;

#if notyet
	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
	 * take effect.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
	}
#endif

	txq = &sc->rge_ldata.rge_txq[idx];
	txmap = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = txmap->dm_nsegs;

	/* Set up hardware VLAN tagging. */
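	/*
	 * vlan_get_tag() returns the tag in host byte order; it is
	 * byte-swapped here because the chip apparently expects the
	 * 802.1Q TCI in network byte order within the otherwise
	 * little-endian extsts word.
	 */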
	if (vlan_has_tag(m))
		cflags |= bswap16(vlan_get_tag(m)) | RGE_TDEXTSTS_VTAG;

	last = cur = idx;
	cmdsts = RGE_TDCMDSTS_SOF;

	for (i = 0; i < txmap->dm_nsegs; i++) {
		d = &sc->rge_ldata.rge_tx_list[cur];

		d->rge_extsts = htole32(cflags);
		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));

		cmdsts |= txmap->dm_segs[i].ds_len;

		if (cur == RGE_TX_LIST_CNT - 1)
			cmdsts |= RGE_TDCMDSTS_EOR;

		d->rge_cmdsts = htole32(cmdsts);

		last = cur;
		cmdsts = RGE_TDCMDSTS_OWN;
		cur = RGE_NEXT_TX_DESC(cur);
	}

	/* Set EOF on the last descriptor. */
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);

	/* Transfer ownership of packet to the chip. */
	d = &sc->rge_ldata.rge_tx_list[idx];

	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update info of TX queue and descriptors. */
	txq->txq_mbuf = m;
	txq->txq_descidx = last;

	return (nsegs);
}

int
rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct rge_softc *sc = ifp->if_softc;
	//struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/* XXX set an ifflags callback and let ether_ioctl
		 * handle all of this.
		 */
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				rge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				rge_stop(ifp, 1);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			rge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
rge_start(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int free, idx, used;
	int queued = 0;

#define LINK_STATE_IS_UP(_s)    \
	((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)

	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		IFQ_PURGE(&ifp->if_snd);
		return;
	}

	/* Calculate free space. */
	idx = sc->rge_ldata.rge_txq_prodidx;
	free = sc->rge_ldata.rge_txq_considx;
	if (free <= idx)
		free += RGE_TX_LIST_CNT;
	free -= idx;
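	/*
	 * i.e. free = (considx - prodidx) mod RGE_TX_LIST_CNT: the
	 * number of descriptors still available between the producer
	 * and the consumer.
	 */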

	for (;;) {
		if (RGE_TX_NSEGS >= free + 2) {
			SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		used = rge_encap(sc, m, idx);
		if (used == 0) {
			m_freem(m);
			continue;
		}

		KASSERT(used <= free);
		free -= used;

		bpf_mtap(ifp, m, BPF_D_OUT);

		idx += used;
		if (idx >= RGE_TX_LIST_CNT)
			idx -= RGE_TX_LIST_CNT;

		queued++;
	}

	if (queued == 0)
		return;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	sc->rge_ldata.rge_txq_prodidx = idx;
#if 0
	ifq_serialize(ifq, &sc->sc_task);
#else
	rge_txstart(&sc->sc_task, sc);
#endif
}

void
rge_watchdog(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;

	device_printf(sc->sc_dev, "watchdog timeout\n");
	if_statinc(ifp, if_oerrors);

	rge_init(ifp);
}

int
rge_init(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	uint32_t val;
	int i;

	rge_stop(ifp, 0);

	/* Set MAC address. */
	rge_set_macaddr(sc, CLLADDR(ifp->if_sadl));

	/* Set maximum frame size. */
	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);

	/* Initialize the RX descriptor list. */
	if (rge_rx_list_init(sc) == ENOBUFS) {
		device_printf(sc->sc_dev,
		    "init failed: no memory for RX buffers\n");
		rge_stop(ifp, 1);
		return (ENOBUFS);
	}

	/* Initialize TX descriptors. */
	rge_tx_list_init(sc);

	/* Load the addresses of the RX and TX lists into the chip. */
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	RGE_CLRBIT_1(sc, 0xf1, 0x80);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);

	/* Clear interrupt moderation timer. */
	for (i = 0; i < 64; i++)
		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);

	/* Set the initial RX and TX configurations. */
	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);

	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
	rge_write_csi(sc, 0x70c, val | 0x27000000);

	/* Enable hardware optimization function. */
	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);

	RGE_WRITE_2(sc, 0x0382, 0x221b);
	RGE_WRITE_1(sc, 0x4500, 0);
	RGE_WRITE_2(sc, 0x4800, 0);
	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);

	rge_write_mac_ocp(sc, 0xc140, 0xffff);
	rge_write_mac_ocp(sc, 0xc142, 0xffff);

	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);

	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);

	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
	else
		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);

	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
		rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
	} else
		RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);

	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);

	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);

	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);

	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);

	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);

	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);

	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);

	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);

	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);

	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);

	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);

	val = rge_read_mac_ocp(sc, 0xe84c) & ~0x0040;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe84c, 0x00c0);
	else
		rge_write_mac_ocp(sc, 0xe84c, 0x0080);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);

	/* Disable EEE plus. */
	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);

	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);

	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
	DELAY(1);
	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);

	RGE_CLRBIT_4(sc, 0x1880, 0x0030);

	rge_write_mac_ocp(sc, 0xe098, 0xc302);

	if ((sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0)
		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
	else
		RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);

	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);

	for (i = 0; i < 10; i++) {
		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
			break;
		DELAY(1000);
	}

	/* Disable RXDV gate. */
	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	rge_ifmedia_upd(ifp);

	/* Enable transmit and receive. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);

	/* Program promiscuous mode and multicast filters. */
	rge_iff(sc);

	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);

	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	/* Enable interrupts. */
	rge_setup_intr(sc, RGE_IMTYPE_SIM);

	ifp->if_flags |= IFF_RUNNING;
	CLR(ifp->if_flags, IFF_OACTIVE);

	callout_schedule(&sc->sc_timeout, 1);

	return (0);
}

/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */
void
rge_stop(struct ifnet *ifp, int disable)
{
	struct rge_softc *sc = ifp->if_softc;
	int i;

	if (disable) {
		callout_halt(&sc->sc_timeout, NULL);
	} else
		callout_stop(&sc->sc_timeout);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	sc->rge_timerintr = 0;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	RGE_WRITE_4(sc, RGE_IMR, 0);

	/* Clear timer interrupts. */
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);

	rge_reset(sc);

//	intr_barrier(sc->sc_ih);
//	ifq_barrier(&ifp->if_snd);
/*	ifq_clr_oactive(&ifp->if_snd); Sevan - OpenBSD queue API */

	if (sc->rge_head != NULL) {
		m_freem(sc->rge_head);
		sc->rge_head = sc->rge_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_txq[i].txq_dmamap);
			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
		}
	}
}

/*
 * Set media options.
 */
int
rge_ifmedia_upd(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	int anar, gig, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Disable Gigabit Lite. */
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);

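	/*
	 * 2.5G advertisement is not part of the standard MII registers
	 * on this PHY; it is controlled through vendor OCP register
	 * 0xa5d4 and re-enabled below according to the selected media.
	 */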
	val = rge_read_phy_ocp(sc, 0xa5d4);
	val &= ~RGE_ADV_2500TFDX;

	anar = gig = 0;
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		break;
	case IFM_2500_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		ifp->if_baudrate = IF_Mbps(2500);
		break;
	case IFM_1000_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		ifp->if_baudrate = IF_Gbps(1);
		break;
	case IFM_100_TX:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
		    ANAR_TX | ANAR_10_FD | ANAR_10;
		ifp->if_baudrate = IF_Mbps(100);
		break;
	case IFM_10_T:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_10_FD | ANAR_10 : ANAR_10;
		ifp->if_baudrate = IF_Mbps(10);
		break;
	default:
		device_printf(sc->sc_dev,
		    "unsupported media type\n");
		return (EINVAL);
	}

	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
	rge_write_phy(sc, 0, MII_100T2CR, gig);
	rge_write_phy_ocp(sc, 0xa5d4, val);
	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
	    BMCR_STARTNEG);

	return (0);
}

/*
 * Report current media status.
 */
void
rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rge_softc *sc = ifp->if_softc;
	uint16_t status = 0;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (rge_get_link_status(sc)) {
		ifmr->ifm_status |= IFM_ACTIVE;

		status = RGE_READ_2(sc, RGE_PHYSTAT);
		if ((status & RGE_PHYSTAT_FDX) ||
		    (status & RGE_PHYSTAT_2500MBPS))
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		if (status & RGE_PHYSTAT_10MBPS)
			ifmr->ifm_active |= IFM_10_T;
		else if (status & RGE_PHYSTAT_100MBPS)
			ifmr->ifm_active |= IFM_100_TX;
		else if (status & RGE_PHYSTAT_1000MBPS)
			ifmr->ifm_active |= IFM_1000_T;
		else if (status & RGE_PHYSTAT_2500MBPS)
			ifmr->ifm_active |= IFM_2500_T;
	}
}

/*
 * Allocate memory for RX/TX rings.
 */
int
rge_allocmem(struct rge_softc *sc)
{
	int error, i;

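	/*
	 * Each ring follows the usual four-step bus_dma(9) sequence:
	 * bus_dmamap_create() for the map, bus_dmamem_alloc() for the
	 * backing segment, bus_dmamem_map() for a kernel VA, and
	 * bus_dmamap_load() to bind the ring to the map; per-packet
	 * maps for the TX/RX buffers are created afterwards.
	 */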
	/* Allocate DMA'able memory for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create TX list map\n");
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
	    BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't alloc TX list\n");
		return (error);
	}

	/* Load the map for the TX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
	    (void **) &sc->rge_ldata.rge_tx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't map TX dma buffers\n");
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}
	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't load TX dma map\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for TX\n");
			return (error);
		}
	}

	/* Allocate DMA'able memory for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create RX list map\n");
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
	    BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't alloc RX list\n");
		return (error);
	}

	/* Load the map for the RX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
	    (void **) &sc->rge_ldata.rge_rx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't map RX dma buffers\n");
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}
	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't load RX dma map\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}

	/* Create DMA maps for RX buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
		    RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for RX\n");
			return (error);
		}
	}

	return (error);
}

/*
 * Initialize the RX descriptor and attach an mbuf cluster.
 */
int
rge_newbuf(struct rge_softc *sc, int idx)
{
	struct mbuf *m;
	struct rge_rx_desc *r;
	struct rge_rxq *rxq;
	bus_dmamap_t rxmap;

	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;

	rxq = &sc->rge_ldata.rge_rxq[idx];
	rxmap = rxq->rxq_dmamap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT))
		goto out;

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Map the segments into RX descriptors. */
	r = &sc->rge_ldata.rge_rx_list[idx];

	if (RGE_OWN(r)) {
		device_printf(sc->sc_dev, "tried to map busy RX descriptor\n");
		goto out;
	}

	rxq->rxq_mbuf = m;

	r->rge_extsts = 0;
	r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
	r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));

	r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
	if (idx == RGE_RX_LIST_CNT - 1)
		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);

	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
out:
	if (m != NULL)
		m_freem(m);
	return (ENOMEM);
}

void
rge_discard_rxbuf(struct rge_softc *sc, int idx)
{
	struct rge_rx_desc *r;

	r = &sc->rge_ldata.rge_rx_list[idx];

	r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
	r->rge_extsts = 0;
	if (idx == RGE_RX_LIST_CNT - 1)
		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

int
rge_rx_list_init(struct rge_softc *sc)
{
	int i;

	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);

	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
		if (rge_newbuf(sc, i) == ENOBUFS)
			return (ENOBUFS);
	}

	sc->rge_ldata.rge_rxq_prodidx = sc->rge_ldata.rge_rxq_considx = 0;
	sc->rge_head = sc->rge_tail = NULL;

	return (0);
}

void
rge_tx_list_init(struct rge_softc *sc)
{
	int i;

	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);

	for (i = 0; i < RGE_TX_LIST_CNT; i++)
		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
}

int
rge_rxeof(struct rge_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rge_rx_desc *cur_rx;
	struct rge_rxq *rxq;
	uint32_t rxstat, extsts;
	int i, total_len, rx = 0;

	for (i = sc->rge_ldata.rge_rxq_considx; ; i = RGE_NEXT_RX_DESC(i)) {
		/* Invalidate the descriptor memory. */
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->rge_ldata.rge_rx_list[i];

		if (RGE_OWN(cur_rx))
			break;

		rxstat = letoh32(cur_rx->rge_cmdsts);
		extsts = letoh32(cur_rx->rge_extsts);

		total_len = RGE_RXBYTES(cur_rx);
		rxq = &sc->rge_ldata.rge_rxq[i];
		m = rxq->rxq_mbuf;
		rxq->rxq_mbuf = NULL;
		rx = 1;

		/* Invalidate the RX mbuf and unload its map. */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);

		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
			rge_discard_rxbuf(sc, i);
			continue;
		}

		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
			if_statinc(ifp, if_ierrors);
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_discard_rxbuf(sc, i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (rge_newbuf(sc, i) == ENOBUFS) {
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_discard_rxbuf(sc, i);
			continue;
		}

		m_set_rcvif(m, ifp);
		if (sc->rge_head != NULL) {
			m->m_len = total_len;
			/*
			 * Special case: if there are 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rge_tail->m_next = m;
			}
			m = sc->rge_head;
			sc->rge_head = sc->rge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
	#if 0
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);
	#else
		{
			m->m_pkthdr.len = m->m_len = total_len;
			m->m_flags |= M_HASFCS;
		}
	#endif

#if notyet
		/* Check IP header checksum. */
		if (!(rxstat & RGE_RDCMDSTS_IPCSUMERR) &&
		    (extsts & RGE_RDEXTSTS_IPV4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum. */
		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
		    (((rxstat & RGE_RDCMDSTS_TCPPKT) &&
		    !(rxstat & RGE_RDCMDSTS_TCPCSUMERR)) ||
		    ((rxstat & RGE_RDCMDSTS_UDPPKT) &&
		    !(rxstat & RGE_RDCMDSTS_UDPCSUMERR))))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;
#endif

		if (extsts & RGE_RDEXTSTS_VTAG) {
			vlan_set_tag(m,
			    bswap16(extsts & RGE_RDEXTSTS_VLAN_MASK));
		}

		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	sc->rge_ldata.rge_rxq_considx = i;

	return (rx);
}

int
rge_txeof(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rge_txq *txq;
	uint32_t txstat;
	int cons, idx, prod;
	int free = 0;

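	/*
	 * free records what the walk below observed: 0 = nothing
	 * reclaimed, 1 = at least one descriptor completed, 2 = the
	 * walk stopped early on a descriptor the chip still owns.
	 */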
	prod = sc->rge_ldata.rge_txq_prodidx;
	cons = sc->rge_ldata.rge_txq_considx;

	while (prod != cons) {
		txq = &sc->rge_ldata.rge_txq[cons];
		idx = txq->txq_descidx;

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);

		if (txstat & RGE_TDCMDSTS_OWN) {
			free = 2;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
			if_statinc(ifp, if_collisions);
		if (txstat & RGE_TDCMDSTS_TXERR)
			if_statinc(ifp, if_oerrors);

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		cons = RGE_NEXT_TX_DESC(idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	sc->rge_ldata.rge_txq_considx = cons;

#if 0
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
	else if (free == 2)
		ifq_serialize(&ifp->if_snd, &sc->sc_task);
	else
		ifp->if_timer = 0;
#else
#if 0
	if (!IF_IS_EMPTY(&ifp->if_snd))
		rge_start(ifp);
	else
	if (free == 2)
		if (0) { rge_txstart(&sc->sc_task, sc); }
	else
#endif
		ifp->if_timer = 0;
#endif

	return (1);
}

void
rge_reset(struct rge_softc *sc)
{
	int i;

	/* Enable RXDV gate. */
	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	for (i = 0; i < 3000; i++) {
		DELAY(50);
		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY))
			break;
	}
	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
		for (i = 0; i < 3000; i++) {
			DELAY(50);
			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
				break;
		}
	}

	DELAY(2000);

	/* Soft reset. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(100);
		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
			break;
	}
	if (i == RGE_TIMEOUT)
		device_printf(sc->sc_dev, "reset never completed!\n");
}

void
rge_iff(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2];
	uint32_t rxfilt;
	int h = 0;

	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;

	if (ifp->if_flags & IFF_PROMISC) {
 allmulti:
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RGE_RXCFG_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RGE_RXCFG_ALLPHYS;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= RGE_RXCFG_MULTI;
		/* Program new filter. */
		memset(hashes, 0, sizeof(hashes));

		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0) {
				ETHER_UNLOCK(ec);
				goto allmulti;
			}
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1U << h);
			else
				hashes[1] |= (1U << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}

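	/*
	 * The upper 6 bits of the big-endian CRC select one of 64
	 * hash-table bits.  The two 32-bit halves are written swapped
	 * (RGE_MAR0 takes hashes[1], byte-reversed), which seems to be
	 * how the 8125 lays out its multicast filter registers.
	 */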
	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
	RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
	RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
}

void
rge_set_phy_power(struct rge_softc *sc, int on)
{
	int i;

	if (on) {
		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);

		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);

		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
				break;
			DELAY(1000);
		}
	} else {
		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
	}
}

void
rge_phy_config(struct rge_softc *sc)
{
	/* Read microcode version. */
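	/*
	 * The 0xa436/0xa438 pair below acts as an indirect address/data
	 * window into the PHY: writing 0x801e to 0xa436 selects the word
	 * holding the microcode patch version, which is then read back
	 * through 0xa438.
	 */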
	rge_write_phy_ocp(sc, 0xa436, 0x801e);
	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);

	switch (sc->rge_type) {
	case MAC_CFG2:
		rge_phy_config_mac_cfg2(sc);
		break;
	case MAC_CFG3:
		rge_phy_config_mac_cfg3(sc);
		break;
	case MAC_CFG4:
		rge_phy_config_mac_cfg4(sc);
		break;
	case MAC_CFG5:
		rge_phy_config_mac_cfg5(sc);
		break;
	default:
		break;	/* Can't happen. */
	}

	rge_write_phy(sc, 0x0a5b, 0x12,
	    rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);

	/* Disable EEE. */
	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
	}
	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);

	rge_patch_phy_mcu(sc, 1);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
	rge_patch_phy_mcu(sc, 0);
}

void
rge_phy_config_mac_cfg2(struct rge_softc *sc)
{
	uint16_t val;
	int i;

	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
		    rtl8125_mac_cfg2_ephy[i].val);

	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);

	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);

	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
	rge_write_phy_ocp(sc, 0xa436, 0x8102);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x8105);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
	rge_write_phy_ocp(sc, 0xa436, 0x8100);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
	rge_write_phy_ocp(sc, 0xa436, 0x8104);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
	rge_write_phy_ocp(sc, 0xa436, 0x8106);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
	rge_write_phy_ocp(sc, 0xa436, 0x819f);
	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}

void
rge_phy_config_mac_cfg3(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;
	static const uint16_t mac_cfg3_a438_value[] =
	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };

	static const uint16_t mac_cfg3_b88e_value[] =
	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };

	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
		    rtl8125_mac_cfg3_ephy[i].val);

	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
	rge_write_ephy(sc, 0x002a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
	rge_write_ephy(sc, 0x0002, 0x6042);
	rge_write_ephy(sc, 0x0006, 0x0014);
	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
	rge_write_ephy(sc, 0x006a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
	rge_write_ephy(sc, 0x0042, 0x6042);
	rge_write_ephy(sc, 0x0046, 0x0014);

	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);

	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
	for (i = 0; i < 26; i++)
		rge_write_phy_ocp(sc, 0xa438, 0);
	rge_write_phy_ocp(sc, 0xa436, 0x8257);
	rge_write_phy_ocp(sc, 0xa438, 0x020f);
	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	rge_write_phy_ocp(sc, 0xa438, 0x7843);

	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);

	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}
1765 
1766 void
1767 rge_phy_config_mac_cfg4(struct rge_softc *sc)
1768 {
1769 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1770 	uint16_t val;
1771 	int i;
1772 	static const uint16_t mac_cfg4_b87c_value[] =
1773 	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
1774 	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
1775 	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
1776 	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
1777 	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
1778 	      0x80b0, 0x0f31 };
1779 
1780 	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
1781 		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
1782 		    rtl8125_mac_cfg4_ephy[i].val);
1783 
1784 	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
1785 	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
1786 	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
1787 	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
1788 	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
1789 	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
1790 	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
1791 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1792 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);
1793 
1794 	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);
1795 
1796 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1797 	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
1798 	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
1799 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1800 	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
1801 	for (i = 0; i < 6; i++) {
1802 		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
1803 		if (i < 3)
1804 			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
1805 		else
1806 			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
1807 	}
1808 	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
1809 	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
1810 	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
1811 	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
1812 	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
1813 	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
1814 	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
1815 	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
1816 	rge_write_phy_ocp(sc, 0xb87c, 0x80f2);
1817 	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
1818 	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
1819 	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
1820 	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
1821 	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
1822 	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
1823 	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
1824 	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
1825 	rge_write_phy_ocp(sc, 0xac4e, 0x0db4);
1826 	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
1827 	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
1828 	rge_write_phy_ocp(sc, 0xad08, 0x0007);
1829 	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
1830 		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
1831 		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
1832 	}
1833 	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
1834 	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
1835 	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
1836 	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
1837 	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
1838 	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
1839 	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
1840 	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
1841 	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
1842 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
1843 	    32);
1844 	rge_write_phy_ocp(sc, 0xa436, 0x816c);
1845 	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
1846 	rge_write_phy_ocp(sc, 0xa436, 0x8170);
1847 	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
1848 	rge_write_phy_ocp(sc, 0xa436, 0x8174);
1849 	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
1850 	rge_write_phy_ocp(sc, 0xa436, 0x8178);
1851 	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
1852 	rge_write_phy_ocp(sc, 0xa436, 0x817c);
1853 	rge_write_phy_ocp(sc, 0xa438, 0x0719);
1854 	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
1855 	rge_write_phy_ocp(sc, 0xa438, 0x0400);
1856 	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
1857 	rge_write_phy_ocp(sc, 0xa438, 0x0404);
1858 	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
1859 	for (i = 0; i < 6; i++) {
1860 		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
1861 		if (i == 2)
1862 			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
1863 		else
1864 			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
1865 	}
1866 	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
1867 	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
1868 	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
1869 	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
1870 	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
1871 	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
1872 	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
1873 	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
1874 	rge_write_phy_ocp(sc, 0xa436, 0x8217);
1875 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1876 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1877 	rge_write_phy_ocp(sc, 0xa436, 0x821a);
1878 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1879 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1880 	rge_write_phy_ocp(sc, 0xa436, 0x80da);
1881 	rge_write_phy_ocp(sc, 0xa438, 0x0403);
1882 	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
1883 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1884 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
1885 	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
1886 	rge_write_phy_ocp(sc, 0xa438, 0x0384);
1887 	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
1888 	rge_write_phy_ocp(sc, 0xa438, 0x2007);
1889 	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
1890 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1891 	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
1892 	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
1893 	rge_write_phy_ocp(sc, 0xa438, 0xf009);
1894 	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
1895 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1896 	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
1897 	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
1898 	rge_write_phy_ocp(sc, 0xa438, 0xf083);
1899 	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
1900 	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
1901 	rge_write_phy_ocp(sc, 0xa436, 0x80df);
1902 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1903 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
1904 	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
1905 	rge_write_phy_ocp(sc, 0xa438, 0x2007);
1906 	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
1907 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1908 	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
1909 	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
1910 	rge_write_phy_ocp(sc, 0xa438, 0x8009);
1911 	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
1912 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1913 	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
1914 	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
1915 	rge_write_phy_ocp(sc, 0xa438, 0x200a);
1916 	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
1917 	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
1918 	rge_write_phy_ocp(sc, 0xa436, 0x809f);
1919 	rge_write_phy_ocp(sc, 0xa438, 0x6073);
1920 	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
1921 	rge_write_phy_ocp(sc, 0xa438, 0x000b);
1922 	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
1923 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1924 	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
1925 	rge_patch_phy_mcu(sc, 1);
1926 	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1927 	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1928 	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
1929 	rge_write_phy_ocp(sc, 0xb890, 0x0000);
1930 	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
1931 	rge_write_phy_ocp(sc, 0xb890, 0x0103);
1932 	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
1933 	rge_write_phy_ocp(sc, 0xb890, 0x0507);
1934 	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
1935 	rge_write_phy_ocp(sc, 0xb890, 0x090b);
1936 	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
1937 	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
1938 	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
1939 	rge_write_phy_ocp(sc, 0xb890, 0x1012);
1940 	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
1941 	rge_write_phy_ocp(sc, 0xb890, 0x1416);
1942 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1943 	rge_patch_phy_mcu(sc, 0);
1944 	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
1945 	RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
1946 	rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
1947 	rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
1948 	rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
1949 	rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
1950 	rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
1951 	RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
1952 	rge_write_phy_ocp(sc, 0xa436, 0x817d);
1953 	RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
1954 }
1955 
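/*
 * PHY configuration for MAC_CFG5 parts, same approach as above: EPHY
 * init table, microcode load, then undocumented OCP register tweaks
 * inherited from the vendor driver.
 */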
1956 void
1957 rge_phy_config_mac_cfg5(struct rge_softc *sc)
1958 {
1959 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1960 	uint16_t val;
1961 	int i;
1962 
1963 	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
1964 		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
1965 		    rtl8125_mac_cfg5_ephy[i].val);
1966 
1967 	val = rge_read_ephy(sc, 0x0022) & ~0x0030;
1968 	rge_write_ephy(sc, 0x0022, val | 0x0020);
1969 	val = rge_read_ephy(sc, 0x0062) & ~0x0030;
1970 	rge_write_ephy(sc, 0x0062, val | 0x0020);
1971 
1972 	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);
1973 
1974 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1975 	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
1976 	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
1977 	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
1978 	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
1979 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
1980 	    32);
1981 	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
1982 	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
1983 	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
1984 	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
1985 	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
1986 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1987 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
1988 	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
1989 	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
1990 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1991 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
1992 	for (i = 0; i < 10; i++) {
1993 		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
1994 		rge_write_phy_ocp(sc, 0xa438, 0x2417);
1995 	}
1996 	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
1997 	val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
1998 	rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
1999 }
2000 
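/*
 * Load the PHY MCU microcode patch for this chip variant, but only when
 * the version cached in sc->rge_mcodever differs from mcode_version.
 * For MAC_CFG2/3 the 0xa436/0xb82e writes and the 0xb820 bit appear to
 * park the PHY MCU while the patch table is streamed in; the sequence
 * mirrors the vendor driver.  The new version is recorded at 0x801e
 * once the load completes.
 */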
2001 void
2002 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
2003 {
2004 	if (sc->rge_mcodever != mcode_version) {
2005 		int i;
2006 
2007 		rge_patch_phy_mcu(sc, 1);
2008 
2009 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
2010 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
2011 			if (sc->rge_type == MAC_CFG2)
2012 				rge_write_phy_ocp(sc, 0xa438, 0x8600);
2013 			else
2014 				rge_write_phy_ocp(sc, 0xa438, 0x8601);
2015 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
2016 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
2017 
2018 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
2019 		}
2020 
2021 		if (sc->rge_type == MAC_CFG2) {
2022 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
2023 				rge_write_phy_ocp(sc,
2024 				    rtl8125_mac_cfg2_mcu[i].reg,
2025 				    rtl8125_mac_cfg2_mcu[i].val);
2026 			}
2027 		} else if (sc->rge_type == MAC_CFG3) {
2028 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
2029 				rge_write_phy_ocp(sc,
2030 				    rtl8125_mac_cfg3_mcu[i].reg,
2031 				    rtl8125_mac_cfg3_mcu[i].val);
2032 			}
2033 		} else if (sc->rge_type == MAC_CFG4) {
2034 			for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
2035 				rge_write_phy_ocp(sc,
2036 				    rtl8125_mac_cfg4_mcu[i].reg,
2037 				    rtl8125_mac_cfg4_mcu[i].val);
2038 			}
2039 		} else if (sc->rge_type == MAC_CFG5) {
2040 			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
2041 				rge_write_phy_ocp(sc,
2042 				    rtl8125_mac_cfg5_mcu[i].reg,
2043 				    rtl8125_mac_cfg5_mcu[i].val);
2044 			}
2045 		}
2046 
2047 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
2048 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
2049 
2050 			rge_write_phy_ocp(sc, 0xa436, 0);
2051 			rge_write_phy_ocp(sc, 0xa438, 0);
2052 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
2053 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
2054 			rge_write_phy_ocp(sc, 0xa438, 0);
2055 		}
2056 
2057 		rge_patch_phy_mcu(sc, 0);
2058 
2059 		/* Write microcode version. */
2060 		rge_write_phy_ocp(sc, 0xa436, 0x801e);
2061 		rge_write_phy_ocp(sc, 0xa438, mcode_version);
2062 	}
2063 }
2064 
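/*
 * Program the station address.  The ID registers are write-protected,
 * so the writes are bracketed by RGE_EECMD_WRITECFG to unlock and
 * re-lock the configuration registers.
 */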
2065 void
2066 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
2067 {
2068 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2069 	RGE_WRITE_4(sc, RGE_MAC0,
2070 	    (uint32_t)addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2071 	RGE_WRITE_4(sc, RGE_MAC4,
2072 	    addr[5] <<  8 | addr[4]);
2073 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2074 }
2075 
2076 void
2077 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
2078 {
2079 	int i;
2080 
2081 	for (i = 0; i < ETHER_ADDR_LEN; i++)
2082 		addr[i] = RGE_READ_1(sc, RGE_ADDR0 + i);
2083 }
2084 
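/*
 * Basic hardware setup: clear PME status, disable CLKREQ and UPS, halt
 * and zero the MAC MCU mailbox registers (0xfc26-0xfc38), and load the
 * chip-specific MAC breakpoint table where one exists.  The 3 ms delay
 * comes from the vendor driver; the minimum settle time is not
 * documented.
 */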
2085 void
2086 rge_hw_init(struct rge_softc *sc)
2087 {
2088 	int i;
2089 
2090 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2091 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
2092 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
2093 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2094 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
2095 
2096 	/* Disable UPS. */
2097 	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
2098 
2099 	/* Configure MAC MCU. */
2100 	rge_write_mac_ocp(sc, 0xfc38, 0);
2101 
2102 	for (i = 0xfc28; i < 0xfc38; i += 2)
2103 		rge_write_mac_ocp(sc, i, 0);
2104 
2105 	DELAY(3000);
2106 	rge_write_mac_ocp(sc, 0xfc26, 0);
2107 
2108 	if (sc->rge_type == MAC_CFG3) {
2109 		for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
2110 			rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
2111 			    rtl8125_mac_bps[i].val);
2112 		}
2113 	} else if (sc->rge_type == MAC_CFG5) {
2114 		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
2115 			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
2116 			    rtl8125b_mac_bps[i].val);
2117 		}
2118 	}
2119 
2120 	/* Disable PHY power saving. */
2121 	rge_disable_phy_ocp_pwrsave(sc);
2122 
2123 	/* Set PCIe uncorrectable error status. */
2124 	rge_write_csi(sc, 0x108,
2125 	    rge_read_csi(sc, 0x108) | 0x00100000);
2126 }
2127 
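/*
 * Kick PHY OCP register 0xc416 from within a PHY MCU patch window to
 * defeat a PHY power-saving mode.  What the value 0x0500 selects is
 * undocumented; it is taken from the vendor driver.
 */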
2128 void
2129 rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
2130 {
2131 	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
2132 		rge_patch_phy_mcu(sc, 1);
2133 		rge_write_phy_ocp(sc, 0xc416, 0);
2134 		rge_write_phy_ocp(sc, 0xc416, 0x0500);
2135 		rge_patch_phy_mcu(sc, 0);
2136 	}
2137 }
2138 
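/*
 * Open (set != 0) or close (set == 0) a PHY MCU patch window via bit
 * 0x0010 of OCP register 0xb820, then poll 0xb800 for up to ~100 ms
 * until bit 0x0040 is set, which presumably signals that the MCU has
 * acknowledged the request.
 */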
2139 void
2140 rge_patch_phy_mcu(struct rge_softc *sc, int set)
2141 {
2142 	int i;
2143 
2144 	if (set)
2145 		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
2146 	else
2147 		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
2148 
2149 	for (i = 0; i < 1000; i++) {
2150 		if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) == 0x0040)
2151 			break;
2152 		DELAY(100);
2153 	}
2154 	if (i == 1000) {
2155 		DPRINTF(("%s: timeout waiting for PHY MCU patch ready\n",
2156 		    device_xname(sc->sc_dev)));
2157 	}
2158 }
2159 
2160 void
2161 rge_add_media_types(struct rge_softc *sc)
2162 {
2163 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
2164 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
2165 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
2166 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
2167 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
2168 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2169 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
2170 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
2171 }
2172 
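/*
 * Select the interrupt mitigation strategy: RGE_IMTYPE_NONE takes an
 * interrupt per RX/TX event, while RGE_IMTYPE_SIM batches completions
 * behind the PCS timeout timer interrupt armed in rge_setup_sim_im().
 */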
2173 void
2174 rge_config_imtype(struct rge_softc *sc, int imtype)
2175 {
2176 	switch (imtype) {
2177 	case RGE_IMTYPE_NONE:
2178 		sc->rge_intrs = RGE_INTRS;
2179 		sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
2180 		    RGE_ISR_RX_FIFO_OFLOW;
2181 		sc->rge_tx_ack = RGE_ISR_TX_OK;
2182 		break;
2183 	case RGE_IMTYPE_SIM:
2184 		sc->rge_intrs = RGE_INTRS_TIMER;
2185 		sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
2186 		sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
2187 		break;
2188 	default:
2189 		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
2190 	}
2191 }
2192 
2193 void
2194 rge_disable_hw_im(struct rge_softc *sc)
2195 {
2196 	RGE_WRITE_2(sc, RGE_IM, 0);
2197 }
2198 
2199 void
2200 rge_disable_sim_im(struct rge_softc *sc)
2201 {
2202 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
2203 	sc->rge_timerintr = 0;
2204 }
2205 
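/*
 * Arm the simulated interrupt moderation timer.  The TIMERINT0 reload
 * value of 0x2600 is inherited from OpenBSD and its units are
 * undocumented; writing RGE_TIMERCNT appears to restart the countdown.
 */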
2206 void
2207 rge_setup_sim_im(struct rge_softc *sc)
2208 {
2209 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
2210 	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
2211 	sc->rge_timerintr = 1;
2212 }
2213 
2214 void
2215 rge_setup_intr(struct rge_softc *sc, int imtype)
2216 {
2217 	rge_config_imtype(sc, imtype);
2218 
2219 	/* Enable interrupts. */
2220 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
2221 
2222 	switch (imtype) {
2223 	case RGE_IMTYPE_NONE:
2224 		rge_disable_sim_im(sc);
2225 		rge_disable_hw_im(sc);
2226 		break;
2227 	case RGE_IMTYPE_SIM:
2228 		rge_disable_hw_im(sc);
2229 		rge_setup_sim_im(sc);
2230 		break;
2231 	default:
2232 		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
2233 	}
2234 }
2235 
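/*
 * Take the NIC out of out-of-band (management firmware) mode so the
 * host driver owns it: stop receiving, disable RealWoW, reset, then
 * clear the OOB flag and wait for the firmware handshake via
 * RGE_TWICMD.  The MAC OCP writes (0xc0aa/0xc0a6/0xc01e) are
 * undocumented and copied from the vendor driver.  If the chip reports
 * a UPS resume, wait for the PHY to reach state 2 before clearing the
 * resume flag.
 */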
2236 void
2237 rge_exit_oob(struct rge_softc *sc)
2238 {
2239 	int i;
2240 
2241 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
2242 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
2243 	    RGE_RXCFG_ERRPKT);
2244 
2245 	/* Disable RealWoW. */
2246 	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
2247 
2248 	rge_reset(sc);
2249 
2250 	/* Disable OOB. */
2251 	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
2252 
2253 	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
2254 
2255 	for (i = 0; i < 10; i++) {
2256 		DELAY(100);
2257 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2258 			break;
2259 	}
2260 
2261 	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
2262 	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
2263 	rge_write_mac_ocp(sc, 0xc01e, 0x5555);
2264 
2265 	for (i = 0; i < 10; i++) {
2266 		DELAY(100);
2267 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2268 			break;
2269 	}
2270 
2271 	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
2272 		printf("%s: detected UPS resume, reinitializing PHY\n",
2273 		    device_xname(sc->sc_dev));
2274 		for (i = 0; i < RGE_TIMEOUT; i++) {
2275 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
2276 				break;
2277 			DELAY(1000);
2278 		}
2279 		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
2280 		if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
2281 			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
2282 		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
2283 	}
2284 }
2285 
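/*
 * CSI (PCIe configuration space indirect) write: load the data
 * register, kick RGE_CSIAR with the address, byte enables and BUSY
 * set, then poll up to ~1 ms for BUSY to clear.
 */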
2286 void
2287 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
2288 {
2289 	int i;
2290 
2291 	RGE_WRITE_4(sc, RGE_CSIDR, val);
2292 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2293 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
2294 
2295 	for (i = 0; i < 10; i++) {
2296 		DELAY(100);
2297 		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
2298 			break;
2299 	}
2300 
2301 	DELAY(20);
2302 }
2303 
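/*
 * CSI read.  Note the inverted completion polarity: for reads, BUSY
 * going high indicates that the data register is valid.
 */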
2304 uint32_t
2305 rge_read_csi(struct rge_softc *sc, uint32_t reg)
2306 {
2307 	int i;
2308 
2309 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2310 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
2311 
2312 	for (i = 0; i < 10; i++) {
2313 		DELAY(100);
2314 		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
2315 			break;
2316 	}
2317 
2318 	DELAY(20);
2319 
2320 	return (RGE_READ_4(sc, RGE_CSIDR));
2321 }
2322 
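/*
 * MAC OCP registers are 16 bits wide and word-aligned; the halved
 * register offset is packed into RGE_MACOCP above the data.  Writes
 * are posted, with no completion polling.
 */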
2323 void
2324 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2325 {
2326 	uint32_t tmp;
2327 
2328 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2329 	tmp |= val;
2330 	tmp |= RGE_MACOCP_BUSY;
2331 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
2332 }
2333 
2334 uint16_t
2335 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
2336 {
2337 	uint32_t val;
2338 
2339 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2340 	RGE_WRITE_4(sc, RGE_MACOCP, val);
2341 
2342 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
2343 }
2344 
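/*
 * PCIe EPHY write through RGE_EPHYAR, using the same indirect
 * address/data pattern as the CSI helpers: set BUSY with the request
 * and poll up to ~1 ms for it to clear.
 */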
2345 void
2346 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
2347 {
2348 	uint32_t tmp;
2349 	int i;
2350 
2351 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2352 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
2353 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
2354 
2355 	for (i = 0; i < 10; i++) {
2356 		DELAY(100);
2357 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
2358 			break;
2359 	}
2360 
2361 	DELAY(20);
2362 }
2363 
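/*
 * EPHY read; as with CSI reads, completion is signalled by BUSY going
 * high rather than low.
 */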
2364 uint16_t
2365 rge_read_ephy(struct rge_softc *sc, uint16_t reg)
2366 {
2367 	uint32_t val;
2368 	int i;
2369 
2370 	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2371 	RGE_WRITE_4(sc, RGE_EPHYAR, val);
2372 
2373 	for (i = 0; i < 10; i++) {
2374 		DELAY(100);
2375 		val = RGE_READ_4(sc, RGE_EPHYAR);
2376 		if (val & RGE_EPHYAR_BUSY)
2377 			break;
2378 	}
2379 
2380 	DELAY(20);
2381 
2382 	return (val & RGE_EPHYAR_DATA_MASK);
2383 }
2384 
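/*
 * Write a conventional MII-style PHY register by translating it into
 * the OCP address space.  With addr == 0, register reg maps to page
 * RGE_PHYBASE + reg / 8, offset 0x10 + reg % 8; the arithmetic below
 * folds that into a single OCP address.  The mapping follows the
 * vendor driver.
 */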
2385 void
2386 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
2387 {
2388 	uint16_t off, phyaddr;
2389 
2390 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2391 	phyaddr <<= 4;
2392 
2393 	off = addr ? reg : 0x10 + (reg % 8);
2394 
2395 	phyaddr += (off - 16) << 1;
2396 
2397 	rge_write_phy_ocp(sc, phyaddr, val);
2398 }
2399 
2400 uint16_t
2401 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
2402 {
2403 	uint16_t off, phyaddr;
2404 
2405 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2406 	phyaddr <<= 4;
2407 
2408 	off = addr ? reg : 0x10 + (reg % 8);
2409 
2410 	phyaddr += (off - 16) << 1;
2411 
2412 	return (rge_read_phy_ocp(sc, phyaddr));
2413 }
2414 
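/*
 * GPHY OCP write through RGE_PHYOCP, polling up to RGE_TIMEOUT
 * microseconds for BUSY to clear.  The matching read below again uses
 * the inverted polarity, waiting for BUSY to be set.
 */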
2415 void
2416 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2417 {
2418 	uint32_t tmp;
2419 	int i;
2420 
2421 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2422 	tmp |= RGE_PHYOCP_BUSY | val;
2423 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
2424 
2425 	for (i = 0; i < RGE_TIMEOUT; i++) {
2426 		DELAY(1);
2427 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
2428 			break;
2429 	}
2430 }
2431 
2432 uint16_t
2433 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
2434 {
2435 	uint32_t val;
2436 	int i;
2437 
2438 	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2439 	RGE_WRITE_4(sc, RGE_PHYOCP, val);
2440 
2441 	for (i = 0; i < RGE_TIMEOUT; i++) {
2442 		DELAY(1);
2443 		val = RGE_READ_4(sc, RGE_PHYOCP);
2444 		if (val & RGE_PHYOCP_BUSY)
2445 			break;
2446 	}
2447 
2448 	return (val & RGE_PHYOCP_DATA_MASK);
2449 }
2450 
2451 int
2452 rge_get_link_status(struct rge_softc *sc)
2453 {
2454 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
2455 }
2456 
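/*
 * Deferred TX doorbell; presumably scheduled on the driver's
 * workqueue (hence the struct work argument) so the register write
 * happens outside the caller's context.
 */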
2457 void
2458 rge_txstart(struct work *wk, void *arg)
2459 {
2460 	struct rge_softc *sc = arg;
2461 
2462 	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
2463 }
2464 
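/*
 * Once-per-second callout: poll the link state and reschedule.
 */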
2465 void
2466 rge_tick(void *arg)
2467 {
2468 	struct rge_softc *sc = arg;
2469 	int s;
2470 
2471 	s = splnet();
2472 	rge_link_state(sc);
2473 	splx(s);
2474 
2475 	callout_schedule(&sc->sc_timeout, hz);
2476 }
2477 
2478 void
2479 rge_link_state(struct rge_softc *sc)
2480 {
2481 	struct ifnet *ifp = &sc->sc_ec.ec_if;
2482 	int link = LINK_STATE_DOWN;
2483 
2484 	if (rge_get_link_status(sc))
2485 		link = LINK_STATE_UP;
2486 
2487 	if (ifp->if_link_state != link) { /* XXX if_link_state read unlocked */
2488 		if_link_state_change(ifp, link);
2489 	}
2490 }
2491