1 /*	$NetBSD: if_rge.c,v 1.22 2021/10/11 15:11:49 msaitoh Exp $	*/
2 /*	$OpenBSD: if_rge.c,v 1.9 2020/12/12 11:48:53 jan Exp $	*/
3 
4 /*
5  * Copyright (c) 2019, 2020 Kevin Lo <kevlo@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <sys/cdefs.h>
21 __KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.22 2021/10/11 15:11:49 msaitoh Exp $");
22 
23 #include <sys/types.h>
24 
25 #include <sys/param.h>
26 #include <sys/systm.h>
27 #include <sys/sockio.h>
28 #include <sys/mbuf.h>
29 #include <sys/malloc.h>
30 #include <sys/kernel.h>
31 #include <sys/socket.h>
32 #include <sys/device.h>
33 #include <sys/endian.h>
34 #include <sys/callout.h>
35 #include <sys/workqueue.h>
36 
37 #include <net/if.h>
38 
39 #include <net/if_dl.h>
40 #include <net/if_ether.h>
41 
42 #include <net/if_media.h>
43 
44 #include <netinet/in.h>
45 #include <net/if_ether.h>
46 
47 #include <net/bpf.h>
48 
49 #include <sys/bus.h>
50 #include <machine/intr.h>
51 
52 #include <dev/mii/mii.h>
53 
54 #include <dev/pci/pcivar.h>
55 #include <dev/pci/pcireg.h>
56 #include <dev/pci/pcidevs.h>
57 
58 #include <dev/pci/if_rgereg.h>
59 
60 #ifdef __NetBSD__
61 #define letoh32		htole32
62 #define nitems(x)	__arraycount(x)
63 
64 static struct mbuf *
65 MCLGETL(struct rge_softc *sc __unused, int how,
66     u_int size)
67 {
68 	struct mbuf *m;
69 
70 	MGETHDR(m, how, MT_DATA);
71 	if (m == NULL)
72 		return NULL;
73 
74 	MEXTMALLOC(m, size, how);
75 	if ((m->m_flags & M_EXT) == 0) {
76 		m_freem(m);
77 		return NULL;
78 	}
79 	return m;
80 }
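/*
 * Note: MCLGETL() above is a local stand-in for OpenBSD's MCLGETL(),
 * which returns an mbuf header backed by at least `size' bytes of
 * external storage.  The softc argument is unused; it only keeps the
 * OpenBSD call sites (e.g. rge_newbuf()) unchanged.  A minimal usage
 * sketch (illustrative only):
 *
 *	struct mbuf *m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
 *	if (m == NULL)
 *		return ENOBUFS;		/* allocation failed *​/
 */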
81 
82 #ifdef NET_MPSAFE
83 #define 	RGE_MPSAFE	1
84 #define 	CALLOUT_FLAGS	CALLOUT_MPSAFE
85 #else
86 #define 	CALLOUT_FLAGS	0
87 #endif
88 #endif
89 
90 #ifdef RGE_DEBUG
91 #define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
92 int rge_debug = 0;
93 #else
94 #define DPRINTF(x)
95 #endif
96 
97 static int		rge_match(device_t, cfdata_t, void *);
98 static void		rge_attach(device_t, device_t, void *);
99 int		rge_intr(void *);
100 int		rge_encap(struct rge_softc *, struct mbuf *, int);
101 int		rge_ioctl(struct ifnet *, u_long, void *);
102 void		rge_start(struct ifnet *);
103 void		rge_watchdog(struct ifnet *);
104 int		rge_init(struct ifnet *);
105 void		rge_stop(struct ifnet *, int);
106 int		rge_ifmedia_upd(struct ifnet *);
107 void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
108 int		rge_allocmem(struct rge_softc *);
109 int		rge_newbuf(struct rge_softc *, int);
110 void		rge_discard_rxbuf(struct rge_softc *, int);
111 int		rge_rx_list_init(struct rge_softc *);
112 void		rge_tx_list_init(struct rge_softc *);
113 int		rge_rxeof(struct rge_softc *);
114 int		rge_txeof(struct rge_softc *);
115 void		rge_reset(struct rge_softc *);
116 void		rge_iff(struct rge_softc *);
117 void		rge_set_phy_power(struct rge_softc *, int);
118 void		rge_phy_config(struct rge_softc *);
119 void		rge_phy_config_mac_cfg2(struct rge_softc *);
120 void		rge_phy_config_mac_cfg3(struct rge_softc *);
121 void		rge_phy_config_mac_cfg4(struct rge_softc *);
122 void		rge_phy_config_mac_cfg5(struct rge_softc *);
123 void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
124 void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
125 void		rge_get_macaddr(struct rge_softc *, uint8_t *);
126 void		rge_hw_init(struct rge_softc *);
127 void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
128 void		rge_patch_phy_mcu(struct rge_softc *, int);
129 void		rge_add_media_types(struct rge_softc *);
130 void		rge_config_imtype(struct rge_softc *, int);
131 void		rge_disable_hw_im(struct rge_softc *);
132 void		rge_disable_sim_im(struct rge_softc *);
133 void		rge_setup_sim_im(struct rge_softc *);
134 void		rge_setup_intr(struct rge_softc *, int);
135 void		rge_exit_oob(struct rge_softc *);
136 void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
137 uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
138 void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
139 uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
140 void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
141 uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
142 void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
143 uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
144 void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
145 uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
146 int		rge_get_link_status(struct rge_softc *);
147 void		rge_txstart(struct work *, void *);
148 void		rge_tick(void *);
149 void		rge_link_state(struct rge_softc *);
150 
151 static const struct {
152 	uint16_t reg;
153 	uint16_t val;
154 } rtl8125_mac_cfg2_mcu[] = {
155 	RTL8125_MAC_CFG2_MCU
156 }, rtl8125_mac_cfg3_mcu[] = {
157 	RTL8125_MAC_CFG3_MCU
158 }, rtl8125_mac_cfg4_mcu[] = {
159 	RTL8125_MAC_CFG4_MCU
160 }, rtl8125_mac_cfg5_mcu[] = {
161 	RTL8125_MAC_CFG5_MCU
162 };
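/*
 * These tables are opaque register/value pairs from Realtek's PHY
 * microcode patches (the RTL8125_MAC_CFG*_MCU macros come from
 * if_rgereg.h); rge_phy_config_mcu() replays them verbatim.
 */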
163 
164 CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
165 		NULL, NULL); /* Sevan - detach function? */
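/*
 * In answer to the question above: no detach function is wired up yet.
 * A minimal sketch of what one could look like is kept below under
 * "#if 0" (illustrative only, never compiled); the rge_detach name and
 * the exact teardown order are assumptions, not part of this driver.
 */
#if 0
static int
rge_detach(device_t self, int flags)
{
	struct rge_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;

	pmf_device_deregister(self);

	rge_stop(ifp, 1);
	callout_destroy(&sc->sc_timeout);

	ether_ifdetach(ifp);
	if_detach(ifp);

	if (sc->sc_ih != NULL)
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	bus_space_unmap(sc->rge_btag, sc->rge_bhandle, sc->rge_bsize);

	/*
	 * DMA ring and map teardown (the inverse of rge_allocmem())
	 * is left out of this sketch.
	 */
	return 0;
}
#endif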
166 
167 static const struct device_compatible_entry compat_data[] = {
168 	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000) },
169 	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125) },
170 
171 	PCI_COMPAT_EOL
172 };
173 
174 static int
175 rge_match(device_t parent, cfdata_t match, void *aux)
176 {
177 	struct pci_attach_args *pa = aux;
178 
179 	return pci_compatible_match(pa, compat_data);
180 }
181 
182 static void
183 rge_attach(device_t parent, device_t self, void *aux)
184 {
185 	struct rge_softc *sc = device_private(self);
186 	struct pci_attach_args *pa = aux;
187 	pci_chipset_tag_t pc = pa->pa_pc;
188 	pci_intr_handle_t *ihp;
189 	char intrbuf[PCI_INTRSTR_LEN];
190 	const char *intrstr = NULL;
191 	struct ifnet *ifp;
192 	pcireg_t reg;
193 	uint32_t hwrev;
194 	uint8_t eaddr[ETHER_ADDR_LEN];
195 	int offset;
196 	pcireg_t command;
197 
198 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
199 
200 	sc->sc_dev = self;
201 
202 	pci_aprint_devinfo(pa, "Ethernet controller");
203 
204 	/*
205 	 * Map control/status registers.
206 	 */
207 	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
208 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
209 	    NULL, &sc->rge_bsize)) {
210 		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
211 		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
212 		    &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
213 			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
214 			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
215 			    &sc->rge_bsize)) {
216 				aprint_error(": can't map mem or i/o space\n");
217 				return;
218 			}
219 		}
220 	}
221 
222 	int counts[PCI_INTR_TYPE_SIZE] = {
223 		[PCI_INTR_TYPE_INTX] = 1,
224 		[PCI_INTR_TYPE_MSI] = 1,
225 		[PCI_INTR_TYPE_MSIX] = 1,
226 	};
227 	int max_type = PCI_INTR_TYPE_MSIX;
228 	/*
229 	 * Allocate interrupt.
230 	 */
231 	if (pci_intr_alloc(pa, &ihp, counts, max_type) != 0) {
232 		aprint_error(": couldn't map interrupt\n");
233 		return;
234 	}
235 	switch (pci_intr_type(pc, ihp[0])) {
236 	case PCI_INTR_TYPE_MSIX:
237 	case PCI_INTR_TYPE_MSI:
238 		sc->rge_flags |= RGE_FLAG_MSI;
239 		break;
240 	default:
241 		break;
242 	}
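	/*
	 * RGE_FLAG_MSI, set above for message-signaled vectors, lets
	 * rge_intr() skip the ISR pre-read that is only needed to filter
	 * out other devices' interrupts on a shared INTx line.
	 */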
243 	intrstr = pci_intr_string(pc, ihp[0], intrbuf, sizeof(intrbuf));
244 	sc->sc_ih = pci_intr_establish_xname(pc, ihp[0], IPL_NET, rge_intr,
245 	    sc, device_xname(sc->sc_dev));
246 	if (sc->sc_ih == NULL) {
247 		aprint_error_dev(sc->sc_dev, "couldn't establish interrupt");
248 		if (intrstr != NULL)
249 			aprint_error(" at %s", intrstr);
250 		aprint_error("\n");
251 		return;
252 	}
253 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
254 
255 	if (pci_dma64_available(pa))
256 		sc->sc_dmat = pa->pa_dmat64;
257 	else
258 		sc->sc_dmat = pa->pa_dmat;
259 
260 	sc->sc_pc = pa->pa_pc;
261 	sc->sc_tag = pa->pa_tag;
262 
263 	/* Determine hardware revision */
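	/* (MAC_CFG2/MAC_CFG3 correspond to early RTL8125 silicon and
	 *  MAC_CFG4/MAC_CFG5 to the RTL8125B; the names follow the
	 *  vendor driver convention inherited from OpenBSD.) */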
264 	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
265 	switch (hwrev) {
266 	case 0x60800000:
267 		sc->rge_type = MAC_CFG2;
268 		break;
269 	case 0x60900000:
270 		sc->rge_type = MAC_CFG3;
271 		break;
272 	case 0x64000000:
273 		sc->rge_type = MAC_CFG4;
274 		break;
275 	case 0x64100000:
276 		sc->rge_type = MAC_CFG5;
277 		break;
278 	default:
279 		aprint_error(": unknown version 0x%08x\n", hwrev);
280 		return;
281 	}
282 
283 	rge_config_imtype(sc, RGE_IMTYPE_SIM);
284 
285 	/*
286 	 * PCI Express check.
287 	 */
288 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
289 	    &offset, NULL)) {
290 		/* Disable PCIe ASPM and ECPM. */
291 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
292 		    offset + PCIE_LCSR);
293 		reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1 |
294 		    PCIE_LCSR_ENCLKPM);
295 		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
296 		    reg);
297 	}
298 
299 	rge_exit_oob(sc);
300 	rge_hw_init(sc);
301 
302 	rge_get_macaddr(sc, eaddr);
303 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
304 	    ether_sprintf(eaddr));
305 
306 	memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);
307 
308 	rge_set_phy_power(sc, 1);
309 	rge_phy_config(sc);
310 
311 	if (rge_allocmem(sc))
312 		return;
313 
314 	ifp = &sc->sc_ec.ec_if;
315 	ifp->if_softc = sc;
316 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
317 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
318 #ifdef RGE_MPSAFE
319 	ifp->if_extflags = IFEF_MPSAFE;
320 #endif
321 	ifp->if_ioctl = rge_ioctl;
322 	ifp->if_stop = rge_stop;
323 	ifp->if_start = rge_start;
324 	ifp->if_init = rge_init;
325 	ifp->if_watchdog = rge_watchdog;
326 	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT - 1);
327 
328 #if notyet
329 	ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx |
330 	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
331 	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;
332 #endif
333 
334 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
335 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
336 
337 	callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
338 	callout_setfunc(&sc->sc_timeout, rge_tick, sc);
339 
340 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
341 	command |= PCI_COMMAND_MASTER_ENABLE;
342 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
343 
344 	/* Initialize ifmedia structures. */
345 	sc->sc_ec.ec_ifmedia = &sc->sc_media;
346 	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
347 	    rge_ifmedia_sts);
348 	rge_add_media_types(sc);
349 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
350 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
351 	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
352 
353 	if_attach(ifp);
354 	ether_ifattach(ifp, eaddr);
355 
356 	if (pmf_device_register(self, NULL, NULL))
357 		pmf_class_network_register(self, ifp);
358 	else
359 		aprint_error_dev(self, "couldn't establish power handler\n");
360 }
361 
362 int
363 rge_intr(void *arg)
364 {
365 	struct rge_softc *sc = arg;
366 	struct ifnet *ifp = &sc->sc_ec.ec_if;
367 	uint32_t status;
368 	int claimed = 0, rx, tx;
369 
370 	if (!(ifp->if_flags & IFF_RUNNING))
371 		return (0);
372 
373 	/* Disable interrupts. */
374 	RGE_WRITE_4(sc, RGE_IMR, 0);
375 
376 	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
377 		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
378 			return (0);
379 	}
380 
381 	status = RGE_READ_4(sc, RGE_ISR);
382 	if (status)
383 		RGE_WRITE_4(sc, RGE_ISR, status);
384 
385 	if (status & RGE_ISR_PCS_TIMEOUT)
386 		claimed = 1;
387 
388 	rx = tx = 0;
389 	if (status & sc->rge_intrs) {
390 		if (status &
391 		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
392 			rx |= rge_rxeof(sc);
393 			claimed = 1;
394 		}
395 
396 		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
397 			tx |= rge_txeof(sc);
398 			claimed = 1;
399 		}
400 
401 		if (status & RGE_ISR_SYSTEM_ERR) {
402 			KERNEL_LOCK(1, NULL);
403 			rge_init(ifp);
404 			KERNEL_UNLOCK_ONE(NULL);
405 			claimed = 1;
406 		}
407 	}
408 
409 	if (sc->rge_timerintr) {
410 		if ((tx | rx) == 0) {
411 			/*
412 			 * Nothing needs to be processed; fall back
413 			 * to plain TX/RX interrupts.
414 			 */
415 			rge_setup_intr(sc, RGE_IMTYPE_NONE);
416 
417 			/*
418 			 * Collect the rings once more, mainly to
419 			 * close the race window opened by changing
420 			 * the interrupt masks.
421 			 */
422 			rge_rxeof(sc);
423 			rge_txeof(sc);
424 		} else
425 			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
426 	} else if (tx | rx) {
427 		/*
428 		 * Assume that using simulated interrupt moderation
429 		 * (hardware timer based) could reduce the interrupt
430 		 * rate.
431 		 */
432 		rge_setup_intr(sc, RGE_IMTYPE_SIM);
433 	}
434 
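	/* Re-arm the interrupt sources selected by rge_setup_intr(). */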
435 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
436 
437 	return (claimed);
438 }
439 
440 int
441 rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
442 {
443 	struct rge_tx_desc *d = NULL;
444 	struct rge_txq *txq;
445 	bus_dmamap_t txmap;
446 	uint32_t cmdsts, cflags = 0;
447 	int cur, error, i, last, nsegs;
448 
449 #if notyet
450 	/*
451 	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
452 	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
453 	 * take effect.
454 	 */
455 	if ((m->m_pkthdr.csum_flags &
456 	    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
457 		cflags |= RGE_TDEXTSTS_IPCSUM;
458 		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
459 			cflags |= RGE_TDEXTSTS_TCPCSUM;
460 		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
461 			cflags |= RGE_TDEXTSTS_UDPCSUM;
462 	}
463 #endif
464 
465 	txq = &sc->rge_ldata.rge_txq[idx];
466 	txmap = txq->txq_dmamap;
467 
468 	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
469 	switch (error) {
470 	case 0:
471 		break;
472 	case EFBIG: /* mbuf chain is too fragmented */
473 		if (m_defrag(m, M_DONTWAIT) == 0 &&
474 		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
475 		    BUS_DMA_NOWAIT) == 0)
476 			break;
477 
478 		/* FALLTHROUGH */
479 	default:
480 		return (0);
481 	}
482 
483 	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
484 	    BUS_DMASYNC_PREWRITE);
485 
486 	nsegs = txmap->dm_nsegs;
487 
488 	/* Set up hardware VLAN tagging. */
489 	if (vlan_has_tag(m))
490 		cflags |= bswap16(vlan_get_tag(m)) | RGE_TDEXTSTS_VTAG;
491 
492 	last = cur = idx;
493 	cmdsts = RGE_TDCMDSTS_SOF;
494 
495 	for (i = 0; i < txmap->dm_nsegs; i++) {
496 		d = &sc->rge_ldata.rge_tx_list[cur];
497 
498 		d->rge_extsts = htole32(cflags);
499 		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
500 		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));
501 
502 		cmdsts |= txmap->dm_segs[i].ds_len;
503 
504 		if (cur == RGE_TX_LIST_CNT - 1)
505 			cmdsts |= RGE_TDCMDSTS_EOR;
506 
507 		d->rge_cmdsts = htole32(cmdsts);
508 
509 		last = cur;
510 		cmdsts = RGE_TDCMDSTS_OWN;
511 		cur = RGE_NEXT_TX_DESC(cur);
512 	}
513 
514 	/* Set EOF on the last descriptor. */
515 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);
516 
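	/*
	 * Note the ordering: RGE_TDCMDSTS_OWN was set on every descriptor
	 * except the first while the chain was built, and is only now set
	 * on the first (SOF) descriptor, so the chip never sees a
	 * half-constructed chain.
	 */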
517 	/* Transfer ownership of packet to the chip. */
518 	d = &sc->rge_ldata.rge_tx_list[idx];
519 
520 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);
521 
522 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
523 	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
524 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
525 
526 	/* Update info of TX queue and descriptors. */
527 	txq->txq_mbuf = m;
528 	txq->txq_descidx = last;
529 
530 	return (nsegs);
531 }
532 
533 int
534 rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
535 {
536 	struct rge_softc *sc = ifp->if_softc;
537 	//struct ifreq *ifr = (struct ifreq *)data;
538 	int s, error = 0;
539 
540 	s = splnet();
541 
542 	switch (cmd) {
543 	case SIOCSIFFLAGS:
544 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
545 			break;
546 		/* XXX set an ifflags callback and let ether_ioctl
547 		 * handle all of this.
548 		 */
549 		if (ifp->if_flags & IFF_UP) {
550 			if (ifp->if_flags & IFF_RUNNING)
551 				error = ENETRESET;
552 			else
553 				rge_init(ifp);
554 		} else {
555 			if (ifp->if_flags & IFF_RUNNING)
556 				rge_stop(ifp, 1);
557 		}
558 		break;
559 	default:
560 		error = ether_ioctl(ifp, cmd, data);
561 	}
562 
563 	if (error == ENETRESET) {
564 		if (ifp->if_flags & IFF_RUNNING)
565 			rge_iff(sc);
566 		error = 0;
567 	}
568 
569 	splx(s);
570 	return (error);
571 }
572 
573 void
574 rge_start(struct ifnet *ifp)
575 {
576 	struct rge_softc *sc = ifp->if_softc;
577 	struct mbuf *m;
578 	int free, idx, used;
579 	int queued = 0;
580 
581 #define LINK_STATE_IS_UP(_s)    \
582 	((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)
583 
584 	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
585 		IFQ_PURGE(&ifp->if_snd);
586 		return;
587 	}
588 
589 	/* Calculate free space. */
590 	idx = sc->rge_ldata.rge_txq_prodidx;
591 	free = sc->rge_ldata.rge_txq_considx;
592 	if (free <= idx)
593 		free += RGE_TX_LIST_CNT;
594 	free -= idx;
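	/*
	 * Worked example (illustrative values): with prodidx == 1000 and
	 * considx == 10 on a 1024-entry ring, free becomes
	 * 10 + 1024 - 1000 = 34 slots.
	 */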
595 
596 	for (;;) {
597 		if (RGE_TX_NSEGS >= free + 2) {
598 			SET(ifp->if_flags, IFF_OACTIVE);
599 			break;
600 		}
601 
602 		IFQ_DEQUEUE(&ifp->if_snd, m);
603 		if (m == NULL)
604 			break;
605 
606 		used = rge_encap(sc, m, idx);
607 		if (used == 0) {
608 			m_freem(m);
609 			continue;
610 		}
611 
612 		KASSERT(used <= free);
613 		free -= used;
614 
615 		bpf_mtap(ifp, m, BPF_D_OUT);
616 
617 		idx += used;
618 		if (idx >= RGE_TX_LIST_CNT)
619 			idx -= RGE_TX_LIST_CNT;
620 
621 		queued++;
622 	}
623 
624 	if (queued == 0)
625 		return;
626 
627 	/* Set a timeout in case the chip goes out to lunch. */
628 	ifp->if_timer = 5;
629 
630 	sc->rge_ldata.rge_txq_prodidx = idx;
631 #if 0
632 	ifq_serialize(ifq, &sc->sc_task);
633 #else
634 	rge_txstart(&sc->sc_task, sc);
635 #endif
636 }
637 
638 void
639 rge_watchdog(struct ifnet *ifp)
640 {
641 	struct rge_softc *sc = ifp->if_softc;
642 
643 	device_printf(sc->sc_dev, "watchdog timeout\n");
644 	if_statinc(ifp, if_oerrors);
645 
646 	rge_init(ifp);
647 }
648 
649 int
650 rge_init(struct ifnet *ifp)
651 {
652 	struct rge_softc *sc = ifp->if_softc;
653 	uint32_t val;
654 	int i;
655 
656 	rge_stop(ifp, 0);
657 
658 	/* Set MAC address. */
659 	rge_set_macaddr(sc, CLLADDR(ifp->if_sadl));
660 
661 	/* Set Maximum frame size. */
662 	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);
663 
664 	/* Initialize RX descriptors list. */
665 	if (rge_rx_list_init(sc) == ENOBUFS) {
666 		device_printf(sc->sc_dev,
667 		    "init failed: no memory for RX buffers\n");
668 		rge_stop(ifp, 1);
669 		return (ENOBUFS);
670 	}
671 
672 	/* Initialize TX descriptors. */
673 	rge_tx_list_init(sc);
674 
675 	/* Load the addresses of the RX and TX lists into the chip. */
676 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
677 	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
678 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
679 	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
680 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
681 	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
682 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
683 	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
684 
685 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
686 
687 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
688 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
689 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
690 	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);
691 
692 	/* Clear interrupt moderation timer. */
693 	for (i = 0; i < 64; i++)
694 		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);
695 
696 	/* Set the initial RX and TX configurations. */
697 	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
698 	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);
699 
700 	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
701 	rge_write_csi(sc, 0x70c, val | 0x27000000);
702 
703 	/* Enable hardware optimization function. */
704 	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
705 	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);
706 
707 	RGE_WRITE_2(sc, 0x0382, 0x221b);
708 	RGE_WRITE_1(sc, 0x4500, 0);
709 	RGE_WRITE_2(sc, 0x4800, 0);
710 	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);
711 
712 	rge_write_mac_ocp(sc, 0xc140, 0xffff);
713 	rge_write_mac_ocp(sc, 0xc142, 0xffff);
714 
715 	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
716 	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);
717 
718 	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
719 	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
720 	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);
721 
722 	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
723 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
724 		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
725 	else
726 		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);
727 
728 	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);
729 
730 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
731 		val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
732 		rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
733 	} else
734 		RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);
735 
736 	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);
737 
738 	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
739 	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);
740 
741 	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
742 	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);
743 
744 	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
745 	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);
746 
747 	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);
748 
749 	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);
750 
751 	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);
752 
753 	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
754 	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);
755 
756 	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
757 	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);
758 
759 	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
760 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);
761 
762 	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
763 	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);
764 
765 	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
766 	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);
767 
768 	val = rge_read_mac_ocp(sc, 0xe84c) & ~0x0040;
769 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
770 		rge_write_mac_ocp(sc, 0xe84c, 0x00c0);
771 	else
772 		rge_write_mac_ocp(sc, 0xe84c, 0x0080);
773 
774 	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN);
775 
776 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
777 		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);
778 
779 	/* Disable EEE plus. */
780 	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);
781 
782 	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);
783 
784 	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
785 	DELAY(1);
786 	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);
787 
788 	RGE_CLRBIT_4(sc, 0x1880, 0x0030);
789 
790 	rge_write_mac_ocp(sc, 0xe098, 0xc302);
791 
792 	if ((sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0)
793 		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
794 	else
795 		RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
796 
797 	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);
798 
799 	for (i = 0; i < 10; i++) {
800 		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
801 			break;
802 		DELAY(1000);
803 	}
804 
805 	/* Disable RXDV gate. */
806 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
807 	DELAY(2000);
808 
809 	rge_ifmedia_upd(ifp);
810 
811 	/* Enable transmit and receive. */
812 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);
813 
814 	/* Program promiscuous mode and multicast filters. */
815 	rge_iff(sc);
816 
817 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
818 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
819 
820 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
821 
822 	/* Enable interrupts. */
823 	rge_setup_intr(sc, RGE_IMTYPE_SIM);
824 
825 	ifp->if_flags |= IFF_RUNNING;
826 	CLR(ifp->if_flags, IFF_OACTIVE);
827 
828 	callout_schedule(&sc->sc_timeout, 1);
829 
830 	return (0);
831 }
832 
833 /*
834  * Stop the adapter and free any mbufs allocated to the RX and TX lists.
835  */
836 void
837 rge_stop(struct ifnet *ifp, int disable)
838 {
839 	struct rge_softc *sc = ifp->if_softc;
840 	int i;
841 
842 	if (disable) {
843 		callout_halt(&sc->sc_timeout, NULL);
844 	} else
845 		callout_stop(&sc->sc_timeout);
846 
847 	ifp->if_timer = 0;
848 	ifp->if_flags &= ~IFF_RUNNING;
849 	sc->rge_timerintr = 0;
850 
851 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
852 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
853 	    RGE_RXCFG_ERRPKT);
854 
855 	RGE_WRITE_4(sc, RGE_IMR, 0);
856 
857 	/* Clear timer interrupts. */
858 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
859 	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
860 	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
861 	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);
862 
863 	rge_reset(sc);
864 
865 //	intr_barrier(sc->sc_ih);
866 //	ifq_barrier(&ifp->if_snd);
867 /*	ifq_clr_oactive(&ifp->if_snd); Sevan - OpenBSD queue API */
868 
869 	if (sc->rge_head != NULL) {
870 		m_freem(sc->rge_head);
871 		sc->rge_head = sc->rge_tail = NULL;
872 	}
873 
874 	/* Free the TX list buffers. */
875 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
876 		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
877 			bus_dmamap_unload(sc->sc_dmat,
878 			    sc->rge_ldata.rge_txq[i].txq_dmamap);
879 			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
880 			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
881 		}
882 	}
883 
884 	/* Free the RX list buffers. */
885 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
886 		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
887 			bus_dmamap_unload(sc->sc_dmat,
888 			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
889 			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
890 			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
891 		}
892 	}
893 }
894 
895 /*
896  * Set media options.
897  */
898 int
899 rge_ifmedia_upd(struct ifnet *ifp)
900 {
901 	struct rge_softc *sc = ifp->if_softc;
902 	struct ifmedia *ifm = &sc->sc_media;
903 	int anar, gig, val;
904 
905 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
906 		return (EINVAL);
907 
908 	/* Disable Gigabit Lite. */
909 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
910 	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
911 
912 	val = rge_read_phy_ocp(sc, 0xa5d4);
913 	val &= ~RGE_ADV_2500TFDX;
914 
915 	anar = gig = 0;
916 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
917 	case IFM_AUTO:
918 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
919 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
920 		val |= RGE_ADV_2500TFDX;
921 		break;
922 	case IFM_2500_T:
923 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
924 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
925 		val |= RGE_ADV_2500TFDX;
926 		ifp->if_baudrate = IF_Mbps(2500);
927 		break;
928 	case IFM_1000_T:
929 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
930 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
931 		ifp->if_baudrate = IF_Gbps(1);
932 		break;
933 	case IFM_100_TX:
934 		gig = rge_read_phy(sc, 0, MII_100T2CR) &
935 		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
936 		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
937 		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
938 		    ANAR_TX | ANAR_10_FD | ANAR_10;
939 		ifp->if_baudrate = IF_Mbps(100);
940 		break;
941 	case IFM_10_T:
942 		gig = rge_read_phy(sc, 0, MII_100T2CR) &
943 		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
944 		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
945 		    ANAR_10_FD | ANAR_10 : ANAR_10;
946 		ifp->if_baudrate = IF_Mbps(10);
947 		break;
948 	default:
949 		device_printf(sc->sc_dev,
950 		    "unsupported media type\n");
951 		return (EINVAL);
952 	}
953 
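	/*
	 * Even "fixed" media selections go through autonegotiation: the
	 * chosen speed/duplex is enforced by restricting what is
	 * advertised (ANAR, GTCR and the 2.5G bit in OCP 0xa5d4) before
	 * negotiation is restarted below.
	 */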
954 	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
955 	rge_write_phy(sc, 0, MII_100T2CR, gig);
956 	rge_write_phy_ocp(sc, 0xa5d4, val);
957 	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
958 	    BMCR_STARTNEG);
959 
960 	return (0);
961 }
962 
963 /*
964  * Report current media status.
965  */
966 void
967 rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
968 {
969 	struct rge_softc *sc = ifp->if_softc;
970 	uint16_t status = 0;
971 
972 	ifmr->ifm_status = IFM_AVALID;
973 	ifmr->ifm_active = IFM_ETHER;
974 
975 	if (rge_get_link_status(sc)) {
976 		ifmr->ifm_status |= IFM_ACTIVE;
977 
978 		status = RGE_READ_2(sc, RGE_PHYSTAT);
979 		if ((status & RGE_PHYSTAT_FDX) ||
980 		    (status & RGE_PHYSTAT_2500MBPS))
981 			ifmr->ifm_active |= IFM_FDX;
982 		else
983 			ifmr->ifm_active |= IFM_HDX;
984 
985 		if (status & RGE_PHYSTAT_10MBPS)
986 			ifmr->ifm_active |= IFM_10_T;
987 		else if (status & RGE_PHYSTAT_100MBPS)
988 			ifmr->ifm_active |= IFM_100_TX;
989 		else if (status & RGE_PHYSTAT_1000MBPS)
990 			ifmr->ifm_active |= IFM_1000_T;
991 		else if (status & RGE_PHYSTAT_2500MBPS)
992 			ifmr->ifm_active |= IFM_2500_T;
993 	}
994 }
995 
996 /*
997  * Allocate memory for RX/TX rings.
998  */
999 int
1000 rge_allocmem(struct rge_softc *sc)
1001 {
1002 	int error, i;
1003 
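	/*
	 * Each ring follows the usual bus_dma(9) four-step:
	 * bus_dmamap_create() for the ring map, bus_dmamem_alloc() for
	 * the backing pages, bus_dmamem_map() for a kernel virtual
	 * address and bus_dmamap_load() for the device-visible address;
	 * errors unwind in reverse order.
	 */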
1004 	/* Allocate DMA'able memory for the TX ring. */
1005 	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
1006 	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
1007 	if (error) {
1008 		aprint_error_dev(sc->sc_dev, "can't create TX list map\n");
1009 		return (error);
1010 	}
1011 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
1012 	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
1013 	    BUS_DMA_NOWAIT);
1014 	if (error) {
1015 		aprint_error_dev(sc->sc_dev, "can't alloc TX list\n");
1016 		return (error);
1017 	}
1018 
1019 	/* Load the map for the TX ring. */
1020 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
1021 	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
1022 	    (void **) &sc->rge_ldata.rge_tx_list,
1023 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1024 	if (error) {
1025 		aprint_error_dev(sc->sc_dev, "can't map TX dma buffers\n");
1026 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
1027 		    sc->rge_ldata.rge_tx_listnseg);
1028 		return (error);
1029 	}
1030 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
1031 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
1032 	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
1033 	if (error) {
1034 		aprint_error_dev(sc->sc_dev, "can't load TX dma map\n");
1035 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
1036 		bus_dmamem_unmap(sc->sc_dmat,
1037 		    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
1038 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
1039 		    sc->rge_ldata.rge_tx_listnseg);
1040 		return (error);
1041 	}
1042 
1043 	/* Create DMA maps for TX buffers. */
1044 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
1045 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
1046 		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
1047 		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
1048 		if (error) {
1049 			aprint_error_dev(sc->sc_dev, "can't create DMA map for TX\n");
1050 			return (error);
1051 		}
1052 	}
1053 
1054 	/* Allocate DMA'able memory for the RX ring. */
1055 	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
1056 	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
1057 	if (error) {
1058 		aprint_error_dev(sc->sc_dev, "can't create RX list map\n");
1059 		return (error);
1060 	}
1061 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
1062 	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
1063 	    BUS_DMA_NOWAIT);
1064 	if (error) {
1065 		aprint_error_dev(sc->sc_dev, "can't alloc RX list\n");
1066 		return (error);
1067 	}
1068 
1069 	/* Load the map for the RX ring. */
1070 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
1071 	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
1072 	    (void **) &sc->rge_ldata.rge_rx_list,
1073 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1074 	if (error) {
1075 		aprint_error_dev(sc->sc_dev, "can't map RX dma buffers\n");
1076 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
1077 		    sc->rge_ldata.rge_rx_listnseg);
1078 		return (error);
1079 	}
1080 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
1081 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1082 	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
1083 	if (error) {
1084 		aprint_error_dev(sc->sc_dev, "can't load RX dma map\n");
1085 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
1086 		bus_dmamem_unmap(sc->sc_dmat,
1087 		    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
1088 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
1089 		    sc->rge_ldata.rge_rx_listnseg);
1090 		return (error);
1091 	}
1092 
1093 	/* Create DMA maps for RX buffers. */
1094 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
1095 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
1096 		    RGE_JUMBO_FRAMELEN, 0, 0,
1097 		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
1098 		if (error) {
1099 			aprint_error_dev(sc->sc_dev, "can't create DMA map for RX\n");
1100 			return (error);
1101 		}
1102 	}
1103 
1104 	return (error);
1105 }
1106 
1107 /*
1108  * Initialize the RX descriptor and attach an mbuf cluster.
1109  */
1110 int
1111 rge_newbuf(struct rge_softc *sc, int idx)
1112 {
1113 	struct mbuf *m;
1114 	struct rge_rx_desc *r;
1115 	struct rge_rxq *rxq;
1116 	bus_dmamap_t rxmap;
1117 
1118 	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
1119 	if (m == NULL)
1120 		return (ENOBUFS);
1121 
1122 	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;
1123 
1124 	rxq = &sc->rge_ldata.rge_rxq[idx];
1125 	rxmap = rxq->rxq_dmamap;
1126 
1127 	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT))
1128 		goto out;
1129 
1130 	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
1131 	    BUS_DMASYNC_PREREAD);
1132 
1133 	/* Map the segments into RX descriptors. */
1134 	r = &sc->rge_ldata.rge_rx_list[idx];
1135 
1136 	if (RGE_OWN(r)) {
1137 		device_printf(sc->sc_dev, "tried to map busy RX descriptor\n");
1138 		goto out;
1139 	}
1140 
1141 	rxq->rxq_mbuf = m;
1142 
1143 	r->rge_extsts = 0;
1144 	r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
1145 	r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));
1146 
1147 	r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
1148 	if (idx == RGE_RX_LIST_CNT - 1)
1149 		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
1150 
1151 	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
1152 
1153 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1154 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1155 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1156 
1157 	return (0);
1158 out:
1159 	if (m != NULL)
1160 		m_freem(m);
1161 	return (ENOMEM);
1162 }
1163 
1164 void
1165 rge_discard_rxbuf(struct rge_softc *sc, int idx)
1166 {
1167 	struct rge_rx_desc *r;
1168 
1169 	r = &sc->rge_ldata.rge_rx_list[idx];
1170 
1171 	r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
1172 	r->rge_extsts = 0;
1173 	if (idx == RGE_RX_LIST_CNT - 1)
1174 		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
1175 	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
1176 
1177 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1178 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1179 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1180 }
1181 
1182 int
1183 rge_rx_list_init(struct rge_softc *sc)
1184 {
1185 	int i;
1186 
1187 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
1188 
1189 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
1190 		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
1191 		if (rge_newbuf(sc, i) == ENOBUFS)
1192 			return (ENOBUFS);
1193 	}
1194 
1195 	sc->rge_ldata.rge_rxq_prodidx = sc->rge_ldata.rge_rxq_considx = 0;
1196 	sc->rge_head = sc->rge_tail = NULL;
1197 
1198 	return (0);
1199 }
1200 
1201 void
1202 rge_tx_list_init(struct rge_softc *sc)
1203 {
1204 	int i;
1205 
1206 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
1207 
1208 	for (i = 0; i < RGE_TX_LIST_CNT; i++)
1209 		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
1210 
1211 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
1212 	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
1213 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1214 
1215 	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
1216 }
1217 
1218 int
1219 rge_rxeof(struct rge_softc *sc)
1220 {
1221 	struct mbuf *m;
1222 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1223 	struct rge_rx_desc *cur_rx;
1224 	struct rge_rxq *rxq;
1225 	uint32_t rxstat, extsts;
1226 	int i, total_len, rx = 0;
1227 
1228 	for (i = sc->rge_ldata.rge_rxq_considx; ; i = RGE_NEXT_RX_DESC(i)) {
1229 		/* Invalidate the descriptor memory. */
1230 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1231 		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1232 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1233 
1234 		cur_rx = &sc->rge_ldata.rge_rx_list[i];
1235 
1236 		if (RGE_OWN(cur_rx))
1237 			break;
1238 
1239 		rxstat = letoh32(cur_rx->rge_cmdsts);
1240 		extsts = letoh32(cur_rx->rge_extsts);
1241 
1242 		total_len = RGE_RXBYTES(cur_rx);
1243 		rxq = &sc->rge_ldata.rge_rxq[i];
1244 		m = rxq->rxq_mbuf;
1245 		rxq->rxq_mbuf = NULL;
1246 		rx = 1;
1247 
1248 		/* Invalidate the RX mbuf and unload its map. */
1249 		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
1250 		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1251 		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);
1252 
1253 		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
1254 		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
1255 			rge_discard_rxbuf(sc, i);
1256 			continue;
1257 		}
1258 
1259 		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
1260 			if_statinc(ifp, if_ierrors);
1261 			/*
1262 			 * If this is part of a multi-fragment packet,
1263 			 * discard all the pieces.
1264 			 */
1265 			 if (sc->rge_head != NULL) {
1266 				m_freem(sc->rge_head);
1267 				sc->rge_head = sc->rge_tail = NULL;
1268 			}
1269 			rge_discard_rxbuf(sc, i);
1270 			continue;
1271 		}
1272 
1273 		/*
1274 		 * If allocating a replacement mbuf fails,
1275 		 * reload the current one.
1276 		 */
1277 
1278 		if (rge_newbuf(sc, i) == ENOBUFS) {
1279 			if (sc->rge_head != NULL) {
1280 				m_freem(sc->rge_head);
1281 				sc->rge_head = sc->rge_tail = NULL;
1282 			}
1283 			rge_discard_rxbuf(sc, i);
1284 			continue;
1285 		}
1286 
1287 		m_set_rcvif(m, ifp);
1288 		if (sc->rge_head != NULL) {
1289 			m->m_len = total_len;
1290 			/*
1291 			 * Special case: if there are 4 bytes or fewer
1292 			 * in this buffer, the mbuf can be discarded:
1293 			 * the last 4 bytes are the CRC, which we don't
1294 			 * care about anyway.
1295 			 */
1296 			if (m->m_len <= ETHER_CRC_LEN) {
1297 				sc->rge_tail->m_len -=
1298 				    (ETHER_CRC_LEN - m->m_len);
1299 				m_freem(m);
1300 			} else {
1301 				m->m_len -= ETHER_CRC_LEN;
1302 				m->m_flags &= ~M_PKTHDR;
1303 				sc->rge_tail->m_next = m;
1304 			}
1305 			m = sc->rge_head;
1306 			sc->rge_head = sc->rge_tail = NULL;
1307 			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1308 		} else
1309 	#if 0
1310 			m->m_pkthdr.len = m->m_len =
1311 			    (total_len - ETHER_CRC_LEN);
1312 	#else
1313 		{
1314 			m->m_pkthdr.len = m->m_len = total_len;
1315 			m->m_flags |= M_HASFCS;
1316 		}
1317 	#endif
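		/*
		 * M_HASFCS tells the upper layers that the trailing CRC
		 * is still present, so they strip it instead of this
		 * driver trimming total_len (the "#if 0" variant above).
		 */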
1318 
1319 #if notyet
1320 		/* Check IP header checksum. */
1321 		if (!(rxstat & RGE_RDCMDSTS_IPCSUMERR) &&
1322 		    (extsts & RGE_RDEXTSTS_IPV4))
1323 			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
1324 
1325 		/* Check TCP/UDP checksum. */
1326 		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
1327 		    (((rxstat & RGE_RDCMDSTS_TCPPKT) &&
1328 		    !(rxstat & RGE_RDCMDSTS_TCPCSUMERR)) ||
1329 		    ((rxstat & RGE_RDCMDSTS_UDPPKT) &&
1330 		    !(rxstat & RGE_RDCMDSTS_UDPCSUMERR))))
1331 			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
1332 			    M_UDP_CSUM_IN_OK;
1333 #endif
1334 
1335 		if (extsts & RGE_RDEXTSTS_VTAG) {
1336 			vlan_set_tag(m,
1337 			    bswap16(extsts & RGE_RDEXTSTS_VLAN_MASK));
1338 		}
1339 
1340 		if_percpuq_enqueue(ifp->if_percpuq, m);
1341 	}
1342 
1343 	sc->rge_ldata.rge_rxq_considx = i;
1344 
1345 	return (rx);
1346 }
1347 
1348 int
1349 rge_txeof(struct rge_softc *sc)
1350 {
1351 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1352 	struct rge_txq *txq;
1353 	uint32_t txstat;
1354 	int cons, idx, prod;
1355 	int free = 0;
1356 
1357 	prod = sc->rge_ldata.rge_txq_prodidx;
1358 	cons = sc->rge_ldata.rge_txq_considx;
1359 
1360 	while (prod != cons) {
1361 		txq = &sc->rge_ldata.rge_txq[cons];
1362 		idx = txq->txq_descidx;
1363 
1364 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
1365 		    idx * sizeof(struct rge_tx_desc),
1366 		    sizeof(struct rge_tx_desc),
1367 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1368 
1369 		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);
1370 
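		/*
		 * "free = 2" distinguishes "stopped because the chip still
		 * owns a descriptor" from a fully drained ring; the OpenBSD
		 * code kicked the start task in that case (see the #if 0
		 * block at the end of this function).
		 */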
1371 		if (txstat & RGE_TDCMDSTS_OWN) {
1372 			free = 2;
1373 			break;
1374 		}
1375 
1376 		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
1377 		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1378 		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
1379 		m_freem(txq->txq_mbuf);
1380 		txq->txq_mbuf = NULL;
1381 
1382 		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
1383 			if_statinc(ifp, if_collisions);
1384 		if (txstat & RGE_TDCMDSTS_TXERR)
1385 			if_statinc(ifp, if_oerrors);
1386 
1387 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
1388 		    idx * sizeof(struct rge_tx_desc),
1389 		    sizeof(struct rge_tx_desc),
1390 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1391 
1392 		cons = RGE_NEXT_TX_DESC(idx);
1393 		free = 1;
1394 	}
1395 
1396 	if (free == 0)
1397 		return (0);
1398 
1399 	sc->rge_ldata.rge_txq_considx = cons;
1400 
1401 #if 0
1402 	if (ifq_is_oactive(&ifp->if_snd))
1403 		ifq_restart(&ifp->if_snd);
1404 	else if (free == 2)
1405 		ifq_serialize(&ifp->if_snd, &sc->sc_task);
1406 	else
1407 		ifp->if_timer = 0;
1408 #else
1409 #if 0
1410 	if (!IF_IS_EMPTY(&ifp->if_snd))
1411 		rge_start(ifp);
1412 	else
1413 	if (free == 2)
1414 		if (0) { rge_txstart(&sc->sc_task, sc); }
1415 	else
1416 #endif
1417 		ifp->if_timer = 0;
1418 #endif
1419 
1420 	return (1);
1421 }
1422 
1423 void
1424 rge_reset(struct rge_softc *sc)
1425 {
1426 	int i;
1427 
1428 	/* Enable RXDV gate. */
1429 	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
1430 	DELAY(2000);
1431 
1432 	for (i = 0; i < 3000; i++) {
1433 		DELAY(50);
1434 		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
1435 		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
1436 		    RGE_MCUCMD_TXFIFO_EMPTY))
1437 			break;
1438 	}
1439 	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
1440 		for (i = 0; i < 3000; i++) {
1441 			DELAY(50);
1442 			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
1443 				break;
1444 		}
1445 	}
1446 
1447 	DELAY(2000);
1448 
1449 	/* Soft reset. */
1450 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);
1451 
1452 	for (i = 0; i < RGE_TIMEOUT; i++) {
1453 		DELAY(100);
1454 		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
1455 			break;
1456 	}
1457 	if (i == RGE_TIMEOUT)
1458 		device_printf(sc->sc_dev, "reset never completed!\n");
1459 }
1460 
1461 void
1462 rge_iff(struct rge_softc *sc)
1463 {
1464 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1465 	struct ethercom *ec = &sc->sc_ec;
1466 	struct ether_multi *enm;
1467 	struct ether_multistep step;
1468 	uint32_t hashes[2];
1469 	uint32_t rxfilt;
1470 	int h = 0;
1471 
1472 	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
1473 	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
1474 	ifp->if_flags &= ~IFF_ALLMULTI;
1475 
1476 	/*
1477 	 * Always accept frames destined to our station address.
1478 	 * Always accept broadcast frames.
1479 	 */
1480 	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;
1481 
1482 	if (ifp->if_flags & IFF_PROMISC) {
1483  allmulti:
1484 		ifp->if_flags |= IFF_ALLMULTI;
1485 		rxfilt |= RGE_RXCFG_MULTI;
1486 		if (ifp->if_flags & IFF_PROMISC)
1487 			rxfilt |= RGE_RXCFG_ALLPHYS;
1488 		hashes[0] = hashes[1] = 0xffffffff;
1489 	} else {
1490 		rxfilt |= RGE_RXCFG_MULTI;
1491 		/* Program new filter. */
1492 		memset(hashes, 0, sizeof(hashes));
1493 
1494 		ETHER_LOCK(ec);
1495 		ETHER_FIRST_MULTI(step, ec, enm);
1496 		while (enm != NULL) {
1497 			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1498 			    ETHER_ADDR_LEN) != 0) {
1499 			    	ETHER_UNLOCK(ec);
1500 				goto allmulti;
1501 			}
1502 			h = ether_crc32_be(enm->enm_addrlo,
1503 			    ETHER_ADDR_LEN) >> 26;
1504 
1505 			if (h < 32)
1506 				hashes[0] |= (1U << h);
1507 			else
1508 				hashes[1] |= (1U << (h - 32));
1509 
1510 			ETHER_NEXT_MULTI(step, enm);
1511 		}
1512 		ETHER_UNLOCK(ec);
1513 	}
1514 
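	/*
	 * The 64-bit hash filter is written with its two 32-bit halves
	 * swapped and byte-reversed (hashes[1] to MAR0, hashes[0] to
	 * MAR4) to match the chip's multicast register layout.
	 */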
1515 	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
1516 	RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
1517 	RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
1518 }
1519 
1520 void
1521 rge_set_phy_power(struct rge_softc *sc, int on)
1522 {
1523 	int i;
1524 
1525 	if (on) {
1526 		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);
1527 
1528 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);
1529 
1530 		for (i = 0; i < RGE_TIMEOUT; i++) {
1531 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
1532 				break;
1533 			DELAY(1000);
1534 		}
1535 	} else {
1536 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
1537 		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
1538 		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
1539 	}
1540 }
1541 
1542 void
1543 rge_phy_config(struct rge_softc *sc)
1544 {
1545 	/* Read microcode version. */
1546 	rge_write_phy_ocp(sc, 0xa436, 0x801e);
1547 	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);
1548 
1549 	switch (sc->rge_type) {
1550 	case MAC_CFG2:
1551 		rge_phy_config_mac_cfg2(sc);
1552 		break;
1553 	case MAC_CFG3:
1554 		rge_phy_config_mac_cfg3(sc);
1555 		break;
1556 	case MAC_CFG4:
1557 		rge_phy_config_mac_cfg4(sc);
1558 		break;
1559 	case MAC_CFG5:
1560 		rge_phy_config_mac_cfg5(sc);
1561 		break;
1562 	default:
1563 		break;	/* Can't happen. */
1564 	}
1565 
1566 	rge_write_phy(sc, 0x0a5b, 0x12,
1567 	    rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);
1568 
1569 	/* Disable EEE. */
1570 	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
1571 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
1572 		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
1573 		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
1574 	}
1575 	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
1576 	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
1577 	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
1578 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
1579 	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);
1580 
1581 	rge_patch_phy_mcu(sc, 1);
1582 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
1583 	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
1584 	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
1585 	rge_patch_phy_mcu(sc, 0);
1586 }
1587 
1588 void
1589 rge_phy_config_mac_cfg2(struct rge_softc *sc)
1590 {
1591 	uint16_t val;
1592 	int i;
1593 
1594 	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
1595 		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
1596 		    rtl8125_mac_cfg2_ephy[i].val);
1597 
1598 	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);
1599 
1600 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
1601 	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
1602 	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
1603 	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
1604 	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
1605 	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
1606 	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
1607 	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
1608 	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
1609 	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
1610 	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
1611 	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
1612 	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);
1613 
1614 	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
1615 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1616 	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
1617 	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
1618 	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
1619 	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
1620 	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
1621 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1622 	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
1623 	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
1624 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1625 	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
1626 	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
1627 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1628 	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
1629 	rge_write_phy_ocp(sc, 0xa436, 0x8102);
1630 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1631 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1632 	rge_write_phy_ocp(sc, 0xa436, 0x8105);
1633 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1634 	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
1635 	rge_write_phy_ocp(sc, 0xa436, 0x8100);
1636 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1637 	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
1638 	rge_write_phy_ocp(sc, 0xa436, 0x8104);
1639 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1640 	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
1641 	rge_write_phy_ocp(sc, 0xa436, 0x8106);
1642 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1643 	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
1644 	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
1645 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1646 	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
1647 	rge_write_phy_ocp(sc, 0xa436, 0x80df);
1648 	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
1649 	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
1650 	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
1651 	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
1652 	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
1653 	rge_write_phy_ocp(sc, 0xa436, 0x819f);
1654 	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
1655 	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
1656 	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
1657 	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
1658 	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
1659 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1660 }
1661 
1662 void
1663 rge_phy_config_mac_cfg3(struct rge_softc *sc)
1664 {
1665 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1666 	uint16_t val;
1667 	int i;
1668 	static const uint16_t mac_cfg3_a438_value[] =
1669 	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
1670 	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };
1671 
1672 	static const uint16_t mac_cfg3_b88e_value[] =
1673 	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
1674 	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
1675 	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
1676 	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
1677 	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
1678 	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };
1679 
1680 	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
1681 		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
1682 		    rtl8125_mac_cfg3_ephy[i].val);
1683 
1684 	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
1685 	rge_write_ephy(sc, 0x002a, val | 0x3000);
1686 	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
1687 	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
1688 	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
1689 	rge_write_ephy(sc, 0x0002, 0x6042);
1690 	rge_write_ephy(sc, 0x0006, 0x0014);
1691 	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
1692 	rge_write_ephy(sc, 0x006a, val | 0x3000);
1693 	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
1694 	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
1695 	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
1696 	rge_write_ephy(sc, 0x0042, 0x6042);
1697 	rge_write_ephy(sc, 0x0046, 0x0014);
1698 
1699 	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);
1700 
1701 	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
1702 	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
1703 	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
1704 	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
1705 	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
1706 	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
1707 	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
1708 	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
1709 	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
1710 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
1711 	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
1712 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
1713 	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
1714 	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
1715 	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
1716 	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
1717 	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
1718 	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
1719 	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
1720 	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
1721 	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
1722 	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
1723 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1724 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
1725 	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
1726 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1727 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
1728 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
1729 	    32);
1730 	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
1731 	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1732 	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
1733 	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1734 
1735 	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
1736 	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
1737 		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
1738 	for (i = 0; i < 26; i++)
1739 		rge_write_phy_ocp(sc, 0xa438, 0);
1740 	rge_write_phy_ocp(sc, 0xa436, 0x8257);
1741 	rge_write_phy_ocp(sc, 0xa438, 0x020f);
1742 	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
1743 	rge_write_phy_ocp(sc, 0xa438, 0x7843);
1744 
1745 	rge_patch_phy_mcu(sc, 1);
1746 	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1747 	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1748 	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
1749 		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
1750 		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
1751 	}
1752 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1753 	rge_patch_phy_mcu(sc, 0);
1754 
1755 	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
1756 	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
1757 	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
1758 	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
1759 	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
1760 	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
1761 	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
1762 	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
1763 	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
1764 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1765 }
1766 
1767 void
1768 rge_phy_config_mac_cfg4(struct rge_softc *sc)
1769 {
1770 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1771 	uint16_t val;
1772 	int i;
1773 	static const uint16_t mac_cfg4_b87c_value[] =
1774 	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
1775 	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
1776 	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
1777 	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
1778 	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
1779 	      0x80b0, 0x0f31 };
1780 
1781 	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
1782 		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
1783 		    rtl8125_mac_cfg4_ephy[i].val);
1784 
1785 	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
1786 	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
1787 	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
1788 	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
1789 	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
1790 	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
1791 	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
1792 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1793 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);
1794 
1795 	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);
1796 
1797 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1798 	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
1799 	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
1800 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1801 	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
1802 	for (i = 0; i < 6; i++) {
1803 		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
1804 		if (i < 3)
1805 			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
1806 		else
1807 			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
1808 	}
1809 	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
1810 	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
1811 	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
1812 	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
1813 	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
1814 	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
1815 	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
1816 	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
1817 	rge_write_phy_ocp(sc, 0xb87c, 0x80f2);
1818 	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
1819 	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
1820 	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
1821 	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
1822 	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
1823 	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
1824 	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
1825 	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
1826 	rge_write_phy_ocp(sc, 0xac4e, 0x0db4);
1827 	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
1828 	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
1829 	rge_write_phy_ocp(sc, 0xad08, 0x0007);
1830 	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
1831 		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
1832 		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
1833 	}
1834 	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
1835 	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
1836 	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
1837 	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
1838 	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
1839 	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
1840 	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
1841 	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
1842 	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
1843 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
1844 	    32);
1845 	rge_write_phy_ocp(sc, 0xa436, 0x816c);
1846 	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
1847 	rge_write_phy_ocp(sc, 0xa436, 0x8170);
1848 	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
1849 	rge_write_phy_ocp(sc, 0xa436, 0x8174);
1850 	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
1851 	rge_write_phy_ocp(sc, 0xa436, 0x8178);
1852 	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
1853 	rge_write_phy_ocp(sc, 0xa436, 0x817c);
1854 	rge_write_phy_ocp(sc, 0xa438, 0x0719);
1855 	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
1856 	rge_write_phy_ocp(sc, 0xa438, 0x0400);
1857 	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
1858 	rge_write_phy_ocp(sc, 0xa438, 0x0404);
1859 	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
1860 	for (i = 0; i < 6; i++) {
1861 		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
1862 		if (i == 2)
1863 			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
1864 		else
1865 			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
1866 	}
1867 	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
1868 	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
1869 	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
1870 	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
1871 	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
1872 	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
1873 	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
1874 	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
1875 	rge_write_phy_ocp(sc, 0xa436, 0x8217);
1876 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1877 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1878 	rge_write_phy_ocp(sc, 0xa436, 0x821a);
1879 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1880 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1881 	rge_write_phy_ocp(sc, 0xa436, 0x80da);
1882 	rge_write_phy_ocp(sc, 0xa438, 0x0403);
1883 	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
1884 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1885 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
1886 	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
1887 	rge_write_phy_ocp(sc, 0xa438, 0x0384);
1888 	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
1889 	rge_write_phy_ocp(sc, 0xa438, 0x2007);
1890 	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
1891 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1892 	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
1893 	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
1894 	rge_write_phy_ocp(sc, 0xa438, 0xf009);
1895 	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
1896 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1897 	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
1898 	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
1899 	rge_write_phy_ocp(sc, 0xa438, 0xf083);
1900 	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
1901 	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
1902 	rge_write_phy_ocp(sc, 0xa436, 0x80df);
1903 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1904 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
1905 	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
1906 	rge_write_phy_ocp(sc, 0xa438, 0x2007);
1907 	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
1908 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1909 	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
1910 	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
1911 	rge_write_phy_ocp(sc, 0xa438, 0x8009);
1912 	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
1913 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1914 	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
1915 	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
1916 	rge_write_phy_ocp(sc, 0xa438, 0x200a);
1917 	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
1918 	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
1919 	rge_write_phy_ocp(sc, 0xa436, 0x809f);
1920 	rge_write_phy_ocp(sc, 0xa438, 0x6073);
1921 	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
1922 	rge_write_phy_ocp(sc, 0xa438, 0x000b);
1923 	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
1924 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1925 	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
1926 	rge_patch_phy_mcu(sc, 1);
1927 	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1928 	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1929 	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
1930 	rge_write_phy_ocp(sc, 0xb890, 0x0000);
1931 	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
1932 	rge_write_phy_ocp(sc, 0xb890, 0x0103);
1933 	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
1934 	rge_write_phy_ocp(sc, 0xb890, 0x0507);
1935 	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
1936 	rge_write_phy_ocp(sc, 0xb890, 0x090b);
1937 	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
1938 	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
1939 	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
1940 	rge_write_phy_ocp(sc, 0xb890, 0x1012);
1941 	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
1942 	rge_write_phy_ocp(sc, 0xb890, 0x1416);
1943 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1944 	rge_patch_phy_mcu(sc, 0);
1945 	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
1946 	RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
1947 	rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
1948 	rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
1949 	rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
1950 	rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
1951 	rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
1952 	RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
1953 	rge_write_phy_ocp(sc, 0xa436, 0x817d);
1954 	RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
1955 }
1956 
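/*
 * PHY configuration for MAC_CFG5 chips, which (judging from the shared
 * rtl8125b MAC fix-up tables used in rge_hw_init() below) are RTL8125B
 * variants.
 */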
1957 void
1958 rge_phy_config_mac_cfg5(struct rge_softc *sc)
1959 {
1960 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1961 	uint16_t val;
1962 	int i;
1963 
1964 	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
1965 		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
1966 		    rtl8125_mac_cfg5_ephy[i].val);
1967 
1968 	val = rge_read_ephy(sc, 0x0022) & ~0x0030;
1969 	rge_write_ephy(sc, 0x0022, val | 0x0020);
1970 	val = rge_read_ephy(sc, 0x0062) & ~0x0030;
1971 	rge_write_ephy(sc, 0x0062, val | 0x0020);
1972 
1973 	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);
1974 
1975 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1976 	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
1977 	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
1978 	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
1979 	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
1980 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
1981 	    32);
1982 	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
1983 	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
1984 	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
1985 	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
1986 	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
1987 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1988 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
1989 	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
1990 	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
1991 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1992 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
1993 	for (i = 0; i < 10; i++) {
1994 		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
1995 		rge_write_phy_ocp(sc, 0xa438, 0x2417);
1996 	}
1997 	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
1998 	val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
1999 	rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
2000 }
2001 
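/*
 * Load the per-chip PHY MCU patch, but only when the cached version
 * stamp differs from mcode_version.  Once the patch is in, the new
 * version is stored through the 0xa436/0xa438 pair at address 0x801e
 * so a warm re-init skips the reload.
 */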
2002 void
2003 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
2004 {
2005 	if (sc->rge_mcodever != mcode_version) {
2006 		int i;
2007 
2008 		rge_patch_phy_mcu(sc, 1);
2009 
2010 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
2011 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
2012 			if (sc->rge_type == MAC_CFG2)
2013 				rge_write_phy_ocp(sc, 0xa438, 0x8600);
2014 			else
2015 				rge_write_phy_ocp(sc, 0xa438, 0x8601);
2016 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
2017 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
2018 
2019 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
2020 		}
2021 
2022 		if (sc->rge_type == MAC_CFG2) {
2023 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
2024 				rge_write_phy_ocp(sc,
2025 				    rtl8125_mac_cfg2_mcu[i].reg,
2026 				    rtl8125_mac_cfg2_mcu[i].val);
2027 			}
2028 		} else if (sc->rge_type == MAC_CFG3) {
2029 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
2030 				rge_write_phy_ocp(sc,
2031 				    rtl8125_mac_cfg3_mcu[i].reg,
2032 				    rtl8125_mac_cfg3_mcu[i].val);
2033 			}
2034 		} else if (sc->rge_type == MAC_CFG4) {
2035 			for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
2036 				rge_write_phy_ocp(sc,
2037 				    rtl8125_mac_cfg4_mcu[i].reg,
2038 				    rtl8125_mac_cfg4_mcu[i].val);
2039 			}
2040 		} else if (sc->rge_type == MAC_CFG5) {
2041 			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
2042 				rge_write_phy_ocp(sc,
2043 				    rtl8125_mac_cfg5_mcu[i].reg,
2044 				    rtl8125_mac_cfg5_mcu[i].val);
2045 			}
2046 		}
2047 
2048 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
2049 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
2050 
2051 			rge_write_phy_ocp(sc, 0xa436, 0);
2052 			rge_write_phy_ocp(sc, 0xa438, 0);
2053 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
2054 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
2055 			rge_write_phy_ocp(sc, 0xa438, 0);
2056 		}
2057 
2058 		rge_patch_phy_mcu(sc, 0);
2059 
2060 		/* Write microcode version. */
2061 		rge_write_phy_ocp(sc, 0xa436, 0x801e);
2062 		rge_write_phy_ocp(sc, 0xa438, mcode_version);
2063 	}
2064 }
2065 
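/*
 * Program the station address.  RGE_EECMD_WRITECFG is toggled around
 * the MAC0/MAC4 writes so that the otherwise write-protected ID
 * registers accept them.
 */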
2066 void
2067 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
2068 {
2069 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2070 	RGE_WRITE_4(sc, RGE_MAC0,
2071 	    (uint32_t)addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2072 	RGE_WRITE_4(sc, RGE_MAC4,
2073 	    addr[5] <<  8 | addr[4]);
2074 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2075 }
2076 
2077 void
2078 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
2079 {
2080 	int i;
2081 
2082 	for (i = 0; i < ETHER_ADDR_LEN; i++)
2083 		addr[i] = RGE_READ_1(sc, RGE_ADDR0 + i);
2084 }
2085 
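/*
 * One-time hardware setup: quiesce the MAC MCU (the 0xfc26..0xfc38
 * writes apparently clear its breakpoint registers), load the per-chip
 * MAC fix-up tables, and disable PHY OCP power saving.
 */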
2086 void
2087 rge_hw_init(struct rge_softc *sc)
2088 {
2089 	int i;
2090 
2091 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2092 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
2093 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
2094 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2095 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
2096 
2097 	/* Disable UPS. */
2098 	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
2099 
2100 	/* Configure MAC MCU. */
2101 	rge_write_mac_ocp(sc, 0xfc38, 0);
2102 
2103 	for (i = 0xfc28; i < 0xfc38; i += 2)
2104 		rge_write_mac_ocp(sc, i, 0);
2105 
2106 	DELAY(3000);
2107 	rge_write_mac_ocp(sc, 0xfc26, 0);
2108 
2109 	if (sc->rge_type == MAC_CFG3) {
2110 		for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
2111 			rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
2112 			    rtl8125_mac_bps[i].val);
2113 		}
2114 	} else if (sc->rge_type == MAC_CFG5) {
2115 		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
2116 			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
2117 			    rtl8125b_mac_bps[i].val);
2118 		}
2119 	}
2120 
2121 	/* Disable PHY power saving. */
2122 	rge_disable_phy_ocp_pwrsave(sc);
2123 
2124 	/* Set PCIe uncorrectable error status. */
2125 	rge_write_csi(sc, 0x108,
2126 	    rge_read_csi(sc, 0x108) | 0x00100000);
2127 }
2128 
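/*
 * OCP register 0xc416 appears to hold the PHY power-save setting;
 * anything other than 0x0500 is rewritten under the MCU patch gate.
 */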
2129 void
2130 rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
2131 {
2132 	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
2133 		rge_patch_phy_mcu(sc, 1);
2134 		rge_write_phy_ocp(sc, 0xc416, 0);
2135 		rge_write_phy_ocp(sc, 0xc416, 0x0500);
2136 		rge_patch_phy_mcu(sc, 0);
2137 	}
2138 }
2139 
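/*
 * Open (set != 0) or close the PHY MCU patch gate via 0xb820 bit 4,
 * then poll 0xb800 bit 6, which seems to signal that the MCU has
 * reached its patch-ready state.  A timeout is reported but not
 * treated as fatal.
 */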
2140 void
2141 rge_patch_phy_mcu(struct rge_softc *sc, int set)
2142 {
2143 	int i;
2144 
2145 	if (set)
2146 		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
2147 	else
2148 		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
2149 
2150 	for (i = 0; i < 1000; i++) {
2151 		if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) == 0x0040)
2152 			break;
2153 		DELAY(100);
2154 	}
2155 	if (i == 1000) {
2156 		DPRINTF(("timeout waiting to patch phy mcu\n"));
2157 		return;
2158 	}
2159 }
2160 
2161 void
2162 rge_add_media_types(struct rge_softc *sc)
2163 {
2164 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
2165 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
2166 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
2167 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
2168 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
2169 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2170 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
2171 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
2172 }
2173 
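/*
 * Select an interrupt moderation scheme.  RGE_IMTYPE_NONE acks RX/TX
 * interrupts individually; RGE_IMTYPE_SIM ("simulated" moderation, as
 * in re(4)) folds them into the PCS timeout interrupt raised by the
 * chip's internal timer.
 */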
2174 void
2175 rge_config_imtype(struct rge_softc *sc, int imtype)
2176 {
2177 	switch (imtype) {
2178 	case RGE_IMTYPE_NONE:
2179 		sc->rge_intrs = RGE_INTRS;
2180 		sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
2181 		    RGE_ISR_RX_FIFO_OFLOW;
2182 		sc->rge_tx_ack = RGE_ISR_TX_OK;
2183 		break;
2184 	case RGE_IMTYPE_SIM:
2185 		sc->rge_intrs = RGE_INTRS_TIMER;
2186 		sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
2187 		sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
2188 		break;
2189 	default:
2190 		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
2191 	}
2192 }
2193 
2194 void
2195 rge_disable_hw_im(struct rge_softc *sc)
2196 {
2197 	RGE_WRITE_2(sc, RGE_IM, 0);
2198 }
2199 
2200 void
2201 rge_disable_sim_im(struct rge_softc *sc)
2202 {
2203 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
2204 	sc->rge_timerintr = 0;
2205 }
2206 
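/*
 * Arm the simulated-moderation timer: 0x2600 is the reload value in
 * undocumented chip-internal ticks, and the TIMERCNT write appears to
 * restart the countdown.
 */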
2207 void
2208 rge_setup_sim_im(struct rge_softc *sc)
2209 {
2210 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
2211 	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
2212 	sc->rge_timerintr = 1;
2213 }
2214 
2215 void
2216 rge_setup_intr(struct rge_softc *sc, int imtype)
2217 {
2218 	rge_config_imtype(sc, imtype);
2219 
2220 	/* Enable interrupts. */
2221 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
2222 
2223 	switch (imtype) {
2224 	case RGE_IMTYPE_NONE:
2225 		rge_disable_sim_im(sc);
2226 		rge_disable_hw_im(sc);
2227 		break;
2228 	case RGE_IMTYPE_SIM:
2229 		rge_disable_hw_im(sc);
2230 		rge_setup_sim_im(sc);
2231 		break;
2232 	default:
2233 		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
2234 	}
2235 }
2236 
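/*
 * Bring the chip out of its out-of-band (firmware/WoL) state: stop
 * packet filtering, disable RealWoW, reset, and hand the MAC back to
 * the driver.  The 0xd42c check handles resuming from UPS mode, where
 * the PHY must be waited on before it is reconfigured.
 */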
2237 void
2238 rge_exit_oob(struct rge_softc *sc)
2239 {
2240 	int i;
2241 
2242 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
2243 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
2244 	    RGE_RXCFG_ERRPKT);
2245 
2246 	/* Disable RealWoW. */
2247 	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
2248 
2249 	rge_reset(sc);
2250 
2251 	/* Disable OOB. */
2252 	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
2253 
2254 	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
2255 
2256 	for (i = 0; i < 10; i++) {
2257 		DELAY(100);
2258 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2259 			break;
2260 	}
2261 
2262 	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
2263 	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
2264 	rge_write_mac_ocp(sc, 0xc01e, 0x5555);
2265 
2266 	for (i = 0; i < 10; i++) {
2267 		DELAY(100);
2268 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2269 			break;
2270 	}
2271 
2272 	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
2273 		printf("%s: rge_exit_oob(): UPS resume detected\n",
2274 		    device_xname(sc->sc_dev));
2275 		for (i = 0; i < RGE_TIMEOUT; i++) {
2276 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
2277 				break;
2278 			DELAY(1000);
2279 		}
2280 		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
2281 		if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
2282 			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
2283 		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
2284 	}
2285 }
2286 
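/*
 * CSI (indirect PCIe configuration space) accessors.  Note the busy
 * flag polarity: a write has completed when RGE_CSIAR_BUSY clears,
 * while a read has completed when the chip sets it to flag valid data
 * in RGE_CSIDR.
 */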
2287 void
2288 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
2289 {
2290 	int i;
2291 
2292 	RGE_WRITE_4(sc, RGE_CSIDR, val);
2293 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2294 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
2295 
2296 	for (i = 0; i < 10; i++) {
2297 		DELAY(100);
2298 		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
2299 			break;
2300 	}
2301 
2302 	DELAY(20);
2303 }
2304 
2305 uint32_t
2306 rge_read_csi(struct rge_softc *sc, uint32_t reg)
2307 {
2308 	int i;
2309 
2310 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2311 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
2312 
2313 	for (i = 0; i < 10; i++) {
2314 		DELAY(100);
2315 		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
2316 			break;
2317 	}
2318 
2319 	DELAY(20);
2320 
2321 	return (RGE_READ_4(sc, RGE_CSIDR));
2322 }
2323 
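/*
 * MAC OCP accessors.  The registers are 16 bits wide and word aligned,
 * so the byte address is halved before being placed in the address
 * field; the data shares the low 16 bits of the same 32-bit register
 * write.
 */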
2324 void
2325 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2326 {
2327 	uint32_t tmp;
2328 
2329 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2330 	tmp += val;
2331 	tmp |= RGE_MACOCP_BUSY;
2332 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
2333 }
2334 
2335 uint16_t
2336 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
2337 {
2338 	uint32_t val;
2339 
2340 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2341 	RGE_WRITE_4(sc, RGE_MACOCP, val);
2342 
2343 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
2344 }
2345 
2346 void
2347 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
2348 {
2349 	uint32_t tmp;
2350 	int i;
2351 
2352 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2353 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
2354 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
2355 
2356 	for (i = 0; i < 10; i++) {
2357 		DELAY(100);
2358 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
2359 			break;
2360 	}
2361 
2362 	DELAY(20);
2363 }
2364 
2365 uint16_t
2366 rge_read_ephy(struct rge_softc *sc, uint16_t reg)
2367 {
2368 	uint32_t val;
2369 	int i;
2370 
2371 	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2372 	RGE_WRITE_4(sc, RGE_EPHYAR, val);
2373 
2374 	for (i = 0; i < 10; i++) {
2375 		DELAY(100);
2376 		val = RGE_READ_4(sc, RGE_EPHYAR);
2377 		if (val & RGE_EPHYAR_BUSY)
2378 			break;
2379 	}
2380 
2381 	DELAY(20);
2382 
2383 	return (val & RGE_EPHYAR_DATA_MASK);
2384 }
2385 
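/*
 * MII-style PHY accessors layered over the OCP interface.  With
 * addr == 0 the standard MII registers are folded into OCP pages of
 * eight; assuming RGE_PHYBASE is 0xa40, MII register r lands at OCP
 * address 0xa400 + 2 * r, e.g. BMCR (r == 0) maps to 0xa400.
 */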
2386 void
2387 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
2388 {
2389 	uint16_t off, phyaddr;
2390 
2391 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2392 	phyaddr <<= 4;
2393 
2394 	off = addr ? reg : 0x10 + (reg % 8);
2395 
2396 	phyaddr += (off - 16) << 1;
2397 
2398 	rge_write_phy_ocp(sc, phyaddr, val);
2399 }
2400 
2401 uint16_t
2402 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
2403 {
2404 	uint16_t off, phyaddr;
2405 
2406 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2407 	phyaddr <<= 4;
2408 
2409 	off = addr ? reg : 0x10 + (reg % 8);
2410 
2411 	phyaddr += (off - 16) << 1;
2412 
2413 	return (rge_read_phy_ocp(sc, phyaddr));
2414 }
2415 
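/*
 * Raw PHY OCP accessors, with the same busy-flag convention as the
 * CSI helpers above: writes poll for RGE_PHYOCP_BUSY to clear, reads
 * poll for it to be set before sampling the data field.
 */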
2416 void
2417 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2418 {
2419 	uint32_t tmp;
2420 	int i;
2421 
2422 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2423 	tmp |= RGE_PHYOCP_BUSY | val;
2424 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
2425 
2426 	for (i = 0; i < RGE_TIMEOUT; i++) {
2427 		DELAY(1);
2428 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
2429 			break;
2430 	}
2431 }
2432 
2433 uint16_t
2434 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
2435 {
2436 	uint32_t val;
2437 	int i;
2438 
2439 	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2440 	RGE_WRITE_4(sc, RGE_PHYOCP, val);
2441 
2442 	for (i = 0; i < RGE_TIMEOUT; i++) {
2443 		DELAY(1);
2444 		val = RGE_READ_4(sc, RGE_PHYOCP);
2445 		if (val & RGE_PHYOCP_BUSY)
2446 			break;
2447 	}
2448 
2449 	return (val & RGE_PHYOCP_DATA_MASK);
2450 }
2451 
2452 int
2453 rge_get_link_status(struct rge_softc *sc)
2454 {
2455 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
2456 }
2457 
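/*
 * Workqueue callback that fires the TX start doorbell; transmit kicks
 * are deferred to this task rather than written inline.
 */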
2458 void
2459 rge_txstart(struct work *wk, void *arg)
2460 {
2461 	struct rge_softc *sc = arg;
2462 
2463 	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
2464 }
2465 
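/*
 * One-second callout: poll the PHY link status and report transitions
 * to the network stack.
 */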
2466 void
2467 rge_tick(void *arg)
2468 {
2469 	struct rge_softc *sc = arg;
2470 	int s;
2471 
2472 	s = splnet();
2473 	rge_link_state(sc);
2474 	splx(s);
2475 
2476 	callout_schedule(&sc->sc_timeout, hz);
2477 }
2478 
2479 void
2480 rge_link_state(struct rge_softc *sc)
2481 {
2482 	struct ifnet *ifp = &sc->sc_ec.ec_if;
2483 	int link = LINK_STATE_DOWN;
2484 
2485 	if (rge_get_link_status(sc))
2486 		link = LINK_STATE_UP;
2487 
2488 	if (ifp->if_link_state != link) { /* XXX not safe to access */
2489 		if_link_state_change(ifp, link);
2490 	}
2491 }
2492