/*	$NetBSD: if_rge.c,v 1.33 2024/11/10 11:45:48 mlelstv Exp $	*/
/*	$OpenBSD: if_rge.c,v 1.9 2020/12/12 11:48:53 jan Exp $	*/

/*
 * Copyright (c) 2019, 2020 Kevin Lo <kevlo@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.33 2024/11/10 11:45:48 mlelstv Exp $");

#if defined(_KERNEL_OPT)
#include "opt_net_mpsafe.h"
#endif

#include <sys/types.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/callout.h>
#include <sys/workqueue.h>

#include <net/if.h>

#include <net/if_dl.h>
#include <net/if_ether.h>

#include <net/if_media.h>

#include <netinet/in.h>

#include <net/bpf.h>

#include <sys/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_rgereg.h>

#ifdef __NetBSD__
#define letoh32	htole32
#define nitems(x)	__arraycount(x)

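/*
 * Compatibility shim for OpenBSD's MCLGETL(): allocate a packet header
 * mbuf with an external buffer of the given size.  The softc argument is
 * unused here; it exists only to mirror the OpenBSD interface.
 */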
static struct mbuf *
MCLGETL(struct rge_softc *sc __unused, int how,
    u_int size)
{
	struct mbuf *m;

	MGETHDR(m, how, MT_DATA);
	if (m == NULL)
		return NULL;

	MEXTMALLOC(m, size, how);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return NULL;
	}
	return m;
}

#ifdef NET_MPSAFE
#define	RGE_MPSAFE	1
#define	CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define	CALLOUT_FLAGS	0
#endif
#endif

#ifdef RGE_DEBUG
#define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
int rge_debug = 0;
#else
#define DPRINTF(x)
#endif

static int		rge_match(device_t, cfdata_t, void *);
static void		rge_attach(device_t, device_t, void *);
int		rge_intr(void *);
int		rge_encap(struct rge_softc *, struct mbuf *, int);
int		rge_ioctl(struct ifnet *, u_long, void *);
void		rge_start(struct ifnet *);
void		rge_watchdog(struct ifnet *);
int		rge_init(struct ifnet *);
void		rge_stop(struct ifnet *, int);
int		rge_ifmedia_upd(struct ifnet *);
void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int		rge_allocmem(struct rge_softc *);
int		rge_newbuf(struct rge_softc *, int);
static int	rge_rx_list_init(struct rge_softc *);
static void	rge_rx_list_fini(struct rge_softc *);
static void	rge_tx_list_init(struct rge_softc *);
static void	rge_tx_list_fini(struct rge_softc *);
int		rge_rxeof(struct rge_softc *);
int		rge_txeof(struct rge_softc *);
void		rge_reset(struct rge_softc *);
void		rge_iff(struct rge_softc *);
void		rge_set_phy_power(struct rge_softc *, int);
void		rge_phy_config(struct rge_softc *);
void		rge_phy_config_mac_cfg2(struct rge_softc *);
void		rge_phy_config_mac_cfg3(struct rge_softc *);
void		rge_phy_config_mac_cfg4(struct rge_softc *);
void		rge_phy_config_mac_cfg5(struct rge_softc *);
void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
void		rge_get_macaddr(struct rge_softc *, uint8_t *);
void		rge_hw_init(struct rge_softc *);
void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
void		rge_patch_phy_mcu(struct rge_softc *, int);
void		rge_add_media_types(struct rge_softc *);
void		rge_config_imtype(struct rge_softc *, int);
void		rge_disable_hw_im(struct rge_softc *);
void		rge_disable_sim_im(struct rge_softc *);
void		rge_setup_sim_im(struct rge_softc *);
void		rge_setup_intr(struct rge_softc *, int);
void		rge_exit_oob(struct rge_softc *);
void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
int		rge_get_link_status(struct rge_softc *);
void		rge_txstart(void *);
void		rge_tick(void *);
void		rge_link_state(struct rge_softc *);

static const struct {
	uint16_t reg;
	uint16_t val;
} rtl8125_mac_cfg2_mcu[] = {
	RTL8125_MAC_CFG2_MCU
}, rtl8125_mac_cfg3_mcu[] = {
	RTL8125_MAC_CFG3_MCU
}, rtl8125_mac_cfg4_mcu[] = {
	RTL8125_MAC_CFG4_MCU
}, rtl8125_mac_cfg5_mcu[] = {
	RTL8125_MAC_CFG5_MCU
};

CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
    NULL, NULL); /* Sevan - detach function? */

static const struct device_compatible_entry compat_data[] = {
	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000) },
	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125) },

	PCI_COMPAT_EOL
};

static int
rge_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	return pci_compatible_match(pa, compat_data);
}

static void
rge_attach(device_t parent, device_t self, void *aux)
{
	struct rge_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t *ihp;
	char intrbuf[PCI_INTRSTR_LEN];
	const char *intrstr = NULL;
	struct ifnet *ifp;
	pcireg_t reg;
	uint32_t hwrev;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int offset;
	pcireg_t command;
	const char *revstr;

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	sc->sc_dev = self;

	pci_aprint_devinfo(pa, "Ethernet controller");

	/*
	 * Map control/status registers.
	 */
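	/* Prefer the 64-bit memory BAR, then the 32-bit one, then I/O space. */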
	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
	    NULL, &sc->rge_bsize)) {
		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
		    &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
			    &sc->rge_bsize)) {
				aprint_error(": can't map mem or i/o space\n");
				return;
			}
		}
	}

	int counts[PCI_INTR_TYPE_SIZE] = {
		[PCI_INTR_TYPE_INTX] = 1,
		[PCI_INTR_TYPE_MSI] = 1,
		[PCI_INTR_TYPE_MSIX] = 1,
	};
	int max_type = PCI_INTR_TYPE_MSIX;
	/*
	 * Allocate interrupt.
	 */
	if (pci_intr_alloc(pa, &ihp, counts, max_type) != 0) {
		aprint_error(": couldn't map interrupt\n");
		return;
	}
	switch (pci_intr_type(pc, ihp[0])) {
	case PCI_INTR_TYPE_MSIX:
	case PCI_INTR_TYPE_MSI:
		sc->rge_flags |= RGE_FLAG_MSI;
		break;
	default:
		break;
	}
	intrstr = pci_intr_string(pc, ihp[0], intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(pc, ihp[0], IPL_NET, rge_intr,
	    sc, device_xname(sc->sc_dev));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	/* Determine hardware revision */
	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
	switch (hwrev) {
	case 0x60800000:
		sc->rge_type = MAC_CFG2;
		revstr = "Z1";
		break;
	case 0x60900000:
		sc->rge_type = MAC_CFG3;
		revstr = "Z2";
		break;
	case 0x64000000:
		sc->rge_type = MAC_CFG4;
		revstr = "A";
		break;
	case 0x64100000:
		sc->rge_type = MAC_CFG5;
		revstr = "B";
		break;
	default:
		aprint_error(": unknown version 0x%08x\n", hwrev);
		return;
	}

	aprint_normal_dev(sc->sc_dev, "HW rev. %s\n", revstr);
	rge_config_imtype(sc, RGE_IMTYPE_SIM);

	/*
	 * PCI Express check.
	 */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		/* Disable PCIe ASPM and ECPM. */
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    offset + PCIE_LCSR);
		reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1 |
		    PCIE_LCSR_ENCLKPM);
		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
		    reg);
	}

	rge_exit_oob(sc);
	rge_hw_init(sc);

	rge_get_macaddr(sc, eaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(eaddr));

	memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);

	rge_set_phy_power(sc, 1);
	rge_phy_config(sc);

	if (rge_allocmem(sc))
		return;

	ifp = &sc->sc_ec.ec_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef RGE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = rge_ioctl;
	ifp->if_stop = rge_stop;
	ifp->if_start = rge_start;
	ifp->if_init = rge_init;
	ifp->if_watchdog = rge_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT - 1);

#if notyet
	ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;
#endif

	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;

	callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
	callout_setfunc(&sc->sc_timeout, rge_tick, sc);

	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);

	/* Initialize ifmedia structures. */
	sc->sc_ec.ec_ifmedia = &sc->sc_media;
	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
	    rge_ifmedia_sts);
	rge_add_media_types(sc);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, eaddr);

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");
}

int
rge_intr(void *arg)
{
	struct rge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t status;
	int claimed = 0, rx, tx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

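	/*
	 * With INTx the line may be shared, so make sure the interrupt
	 * really came from this chip before claiming it.  MSI/MSI-X
	 * vectors are exclusive, so the check can be skipped there.
	 */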
	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
			return (0);
	}

	status = RGE_READ_4(sc, RGE_ISR);
	if (status)
		RGE_WRITE_4(sc, RGE_ISR, status);

	if (status & RGE_ISR_PCS_TIMEOUT)
		claimed = 1;

	rx = tx = 0;
	if (status & sc->rge_intrs) {
		if (status &
		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
			rx |= rge_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
			tx |= rge_txeof(sc);
			claimed = 1;
		}

		if (status & RGE_ISR_SYSTEM_ERR) {
			KERNEL_LOCK(1, NULL);
			rge_init(ifp);
			KERNEL_UNLOCK_ONE(NULL);
			claimed = 1;
		}
	}

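	/*
	 * Simulated interrupt moderation: while the hardware timer is
	 * driving interrupts, re-arm it as long as there is work; once a
	 * timer tick finds nothing to do, drop back to per-packet TX/RX
	 * interrupts.
	 */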
	if (sc->rge_timerintr) {
		if ((tx | rx) == 0) {
			/*
			 * Nothing needs to be processed; fall back
			 * to using TX/RX interrupts.
			 */
			rge_setup_intr(sc, RGE_IMTYPE_NONE);

			/*
			 * Recollect, mainly to avoid the possible
			 * race introduced by changing interrupt
			 * masks.
			 */
			rge_rxeof(sc);
			rge_txeof(sc);
		} else
			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	} else if (tx | rx) {
		/*
		 * Assume that using simulated interrupt moderation
		 * (hardware timer based) could reduce the interrupt
		 * rate.
		 */
		rge_setup_intr(sc, RGE_IMTYPE_SIM);
	}

	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	return (claimed);
}

int
rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
{
	struct rge_tx_desc *d = NULL;
	struct rge_txq *txq;
	bus_dmamap_t txmap;
	uint32_t cmdsts, cflags = 0;
	int cur, error, i, last, nsegs;

#if notyet
	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
	 * take effect.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
	}
#endif

	txq = &sc->rge_ldata.rge_txq[idx];
	txmap = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = txmap->dm_nsegs;

	/* Set up hardware VLAN tagging. */
	if (vlan_has_tag(m))
		cflags |= bswap16(vlan_get_tag(m)) | RGE_TDEXTSTS_VTAG;

	last = cur = idx;
	cmdsts = RGE_TDCMDSTS_SOF;

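	/*
	 * Build one descriptor per DMA segment.  OWN is set on every
	 * descriptor except the first; the first descriptor's OWN bit is
	 * flipped last, below, so the chip never sees a partially built
	 * chain.
	 */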
	for (i = 0; i < txmap->dm_nsegs; i++) {
		d = &sc->rge_ldata.rge_tx_list[cur];

		d->rge_extsts = htole32(cflags);
		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));

		cmdsts |= txmap->dm_segs[i].ds_len;

		if (cur == RGE_TX_LIST_CNT - 1)
			cmdsts |= RGE_TDCMDSTS_EOR;

		d->rge_cmdsts = htole32(cmdsts);

		last = cur;
		cmdsts = RGE_TDCMDSTS_OWN;
		cur = RGE_NEXT_TX_DESC(cur);
	}

	/* Set EOF on the last descriptor. */
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);

	/* Transfer ownership of packet to the chip. */
	d = &sc->rge_ldata.rge_tx_list[idx];

	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update info of TX queue and descriptors. */
	txq->txq_mbuf = m;
	txq->txq_descidx = last;

	return (nsegs);
}

int
rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct rge_softc *sc = ifp->if_softc;
	//struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/* XXX set an ifflags callback and let ether_ioctl
		 * handle all of this.
		 */
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				rge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				rge_stop(ifp, 1);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			rge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
rge_start(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int free, idx, used;
	int queued = 0;

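	/*
	 * Treat LINK_STATE_UNKNOWN as up so packets are not purged
	 * before the first link-state change has been reported.
	 */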
#define LINK_STATE_IS_UP(_s)    \
	((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)

	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		IFQ_PURGE(&ifp->if_snd);
		return;
	}

	/* Calculate free space. */
	idx = sc->rge_ldata.rge_txq_prodidx;
	free = sc->rge_ldata.rge_txq_considx;
	if (free <= idx)
		free += RGE_TX_LIST_CNT;
	free -= idx;

	for (;;) {
		if (RGE_TX_NSEGS >= free + 2) {
			SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		used = rge_encap(sc, m, idx);
		if (used == 0) {
			m_freem(m);
			continue;
		}

		KASSERT(used <= free);
		free -= used;

		bpf_mtap(ifp, m, BPF_D_OUT);

		idx += used;
		if (idx >= RGE_TX_LIST_CNT)
			idx -= RGE_TX_LIST_CNT;

		queued++;
	}

	if (queued == 0)
		return;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	sc->rge_ldata.rge_txq_prodidx = idx;
	rge_txstart(sc);
}

void
rge_watchdog(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;

	device_printf(sc->sc_dev, "watchdog timeout\n");
	if_statinc(ifp, if_oerrors);

	rge_init(ifp);
}

int
rge_init(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	uint32_t val;
	unsigned i;

	rge_stop(ifp, 0);

	/* Set MAC address. */
	rge_set_macaddr(sc, CLLADDR(ifp->if_sadl));

	/* Set Maximum frame size. */
	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);

	/* Initialize RX descriptors list. */
	int error = rge_rx_list_init(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "init failed: no memory for RX buffers\n");
		rge_stop(ifp, 1);
		return error;
	}

	/* Initialize TX descriptors. */
	rge_tx_list_init(sc);

	/* Load the addresses of the RX and TX lists into the chip. */
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

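	/*
	 * The raw register offsets and MAC OCP values below are not
	 * publicly documented; they appear to follow Realtek's reference
	 * initialization sequence for the RTL8125 family.
	 */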
	RGE_CLRBIT_1(sc, 0xf1, 0x80);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);

	/* Clear interrupt moderation timer. */
	for (i = 0; i < 64; i++)
		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);

	/* Set the initial RX and TX configurations. */
	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);

	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
	rge_write_csi(sc, 0x70c, val | 0x27000000);

	/* Enable hardware optimization function. */
	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);

	RGE_WRITE_2(sc, 0x0382, 0x221b);
	RGE_WRITE_1(sc, 0x4500, 0);
	RGE_WRITE_2(sc, 0x4800, 0);
	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);

	rge_write_mac_ocp(sc, 0xc140, 0xffff);
	rge_write_mac_ocp(sc, 0xc142, 0xffff);

	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);

	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);

	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
	else
		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);

	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
		rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
	} else
		RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);

	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);

	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);

	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);

	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);

	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);

	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);

	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);

	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);

	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);

	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);

	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);

	val = rge_read_mac_ocp(sc, 0xe84c) & ~0x0040;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe84c, 0x00c0);
	else
		rge_write_mac_ocp(sc, 0xe84c, 0x0080);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);

	/* Disable EEE plus. */
	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);

	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);

	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
	DELAY(1);
	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);

	RGE_CLRBIT_4(sc, 0x1880, 0x0030);

	rge_write_mac_ocp(sc, 0xe098, 0xc302);

	if ((sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0)
		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
	else
		RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);

	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);

	for (i = 0; i < 10; i++) {
		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
			break;
		DELAY(1000);
	}

	/* Disable RXDV gate. */
	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	rge_ifmedia_upd(ifp);

	/* Enable transmit and receive. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);

	/* Program promiscuous mode and multicast filters. */
	rge_iff(sc);

	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);

	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	/* Enable interrupts. */
	rge_setup_intr(sc, RGE_IMTYPE_SIM);

	ifp->if_flags |= IFF_RUNNING;
	CLR(ifp->if_flags, IFF_OACTIVE);

	callout_schedule(&sc->sc_timeout, 1);

	return (0);
}

/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */
void
rge_stop(struct ifnet *ifp, int disable)
{
	struct rge_softc *sc = ifp->if_softc;

	callout_halt(&sc->sc_timeout, NULL);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	sc->rge_timerintr = 0;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	RGE_WRITE_4(sc, RGE_IMR, 0);

	/* Clear timer interrupts. */
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);

	rge_reset(sc);

//	intr_barrier(sc->sc_ih);
//	ifq_barrier(&ifp->if_snd);
/*	ifq_clr_oactive(&ifp->if_snd); Sevan - OpenBSD queue API */

	if (sc->rge_head != NULL) {
		m_freem(sc->rge_head);
		sc->rge_head = sc->rge_tail = NULL;
	}

	rge_tx_list_fini(sc);
	rge_rx_list_fini(sc);
}

/*
 * Set media options.
 */
int
rge_ifmedia_upd(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	int anar, gig, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Disable Gigabit Lite. */
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);

	val = rge_read_phy_ocp(sc, 0xa5d4);
	val &= ~RGE_ADV_2500TFDX;

	anar = gig = 0;
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		break;
	case IFM_2500_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		ifp->if_baudrate = IF_Mbps(2500);
		break;
	case IFM_1000_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		ifp->if_baudrate = IF_Gbps(1);
		break;
	case IFM_100_TX:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
		    ANAR_TX | ANAR_10_FD | ANAR_10;
		ifp->if_baudrate = IF_Mbps(100);
		break;
	case IFM_10_T:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_10_FD | ANAR_10 : ANAR_10;
		ifp->if_baudrate = IF_Mbps(10);
		break;
	default:
		device_printf(sc->sc_dev,
		    "unsupported media type\n");
		return (EINVAL);
	}

	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
	rge_write_phy(sc, 0, MII_100T2CR, gig);
	rge_write_phy_ocp(sc, 0xa5d4, val);
	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
	    BMCR_STARTNEG);

	return (0);
}

/*
 * Report current media status.
 */
void
rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rge_softc *sc = ifp->if_softc;
	uint16_t status = 0;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (rge_get_link_status(sc)) {
		ifmr->ifm_status |= IFM_ACTIVE;

		status = RGE_READ_2(sc, RGE_PHYSTAT);
		if ((status & RGE_PHYSTAT_FDX) ||
		    (status & RGE_PHYSTAT_2500MBPS))
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		if (status & RGE_PHYSTAT_10MBPS)
			ifmr->ifm_active |= IFM_10_T;
		else if (status & RGE_PHYSTAT_100MBPS)
			ifmr->ifm_active |= IFM_100_TX;
		else if (status & RGE_PHYSTAT_1000MBPS)
			ifmr->ifm_active |= IFM_1000_T;
		else if (status & RGE_PHYSTAT_2500MBPS)
			ifmr->ifm_active |= IFM_2500_T;
	}
}

/*
 * Allocate memory for RX/TX rings.
 *
 * XXX There is no tear-down for this if any part fails, so everything
 * remains allocated.
 */
int
rge_allocmem(struct rge_softc *sc)
{
	int error, i;

	/* Allocate DMA'able memory for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create TX list map\n");
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
	    BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't alloc TX list\n");
		return (error);
	}

	/* Load the map for the TX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
	    (void **) &sc->rge_ldata.rge_tx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't map TX dma buffers\n");
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}
	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't load TX dma map\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for TX\n");
			return (error);
		}
	}

	/* Allocate DMA'able memory for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create RX list map\n");
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
	    BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't alloc RX list\n");
		return (error);
	}

	/* Load the map for the RX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
	    (void **) &sc->rge_ldata.rge_rx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't map RX dma buffers\n");
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}
	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't load RX dma map\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}

	/*
	 * Create DMA maps for RX buffers.  Use BUS_DMA_ALLOCNOW to avoid any
	 * potential failure in bus_dmamap_load_mbuf() in the RX path.
	 */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
		    RGE_JUMBO_FRAMELEN, 0, BUS_DMA_ALLOCNOW,
		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for RX\n");
			return (error);
		}
	}

	return (error);
}

/*
 * Set an RX descriptor and sync it.
 */
static void
rge_load_rxbuf(struct rge_softc *sc, int idx)
{
	struct rge_rx_desc *r = &sc->rge_ldata.rge_rx_list[idx];
	struct rge_rxq *rxq = &sc->rge_ldata.rge_rxq[idx];
	bus_dmamap_t rxmap = rxq->rxq_dmamap;
	uint32_t cmdsts;

	cmdsts = rxmap->dm_segs[0].ds_len | RGE_RDCMDSTS_OWN;
	if (idx == RGE_RX_LIST_CNT - 1)
		cmdsts |= RGE_RDCMDSTS_EOR;

	r->hi_qword0.rge_addr = htole64(rxmap->dm_segs[0].ds_addr);
	r->hi_qword1.rx_qword4.rge_extsts = 0;
	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(cmdsts);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * Initialize the RX descriptor and attach an mbuf cluster.
 */
int
rge_newbuf(struct rge_softc *sc, int idx)
{
	struct mbuf *m;
	struct rge_rxq *rxq;
	bus_dmamap_t rxmap;
	int error __diagused;

	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
	if (m == NULL)
		return (ENOBUFS);
	MCLAIM(m, &sc->sc_ec.ec_rx_mowner);

	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;

	rxq = &sc->rge_ldata.rge_rxq[idx];
	rxmap = rxq->rxq_dmamap;

	if (rxq->rxq_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);

	/* This map was created with BUS_DMA_ALLOCNOW so should never fail. */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT);
	KASSERTMSG(error == 0, "error=%d", error);

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Map the segments into RX descriptors. */

	rxq->rxq_mbuf = m;
	rge_load_rxbuf(sc, idx);

	return 0;
}

static int
rge_rx_list_init(struct rge_softc *sc)
{
	unsigned i;

	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);

	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
		if (rge_newbuf(sc, i) != 0) {
			rge_rx_list_fini(sc);
			return (ENOBUFS);
		}
	}

	sc->rge_ldata.rge_rxq_prodidx = sc->rge_ldata.rge_rxq_considx = 0;
	sc->rge_head = sc->rge_tail = NULL;

	return (0);
}

static void
rge_rx_list_fini(struct rge_softc *sc)
{
	unsigned i;

	/* Free the RX list buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
		}
	}
}

static void
rge_tx_list_init(struct rge_softc *sc)
{
	unsigned i;

	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);

	for (i = 0; i < RGE_TX_LIST_CNT; i++)
		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
}

static void
rge_tx_list_fini(struct rge_softc *sc)
{
	unsigned i;

	/* Free the TX list buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_txq[i].txq_dmamap);
			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
		}
	}
}

int
rge_rxeof(struct rge_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rge_rx_desc *cur_rx;
	struct rge_rxq *rxq;
	uint32_t rxstat, extsts;
	int i, total_len, rx = 0;

	for (i = sc->rge_ldata.rge_rxq_considx; ; i = RGE_NEXT_RX_DESC(i)) {
		/* Invalidate the descriptor memory. */
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->rge_ldata.rge_rx_list[i];

		if (RGE_OWN(cur_rx))
			break;

		rxstat = letoh32(cur_rx->hi_qword1.rx_qword4.rge_cmdsts);
		extsts = letoh32(cur_rx->hi_qword1.rx_qword4.rge_extsts);

		total_len = RGE_RXBYTES(cur_rx);
		rxq = &sc->rge_ldata.rge_rxq[i];
		m = rxq->rxq_mbuf;
		rx = 1;

		/* Invalidate the RX mbuf. */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
			if_statinc(ifp, if_ierrors);
			rge_load_rxbuf(sc, i);
			continue;
		}

		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
			if_statinc(ifp, if_ierrors);
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_load_rxbuf(sc, i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */
		if (rge_newbuf(sc, i) != 0) {
			if_statinc(ifp, if_iqdrops);
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_load_rxbuf(sc, i);
			continue;
		}

		m_set_rcvif(m, ifp);
		if (sc->rge_head != NULL) {
			m->m_len = total_len;
			/*
			 * Special case: if there are 4 bytes or fewer
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rge_tail->m_next = m;
			}
			m = sc->rge_head;
			sc->rge_head = sc->rge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else {
#if 0
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);
#else
			m->m_pkthdr.len = m->m_len = total_len;
			m->m_flags |= M_HASFCS;
#endif
		}

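		/*
		 * XXX The block below still uses OpenBSD's M_*_CSUM_IN_OK
		 * mbuf flags; it needs to be converted to NetBSD's
		 * M_CSUM_* semantics before it can be enabled.
		 */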
#if notyet
		/* Check IP header checksum. */
		if (!(extsts & RGE_RDEXTSTS_IPCSUMERR) &&
		    (extsts & RGE_RDEXTSTS_IPV4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum. */
		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
		    (((extsts & RGE_RDEXTSTS_TCPPKT) &&
		    !(extsts & RGE_RDEXTSTS_TCPCSUMERR)) ||
		    ((extsts & RGE_RDEXTSTS_UDPPKT) &&
		    !(extsts & RGE_RDEXTSTS_UDPCSUMERR))))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;
#endif

		if (extsts & RGE_RDEXTSTS_VTAG) {
			vlan_set_tag(m,
			    bswap16(extsts & RGE_RDEXTSTS_VLAN_MASK));
		}

		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	sc->rge_ldata.rge_rxq_considx = i;

	return (rx);
}

int
rge_txeof(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rge_txq *txq;
	uint32_t txstat;
	int cons, idx, prod;
	int free = 0;

	prod = sc->rge_ldata.rge_txq_prodidx;
	cons = sc->rge_ldata.rge_txq_considx;

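	/*
	 * Reclaim completed descriptors.  'free' encodes the result:
	 * 0 = nothing reclaimed, 1 = some descriptors reclaimed,
	 * 2 = stopped at a descriptor the chip still owns, in which
	 * case TX is kicked again below.
	 */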
	while (prod != cons) {
		txq = &sc->rge_ldata.rge_txq[cons];
		idx = txq->txq_descidx;

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);

		if (txstat & RGE_TDCMDSTS_OWN) {
			free = 2;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
			if_statinc_ref(ifp, nsr, if_collisions);
		if (txstat & RGE_TDCMDSTS_TXERR)
			if_statinc_ref(ifp, nsr, if_oerrors);
		else
			if_statinc_ref(ifp, nsr, if_opackets);
		IF_STAT_PUTREF(ifp);

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		cons = RGE_NEXT_TX_DESC(idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	sc->rge_ldata.rge_txq_considx = cons;

	if (free == 2)
		rge_txstart(sc);

	CLR(ifp->if_flags, IFF_OACTIVE);
	ifp->if_timer = 0;
	if_schedule_deferred_start(ifp);

	return (1);
}

void
rge_reset(struct rge_softc *sc)
{
	int i;

	/* Enable RXDV gate. */
	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	for (i = 0; i < 3000; i++) {
		DELAY(50);
		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY))
			break;
	}
	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
		for (i = 0; i < 3000; i++) {
			DELAY(50);
			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
				break;
		}
	}

	DELAY(2000);

	/* Soft reset. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(100);
		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
			break;
	}
	if (i == RGE_TIMEOUT)
		device_printf(sc->sc_dev, "reset never completed!\n");
}

void
rge_iff(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2];
	uint32_t rxfilt;
	int h = 0;

	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;

	if (ifp->if_flags & IFF_PROMISC) {
 allmulti:
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RGE_RXCFG_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RGE_RXCFG_ALLPHYS;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= RGE_RXCFG_MULTI;
		/* Program new filter. */
		memset(hashes, 0, sizeof(hashes));

		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0) {
				ETHER_UNLOCK(ec);
				goto allmulti;
			}
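			/*
			 * The hash is the top 6 bits of the big-endian
			 * CRC32 of the address; it selects one of the
			 * 64 filter bits in the two MAR registers.
			 */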
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1U << h);
			else
				hashes[1] |= (1U << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}

	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
	RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
	RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
}

void
rge_set_phy_power(struct rge_softc *sc, int on)
{
	int i;

	if (on) {
		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);

		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);

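		/*
		 * Wait for the PHY power state to settle; the low bits
		 * of OCP register 0xa420 reportedly read 3 once the PHY
		 * is ready.
		 */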
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
				break;
			DELAY(1000);
		}
	} else {
		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
	}
}

void
rge_phy_config(struct rge_softc *sc)
{
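	/*
	 * OCP registers 0xa436/0xa438 act as an indirect address/data
	 * pair into the PHY's extended register space; the same
	 * convention is used throughout the rge_phy_config_* routines.
	 */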
	/* Read microcode version. */
	rge_write_phy_ocp(sc, 0xa436, 0x801e);
	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);

	switch (sc->rge_type) {
	case MAC_CFG2:
		rge_phy_config_mac_cfg2(sc);
		break;
	case MAC_CFG3:
		rge_phy_config_mac_cfg3(sc);
		break;
	case MAC_CFG4:
		rge_phy_config_mac_cfg4(sc);
		break;
	case MAC_CFG5:
		rge_phy_config_mac_cfg5(sc);
		break;
	default:
		break;	/* Can't happen. */
	}

	rge_write_phy(sc, 0x0a5b, 0x12,
	    rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);

	/* Disable EEE. */
	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
	}
	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);

	rge_patch_phy_mcu(sc, 1);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
	rge_patch_phy_mcu(sc, 0);
}

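/*
 * The rge_phy_config_mac_cfg* routines below program undocumented PHY
 * and MCU registers; the write sequences appear to follow Realtek's
 * reference driver for the RTL8125 family and are not otherwise
 * documented.
 */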
1594 void
1595 rge_phy_config_mac_cfg2(struct rge_softc *sc)
1596 {
1597 	uint16_t val;
1598 	int i;
1599 
1600 	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
1601 		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
1602 		    rtl8125_mac_cfg2_ephy[i].val);
1603 
1604 	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);
1605 
1606 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
1607 	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
1608 	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
1609 	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
1610 	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
1611 	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
1612 	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
1613 	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
1614 	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
1615 	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
1616 	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
1617 	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
1618 	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);
1619 
1620 	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
1621 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1622 	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
1623 	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
1624 	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
1625 	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
1626 	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
1627 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1628 	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
1629 	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
1630 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1631 	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
1632 	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
1633 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1634 	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
1635 	rge_write_phy_ocp(sc, 0xa436, 0x8102);
1636 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1637 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1638 	rge_write_phy_ocp(sc, 0xa436, 0x8105);
1639 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1640 	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
1641 	rge_write_phy_ocp(sc, 0xa436, 0x8100);
1642 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1643 	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
1644 	rge_write_phy_ocp(sc, 0xa436, 0x8104);
1645 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1646 	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
1647 	rge_write_phy_ocp(sc, 0xa436, 0x8106);
1648 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1649 	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
1650 	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
1651 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1652 	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
1653 	rge_write_phy_ocp(sc, 0xa436, 0x80df);
1654 	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
1655 	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
1656 	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
1657 	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
1658 	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
1659 	rge_write_phy_ocp(sc, 0xa436, 0x819f);
1660 	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
1661 	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
1662 	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
1663 	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
1664 	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
1665 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1666 }
1667 
1668 void
1669 rge_phy_config_mac_cfg3(struct rge_softc *sc)
1670 {
1671 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1672 	uint16_t val;
1673 	int i;
1674 	static const uint16_t mac_cfg3_a438_value[] =
1675 	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
1676 	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };
1677 
1678 	static const uint16_t mac_cfg3_b88e_value[] =
1679 	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
1680 	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
1681 	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
1682 	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
1683 	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
1684 	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };
1685 
1686 	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
1687 		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
1688 		    rtl8125_mac_cfg3_ephy[i].val);
1689 
1690 	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
1691 	rge_write_ephy(sc, 0x002a, val | 0x3000);
1692 	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
1693 	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
1694 	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
1695 	rge_write_ephy(sc, 0x0002, 0x6042);
1696 	rge_write_ephy(sc, 0x0006, 0x0014);
1697 	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
1698 	rge_write_ephy(sc, 0x006a, val | 0x3000);
1699 	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
1700 	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
1701 	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
1702 	rge_write_ephy(sc, 0x0042, 0x6042);
1703 	rge_write_ephy(sc, 0x0046, 0x0014);
1704 
1705 	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);
1706 
1707 	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
1708 	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
1709 	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
1710 	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
1711 	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
1712 	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
1713 	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
1714 	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
1715 	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
1716 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
1717 	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
1718 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
1719 	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
1720 	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
1721 	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
1722 	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
1723 	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
1724 	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
1725 	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
1726 	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
1727 	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
1728 	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
1729 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1730 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
1731 	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
1732 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1733 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
1734 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
1735 	    32);
1736 	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
1737 	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1738 	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
1739 	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1740 
1741 	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
1742 	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
1743 		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
1744 	for (i = 0; i < 26; i++)
1745 		rge_write_phy_ocp(sc, 0xa438, 0);
1746 	rge_write_phy_ocp(sc, 0xa436, 0x8257);
1747 	rge_write_phy_ocp(sc, 0xa438, 0x020f);
1748 	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
1749 	rge_write_phy_ocp(sc, 0xa438, 0x7843);
1750 
1751 	rge_patch_phy_mcu(sc, 1);
1752 	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1753 	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1754 	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
1755 		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
1756 		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
1757 	}
1758 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1759 	rge_patch_phy_mcu(sc, 0);
1760 
1761 	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
1762 	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
1763 	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
1764 	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
1765 	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
1766 	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
1767 	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
1768 	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
1769 	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
1770 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1771 }
1772 
1773 void
1774 rge_phy_config_mac_cfg4(struct rge_softc *sc)
1775 {
1776 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1777 	uint16_t val;
1778 	int i;
1779 	static const uint16_t mac_cfg4_b87c_value[] =
1780 	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
1781 	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
1782 	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
1783 	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
1784 	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
1785 	      0x80b0, 0x0f31 };
1786 
1787 	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
1788 		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
1789 		    rtl8125_mac_cfg4_ephy[i].val);
1790 
1791 	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
1792 	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
1793 	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
1794 	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
1795 	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
1796 	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
1797 	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
1798 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1799 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);
1800 
1801 	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);
1802 
1803 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1804 	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
1805 	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
1806 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1807 	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
1808 	for (i = 0; i < 6; i++) {
1809 		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
1810 		if (i < 3)
1811 			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
1812 		else
1813 			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
1814 	}
1815 	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
1816 	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
1817 	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
1818 	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
1819 	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
1820 	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
1821 	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
1822 	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
1823 	rge_write_phy_ocp(sc, 0xb87c, 0x80f2);
1824 	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
1825 	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
1826 	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
1827 	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
1828 	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
1829 	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
1830 	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
1831 	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
1832 	rge_write_phy_ocp(sc, 0xac4e, 0x0db4);
1833 	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
1834 	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
1835 	rge_write_phy_ocp(sc, 0xad08, 0x0007);
1836 	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
1837 		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
1838 		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
1839 	}
1840 	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
1841 	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
1842 	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
1843 	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
1844 	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
1845 	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
1846 	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
1847 	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
1848 	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
1849 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
1850 	    32);
1851 	rge_write_phy_ocp(sc, 0xa436, 0x816c);
1852 	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
1853 	rge_write_phy_ocp(sc, 0xa436, 0x8170);
1854 	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
1855 	rge_write_phy_ocp(sc, 0xa436, 0x8174);
1856 	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
1857 	rge_write_phy_ocp(sc, 0xa436, 0x8178);
1858 	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
1859 	rge_write_phy_ocp(sc, 0xa436, 0x817c);
1860 	rge_write_phy_ocp(sc, 0xa438, 0x0719);
1861 	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
1862 	rge_write_phy_ocp(sc, 0xa438, 0x0400);
1863 	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
1864 	rge_write_phy_ocp(sc, 0xa438, 0x0404);
1865 	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
1866 	for (i = 0; i < 6; i++) {
1867 		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
1868 		if (i == 2)
1869 			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
1870 		else
1871 			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
1872 	}
1873 	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
1874 	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
1875 	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
1876 	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
1877 	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
1878 	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
1879 	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
1880 	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
1881 	rge_write_phy_ocp(sc, 0xa436, 0x8217);
1882 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1883 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1884 	rge_write_phy_ocp(sc, 0xa436, 0x821a);
1885 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1886 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1887 	rge_write_phy_ocp(sc, 0xa436, 0x80da);
1888 	rge_write_phy_ocp(sc, 0xa438, 0x0403);
1889 	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
1890 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1891 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
1892 	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
1893 	rge_write_phy_ocp(sc, 0xa438, 0x0384);
1894 	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
1895 	rge_write_phy_ocp(sc, 0xa438, 0x2007);
1896 	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
1897 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1898 	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
1899 	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
1900 	rge_write_phy_ocp(sc, 0xa438, 0xf009);
1901 	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
1902 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1903 	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
1904 	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
1905 	rge_write_phy_ocp(sc, 0xa438, 0xf083);
1906 	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
1907 	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
1908 	rge_write_phy_ocp(sc, 0xa436, 0x80df);
1909 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1910 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
1911 	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
1912 	rge_write_phy_ocp(sc, 0xa438, 0x2007);
1913 	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
1914 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1915 	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
1916 	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
1917 	rge_write_phy_ocp(sc, 0xa438, 0x8009);
1918 	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
1919 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1920 	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
1921 	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
1922 	rge_write_phy_ocp(sc, 0xa438, 0x200a);
1923 	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
1924 	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
1925 	rge_write_phy_ocp(sc, 0xa436, 0x809f);
1926 	rge_write_phy_ocp(sc, 0xa438, 0x6073);
1927 	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
1928 	rge_write_phy_ocp(sc, 0xa438, 0x000b);
1929 	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
1930 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1931 	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
1932 	rge_patch_phy_mcu(sc, 1);
1933 	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1934 	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1935 	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
1936 	rge_write_phy_ocp(sc, 0xb890, 0x0000);
1937 	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
1938 	rge_write_phy_ocp(sc, 0xb890, 0x0103);
1939 	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
1940 	rge_write_phy_ocp(sc, 0xb890, 0x0507);
1941 	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
1942 	rge_write_phy_ocp(sc, 0xb890, 0x090b);
1943 	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
1944 	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
1945 	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
1946 	rge_write_phy_ocp(sc, 0xb890, 0x1012);
1947 	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
1948 	rge_write_phy_ocp(sc, 0xb890, 0x1416);
1949 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1950 	rge_patch_phy_mcu(sc, 0);
1951 	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
1952 	RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
1953 	rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
1954 	rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
1955 	rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
1956 	rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
1957 	rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
1958 	RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
1959 	rge_write_phy_ocp(sc, 0xa436, 0x817d);
1960 	RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
1961 }
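
/*
 * A note on the access pattern above: 0xa436/0xa438 and
 * 0xb87c/0xb87e act as address/data register pairs into banks of
 * PHY parameter space, so a sequence such as
 *
 *	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
 *	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
 *
 * stores 0x370e at parameter address 0x8145, and a read-modify-write
 * of 0xa438 after selecting an address via 0xa436 patches part of a
 * parameter word.  The addresses and values themselves are
 * undocumented tuning constants taken from Realtek's r8125 driver.
 */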
1962 
1963 void
1964 rge_phy_config_mac_cfg5(struct rge_softc *sc)
1965 {
1966 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1967 	uint16_t val;
1968 	int i;
1969 
1970 	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
1971 		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
1972 		    rtl8125_mac_cfg5_ephy[i].val);
1973 
1974 	val = rge_read_ephy(sc, 0x0022) & ~0x0030;
1975 	rge_write_ephy(sc, 0x0022, val | 0x0020);
1976 	val = rge_read_ephy(sc, 0x0062) & ~0x0030;
1977 	rge_write_ephy(sc, 0x0062, val | 0x0020);
1978 
1979 	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);
1980 
1981 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1982 	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
1983 	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
1984 	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
1985 	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
1986 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
1987 	    32);
1988 	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
1989 	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
1990 	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
1991 	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
1992 	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
1993 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1994 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
1995 	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
1996 	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
1997 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1998 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
1999 	for (i = 0; i < 10; i++) {
2000 		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
2001 		rge_write_phy_ocp(sc, 0xa438, 0x2417);
2002 	}
2003 	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
2004 	val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
2005 	rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
2006 }
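
/*
 * Note that the cfg3/cfg4/cfg5 paths all program
 * RGE_EEE_TXIDLE_TIMER with if_mtu + ETHER_HDR_LEN + 32, i.e.
 * roughly one full frame plus slack (1546 for the default 1500-byte
 * MTU), presumably so the PHY only drops into EEE low-power idle
 * after a frame's worth of transmit idle time.
 */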
2007 
2008 void
2009 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
2010 {
2011 	if (sc->rge_mcodever != mcode_version) {
2012 		int i;
2013 
2014 		rge_patch_phy_mcu(sc, 1);
2015 
2016 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
2017 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
2018 			if (sc->rge_type == MAC_CFG2)
2019 				rge_write_phy_ocp(sc, 0xa438, 0x8600);
2020 			else
2021 				rge_write_phy_ocp(sc, 0xa438, 0x8601);
2022 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
2023 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
2024 
2025 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
2026 		}
2027 
2028 		if (sc->rge_type == MAC_CFG2) {
2029 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
2030 				rge_write_phy_ocp(sc,
2031 				    rtl8125_mac_cfg2_mcu[i].reg,
2032 				    rtl8125_mac_cfg2_mcu[i].val);
2033 			}
2034 		} else if (sc->rge_type == MAC_CFG3) {
2035 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
2036 				rge_write_phy_ocp(sc,
2037 				    rtl8125_mac_cfg3_mcu[i].reg,
2038 				    rtl8125_mac_cfg3_mcu[i].val);
2039 			}
2040 		} else if (sc->rge_type == MAC_CFG4) {
2041 			for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
2042 				rge_write_phy_ocp(sc,
2043 				    rtl8125_mac_cfg4_mcu[i].reg,
2044 				    rtl8125_mac_cfg4_mcu[i].val);
2045 			}
2046 		} else if (sc->rge_type == MAC_CFG5) {
2047 			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
2048 				rge_write_phy_ocp(sc,
2049 				    rtl8125_mac_cfg5_mcu[i].reg,
2050 				    rtl8125_mac_cfg5_mcu[i].val);
2051 			}
2052 		}
2053 
2054 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
2055 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
2056 
2057 			rge_write_phy_ocp(sc, 0xa436, 0);
2058 			rge_write_phy_ocp(sc, 0xa438, 0);
2059 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
2060 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
2061 			rge_write_phy_ocp(sc, 0xa438, 0);
2062 		}
2063 
2064 		rge_patch_phy_mcu(sc, 0);
2065 
2066 		/* Write microcode version. */
2067 		rge_write_phy_ocp(sc, 0xa436, 0x801e);
2068 		rge_write_phy_ocp(sc, 0xa438, mcode_version);
2069 	}
2070 }
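
/*
 * sc->rge_mcodever is the version word read back through the same
 * 0xa436/0xa438 window at attach time, so the reload above is
 * skipped when the PHY already carries the expected microcode.
 * A minimal sketch of that readback (helper name illustrative only):
 *
 *	static uint16_t
 *	rge_read_mcodever(struct rge_softc *sc)
 *	{
 *		rge_write_phy_ocp(sc, 0xa436, 0x801e);
 *		return (rge_read_phy_ocp(sc, 0xa438));
 *	}
 */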
2071 
2072 void
2073 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
2074 {
2075 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2076 	RGE_WRITE_4(sc, RGE_MAC0,
2077 	    (uint32_t)addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2078 	RGE_WRITE_4(sc, RGE_MAC4,
2079 	    addr[5] <<  8 | addr[4]);
2080 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2081 }
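
/*
 * Worked example of the packing above: for 00:11:22:33:44:55,
 * RGE_MAC0 is written with 0x33221100 and RGE_MAC4 with 0x00005544,
 * i.e. the station address is laid out little-endian across the two
 * registers.  The RGE_EECMD_WRITECFG bracket temporarily unlocks the
 * otherwise write-protected ID registers.
 */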
2082 
2083 void
2084 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
2085 {
2086 	int i;
2087 
2088 	for (i = 0; i < ETHER_ADDR_LEN; i++)
2089 		addr[i] = RGE_READ_1(sc, RGE_ADDR0 + i);
2090 }
2091 
2092 void
2093 rge_hw_init(struct rge_softc *sc)
2094 {
2095 	int i;
2096 
2097 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2098 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
2099 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
2100 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2101 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
2102 
2103 	/* Disable UPS. */
2104 	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
2105 
2106 	/* Configure MAC MCU. */
2107 	rge_write_mac_ocp(sc, 0xfc38, 0);
2108 
2109 	for (i = 0xfc28; i < 0xfc38; i += 2)
2110 		rge_write_mac_ocp(sc, i, 0);
2111 
2112 	DELAY(3000);
2113 	rge_write_mac_ocp(sc, 0xfc26, 0);
2114 
2115 	if (sc->rge_type == MAC_CFG3) {
2116 		for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
2117 			rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
2118 			    rtl8125_mac_bps[i].val);
2119 		}
2120 	} else if (sc->rge_type == MAC_CFG5) {
2121 		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
2122 			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
2123 			    rtl8125b_mac_bps[i].val);
2124 		}
2125 	}
2126 
2127 	/* Disable PHY power saving. */
2128 	rge_disable_phy_ocp_pwrsave(sc);
2129 
2130 	/* Set PCIe uncorrectable error status. */
2131 	rge_write_csi(sc, 0x108,
2132 	    rge_read_csi(sc, 0x108) | 0x00100000);
2133 }
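
/*
 * The 0xfc28..0xfc36 loop above zeroes the eight 16-bit MAC MCU
 * break-point registers (0xfc38 is excluded by the bound, having
 * been cleared first); the per-chip mac_bps tables then install the
 * MAC MCU patch as plain { reg, val } pairs, e.g.
 *
 *	rge_write_mac_ocp(sc, rtl8125_mac_bps[0].reg,
 *	    rtl8125_mac_bps[0].val);
 *
 * for the first entry.
 */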
2134 
2135 void
2136 rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
2137 {
2138 	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
2139 		rge_patch_phy_mcu(sc, 1);
2140 		rge_write_phy_ocp(sc, 0xc416, 0);
2141 		rge_write_phy_ocp(sc, 0xc416, 0x0500);
2142 		rge_patch_phy_mcu(sc, 0);
2143 	}
2144 }
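
/*
 * Per Realtek's r8125 sources, 0xc416 is a PHY power-save control
 * word and 0x0500 its "power saving disabled" value; the register is
 * rewritten (under MCU patch mode, via an intermediate write of 0)
 * only when it does not already read back as 0x0500.
 */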
2145 
2146 void
2147 rge_patch_phy_mcu(struct rge_softc *sc, int set)
2148 {
2149 	int i;
2150 
2151 	if (set)
2152 		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
2153 	else
2154 		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
2155 
2156 	for (i = 0; i < 1000; i++) {
2157 		if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) == 0x0040)
2158 			break;
2159 		DELAY(100);
2160 	}
2161 	if (i == 1000) {
2162 		DPRINTF(("timeout waiting to patch phy mcu\n"));
2163 		return;
2164 	}
2165 }
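
/*
 * Handshake as read from the code (the registers are otherwise
 * undocumented): 0xb820 bit 0x0010 asks the PHY firmware to enter
 * (set) or leave (clear) patch mode, and 0xb800 bit 0x0040 is polled
 * as the ready/acknowledge flag -- a bounded busy-wait of at most
 * 1000 * 100us = 100ms before the DPRINTF fires and we give up.
 */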
2166 
2167 void
2168 rge_add_media_types(struct rge_softc *sc)
2169 {
2170 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
2171 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
2172 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
2173 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
2174 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
2175 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2176 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
2177 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
2178 }
2179 
2180 void
2181 rge_config_imtype(struct rge_softc *sc, int imtype)
2182 {
2183 	switch (imtype) {
2184 	case RGE_IMTYPE_NONE:
2185 		sc->rge_intrs = RGE_INTRS;
2186 		sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
2187 		    RGE_ISR_RX_FIFO_OFLOW;
2188 		sc->rge_tx_ack = RGE_ISR_TX_OK;
2189 		break;
2190 	case RGE_IMTYPE_SIM:
2191 		sc->rge_intrs = RGE_INTRS_TIMER;
2192 		sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
2193 		sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
2194 		break;
2195 	default:
2196 		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
2197 	}
2198 }
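
/*
 * RGE_IMTYPE_NONE enables the individual rx/tx interrupt sources and
 * acknowledges them directly.  RGE_IMTYPE_SIM leaves only the PCS
 * timer interrupt enabled, so rx and tx completions alike are
 * acknowledged by clearing RGE_ISR_PCS_TIMEOUT and the timer acts as
 * a software interrupt moderator (see rge_setup_sim_im() below).
 */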
2199 
2200 void
2201 rge_disable_hw_im(struct rge_softc *sc)
2202 {
2203 	RGE_WRITE_2(sc, RGE_IM, 0);
2204 }
2205 
2206 void
2207 rge_disable_sim_im(struct rge_softc *sc)
2208 {
2209 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
2210 	sc->rge_timerintr = 0;
2211 }
2212 
2213 void
2214 rge_setup_sim_im(struct rge_softc *sc)
2215 {
2216 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
2217 	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
2218 	sc->rge_timerintr = 1;
2219 }
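
/*
 * Simulated interrupt moderation: RGE_TIMERINT0 holds the timeout
 * (0x2600 is an opaque tick count inherited from the vendor driver)
 * and a write to RGE_TIMERCNT restarts the countdown, so the
 * interrupt path re-arms moderation after each batch of work with a
 * write of the form
 *
 *	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
 *
 * when sc->rge_timerintr is set.
 */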
2220 
2221 void
2222 rge_setup_intr(struct rge_softc *sc, int imtype)
2223 {
2224 	rge_config_imtype(sc, imtype);
2225 
2226 	/* Enable interrupts. */
2227 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
2228 
2229 	switch (imtype) {
2230 	case RGE_IMTYPE_NONE:
2231 		rge_disable_sim_im(sc);
2232 		rge_disable_hw_im(sc);
2233 		break;
2234 	case RGE_IMTYPE_SIM:
2235 		rge_disable_hw_im(sc);
2236 		rge_setup_sim_im(sc);
2237 		break;
2238 	default:
2239 		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
2240 	}
2241 }
2242 
2243 void
2244 rge_exit_oob(struct rge_softc *sc)
2245 {
2246 	int i;
2247 
2248 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
2249 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
2250 	    RGE_RXCFG_ERRPKT);
2251 
2252 	/* Disable RealWoW. */
2253 	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
2254 
2255 	rge_reset(sc);
2256 
2257 	/* Disable OOB. */
2258 	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
2259 
2260 	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
2261 
2262 	for (i = 0; i < 10; i++) {
2263 		DELAY(100);
2264 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2265 			break;
2266 	}
2267 
2268 	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
2269 	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
2270 	rge_write_mac_ocp(sc, 0xc01e, 0x5555);
2271 
2272 	for (i = 0; i < 10; i++) {
2273 		DELAY(100);
2274 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2275 			break;
2276 	}
2277 
2278 	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
2279 		printf("%s: UPS resume detected, waiting for PHY ready\n",
2280 		    device_xname(sc->sc_dev));
2281 		for (i = 0; i < RGE_TIMEOUT; i++) {
2282 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
2283 				break;
2284 			DELAY(1000);
2285 		}
2286 		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
2287 		if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
2288 			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
2289 		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
2290 	}
2291 }
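
/*
 * In the UPS-resume branch above, 0xa420 bits 0-2 expose the PHY's
 * internal state machine and the loop waits up to RGE_TIMEOUT ms for
 * state 2 -- "LAN on" in Realtek's r8125 sources -- before the UPS
 * flags are cleared.  The two RGE_TWICMD polls likewise give the
 * embedded firmware up to 1ms each to acknowledge leaving OOB mode.
 */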
2292 
2293 void
2294 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
2295 {
2296 	int i;
2297 
2298 	RGE_WRITE_4(sc, RGE_CSIDR, val);
2299 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2300 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
2301 
2302 	for (i = 0; i < 10; i++) {
2303 		DELAY(100);
2304 		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
2305 			break;
2306 	}
2307 
2308 	DELAY(20);
2309 }
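
/*
 * CSI is an indirect window into the chip's own PCIe configuration
 * space: the data is staged in RGE_CSIDR, then RGE_CSIAR is written
 * with the dword address, the byte enables and the BUSY (go) bit.
 * The read-modify-write in rge_hw_init(), for example, expands to:
 *
 *	uint32_t v = rge_read_csi(sc, 0x108);
 *	rge_write_csi(sc, 0x108, v | 0x00100000);
 */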
2310 
2311 uint32_t
2312 rge_read_csi(struct rge_softc *sc, uint32_t reg)
2313 {
2314 	int i;
2315 
2316 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2317 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
2318 
2319 	for (i = 0; i < 10; i++) {
2320 		DELAY(100);
2321 		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
2322 			break;
2323 	}
2324 
2325 	DELAY(20);
2326 
2327 	return (RGE_READ_4(sc, RGE_CSIDR));
2328 }
2329 
2330 void
2331 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2332 {
2333 	uint32_t tmp;
2334 
2335 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2336 	tmp |= val;
2337 	tmp |= RGE_MACOCP_BUSY;
2338 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
2339 }
2340 
2341 uint16_t
2342 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
2343 {
2344 	uint32_t val;
2345 
2346 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2347 	RGE_WRITE_4(sc, RGE_MACOCP, val);
2348 
2349 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
2350 }
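
/*
 * MACOCP command-word layout implied by the two functions above
 * (register offsets are 16-bit aligned, hence `reg >> 1'): the
 * halved offset sits at RGE_MACOCP_ADDR_SHIFT, RGE_MACOCP_BUSY is
 * the write strobe, and the low 16 bits carry the data.  Taking the
 * shift as 16 and RGE_MACOCP_BUSY as bit 31 (assumptions here), the
 * write of 0x5555 to 0xc01e in rge_exit_oob() issues the single
 * dword
 *
 *	0x80000000 | (0xc01e >> 1) << 16 | 0x5555 == 0xe00f5555
 */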
2351 
2352 void
2353 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
2354 {
2355 	uint32_t tmp;
2356 	int i;
2357 
2358 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2359 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
2360 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
2361 
2362 	for (i = 0; i < 10; i++) {
2363 		DELAY(100);
2364 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
2365 			break;
2366 	}
2367 
2368 	DELAY(20);
2369 }
2370 
2371 uint16_t
2372 rge_read_ephy(struct rge_softc *sc, uint16_t reg)
2373 {
2374 	uint32_t val;
2375 	int i;
2376 
2377 	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2378 	RGE_WRITE_4(sc, RGE_EPHYAR, val);
2379 
2380 	for (i = 0; i < 10; i++) {
2381 		DELAY(100);
2382 		val = RGE_READ_4(sc, RGE_EPHYAR);
2383 		if (val & RGE_EPHYAR_BUSY)
2384 			break;
2385 	}
2386 
2387 	DELAY(20);
2388 
2389 	return (val & RGE_EPHYAR_DATA_MASK);
2390 }
2391 
2392 void
2393 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
2394 {
2395 	uint16_t off, phyaddr;
2396 
2397 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2398 	phyaddr <<= 4;
2399 
2400 	off = addr ? reg : 0x10 + (reg % 8);
2401 
2402 	phyaddr += (off - 16) << 1;
2403 
2404 	rge_write_phy_ocp(sc, phyaddr, val);
2405 }
2406 
2407 uint16_t
2408 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
2409 {
2410 	uint16_t off, phyaddr;
2411 
2412 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2413 	phyaddr <<= 4;
2414 
2415 	off = addr ? reg : 0x10 + (reg % 8);
2416 
2417 	phyaddr += (off - 16) << 1;
2418 
2419 	return (rge_read_phy_ocp(sc, phyaddr));
2420 }
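
/*
 * For addr == 0 these two functions fold the standard MII register
 * set into OCP space: reg / 8 picks a page relative to RGE_PHYBASE
 * and 0x10 + reg % 8 the slot within it.  Worked example, assuming
 * RGE_PHYBASE is 0xa40: addr = 0, reg = 1 (BMSR) gives phyaddr =
 * 0xa40 << 4 = 0xa400, off = 0x11, and phyaddr += (0x11 - 16) << 1
 * yields OCP address 0xa402.
 */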
2421 
2422 void
2423 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2424 {
2425 	uint32_t tmp;
2426 	int i;
2427 
2428 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2429 	tmp |= RGE_PHYOCP_BUSY | val;
2430 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
2431 
2432 	for (i = 0; i < RGE_TIMEOUT; i++) {
2433 		DELAY(1);
2434 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
2435 			break;
2436 	}
2437 }
2438 
2439 uint16_t
2440 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
2441 {
2442 	uint32_t val;
2443 	int i;
2444 
2445 	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2446 	RGE_WRITE_4(sc, RGE_PHYOCP, val);
2447 
2448 	for (i = 0; i < RGE_TIMEOUT; i++) {
2449 		DELAY(1);
2450 		val = RGE_READ_4(sc, RGE_PHYOCP);
2451 		if (val & RGE_PHYOCP_BUSY)
2452 			break;
2453 	}
2454 
2455 	return (val & RGE_PHYOCP_DATA_MASK);
2456 }
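
/*
 * Note the inverted completion tests between the write and read
 * accessors (PHYOCP, EPHY and CSI alike): a write sets the BUSY/go
 * bit and polls until the hardware clears it, while a read issues
 * the command with the bit clear and polls until the hardware sets
 * it to flag valid data.  Neither path reports a timeout; a read
 * simply returns whatever the data field last held.
 */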
2457 
2458 int
2459 rge_get_link_status(struct rge_softc *sc)
2460 {
2461 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
2462 }
2463 
2464 void
2465 rge_txstart(void *arg)
2466 {
2467 	struct rge_softc *sc = arg;
2468 
2469 	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
2470 }
2471 
2472 void
2473 rge_tick(void *arg)
2474 {
2475 	struct rge_softc *sc = arg;
2476 	int s;
2477 
2478 	s = splnet();
2479 	rge_link_state(sc);
2480 	splx(s);
2481 
2482 	callout_schedule(&sc->sc_timeout, hz);
2483 }
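
/*
 * sc_timeout is (re)armed when the interface comes up, and the
 * callout_schedule() above re-arms it every `hz' ticks, making
 * rge_tick() a 1 Hz poll of the PHY link bit while the interface
 * is running.
 */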
2484 
2485 void
2486 rge_link_state(struct rge_softc *sc)
2487 {
2488 	struct ifnet *ifp = &sc->sc_ec.ec_if;
2489 	int link = LINK_STATE_DOWN;
2490 
2491 	if (rge_get_link_status(sc))
2492 		link = LINK_STATE_UP;
2493 
2494 	if (ifp->if_link_state != link) { /* XXX not safe to access */
2495 		if_link_state_change(ifp, link);
2496 	}
2497 }
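
/*
 * On the XXX above: ifp->if_link_state is read here without the lock
 * that if_link_state_change() takes to update it, so the comparison
 * is best regarded as a racy optimization that avoids posting
 * redundant link-state events, not a correctness guarantee.
 */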
2498