/*	$NetBSD: if_rge.c,v 1.18 2021/03/02 07:55:16 knakahara Exp $	*/
/*	$OpenBSD: if_rge.c,v 1.9 2020/12/12 11:48:53 jan Exp $	*/

/*
 * Copyright (c) 2019, 2020 Kevin Lo <kevlo@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.18 2021/03/02 07:55:16 knakahara Exp $");

#include <sys/types.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/callout.h>
#include <sys/workqueue.h>

#include <net/if.h>

#include <net/if_dl.h>
#include <net/if_ether.h>

#include <net/if_media.h>

#include <netinet/in.h>

#include <net/bpf.h>

#include <sys/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_rgereg.h>

#ifdef __NetBSD__
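/*
 * Compatibility shims for code shared with OpenBSD: letoh32 is OpenBSD's
 * name for the little-endian-to-host conversion (the same byte swap as
 * htole32, so the alias is exact), nitems() is OpenBSD's spelling of
 * __arraycount(), and MCLGETL() below emulates OpenBSD's "get an mbuf
 * with an external buffer of the given size" allocator on top of
 * MGETHDR()/MEXTMALLOC().
 */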
#define letoh32		htole32
#define nitems(x)	__arraycount(x)

static struct mbuf *
MCLGETL(struct rge_softc *sc __unused, int how,
    u_int size)
{
	struct mbuf *m;

	MGETHDR(m, how, MT_DATA);
	if (m == NULL)
		return NULL;

	MEXTMALLOC(m, size, how);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return NULL;
	}
	return m;
}

#ifdef NET_MPSAFE
#define RGE_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif
#endif

#ifdef RGE_DEBUG
#define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
int rge_debug = 0;
#else
#define DPRINTF(x)
#endif

static int	rge_match(device_t, cfdata_t, void *);
static void	rge_attach(device_t, device_t, void *);
int		rge_intr(void *);
int		rge_encap(struct rge_softc *, struct mbuf *, int);
int		rge_ioctl(struct ifnet *, u_long, void *);
void		rge_start(struct ifnet *);
void		rge_watchdog(struct ifnet *);
int		rge_init(struct ifnet *);
void		rge_stop(struct ifnet *, int);
int		rge_ifmedia_upd(struct ifnet *);
void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int		rge_allocmem(struct rge_softc *);
int		rge_newbuf(struct rge_softc *, int);
void		rge_discard_rxbuf(struct rge_softc *, int);
int		rge_rx_list_init(struct rge_softc *);
void		rge_tx_list_init(struct rge_softc *);
int		rge_rxeof(struct rge_softc *);
int		rge_txeof(struct rge_softc *);
void		rge_reset(struct rge_softc *);
void		rge_iff(struct rge_softc *);
void		rge_set_phy_power(struct rge_softc *, int);
void		rge_phy_config(struct rge_softc *);
void		rge_phy_config_mac_cfg2(struct rge_softc *);
void		rge_phy_config_mac_cfg3(struct rge_softc *);
void		rge_phy_config_mac_cfg4(struct rge_softc *);
void		rge_phy_config_mac_cfg5(struct rge_softc *);
void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
void		rge_get_macaddr(struct rge_softc *, uint8_t *);
void		rge_hw_init(struct rge_softc *);
void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
void		rge_patch_phy_mcu(struct rge_softc *, int);
void		rge_add_media_types(struct rge_softc *);
void		rge_config_imtype(struct rge_softc *, int);
void		rge_disable_hw_im(struct rge_softc *);
void		rge_disable_sim_im(struct rge_softc *);
void		rge_setup_sim_im(struct rge_softc *);
void		rge_setup_intr(struct rge_softc *, int);
void		rge_exit_oob(struct rge_softc *);
void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
int		rge_get_link_status(struct rge_softc *);
void		rge_txstart(struct work *, void *);
void		rge_tick(void *);
void		rge_link_state(struct rge_softc *);

static const struct {
	uint16_t reg;
	uint16_t val;
} rtl8125_mac_cfg2_mcu[] = {
	RTL8125_MAC_CFG2_MCU
}, rtl8125_mac_cfg3_mcu[] = {
	RTL8125_MAC_CFG3_MCU
}, rtl8125_mac_cfg4_mcu[] = {
	RTL8125_MAC_CFG4_MCU
}, rtl8125_mac_cfg5_mcu[] = {
	RTL8125_MAC_CFG5_MCU
};
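/*
 * Each table above expands to a list of { register, value } pairs taken
 * from Realtek's reference driver; rge_phy_config_mcu() walks the table
 * matching the detected MAC revision and writes the pairs out through
 * the PHY OCP window to patch the PHY microcode.
 */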

CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
		NULL, NULL); /* Sevan - detach function? */

static const struct {
	pci_vendor_id_t		vendor;
	pci_product_id_t	product;
} rge_devices[] = {
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125 },
};

static int
rge_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;
	int n;

	for (n = 0; n < __arraycount(rge_devices); n++) {
		if (PCI_VENDOR(pa->pa_id) == rge_devices[n].vendor &&
		    PCI_PRODUCT(pa->pa_id) == rge_devices[n].product)
			return 3;
	}

	return 0;
}

static void
rge_attach(device_t parent, device_t self, void *aux)
{
	struct rge_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t *ihp;
	char intrbuf[PCI_INTRSTR_LEN];
	const char *intrstr = NULL;
	struct ifnet *ifp;
	pcireg_t reg;
	uint32_t hwrev;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int offset;
	pcireg_t command;

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	sc->sc_dev = self;

	pci_aprint_devinfo(pa, "Ethernet controller");

	/*
	 * Map control/status registers.
	 */
	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
	    NULL, &sc->rge_bsize)) {
		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
		    &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
			    &sc->rge_bsize)) {
				aprint_error(": can't map mem or i/o space\n");
				return;
			}
		}
	}

	int counts[PCI_INTR_TYPE_SIZE] = {
		[PCI_INTR_TYPE_INTX] = 1,
		[PCI_INTR_TYPE_MSI] = 1,
		[PCI_INTR_TYPE_MSIX] = 1,
	};
	int max_type = PCI_INTR_TYPE_MSIX;
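	/*
	 * One vector of each type is requested; pci_intr_alloc() hands
	 * back the best type the device and platform support, so MSI-X
	 * is preferred, then MSI, then INTx.  RGE_FLAG_MSI is what later
	 * lets rge_intr() skip the "is this our interrupt?" ISR pre-read
	 * that a shared INTx line requires.
	 */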
	/*
	 * Allocate interrupt.
	 */
	if (pci_intr_alloc(pa, &ihp, counts, max_type) != 0) {
		aprint_error(": couldn't map interrupt\n");
		return;
	}
	switch (pci_intr_type(pc, ihp[0])) {
	case PCI_INTR_TYPE_MSIX:
	case PCI_INTR_TYPE_MSI:
		sc->rge_flags |= RGE_FLAG_MSI;
		break;
	default:
		break;
	}
	intrstr = pci_intr_string(pc, ihp[0], intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(pc, ihp[0], IPL_NET, rge_intr,
	    sc, device_xname(sc->sc_dev));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	/* Determine hardware revision */
	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
	switch (hwrev) {
	case 0x60800000:
		sc->rge_type = MAC_CFG2;
		break;
	case 0x60900000:
		sc->rge_type = MAC_CFG3;
		break;
	case 0x64000000:
		sc->rge_type = MAC_CFG4;
		break;
	case 0x64100000:
		sc->rge_type = MAC_CFG5;
		break;
	default:
		aprint_error(": unknown version 0x%08x\n", hwrev);
		return;
	}

	rge_config_imtype(sc, RGE_IMTYPE_SIM);

	/*
	 * PCI Express check.
	 */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		/* Disable PCIe ASPM and ECPM. */
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    offset + PCIE_LCSR);
		reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1 |
		    PCIE_LCSR_ENCLKPM);
		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
		    reg);
	}

	rge_exit_oob(sc);
	rge_hw_init(sc);

	rge_get_macaddr(sc, eaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(eaddr));

	memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);

	rge_set_phy_power(sc, 1);
	rge_phy_config(sc);

	if (rge_allocmem(sc))
		return;

	ifp = &sc->sc_ec.ec_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef RGE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = rge_ioctl;
	ifp->if_stop = rge_stop;
	ifp->if_start = rge_start;
	ifp->if_init = rge_init;
	ifp->if_watchdog = rge_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT - 1);

#if notyet
	ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;
#endif

	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;

	callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
	callout_setfunc(&sc->sc_timeout, rge_tick, sc);

	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);

	/* Initialize ifmedia structures. */
	sc->sc_ec.ec_ifmedia = &sc->sc_media;
	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
	    rge_ifmedia_sts);
	rge_add_media_types(sc);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;

	if_attach(ifp);
	ether_ifattach(ifp, eaddr);
}
364 
365 int
366 rge_intr(void *arg)
367 {
368 	struct rge_softc *sc = arg;
369 	struct ifnet *ifp = &sc->sc_ec.ec_if;
370 	uint32_t status;
371 	int claimed = 0, rx, tx;
372 
373 	if (!(ifp->if_flags & IFF_RUNNING))
374 		return (0);
375 
376 	/* Disable interrupts. */
377 	RGE_WRITE_4(sc, RGE_IMR, 0);
378 
379 	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
380 		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
381 			return (0);
382 	}
383 
384 	status = RGE_READ_4(sc, RGE_ISR);
385 	if (status)
386 		RGE_WRITE_4(sc, RGE_ISR, status);
387 
388 	if (status & RGE_ISR_PCS_TIMEOUT)
389 		claimed = 1;
390 
391 	rx = tx = 0;
392 	if (status & sc->rge_intrs) {
393 		if (status &
394 		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
395 			rx |= rge_rxeof(sc);
396 			claimed = 1;
397 		}
398 
399 		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
400 			tx |= rge_txeof(sc);
401 			claimed = 1;
402 		}
403 
404 		if (status & RGE_ISR_SYSTEM_ERR) {
405 			KERNEL_LOCK(1, NULL);
406 			rge_init(ifp);
407 			KERNEL_UNLOCK_ONE(NULL);
408 			claimed = 1;
409 		}
410 	}
411 
412 	if (sc->rge_timerintr) {
413 		if ((tx | rx) == 0) {
414 			/*
415 			 * Nothing needs to be processed, fallback
416 			 * to use TX/RX interrupts.
417 			 */
418 			rge_setup_intr(sc, RGE_IMTYPE_NONE);
419 
420 			/*
421 			 * Recollect, mainly to avoid the possible
422 			 * race introduced by changing interrupt
423 			 * masks.
424 			 */
425 			rge_rxeof(sc);
426 			rge_txeof(sc);
427 		} else
428 			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
429 	} else if (tx | rx) {
430 		/*
431 		 * Assume that using simulated interrupt moderation
432 		 * (hardware timer based) could reduce the interrupt
433 		 * rate.
434 		 */
435 		rge_setup_intr(sc, RGE_IMTYPE_SIM);
436 	}
437 
438 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
439 
440 	return (claimed);
441 }

int
rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
{
	struct rge_tx_desc *d = NULL;
	struct rge_txq *txq;
	bus_dmamap_t txmap;
	uint32_t cmdsts, cflags = 0;
	int cur, error, i, last, nsegs;

#if notyet
	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
	 * take effect.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
	}
#endif

	txq = &sc->rge_ldata.rge_txq[idx];
	txmap = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = txmap->dm_nsegs;

	/* Set up hardware VLAN tagging. */
	if (vlan_has_tag(m))
		cflags |= bswap16(vlan_get_tag(m)) | RGE_TDEXTSTS_VTAG;

	last = cur = idx;
	cmdsts = RGE_TDCMDSTS_SOF;

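	/*
	 * Build one descriptor per DMA segment.  Only the first descriptor
	 * carries SOF, and its OWN bit is deliberately left clear here and
	 * set last (below, after EOF), so the chip never sees a partially
	 * constructed chain.  Descriptors after the first start with OWN
	 * already set, and the final slot in the ring also gets EOR so the
	 * chip wraps back to entry 0.
	 */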
	for (i = 0; i < txmap->dm_nsegs; i++) {
		d = &sc->rge_ldata.rge_tx_list[cur];

		d->rge_extsts = htole32(cflags);
		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));

		cmdsts |= txmap->dm_segs[i].ds_len;

		if (cur == RGE_TX_LIST_CNT - 1)
			cmdsts |= RGE_TDCMDSTS_EOR;

		d->rge_cmdsts = htole32(cmdsts);

		last = cur;
		cmdsts = RGE_TDCMDSTS_OWN;
		cur = RGE_NEXT_TX_DESC(cur);
	}

	/* Set EOF on the last descriptor. */
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);

	/* Transfer ownership of packet to the chip. */
	d = &sc->rge_ldata.rge_tx_list[idx];

	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update info of TX queue and descriptors. */
	txq->txq_mbuf = m;
	txq->txq_descidx = last;

	return (nsegs);
}

int
rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct rge_softc *sc = ifp->if_softc;
	//struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/*
		 * XXX set an ifflags callback and let ether_ioctl
		 * handle all of this.
		 */
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				rge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				rge_stop(ifp, 1);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			rge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
rge_start(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int free, idx, used;
	int queued = 0;

#define LINK_STATE_IS_UP(_s)	\
	((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)

	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		IFQ_PURGE(&ifp->if_snd);
		return;
	}

	/* Calculate free space. */
	idx = sc->rge_ldata.rge_txq_prodidx;
	free = sc->rge_ldata.rge_txq_considx;
	if (free <= idx)
		free += RGE_TX_LIST_CNT;
	free -= idx;

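	/*
	 * "free" is now the number of ring slots available between the
	 * producer and consumer indices, computed modulo the ring size.
	 * The loop below keeps a small reserve of slots relative to
	 * RGE_TX_NSEGS (the most descriptors rge_encap() can use for one
	 * packet) and sets IFF_OACTIVE instead of queueing once that
	 * reserve is gone.
	 */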
	for (;;) {
		if (RGE_TX_NSEGS >= free + 2) {
			SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		used = rge_encap(sc, m, idx);
		if (used == 0) {
			m_freem(m);
			continue;
		}

		KASSERT(used <= free);
		free -= used;

		bpf_mtap(ifp, m, BPF_D_OUT);

		idx += used;
		if (idx >= RGE_TX_LIST_CNT)
			idx -= RGE_TX_LIST_CNT;

		queued++;
	}

	if (queued == 0)
		return;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	sc->rge_ldata.rge_txq_prodidx = idx;
#if 0
	ifq_serialize(ifq, &sc->sc_task);
#else
	rge_txstart(&sc->sc_task, sc);
#endif
}

void
rge_watchdog(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;

	device_printf(sc->sc_dev, "watchdog timeout\n");
	if_statinc(ifp, if_oerrors);

	rge_init(ifp);
}

int
rge_init(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	uint32_t val;
	int i;

	rge_stop(ifp, 0);

	/* Set MAC address. */
	rge_set_macaddr(sc, CLLADDR(ifp->if_sadl));

	/* Set maximum frame size. */
	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);

	/* Initialize the RX descriptor list. */
	if (rge_rx_list_init(sc) == ENOBUFS) {
		device_printf(sc->sc_dev,
		    "init failed: no memory for RX buffers\n");
		rge_stop(ifp, 1);
		return (ENOBUFS);
	}

	/* Initialize TX descriptors. */
	rge_tx_list_init(sc);

	/* Load the addresses of the RX and TX lists into the chip. */
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	RGE_CLRBIT_1(sc, 0xf1, 0x80);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);

	/* Clear interrupt moderation timer. */
	for (i = 0; i < 64; i++)
		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);

	/* Set the initial RX and TX configurations. */
	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);

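	/*
	 * The block of CSI, PCI config and MAC OCP register writes that
	 * follows mirrors Realtek's vendor driver for the RTL8125.  Most
	 * of these registers are not publicly documented, so the magic
	 * constants are carried over as-is rather than given names.
	 */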
	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
	rge_write_csi(sc, 0x70c, val | 0x27000000);

	/* Enable hardware optimization function. */
	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);

	RGE_WRITE_2(sc, 0x0382, 0x221b);
	RGE_WRITE_1(sc, 0x4500, 0);
	RGE_WRITE_2(sc, 0x4800, 0);
	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);

	rge_write_mac_ocp(sc, 0xc140, 0xffff);
	rge_write_mac_ocp(sc, 0xc142, 0xffff);

	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);

	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);

	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
	else
		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);

	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
		rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
	} else
		RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);

	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);

	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);

	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);

	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);

	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);

	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);

	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);

	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);

	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);

	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);

	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);

	val = rge_read_mac_ocp(sc, 0xe84c) & ~0x0040;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe84c, 0x00c0);
	else
		rge_write_mac_ocp(sc, 0xe84c, 0x0080);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);

	/* Disable EEE plus. */
	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);

	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);

	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
	DELAY(1);
	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);

	RGE_CLRBIT_4(sc, 0x1880, 0x0030);

	rge_write_mac_ocp(sc, 0xe098, 0xc302);

	if ((sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0)
		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
	else
		RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);

	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);

	for (i = 0; i < 10; i++) {
		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
			break;
		DELAY(1000);
	}

	/* Disable RXDV gate. */
	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	rge_ifmedia_upd(ifp);

	/* Enable transmit and receive. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);

	/* Program promiscuous mode and multicast filters. */
	rge_iff(sc);

	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);

	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	/* Enable interrupts. */
	rge_setup_intr(sc, RGE_IMTYPE_SIM);

	ifp->if_flags |= IFF_RUNNING;
	CLR(ifp->if_flags, IFF_OACTIVE);

	callout_schedule(&sc->sc_timeout, 1);

	return (0);
}

/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */
void
rge_stop(struct ifnet *ifp, int disable)
{
	struct rge_softc *sc = ifp->if_softc;
	int i;

	if (disable) {
		callout_halt(&sc->sc_timeout, NULL);
	} else {
		callout_stop(&sc->sc_timeout);
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	sc->rge_timerintr = 0;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	RGE_WRITE_4(sc, RGE_IMR, 0);

	/* Clear timer interrupts. */
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);

	rge_reset(sc);

//	intr_barrier(sc->sc_ih);
//	ifq_barrier(&ifp->if_snd);
/*	ifq_clr_oactive(&ifp->if_snd); Sevan - OpenBSD queue API */

	if (sc->rge_head != NULL) {
		m_freem(sc->rge_head);
		sc->rge_head = sc->rge_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_txq[i].txq_dmamap);
			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
		}
	}
}

/*
 * Set media options.
 */
int
rge_ifmedia_upd(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	int anar, gig, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Disable Gigabit Lite. */
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);

	val = rge_read_phy_ocp(sc, 0xa5d4);
	val &= ~RGE_ADV_2500TFDX;

	anar = gig = 0;
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		break;
	case IFM_2500_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		ifp->if_baudrate = IF_Mbps(2500);
		break;
	case IFM_1000_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		ifp->if_baudrate = IF_Gbps(1);
		break;
	case IFM_100_TX:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
		    ANAR_TX | ANAR_10_FD | ANAR_10;
		ifp->if_baudrate = IF_Mbps(100);
		break;
	case IFM_10_T:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_10_FD | ANAR_10 : ANAR_10;
		ifp->if_baudrate = IF_Mbps(10);
		break;
	default:
		device_printf(sc->sc_dev,
		    "unsupported media type\n");
		return (EINVAL);
	}

	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
	rge_write_phy(sc, 0, MII_100T2CR, gig);
	rge_write_phy_ocp(sc, 0xa5d4, val);
	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
	    BMCR_STARTNEG);

	return (0);
}

/*
 * Report current media status.
 */
void
rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rge_softc *sc = ifp->if_softc;
	uint16_t status = 0;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (rge_get_link_status(sc)) {
		ifmr->ifm_status |= IFM_ACTIVE;

		status = RGE_READ_2(sc, RGE_PHYSTAT);
		if ((status & RGE_PHYSTAT_FDX) ||
		    (status & RGE_PHYSTAT_2500MBPS))
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		if (status & RGE_PHYSTAT_10MBPS)
			ifmr->ifm_active |= IFM_10_T;
		else if (status & RGE_PHYSTAT_100MBPS)
			ifmr->ifm_active |= IFM_100_TX;
		else if (status & RGE_PHYSTAT_1000MBPS)
			ifmr->ifm_active |= IFM_1000_T;
		else if (status & RGE_PHYSTAT_2500MBPS)
			ifmr->ifm_active |= IFM_2500_T;
	}
}

/*
 * Allocate memory for RX/TX rings.
 */
int
rge_allocmem(struct rge_softc *sc)
{
	int error, i;

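	/*
	 * Each ring goes through the usual four-step bus_dma dance:
	 * bus_dmamap_create() to make the map, bus_dmamem_alloc() to get
	 * DMA-safe pages, bus_dmamem_map() to map them into kernel VA,
	 * and bus_dmamap_load() to program the map with the ring's
	 * physical address.  The later error paths unmap and free
	 * whatever had already been set up.
	 */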
	/* Allocate DMA'able memory for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create TX list map\n");
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
	    BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't alloc TX list\n");
		return (error);
	}

	/* Load the map for the TX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
	    (void **) &sc->rge_ldata.rge_tx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't map TX dma buffers\n");
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}
	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't load TX dma map\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for TX\n");
			return (error);
		}
	}

	/* Allocate DMA'able memory for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create RX list map\n");
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
	    BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't alloc RX list\n");
		return (error);
	}

	/* Load the map for the RX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
	    (void **) &sc->rge_ldata.rge_rx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't map RX dma buffers\n");
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}
	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't load RX dma map\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}

	/* Create DMA maps for RX buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
		    RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for RX\n");
			return (error);
		}
	}

	return (error);
}

/*
 * Initialize the RX descriptor and attach an mbuf cluster.
 */
int
rge_newbuf(struct rge_softc *sc, int idx)
{
	struct mbuf *m;
	struct rge_rx_desc *r;
	struct rge_rxq *rxq;
	bus_dmamap_t rxmap;

	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;

	rxq = &sc->rge_ldata.rge_rxq[idx];
	rxmap = rxq->rxq_dmamap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT))
		goto out;

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Map the segments into RX descriptors. */
	r = &sc->rge_ldata.rge_rx_list[idx];

	if (RGE_OWN(r)) {
		device_printf(sc->sc_dev, "tried to map busy RX descriptor\n");
		goto out;
	}

	rxq->rxq_mbuf = m;

	r->rge_extsts = 0;
	r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
	r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));

	r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
	if (idx == RGE_RX_LIST_CNT - 1)
		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);

	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
out:
	if (m != NULL)
		m_freem(m);
	return (ENOMEM);
}

void
rge_discard_rxbuf(struct rge_softc *sc, int idx)
{
	struct rge_rx_desc *r;

	r = &sc->rge_ldata.rge_rx_list[idx];

	r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
	r->rge_extsts = 0;
	if (idx == RGE_RX_LIST_CNT - 1)
		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

int
rge_rx_list_init(struct rge_softc *sc)
{
	int i;

	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);

	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
		if (rge_newbuf(sc, i) == ENOBUFS)
			return (ENOBUFS);
	}

	sc->rge_ldata.rge_rxq_prodidx = sc->rge_ldata.rge_rxq_considx = 0;
	sc->rge_head = sc->rge_tail = NULL;

	return (0);
}

void
rge_tx_list_init(struct rge_softc *sc)
{
	int i;

	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);

	for (i = 0; i < RGE_TX_LIST_CNT; i++)
		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
}

int
rge_rxeof(struct rge_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rge_rx_desc *cur_rx;
	struct rge_rxq *rxq;
	uint32_t rxstat, extsts;
	int i, total_len, rx = 0;

	for (i = sc->rge_ldata.rge_rxq_considx; ; i = RGE_NEXT_RX_DESC(i)) {
		/* Invalidate the descriptor memory. */
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->rge_ldata.rge_rx_list[i];

		if (RGE_OWN(cur_rx))
			break;

		rxstat = letoh32(cur_rx->rge_cmdsts);
		extsts = letoh32(cur_rx->rge_extsts);

		total_len = RGE_RXBYTES(cur_rx);
		rxq = &sc->rge_ldata.rge_rxq[i];
		m = rxq->rxq_mbuf;
		rxq->rxq_mbuf = NULL;
		rx = 1;

		/* Invalidate the RX mbuf and unload its map. */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);

		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
			rge_discard_rxbuf(sc, i);
			continue;
		}

		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
			if_statinc(ifp, if_ierrors);
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_discard_rxbuf(sc, i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */
		if (rge_newbuf(sc, i) == ENOBUFS) {
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_discard_rxbuf(sc, i);
			continue;
		}

		m_set_rcvif(m, ifp);
		if (sc->rge_head != NULL) {
			m->m_len = total_len;
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rge_tail->m_next = m;
			}
			m = sc->rge_head;
			sc->rge_head = sc->rge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
#if 0
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);
#else
		{
			m->m_pkthdr.len = m->m_len = total_len;
			m->m_flags |= M_HASFCS;
		}
#endif
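		/*
		 * Keeping total_len and setting M_HASFCS (rather than
		 * trimming ETHER_CRC_LEN here, as the disabled branch
		 * above would) tells the NetBSD ethernet input path that
		 * the 4-byte FCS is still attached, so it strips the CRC
		 * itself.
		 */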

#if notyet
		/* Check IP header checksum. */
		if (!(rxstat & RGE_RDCMDSTS_IPCSUMERR) &&
		    (extsts & RGE_RDEXTSTS_IPV4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum. */
		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
		    (((rxstat & RGE_RDCMDSTS_TCPPKT) &&
		    !(rxstat & RGE_RDCMDSTS_TCPCSUMERR)) ||
		    ((rxstat & RGE_RDCMDSTS_UDPPKT) &&
		    !(rxstat & RGE_RDCMDSTS_UDPCSUMERR))))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;
#endif

		if (extsts & RGE_RDEXTSTS_VTAG) {
			vlan_set_tag(m,
			    bswap16(extsts & RGE_RDEXTSTS_VLAN_MASK));
		}

		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	sc->rge_ldata.rge_rxq_considx = i;

	return (rx);
}

int
rge_txeof(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rge_txq *txq;
	uint32_t txstat;
	int cons, idx, prod;
	int free = 0;

	prod = sc->rge_ldata.rge_txq_prodidx;
	cons = sc->rge_ldata.rge_txq_considx;

	while (prod != cons) {
		txq = &sc->rge_ldata.rge_txq[cons];
		idx = txq->txq_descidx;

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);

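		/*
		 * "free" doubles as a status code: 0 means nothing was
		 * reclaimed, 1 means at least one descriptor came back,
		 * and 2 means the scan stopped early because the chip
		 * still owns a descriptor (transmission in progress).
		 */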
		if (txstat & RGE_TDCMDSTS_OWN) {
			free = 2;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
			if_statinc(ifp, if_collisions);
		if (txstat & RGE_TDCMDSTS_TXERR)
			if_statinc(ifp, if_oerrors);

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		cons = RGE_NEXT_TX_DESC(idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	sc->rge_ldata.rge_txq_considx = cons;

#if 0
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
	else if (free == 2)
		ifq_serialize(&ifp->if_snd, &sc->sc_task);
	else
		ifp->if_timer = 0;
#else
#if 0
	if (!IF_IS_EMPTY(&ifp->if_snd))
		rge_start(ifp);
	else if (free == 2)
		if (0) { rge_txstart(&sc->sc_task, sc); }
	else
#endif
		ifp->if_timer = 0;
#endif

	return (1);
}

void
rge_reset(struct rge_softc *sc)
{
	int i;

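	/*
	 * Reset sequence: first enable the RXDV gate so the chip stops
	 * taking in new frames, then poll until the MCU reports both
	 * FIFOs empty (with an extra handshake on MAC_CFG4/5), and only
	 * then issue the soft reset and wait for the chip to clear the
	 * reset bit.
	 */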
	/* Enable RXDV gate. */
	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	for (i = 0; i < 3000; i++) {
		DELAY(50);
		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY))
			break;
	}
	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
		for (i = 0; i < 3000; i++) {
			DELAY(50);
			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
				break;
		}
	}

	DELAY(2000);

	/* Soft reset. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(100);
		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
			break;
	}
	if (i == RGE_TIMEOUT)
		device_printf(sc->sc_dev, "reset never completed!\n");
}

void
rge_iff(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2];
	uint32_t rxfilt;
	int h = 0;

	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;

	if (ifp->if_flags & IFF_PROMISC) {
 allmulti:
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RGE_RXCFG_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RGE_RXCFG_ALLPHYS;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= RGE_RXCFG_MULTI;
		/* Program new filter. */
		memset(hashes, 0, sizeof(hashes));

		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0) {
				ETHER_UNLOCK(ec);
				goto allmulti;
			}
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}

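	/*
	 * The top 6 bits of the big-endian CRC32 of each multicast
	 * address select one bit of the 64-bit hash filter.  The filter
	 * words are then written byte-swapped and in reversed order
	 * (hashes[1] into MAR0, hashes[0] into MAR4), which evidently is
	 * the layout the 8125's MAR registers expect; the same
	 * transformation appears in other RTL8125 drivers.
	 */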
	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
	RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
	RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
}

void
rge_set_phy_power(struct rge_softc *sc, int on)
{
	int i;

	if (on) {
		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);

		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);

		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
				break;
			DELAY(1000);
		}
	} else {
		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
	}
}

void
rge_phy_config(struct rge_softc *sc)
{
	/* Read microcode version. */
	rge_write_phy_ocp(sc, 0xa436, 0x801e);
	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);

	switch (sc->rge_type) {
	case MAC_CFG2:
		rge_phy_config_mac_cfg2(sc);
		break;
	case MAC_CFG3:
		rge_phy_config_mac_cfg3(sc);
		break;
	case MAC_CFG4:
		rge_phy_config_mac_cfg4(sc);
		break;
	case MAC_CFG5:
		rge_phy_config_mac_cfg5(sc);
		break;
	default:
		break;	/* Can't happen. */
	}

	rge_write_phy(sc, 0x0a5b, 0x12,
	    rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);

	/* Disable EEE. */
	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
	}
	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);

	rge_patch_phy_mcu(sc, 1);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
	rge_patch_phy_mcu(sc, 0);
}

void
rge_phy_config_mac_cfg2(struct rge_softc *sc)
{
	uint16_t val;
	int i;

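	/*
	 * This and the other rge_phy_config_mac_cfg*() routines replay
	 * per-revision EPHY and PHY OCP register recipes taken from
	 * Realtek's reference driver.  The registers are undocumented,
	 * so the values are applied verbatim; the microcode version read
	 * into sc->rge_mcodever by rge_phy_config() lets
	 * rge_phy_config_mcu() skip reloading PHY microcode that is
	 * already current.
	 */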
	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
		    rtl8125_mac_cfg2_ephy[i].val);

	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);

	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);

	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
	rge_write_phy_ocp(sc, 0xa436, 0x8102);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x8105);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
	rge_write_phy_ocp(sc, 0xa436, 0x8100);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
	rge_write_phy_ocp(sc, 0xa436, 0x8104);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
	rge_write_phy_ocp(sc, 0xa436, 0x8106);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
	rge_write_phy_ocp(sc, 0xa436, 0x819f);
	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}

void
rge_phy_config_mac_cfg3(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;
	static const uint16_t mac_cfg3_a438_value[] =
	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };

	static const uint16_t mac_cfg3_b88e_value[] =
	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };

	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
		    rtl8125_mac_cfg3_ephy[i].val);

	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
	rge_write_ephy(sc, 0x002a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
	rge_write_ephy(sc, 0x0002, 0x6042);
	rge_write_ephy(sc, 0x0006, 0x0014);
	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
	rge_write_ephy(sc, 0x006a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
	rge_write_ephy(sc, 0x0042, 0x6042);
	rge_write_ephy(sc, 0x0046, 0x0014);

	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);

	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
	for (i = 0; i < 26; i++)
		rge_write_phy_ocp(sc, 0xa438, 0);
	rge_write_phy_ocp(sc, 0xa436, 0x8257);
	rge_write_phy_ocp(sc, 0xa438, 0x020f);
	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	rge_write_phy_ocp(sc, 0xa438, 0x7843);

	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);

	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}
1770 void
1771 rge_phy_config_mac_cfg4(struct rge_softc *sc)
1772 {
1773 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1774 	uint16_t val;
1775 	int i;
1776 	static const uint16_t mac_cfg4_b87c_value[] =
1777 	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
1778 	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
1779 	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
1780 	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
1781 	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
1782 	      0x80b0, 0x0f31 };
1783 
1784 	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
1785 		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
1786 		    rtl8125_mac_cfg4_ephy[i].val);
1787 
1788 	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
1789 	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
1790 	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
1791 	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
1792 	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
1793 	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
1794 	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
1795 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1796 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);
1797 
1798 	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);
1799 
1800 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1801 	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
1802 	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
1803 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1804 	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
1805 	for (i = 0; i < 6; i++) {
1806 		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
1807 		if (i < 3)
1808 			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
1809 		else
1810 			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
1811 	}
1812 	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
1813 	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
1814 	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
1815 	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
1816 	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
1817 	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
1818 	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
1819 	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
1820 	rge_write_phy_ocp(sc, 0xb87c, 0x80f2);
1821 	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
1822 	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
1823 	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
1824 	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
1825 	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
1826 	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
1827 	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
1828 	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
1829 	rge_write_phy_ocp(sc, 0xac4e, 0x0db4);
1830 	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
1831 	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
1832 	rge_write_phy_ocp(sc, 0xad08, 0x0007);
1833 	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
1834 		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
1835 		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
1836 	}
1837 	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
1838 	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
1839 	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
1840 	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
1841 	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
1842 	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
1843 	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
1844 	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
1845 	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
1846 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
1847 	    32);
1848 	rge_write_phy_ocp(sc, 0xa436, 0x816c);
1849 	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
1850 	rge_write_phy_ocp(sc, 0xa436, 0x8170);
1851 	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
1852 	rge_write_phy_ocp(sc, 0xa436, 0x8174);
1853 	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
1854 	rge_write_phy_ocp(sc, 0xa436, 0x8178);
1855 	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
1856 	rge_write_phy_ocp(sc, 0xa436, 0x817c);
1857 	rge_write_phy_ocp(sc, 0xa438, 0x0719);
1858 	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
1859 	rge_write_phy_ocp(sc, 0xa438, 0x0400);
1860 	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
1861 	rge_write_phy_ocp(sc, 0xa438, 0x0404);
1862 	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
1863 	for (i = 0; i < 6; i++) {
1864 		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
1865 		if (i == 2)
1866 			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
1867 		else
1868 			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
1869 	}
1870 	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
1871 	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
1872 	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
1873 	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
1874 	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
1875 	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
1876 	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
1877 	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
1878 	rge_write_phy_ocp(sc, 0xa436, 0x8217);
1879 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1880 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1881 	rge_write_phy_ocp(sc, 0xa436, 0x821a);
1882 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1883 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1884 	rge_write_phy_ocp(sc, 0xa436, 0x80da);
1885 	rge_write_phy_ocp(sc, 0xa438, 0x0403);
1886 	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
1887 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1888 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
1889 	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
1890 	rge_write_phy_ocp(sc, 0xa438, 0x0384);
1891 	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
1892 	rge_write_phy_ocp(sc, 0xa438, 0x2007);
1893 	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
1894 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1895 	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
1896 	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
1897 	rge_write_phy_ocp(sc, 0xa438, 0xf009);
1898 	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
1899 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1900 	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
1901 	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
1902 	rge_write_phy_ocp(sc, 0xa438, 0xf083);
1903 	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
1904 	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
1905 	rge_write_phy_ocp(sc, 0xa436, 0x80df);
1906 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1907 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
1908 	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
1909 	rge_write_phy_ocp(sc, 0xa438, 0x2007);
1910 	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
1911 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1912 	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
1913 	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
1914 	rge_write_phy_ocp(sc, 0xa438, 0x8009);
1915 	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
1916 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1917 	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
1918 	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
1919 	rge_write_phy_ocp(sc, 0xa438, 0x200a);
1920 	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
1921 	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
1922 	rge_write_phy_ocp(sc, 0xa436, 0x809f);
1923 	rge_write_phy_ocp(sc, 0xa438, 0x6073);
1924 	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
1925 	rge_write_phy_ocp(sc, 0xa438, 0x000b);
1926 	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
1927 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1928 	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
1929 	rge_patch_phy_mcu(sc, 1);
1930 	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1931 	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1932 	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
1933 	rge_write_phy_ocp(sc, 0xb890, 0x0000);
1934 	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
1935 	rge_write_phy_ocp(sc, 0xb890, 0x0103);
1936 	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
1937 	rge_write_phy_ocp(sc, 0xb890, 0x0507);
1938 	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
1939 	rge_write_phy_ocp(sc, 0xb890, 0x090b);
1940 	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
1941 	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
1942 	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
1943 	rge_write_phy_ocp(sc, 0xb890, 0x1012);
1944 	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
1945 	rge_write_phy_ocp(sc, 0xb890, 0x1416);
1946 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1947 	rge_patch_phy_mcu(sc, 0);
1948 	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
1949 	RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
1950 	rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
1951 	rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
1952 	rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
1953 	rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
1954 	rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
1955 	RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
1956 	rge_write_phy_ocp(sc, 0xa436, 0x817d);
1957 	RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
1958 }
1959 
1960 void
1961 rge_phy_config_mac_cfg5(struct rge_softc *sc)
1962 {
1963 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1964 	uint16_t val;
1965 	int i;
1966 
1967 	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
1968 		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
1969 		    rtl8125_mac_cfg5_ephy[i].val);
1970 
1971 	val = rge_read_ephy(sc, 0x0022) & ~0x0030;
1972 	rge_write_ephy(sc, 0x0022, val | 0x0020);
1973 	val = rge_read_ephy(sc, 0x0062) & ~0x0030;
1974 	rge_write_ephy(sc, 0x0062, val | 0x0020);
1975 
1976 	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);
1977 
1978 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1979 	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
1980 	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
1981 	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
1982 	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
1983 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
1984 	    32);
1985 	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
1986 	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
1987 	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
1988 	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
1989 	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
1990 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1991 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
1992 	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
1993 	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
1994 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1995 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
1996 	for (i = 0; i < 10; i++) {
1997 		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
1998 		rge_write_phy_ocp(sc, 0xa438, 0x2417);
1999 	}
2000 	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
2001 	val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
2002 	rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
2003 }
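/*
 * The loop above walks extended registers 0x8044 + 6 * i for
 * i = 0..9, i.e. 0x8044, 0x804a, ... up to 0x8044 + 54 = 0x807a
 * (iteration i = 3, for instance, selects 0x8056), loading the
 * constant 0x2417 into each slot of what appears to be a ten-entry
 * tuning table.
 */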
2004 
2005 void
2006 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
2007 {
2008 	if (sc->rge_mcodever != mcode_version) {
2009 		int i;
2010 
2011 		rge_patch_phy_mcu(sc, 1);
2012 
2013 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
2014 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
2015 			if (sc->rge_type == MAC_CFG2)
2016 				rge_write_phy_ocp(sc, 0xa438, 0x8600);
2017 			else
2018 				rge_write_phy_ocp(sc, 0xa438, 0x8601);
2019 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
2020 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
2021 
2022 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
2023 		}
2024 
2025 		if (sc->rge_type == MAC_CFG2) {
2026 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
2027 				rge_write_phy_ocp(sc,
2028 				    rtl8125_mac_cfg2_mcu[i].reg,
2029 				    rtl8125_mac_cfg2_mcu[i].val);
2030 			}
2031 		} else if (sc->rge_type == MAC_CFG3) {
2032 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
2033 				rge_write_phy_ocp(sc,
2034 				    rtl8125_mac_cfg3_mcu[i].reg,
2035 				    rtl8125_mac_cfg3_mcu[i].val);
2036 			}
2037 		} else if (sc->rge_type == MAC_CFG4) {
2038 			for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
2039 				rge_write_phy_ocp(sc,
2040 				    rtl8125_mac_cfg4_mcu[i].reg,
2041 				    rtl8125_mac_cfg4_mcu[i].val);
2042 			}
2043 		} else if (sc->rge_type == MAC_CFG5) {
2044 			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
2045 				rge_write_phy_ocp(sc,
2046 				    rtl8125_mac_cfg5_mcu[i].reg,
2047 				    rtl8125_mac_cfg5_mcu[i].val);
2048 			}
2049 		}
2050 
2051 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
2052 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
2053 
2054 			rge_write_phy_ocp(sc, 0xa436, 0);
2055 			rge_write_phy_ocp(sc, 0xa438, 0);
2056 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
2057 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
2058 			rge_write_phy_ocp(sc, 0xa438, 0);
2059 		}
2060 
2061 		rge_patch_phy_mcu(sc, 0);
2062 
2063 		/* Write microcode version. */
2064 		rge_write_phy_ocp(sc, 0xa436, 0x801e);
2065 		rge_write_phy_ocp(sc, 0xa438, mcode_version);
2066 	}
2067 }
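/*
 * rge_phy_config_mcu() is idempotent: the per-chip patch table is
 * streamed only when the version the driver carries differs from
 * sc->rge_mcodever (read back from extended register 0x801e at attach
 * time), and the new version is recorded at 0x801e afterwards so that
 * a re-init skips the download.
 */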
2068 
2069 void
2070 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
2071 {
2072 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2073 	RGE_WRITE_4(sc, RGE_MAC0,
2074 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2075 	RGE_WRITE_4(sc, RGE_MAC4,
2076 	    addr[5] <<  8 | addr[4]);
2077 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2078 }
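/*
 * The station address is packed little-endian, lowest byte first, so
 * for 00:11:22:33:44:55 the two writes above post 0x33221100 to
 * RGE_MAC0 and 0x00005544 to RGE_MAC4.  The RGE_EECMD_WRITECFG bracket
 * temporarily unlocks the otherwise read-only ID registers.
 */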
2079 
2080 void
2081 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
2082 {
2083 	*(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
2084 	*(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
2085 }
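/*
 * The casts above assume addr is suitably aligned for a 32-bit store.
 * A sketch of an alignment- and aliasing-safe alternative (not the
 * driver's code) would stage through locals:
 *
 *	uint32_t lo = RGE_READ_4(sc, RGE_ADDR0);
 *	uint16_t hi = RGE_READ_2(sc, RGE_ADDR1);
 *
 *	memcpy(&addr[0], &lo, sizeof(lo));
 *	memcpy(&addr[4], &hi, sizeof(hi));
 */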
2086 
2087 void
2088 rge_hw_init(struct rge_softc *sc)
2089 {
2090 	int i;
2091 
2092 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2093 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
2094 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
2095 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2096 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
2097 
2098 	/* Disable UPS. */
2099 	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
2100 
2101 	/* Configure MAC MCU. */
2102 	rge_write_mac_ocp(sc, 0xfc38, 0);
2103 
2104 	for (i = 0xfc28; i < 0xfc38; i += 2)
2105 		rge_write_mac_ocp(sc, i, 0);
2106 
2107 	DELAY(3000);
2108 	rge_write_mac_ocp(sc, 0xfc26, 0);
2109 
2110 	if (sc->rge_type == MAC_CFG3) {
2111 		for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
2112 			rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
2113 			    rtl8125_mac_bps[i].val);
2114 		}
2115 	} else if (sc->rge_type == MAC_CFG5) {
2116 		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
2117 			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
2118 			    rtl8125b_mac_bps[i].val);
2119 		}
2120 	}
2121 
2122 	/* Disable PHY power saving. */
2123 	rge_disable_phy_ocp_pwrsave(sc);
2124 
2125 	/* Set PCIe uncorrectable error status. */
2126 	rge_write_csi(sc, 0x108,
2127 	    rge_read_csi(sc, 0x108) | 0x00100000);
2128 }
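/*
 * The 0xfc26..0xfc38 writes above scrub the MAC MCU's break-point
 * registers before the per-chip rtl8125_mac_bps/rtl8125b_mac_bps
 * tables re-arm them; 0xfc38 appears to act as the enable word and is
 * cleared first so that no stale break-point fires mid-update.
 */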
2129 
2130 void
2131 rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
2132 {
2133 	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
2134 		rge_patch_phy_mcu(sc, 1);
2135 		rge_write_phy_ocp(sc, 0xc416, 0);
2136 		rge_write_phy_ocp(sc, 0xc416, 0x0500);
2137 		rge_patch_phy_mcu(sc, 0);
2138 	}
2139 }
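/*
 * 0xc416 == 0x0500 is the "OCP power saving off" setting; the initial
 * comparison avoids a needless patch-mode round trip when the PHY is
 * already configured that way.
 */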
2140 
2141 void
2142 rge_patch_phy_mcu(struct rge_softc *sc, int set)
2143 {
2144 	int i;
2145 
2146 	if (set)
2147 		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
2148 	else
2149 		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
2150 
2151 	for (i = 0; i < 1000; i++) {
2152 		if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) == 0x0040)
2153 			break;
2154 		DELAY(100);
2155 	}
2156 	if (i == 1000) {
2157 		DPRINTF(("timeout waiting to patch phy mcu\n"));
2158 		return;
2159 	}
2160 }
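/*
 * 0xb820 bit 4 requests (set) or releases (clear) PHY patch mode, and
 * 0xb800 bit 6 is polled as the acknowledgement, giving the handshake
 * a budget of 1000 * 100us = 100ms before the DPRINTF fires.  Callers
 * always pair rge_patch_phy_mcu(sc, 1) with rge_patch_phy_mcu(sc, 0)
 * around microcode and table downloads such as the 0xb88e/0xb890
 * sequences in the MAC_CFG3/MAC_CFG4 routines above.
 */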
2161 
2162 void
2163 rge_add_media_types(struct rge_softc *sc)
2164 {
2165 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
2166 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
2167 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
2168 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
2169 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
2170 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2171 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
2172 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
2173 }
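/*
 * All four speeds the RTL8125 supports (10/100/1000/2500BASE-T) are
 * published in half- and full-duplex flavours; IFM_AUTO is added
 * separately by the caller (rge_attach()) and is the default media.
 */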
2174 
2175 void
2176 rge_config_imtype(struct rge_softc *sc, int imtype)
2177 {
2178 	switch (imtype) {
2179 	case RGE_IMTYPE_NONE:
2180 		sc->rge_intrs = RGE_INTRS;
2181 		sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
2182 		    RGE_ISR_RX_FIFO_OFLOW;
2183 		sc->rge_tx_ack = RGE_ISR_TX_OK;
2184 		break;
2185 	case RGE_IMTYPE_SIM:
2186 		sc->rge_intrs = RGE_INTRS_TIMER;
2187 		sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
2188 		sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
2189 		break;
2190 	default:
2191 		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
2192 	}
2193 }
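/*
 * Two moderation strategies are supported.  RGE_IMTYPE_NONE takes an
 * interrupt per event and acknowledges the individual RX/TX causes,
 * while RGE_IMTYPE_SIM ("simulated" moderation) switches the unmask
 * set to RGE_INTRS_TIMER, so ring processing is paced by the chip's
 * timer tick (RGE_ISR_PCS_TIMEOUT) instead.
 */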
2194 
2195 void
2196 rge_disable_hw_im(struct rge_softc *sc)
2197 {
2198 	RGE_WRITE_2(sc, RGE_IM, 0);
2199 }
2200 
2201 void
2202 rge_disable_sim_im(struct rge_softc *sc)
2203 {
2204 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
2205 	sc->rge_timerintr = 0;
2206 }
2207 
2208 void
2209 rge_setup_sim_im(struct rge_softc *sc)
2210 {
2211 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
2212 	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
2213 	sc->rge_timerintr = 1;
2214 }
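/*
 * 0x2600 is the reload value for the chip's interrupt timer and the
 * RGE_TIMERCNT write restarts the countdown; on expiry the chip raises
 * RGE_ISR_PCS_TIMEOUT, which rge_intr() uses as the cue to sweep the
 * rings and re-arm the timer while RGE_IMTYPE_SIM is in effect.
 */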
2215 
2216 void
2217 rge_setup_intr(struct rge_softc *sc, int imtype)
2218 {
2219 	rge_config_imtype(sc, imtype);
2220 
2221 	/* Enable interrupts. */
2222 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
2223 
2224 	switch (imtype) {
2225 	case RGE_IMTYPE_NONE:
2226 		rge_disable_sim_im(sc);
2227 		rge_disable_hw_im(sc);
2228 		break;
2229 	case RGE_IMTYPE_SIM:
2230 		rge_disable_hw_im(sc);
2231 		rge_setup_sim_im(sc);
2232 		break;
2233 	default:
2234 		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
2235 	}
2236 }
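/*
 * A typical caller is rge_init(), which uses the simulated scheme:
 *
 *	rge_setup_intr(sc, RGE_IMTYPE_SIM);
 *
 * Whichever mode is selected, the other scheme is explicitly torn
 * down, so hardware and simulated moderation are never armed at once.
 */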
2237 
2238 void
2239 rge_exit_oob(struct rge_softc *sc)
2240 {
2241 	int i;
2242 
2243 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
2244 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
2245 	    RGE_RXCFG_ERRPKT);
2246 
2247 	/* Disable RealWoW. */
2248 	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
2249 
2250 	rge_reset(sc);
2251 
2252 	/* Disable OOB. */
2253 	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
2254 
2255 	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
2256 
2257 	for (i = 0; i < 10; i++) {
2258 		DELAY(100);
2259 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2260 			break;
2261 	}
2262 
2263 	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
2264 	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
2265 	rge_write_mac_ocp(sc, 0xc01e, 0x5555);
2266 
2267 	for (i = 0; i < 10; i++) {
2268 		DELAY(100);
2269 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2270 			break;
2271 	}
2272 
2273 	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
2274 		printf("%s: UPS resume detected, waiting for PHY ready\n",
2275 		    device_xname(sc->sc_dev));
2276 		for (i = 0; i < RGE_TIMEOUT; i++) {
2277 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
2278 				break;
2279 			DELAY(1000);
2280 		}
2281 		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
2282 		if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
2283 			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
2284 		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
2285 	}
2286 }
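/*
 * "OOB" is the out-of-band state the MAC MCU parks the NIC in while no
 * host driver is attached.  The routine above reclaims the device:
 * quiesce the RX filter, reset, clear RGE_MCUCMD_IS_OOB, then give the
 * firmware two 1ms windows (polling RGE_TWICMD) to hand over.  The
 * trailing block handles resume from UPS (ultra power saving),
 * spinning up to RGE_TIMEOUT milliseconds until the PHY state field
 * (0xa420 & 7) reads 2 before clearing the UPS flags.
 */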
2287 
2288 void
2289 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
2290 {
2291 	int i;
2292 
2293 	RGE_WRITE_4(sc, RGE_CSIDR, val);
2294 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2295 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
2296 
2297 	for (i = 0; i < 10; i++) {
2298 		DELAY(100);
2299 		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
2300 			break;
2301 	}
2302 
2303 	DELAY(20);
2304 }
2305 
2306 uint32_t
2307 rge_read_csi(struct rge_softc *sc, uint32_t reg)
2308 {
2309 	int i;
2310 
2311 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2312 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
2313 
2314 	for (i = 0; i < 10; i++) {
2315 		DELAY(100);
2316 		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
2317 			break;
2318 	}
2319 
2320 	DELAY(20);
2321 
2322 	return (RGE_READ_4(sc, RGE_CSIDR));
2323 }
2324 
2325 void
2326 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2327 {
2328 	uint32_t tmp;
2329 
2330 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2331 	tmp += val;
2332 	tmp |= RGE_MACOCP_BUSY;
2333 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
2334 }
2335 
2336 uint16_t
2337 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
2338 {
2339 	uint32_t val;
2340 
2341 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2342 	RGE_WRITE_4(sc, RGE_MACOCP, val);
2343 
2344 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
2345 }
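/*
 * MAC OCP space is half-word addressed through the single RGE_MACOCP
 * doorbell: the (always even) register offset is pre-shifted right by
 * one and placed above RGE_MACOCP_ADDR_SHIFT, a write also sets
 * RGE_MACOCP_BUSY, and a read returns data in the low 16 bits.  For
 * example, rge_write_mac_ocp(sc, 0xc01e, 0x5555) posts
 * ((0xc01e >> 1) << RGE_MACOCP_ADDR_SHIFT) | RGE_MACOCP_BUSY | 0x5555.
 * Unlike the EPHY and CSI accessors, neither direction polls for
 * completion.
 */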
2346 
2347 void
2348 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
2349 {
2350 	uint32_t tmp;
2351 	int i;
2352 
2353 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2354 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
2355 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
2356 
2357 	for (i = 0; i < 10; i++) {
2358 		DELAY(100);
2359 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
2360 			break;
2361 	}
2362 
2363 	DELAY(20);
2364 }
2365 
2366 uint16_t
2367 rge_read_ephy(struct rge_softc *sc, uint16_t reg)
2368 {
2369 	uint32_t val;
2370 	int i;
2371 
2372 	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2373 	RGE_WRITE_4(sc, RGE_EPHYAR, val);
2374 
2375 	for (i = 0; i < 10; i++) {
2376 		DELAY(100);
2377 		val = RGE_READ_4(sc, RGE_EPHYAR);
2378 		if (val & RGE_EPHYAR_BUSY)
2379 			break;
2380 	}
2381 
2382 	DELAY(20);
2383 
2384 	return (val & RGE_EPHYAR_DATA_MASK);
2385 }
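/*
 * Note the opposite flag polarity in the two EPHY paths: a write has
 * completed once RGE_EPHYAR_BUSY reads back clear, whereas a read has
 * completed once the same bit reads back set (it doubles as the
 * data-ready flag), after which the low 16 bits carry the value.  The
 * CSI accessors above and the PHY OCP accessors below follow the same
 * convention.
 */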
2386 
2387 void
2388 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
2389 {
2390 	uint16_t off, phyaddr;
2391 
2392 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2393 	phyaddr <<= 4;
2394 
2395 	off = addr ? reg : 0x10 + (reg % 8);
2396 
2397 	phyaddr += (off - 16) << 1;
2398 
2399 	rge_write_phy_ocp(sc, phyaddr, val);
2400 }
2401 
2402 uint16_t
2403 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
2404 {
2405 	uint16_t off, phyaddr;
2406 
2407 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2408 	phyaddr <<= 4;
2409 
2410 	off = addr ? reg : 0x10 + (reg % 8);
2411 
2412 	phyaddr += (off - 16) << 1;
2413 
2414 	return (rge_read_phy_ocp(sc, phyaddr));
2415 }
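/*
 * With addr == 0, the arithmetic above folds a standard MII register
 * number into the GPHY OCP window: the net effect is
 * ((RGE_PHYBASE + reg / 8) << 4) + ((reg % 8) << 1), so consecutive
 * MII registers land two OCP bytes apart.  Assuming RGE_PHYBASE is
 * 0xa40, MII register 0 (BMCR) maps to OCP 0xa400 and register 1
 * (BMSR) to 0xa402.  With addr != 0 the caller names an OCP page
 * directly and reg indexes into it.
 */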
2416 
2417 void
2418 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2419 {
2420 	uint32_t tmp;
2421 	int i;
2422 
2423 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2424 	tmp |= RGE_PHYOCP_BUSY | val;
2425 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
2426 
2427 	for (i = 0; i < RGE_TIMEOUT; i++) {
2428 		DELAY(1);
2429 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
2430 			break;
2431 	}
2432 }
2433 
2434 uint16_t
2435 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
2436 {
2437 	uint32_t val;
2438 	int i;
2439 
2440 	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2441 	RGE_WRITE_4(sc, RGE_PHYOCP, val);
2442 
2443 	for (i = 0; i < RGE_TIMEOUT; i++) {
2444 		DELAY(1);
2445 		val = RGE_READ_4(sc, RGE_PHYOCP);
2446 		if (val & RGE_PHYOCP_BUSY)
2447 			break;
2448 	}
2449 
2450 	return (val & RGE_PHYOCP_DATA_MASK);
2451 }
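/*
 * The GPHY OCP doorbell mirrors the MAC one, but the PHY is slower, so
 * both directions poll with an RGE_TIMEOUT * 1us budget: writes until
 * RGE_PHYOCP_BUSY clears, reads until it sets and the low 16 bits are
 * valid.
 */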
2452 
2453 int
2454 rge_get_link_status(struct rge_softc *sc)
2455 {
2456 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
2457 }
2458 
2459 void
2460 rge_txstart(struct work *wk, void *arg)
2461 {
2462 	struct rge_softc *sc = arg;
2463 
2464 	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
2465 }
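/*
 * rge_txstart() is a workqueue callback (hence the struct work
 * argument); the RGE_TXSTART doorbell write is deferred to task
 * context, presumably to keep the enqueueing paths short under
 * NET_MPSAFE.
 */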
2466 
2467 void
2468 rge_tick(void *arg)
2469 {
2470 	struct rge_softc *sc = arg;
2471 	int s;
2472 
2473 	s = splnet();
2474 	rge_link_state(sc);
2475 	splx(s);
2476 
2477 	callout_schedule(&sc->sc_timeout, hz);
2478 }
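/*
 * rge_tick() is the periodic callout armed by rge_init(): once per
 * second (hz ticks) it refreshes the link state under splnet() and
 * re-arms itself via callout_schedule().
 */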
2479 
2480 void
2481 rge_link_state(struct rge_softc *sc)
2482 {
2483 	struct ifnet *ifp = &sc->sc_ec.ec_if;
2484 	int link = LINK_STATE_DOWN;
2485 
2486 	if (rge_get_link_status(sc))
2487 		link = LINK_STATE_UP;
2488 
2489 	if (ifp->if_link_state != link) { /* XXX not safe to access */
2490 		if_link_state_change(ifp, link);
2491 	}
2492 }
2493