xref: /openbsd-src/sys/dev/ic/rtl81x9.c (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
1 /*	$OpenBSD: rtl81x9.c,v 1.81 2013/12/28 03:35:00 deraadt Exp $ */
2 
3 /*
4  * Copyright (c) 1997, 1998
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 /*
36  * RealTek 8129/8139 PCI NIC driver
37  *
38  * Supports several extremely cheap PCI 10/100 adapters based on
39  * the RealTek chipset. Datasheets can be obtained from
40  * www.realtek.com.tw.
41  *
42  * Written by Bill Paul <wpaul@ctr.columbia.edu>
43  * Electrical Engineering Department
44  * Columbia University, New York City
45  */
46 
47 /*
48  * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is
49  * probably the worst PCI ethernet controller ever made, with the possible
50  * exception of the FEAST chip made by SMC. The 8139 supports bus-master
51  * DMA, but it has a terrible interface that nullifies any performance
52  * gains that bus-master DMA usually offers.
53  *
54  * For transmission, the chip offers a series of four TX descriptor
55  * registers. Each transmit frame must be in a contiguous buffer, aligned
56  * on a longword (32-bit) boundary. This means we almost always have to
57  * do mbuf copies in order to transmit a frame, except in the unlikely
58  * case where a) the packet fits into a single mbuf, and b) the packet
59  * is 32-bit aligned within the mbuf's data area. The presence of only
60  * four descriptor registers means that we can never have more than four
61  * packets queued for transmission at any one time.
62  *
63  * Reception is not much better. The driver has to allocate a single large
64  * buffer area (up to 64K in size) into which the chip will DMA received
65  * frames. Because we don't know where within this region received packets
66  * will begin or end, we have no choice but to copy data from the buffer
67  * area into mbufs in order to pass the packets up to the higher protocol
68  * levels.
69  *
70  * It's impossible given this rotten design to really achieve decent
71  * performance at 100Mbps, unless you happen to have a 400MHz PII or
72  * some equally overmuscled CPU to drive it.
73  *
74  * On the bright side, the 8139 does have a built-in PHY, although
75  * rather than using an MDIO serial interface like most other NICs, the
76  * PHY registers are directly accessible through the 8139's register
77  * space. The 8139 supports autonegotiation, as well as a 64-bit multicast
78  * filter.
79  *
80  * The 8129 chip is an older version of the 8139 that uses an external PHY
81  * chip. The 8129 has a serial MDIO interface for accessing the MII where
82  * the 8139 lets you directly access the on-board PHY registers. We need
83  * to select which interface to use depending on the chip type.
84  */
85 
86 #include "bpfilter.h"
87 
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/sockio.h>
91 #include <sys/mbuf.h>
92 #include <sys/malloc.h>
93 #include <sys/kernel.h>
94 #include <sys/socket.h>
95 #include <sys/device.h>
96 #include <sys/timeout.h>
97 
98 #include <net/if.h>
99 #include <net/if_dl.h>
100 #include <net/if_types.h>
101 
102 #ifdef INET
103 #include <netinet/in.h>
104 #include <netinet/in_systm.h>
105 #include <netinet/ip.h>
106 #include <netinet/if_ether.h>
107 #endif
108 
109 #include <net/if_media.h>
110 
111 #if NBPFILTER > 0
112 #include <net/bpf.h>
113 #endif
114 
115 #include <machine/bus.h>
116 
117 #include <dev/mii/mii.h>
118 #include <dev/mii/miivar.h>
119 #include <dev/pci/pcireg.h>
120 #include <dev/pci/pcivar.h>
121 #include <dev/pci/pcidevs.h>
122 
123 #include <dev/ic/rtl81x9reg.h>
124 
125 /*
126  * Various supported PHY vendors/types and their names. Note that
127  * this driver will work with pretty much any MII-compliant PHY,
128  * so failure to positively identify the chip is not a fatal error.
129  */
130 
131 void rl_tick(void *);
132 
133 int rl_encap(struct rl_softc *, struct mbuf * );
134 
135 void rl_rxeof(struct rl_softc *);
136 void rl_txeof(struct rl_softc *);
137 void rl_start(struct ifnet *);
138 int rl_ioctl(struct ifnet *, u_long, caddr_t);
139 void rl_init(void *);
140 void rl_stop(struct rl_softc *);
141 void rl_watchdog(struct ifnet *);
142 int rl_ifmedia_upd(struct ifnet *);
143 void rl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
144 
145 void rl_eeprom_getword(struct rl_softc *, int, int, u_int16_t *);
146 void rl_eeprom_putbyte(struct rl_softc *, int, int);
147 void rl_read_eeprom(struct rl_softc *, caddr_t, int, int, int, int);
148 
149 void rl_mii_sync(struct rl_softc *);
150 void rl_mii_send(struct rl_softc *, u_int32_t, int);
151 int rl_mii_readreg(struct rl_softc *, struct rl_mii_frame *);
152 int rl_mii_writereg(struct rl_softc *, struct rl_mii_frame *);
153 
154 int rl_miibus_readreg(struct device *, int, int);
155 void rl_miibus_writereg(struct device *, int, int, int);
156 void rl_miibus_statchg(struct device *);
157 
158 void rl_iff(struct rl_softc *);
159 void rl_reset(struct rl_softc *);
160 int rl_list_tx_init(struct rl_softc *);
161 
162 #define EE_SET(x)					\
163 	CSR_WRITE_1(sc, RL_EECMD,			\
164 		CSR_READ_1(sc, RL_EECMD) | x)
165 
166 #define EE_CLR(x)					\
167 	CSR_WRITE_1(sc, RL_EECMD,			\
168 		CSR_READ_1(sc, RL_EECMD) & ~x)
169 
170 /*
171  * Send a read command and address to the EEPROM, check for ACK.
172  */
173 void
174 rl_eeprom_putbyte(struct rl_softc *sc, int addr, int addr_len)
175 {
176 	int	d, i;
177 
178 	d = (RL_EECMD_READ << addr_len) | addr;
179 
180 	/*
181 	 * Feed in each bit and strobe the clock.
182 	 */
183 	for (i = RL_EECMD_LEN + addr_len; i; i--) {
184 		if (d & (1 << (i - 1)))
185 			EE_SET(RL_EE_DATAIN);
186 		else
187 			EE_CLR(RL_EE_DATAIN);
188 
189 		DELAY(100);
190 		EE_SET(RL_EE_CLK);
191 		DELAY(150);
192 		EE_CLR(RL_EE_CLK);
193 		DELAY(100);
194 	}
195 }
196 
197 /*
198  * Read a word of data stored in the EEPROM at address 'addr.'
199  */
200 void
201 rl_eeprom_getword(struct rl_softc *sc, int addr, int addr_len,
202     u_int16_t *dest)
203 {
204 	int		i;
205 	u_int16_t	word = 0;
206 
207 	/* Enter EEPROM access mode. */
208 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);
209 
210 	/*
211 	 * Send address of word we want to read.
212 	 */
213 	rl_eeprom_putbyte(sc, addr, addr_len);
214 
215 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);
216 
217 	/*
218 	 * Start reading bits from EEPROM.
219 	 */
220 	for (i = 16; i > 0; i--) {
221 		EE_SET(RL_EE_CLK);
222 		DELAY(100);
223 		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
224 			word |= 1 << (i - 1);
225 		EE_CLR(RL_EE_CLK);
226 		DELAY(100);
227 	}
228 
229 	/* Turn off EEPROM access mode. */
230 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
231 
232 	*dest = word;
233 }
234 
235 /*
236  * Read a sequence of words from the EEPROM.
237  */
238 void
239 rl_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int addr_len,
240     int cnt, int swap)
241 {
242 	int		i;
243 	u_int16_t	word = 0, *ptr;
244 
245 	for (i = 0; i < cnt; i++) {
246 		rl_eeprom_getword(sc, off + i, addr_len, &word);
247 		ptr = (u_int16_t *)(dest + (i * 2));
248 		if (swap)
249 			*ptr = letoh16(word);
250 		else
251 			*ptr = word;
252 	}
253 }
254 
255 /*
256  * MII access routines are provided for the 8129, which
257  * doesn't have a built-in PHY. For the 8139, we fake things
258  * up by diverting rl_phy_readreg()/rl_phy_writereg() to the
259  * direct access PHY registers.
260  */
261 #define MII_SET(x)					\
262 	CSR_WRITE_1(sc, RL_MII,				\
263 		CSR_READ_1(sc, RL_MII) | x)
264 
265 #define MII_CLR(x)					\
266 	CSR_WRITE_1(sc, RL_MII,				\
267 		CSR_READ_1(sc, RL_MII) & ~x)
268 
269 /*
270  * Sync the PHYs by setting data bit and strobing the clock 32 times.
271  */
272 void
273 rl_mii_sync(struct rl_softc *sc)
274 {
275 	int	i;
276 
277 	MII_SET(RL_MII_DIR|RL_MII_DATAOUT);
278 
279 	for (i = 0; i < 32; i++) {
280 		MII_SET(RL_MII_CLK);
281 		DELAY(1);
282 		MII_CLR(RL_MII_CLK);
283 		DELAY(1);
284 	}
285 }
286 
287 /*
288  * Clock a series of bits through the MII.
289  */
290 void
291 rl_mii_send(struct rl_softc *sc, u_int32_t bits, int cnt)
292 {
293 	int	i;
294 
295 	MII_CLR(RL_MII_CLK);
296 
297 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
298 		if (bits & i)
299 			MII_SET(RL_MII_DATAOUT);
300 		else
301 			MII_CLR(RL_MII_DATAOUT);
302 		DELAY(1);
303 		MII_CLR(RL_MII_CLK);
304 		DELAY(1);
305 		MII_SET(RL_MII_CLK);
306 	}
307 }
308 
309 /*
310  * Read an PHY register through the MII.
311  */
312 int
313 rl_mii_readreg(struct rl_softc *sc, struct rl_mii_frame *frame)
314 {
315 	int	i, ack, s;
316 
317 	s = splnet();
318 
319 	/*
320 	 * Set up frame for RX.
321 	 */
322 	frame->mii_stdelim = RL_MII_STARTDELIM;
323 	frame->mii_opcode = RL_MII_READOP;
324 	frame->mii_turnaround = 0;
325 	frame->mii_data = 0;
326 
327 	CSR_WRITE_2(sc, RL_MII, 0);
328 
329 	/*
330 	 * Turn on data xmit.
331 	 */
332 	MII_SET(RL_MII_DIR);
333 
334 	rl_mii_sync(sc);
335 
336 	/*
337 	 * Send command/address info.
338 	 */
339 	rl_mii_send(sc, frame->mii_stdelim, 2);
340 	rl_mii_send(sc, frame->mii_opcode, 2);
341 	rl_mii_send(sc, frame->mii_phyaddr, 5);
342 	rl_mii_send(sc, frame->mii_regaddr, 5);
343 
344 	/* Idle bit */
345 	MII_CLR((RL_MII_CLK|RL_MII_DATAOUT));
346 	DELAY(1);
347 	MII_SET(RL_MII_CLK);
348 	DELAY(1);
349 
350 	/* Turn off xmit. */
351 	MII_CLR(RL_MII_DIR);
352 
353 	/* Check for ack */
354 	MII_CLR(RL_MII_CLK);
355 	DELAY(1);
356 	ack = CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN;
357 	MII_SET(RL_MII_CLK);
358 	DELAY(1);
359 
360 	/*
361 	 * Now try reading data bits. If the ack failed, we still
362 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
363 	 */
364 	if (ack) {
365 		for(i = 0; i < 16; i++) {
366 			MII_CLR(RL_MII_CLK);
367 			DELAY(1);
368 			MII_SET(RL_MII_CLK);
369 			DELAY(1);
370 		}
371 		goto fail;
372 	}
373 
374 	for (i = 0x8000; i; i >>= 1) {
375 		MII_CLR(RL_MII_CLK);
376 		DELAY(1);
377 		if (!ack) {
378 			if (CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN)
379 				frame->mii_data |= i;
380 			DELAY(1);
381 		}
382 		MII_SET(RL_MII_CLK);
383 		DELAY(1);
384 	}
385 
386 fail:
387 
388 	MII_CLR(RL_MII_CLK);
389 	DELAY(1);
390 	MII_SET(RL_MII_CLK);
391 	DELAY(1);
392 
393 	splx(s);
394 
395 	if (ack)
396 		return(1);
397 	return(0);
398 }
399 
400 /*
401  * Write to a PHY register through the MII.
402  */
403 int
404 rl_mii_writereg(struct rl_softc *sc, struct rl_mii_frame *frame)
405 {
406 	int	s;
407 
408 	s = splnet();
409 	/*
410 	 * Set up frame for TX.
411 	 */
412 
413 	frame->mii_stdelim = RL_MII_STARTDELIM;
414 	frame->mii_opcode = RL_MII_WRITEOP;
415 	frame->mii_turnaround = RL_MII_TURNAROUND;
416 
417 	/*
418 	 * Turn on data output.
419 	 */
420 	MII_SET(RL_MII_DIR);
421 
422 	rl_mii_sync(sc);
423 
424 	rl_mii_send(sc, frame->mii_stdelim, 2);
425 	rl_mii_send(sc, frame->mii_opcode, 2);
426 	rl_mii_send(sc, frame->mii_phyaddr, 5);
427 	rl_mii_send(sc, frame->mii_regaddr, 5);
428 	rl_mii_send(sc, frame->mii_turnaround, 2);
429 	rl_mii_send(sc, frame->mii_data, 16);
430 
431 	/* Idle bit. */
432 	MII_SET(RL_MII_CLK);
433 	DELAY(1);
434 	MII_CLR(RL_MII_CLK);
435 	DELAY(1);
436 
437 	/*
438 	 * Turn off xmit.
439 	 */
440 	MII_CLR(RL_MII_DIR);
441 
442 	splx(s);
443 
444 	return(0);
445 }
446 
447 void
448 rl_iff(struct rl_softc *sc)
449 {
450 	struct ifnet		*ifp = &sc->sc_arpcom.ac_if;
451 	int			h = 0;
452 	u_int32_t		hashes[2];
453 	struct arpcom		*ac = &sc->sc_arpcom;
454 	struct ether_multi	*enm;
455 	struct ether_multistep	step;
456 	u_int32_t		rxfilt;
457 
458 	rxfilt = CSR_READ_4(sc, RL_RXCFG);
459 	rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD |
460 	    RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI);
461 	ifp->if_flags &= ~IFF_ALLMULTI;
462 
463 	/*
464 	 * Always accept frames destined to our station address.
465 	 * Always accept broadcast frames.
466 	 */
467 	rxfilt |= RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD;
468 
469 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
470 		ifp ->if_flags |= IFF_ALLMULTI;
471 		rxfilt |= RL_RXCFG_RX_MULTI;
472 		if (ifp->if_flags & IFF_PROMISC)
473 			rxfilt |= RL_RXCFG_RX_ALLPHYS;
474 		hashes[0] = hashes[1] = 0xFFFFFFFF;
475 	} else {
476 		rxfilt |= RL_RXCFG_RX_MULTI;
477 		/* Program new filter. */
478 		bzero(hashes, sizeof(hashes));
479 
480 		ETHER_FIRST_MULTI(step, ac, enm);
481 		while (enm != NULL) {
482 			h = ether_crc32_be(enm->enm_addrlo,
483 			    ETHER_ADDR_LEN) >> 26;
484 
485 			if (h < 32)
486 				hashes[0] |= (1 << h);
487 			else
488 				hashes[1] |= (1 << (h - 32));
489 
490 			ETHER_NEXT_MULTI(step, enm);
491 		}
492 	}
493 
494 	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
495 	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
496 	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
497 }
498 
499 void
500 rl_reset(struct rl_softc *sc)
501 {
502 	int	i;
503 
504 	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);
505 
506 	for (i = 0; i < RL_TIMEOUT; i++) {
507 		DELAY(10);
508 		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
509 			break;
510 	}
511 	if (i == RL_TIMEOUT)
512 		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
513 
514 }
515 
516 /*
517  * Initialize the transmit descriptors.
518  */
519 int
520 rl_list_tx_init(struct rl_softc *sc)
521 {
522 	struct rl_chain_data	*cd = &sc->rl_cdata;
523 	int			i;
524 
525 	for (i = 0; i < RL_TX_LIST_CNT; i++) {
526 		cd->rl_tx_chain[i] = NULL;
527 		CSR_WRITE_4(sc,
528 		    RL_TXADDR0 + (i * sizeof(u_int32_t)), 0x0000000);
529 	}
530 
531 	sc->rl_cdata.cur_tx = 0;
532 	sc->rl_cdata.last_tx = 0;
533 
534 	return(0);
535 }
536 
537 /*
538  * A frame has been uploaded: pass the resulting mbuf chain up to
539  * the higher level protocols.
540  *
541  * You know there's something wrong with a PCI bus-master chip design
542  * when you have to use m_devget().
543  *
544  * The receive operation is badly documented in the datasheet, so I'll
545  * attempt to document it here. The driver provides a buffer area and
546  * places its base address in the RX buffer start address register.
547  * The chip then begins copying frames into the RX buffer. Each frame
548  * is preceded by a 32-bit RX status word which specifies the length
549  * of the frame and certain other status bits. Each frame (starting with
550  * the status word) is also 32-bit aligned. The frame length is in the
551  * first 16 bits of the status word; the lower 15 bits correspond with
552  * the 'rx status register' mentioned in the datasheet.
553  *
554  * Note: to make the Alpha happy, the frame payload needs to be aligned
555  * on a 32-bit boundary. To achieve this, we cheat a bit by copying from
556  * the ring buffer starting at an address two bytes before the actual
557  * data location. We can then shave off the first two bytes using m_adj().
558  * The reason we do this is because m_devget() doesn't let us specify an
559  * offset into the mbuf storage space, so we have to artificially create
560  * one. The ring is allocated in such a way that there are a few unused
561  * bytes of space preceding it so that it will be safe for us to do the
562  * 2-byte backstep even if reading from the ring at offset 0.
563  */
564 void
565 rl_rxeof(struct rl_softc *sc)
566 {
567 	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
568 	struct mbuf	*m;
569 	int		total_len;
570 	u_int32_t	rxstat;
571 	caddr_t		rxbufpos;
572 	int		wrap = 0;
573 	u_int16_t	cur_rx;
574 	u_int16_t	limit;
575 	u_int16_t	rx_bytes = 0, max_bytes;
576 
577 	cur_rx = (CSR_READ_2(sc, RL_CURRXADDR) + 16) % RL_RXBUFLEN;
578 
579 	/* Do not try to read past this point. */
580 	limit = CSR_READ_2(sc, RL_CURRXBUF) % RL_RXBUFLEN;
581 
582 	if (limit < cur_rx)
583 		max_bytes = (RL_RXBUFLEN - cur_rx) + limit;
584 	else
585 		max_bytes = limit - cur_rx;
586 
587 	while ((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) {
588 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dmamap,
589 		    0, sc->sc_rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
590 		rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx;
591 		rxstat = *(u_int32_t *)rxbufpos;
592 
593 		/*
594 		 * Here's a totally undocumented fact for you. When the
595 		 * RealTek chip is in the process of copying a packet into
596 		 * RAM for you, the length will be 0xfff0. If you spot a
597 		 * packet header with this value, you need to stop. The
598 		 * datasheet makes absolutely no mention of this and
599 		 * RealTek should be shot for this.
600 		 */
601 		rxstat = htole32(rxstat);
602 		total_len = rxstat >> 16;
603 		if (total_len == RL_RXSTAT_UNFINISHED) {
604 			bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dmamap,
605 			    0, sc->sc_rx_dmamap->dm_mapsize,
606 			    BUS_DMASYNC_PREREAD);
607 			break;
608 		}
609 
610 		if (!(rxstat & RL_RXSTAT_RXOK) ||
611 		    total_len < ETHER_MIN_LEN ||
612 		    total_len > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
613 			ifp->if_ierrors++;
614 			rl_init(sc);
615 			bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dmamap,
616 			    0, sc->sc_rx_dmamap->dm_mapsize,
617 			    BUS_DMASYNC_PREREAD);
618 			return;
619 		}
620 
621 		/* No errors; receive the packet. */
622 		rx_bytes += total_len + 4;
623 
624 		/*
625 		 * XXX The RealTek chip includes the CRC with every
626 		 * received frame, and there's no way to turn this
627 		 * behavior off (at least, I can't find anything in
628 		 * the manual that explains how to do it) so we have
629 		 * to trim off the CRC manually.
630 		 */
631 		total_len -= ETHER_CRC_LEN;
632 
633 		/*
634 		 * Avoid trying to read more bytes than we know
635 		 * the chip has prepared for us.
636 		 */
637 		if (rx_bytes > max_bytes) {
638 			bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dmamap,
639 			    0, sc->sc_rx_dmamap->dm_mapsize,
640 			    BUS_DMASYNC_PREREAD);
641 			break;
642 		}
643 
644 		rxbufpos = sc->rl_cdata.rl_rx_buf +
645 			((cur_rx + sizeof(u_int32_t)) % RL_RXBUFLEN);
646 
647 		if (rxbufpos == (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN))
648 			rxbufpos = sc->rl_cdata.rl_rx_buf;
649 
650 		wrap = (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN) - rxbufpos;
651 
652 		if (total_len > wrap) {
653 			m = m_devget(rxbufpos, wrap, ETHER_ALIGN, ifp);
654 			if (m != NULL) {
655 				m_copyback(m, wrap, total_len - wrap,
656 				    sc->rl_cdata.rl_rx_buf, M_NOWAIT);
657 				if (m->m_pkthdr.len < total_len) {
658 					m_freem(m);
659 					m = NULL;
660 				}
661 			}
662 			cur_rx = (total_len - wrap + ETHER_CRC_LEN);
663 		} else {
664 			m = m_devget(rxbufpos, total_len, ETHER_ALIGN, ifp);
665 			cur_rx += total_len + 4 + ETHER_CRC_LEN;
666 		}
667 
668 		/*
669 		 * Round up to 32-bit boundary.
670 		 */
671 		cur_rx = (cur_rx + 3) & ~3;
672 		CSR_WRITE_2(sc, RL_CURRXADDR, cur_rx - 16);
673 
674 		if (m == NULL) {
675 			bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dmamap,
676 			    0, sc->sc_rx_dmamap->dm_mapsize,
677 			    BUS_DMASYNC_PREREAD);
678 			ifp->if_ierrors++;
679 			continue;
680 		}
681 
682 		ifp->if_ipackets++;
683 
684 #if NBPFILTER > 0
685 		/*
686 		 * Handle BPF listeners. Let the BPF user see the packet.
687 		 */
688 		if (ifp->if_bpf)
689 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
690 #endif
691 		ether_input_mbuf(ifp, m);
692 
693 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dmamap,
694 		    0, sc->sc_rx_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
695 	}
696 }
697 
698 /*
699  * A frame was downloaded to the chip. It's safe for us to clean up
700  * the list buffers.
701  */
702 void
703 rl_txeof(struct rl_softc *sc)
704 {
705 	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
706 	u_int32_t	txstat;
707 
708 	/*
709 	 * Go through our tx list and free mbufs for those
710 	 * frames that have been uploaded.
711 	 */
712 	do {
713 		if (RL_LAST_TXMBUF(sc) == NULL)
714 			break;
715 		txstat = CSR_READ_4(sc, RL_LAST_TXSTAT(sc));
716 		if (!(txstat & (RL_TXSTAT_TX_OK|
717 		    RL_TXSTAT_TX_UNDERRUN|RL_TXSTAT_TXABRT)))
718 			break;
719 
720 		ifp->if_collisions += (txstat & RL_TXSTAT_COLLCNT) >> 24;
721 
722 		bus_dmamap_sync(sc->sc_dmat, RL_LAST_TXMAP(sc),
723 		    0, RL_LAST_TXMAP(sc)->dm_mapsize,
724 		    BUS_DMASYNC_POSTWRITE);
725 		bus_dmamap_unload(sc->sc_dmat, RL_LAST_TXMAP(sc));
726 		m_freem(RL_LAST_TXMBUF(sc));
727 		RL_LAST_TXMBUF(sc) = NULL;
728 		/*
729 		 * If there was a transmit underrun, bump the TX threshold.
730 		 * Make sure not to overflow the 63 * 32byte we can address
731 		 * with the 6 available bit.
732 		 */
733 		if ((txstat & RL_TXSTAT_TX_UNDERRUN) &&
734 		    (sc->rl_txthresh < 2016))
735 			sc->rl_txthresh += 32;
736 		if (txstat & RL_TXSTAT_TX_OK)
737 			ifp->if_opackets++;
738 		else {
739 			int oldthresh;
740 
741 			ifp->if_oerrors++;
742 			if ((txstat & RL_TXSTAT_TXABRT) ||
743 			    (txstat & RL_TXSTAT_OUTOFWIN))
744 				CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
745 			oldthresh = sc->rl_txthresh;
746 			/* error recovery */
747 			rl_reset(sc);
748 			rl_init(sc);
749 			/* restore original threshold */
750 			sc->rl_txthresh = oldthresh;
751 			return;
752 		}
753 		RL_INC(sc->rl_cdata.last_tx);
754 		ifp->if_flags &= ~IFF_OACTIVE;
755 	} while (sc->rl_cdata.last_tx != sc->rl_cdata.cur_tx);
756 
757 	if (RL_LAST_TXMBUF(sc) == NULL)
758 		ifp->if_timer = 0;
759 	else if (ifp->if_timer == 0)
760 		ifp->if_timer = 5;
761 }
762 
763 int
764 rl_intr(void *arg)
765 {
766 	struct rl_softc	*sc = arg;
767 	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
768 	int		claimed = 0;
769 	u_int16_t	status;
770 
771 	/* Disable interrupts. */
772 	CSR_WRITE_2(sc, RL_IMR, 0x0000);
773 
774 	for (;;) {
775 		status = CSR_READ_2(sc, RL_ISR);
776 		/* If the card has gone away, the read returns 0xffff. */
777 		if (status == 0xffff)
778 			break;
779 		if (status != 0)
780 			CSR_WRITE_2(sc, RL_ISR, status);
781 		if ((status & RL_INTRS) == 0)
782 			break;
783 		if ((status & RL_ISR_RX_OK) || (status & RL_ISR_RX_ERR))
784 			rl_rxeof(sc);
785 		if ((status & RL_ISR_TX_OK) || (status & RL_ISR_TX_ERR))
786 			rl_txeof(sc);
787 		if (status & RL_ISR_SYSTEM_ERR) {
788 			rl_reset(sc);
789 			rl_init(sc);
790 		}
791 		claimed = 1;
792 	}
793 
794 	/* Re-enable interrupts. */
795 	CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
796 
797 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
798 		rl_start(ifp);
799 
800 	return (claimed);
801 }
802 
803 /*
804  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
805  * pointers to the fragment pointers.
806  */
807 int
808 rl_encap(struct rl_softc *sc, struct mbuf *m_head)
809 {
810 	struct mbuf	*m_new;
811 
812 	/*
813 	 * The RealTek is brain damaged and wants longword-aligned
814 	 * TX buffers, plus we can only have one fragment buffer
815 	 * per packet. We have to copy pretty much all the time.
816 	 */
817 	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
818 	if (m_new == NULL) {
819 		m_freem(m_head);
820 		return(1);
821 	}
822 	if (m_head->m_pkthdr.len > MHLEN) {
823 		MCLGET(m_new, M_DONTWAIT);
824 		if (!(m_new->m_flags & M_EXT)) {
825 			m_freem(m_new);
826 			m_freem(m_head);
827 			return(1);
828 		}
829 	}
830 	m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t));
831 	m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
832 
833 	/* Pad frames to at least 60 bytes. */
834 	if (m_new->m_pkthdr.len < RL_MIN_FRAMELEN) {
835 		/*
836 		 * Make security-conscious people happy: zero out the
837 		 * bytes in the pad area, since we don't know what
838 		 * this mbuf cluster buffer's previous user might
839 		 * have left in it.
840 		 */
841 		bzero(mtod(m_new, char *) + m_new->m_pkthdr.len,
842 		    RL_MIN_FRAMELEN - m_new->m_pkthdr.len);
843 		m_new->m_pkthdr.len +=
844 		    (RL_MIN_FRAMELEN - m_new->m_pkthdr.len);
845 		m_new->m_len = m_new->m_pkthdr.len;
846 	}
847 
848 	if (bus_dmamap_load_mbuf(sc->sc_dmat, RL_CUR_TXMAP(sc),
849 	    m_new, BUS_DMA_NOWAIT) != 0) {
850 		m_freem(m_new);
851 		m_freem(m_head);
852 		return (1);
853 	}
854 	m_freem(m_head);
855 
856 	RL_CUR_TXMBUF(sc) = m_new;
857 	bus_dmamap_sync(sc->sc_dmat, RL_CUR_TXMAP(sc), 0,
858 	    RL_CUR_TXMAP(sc)->dm_mapsize, BUS_DMASYNC_PREWRITE);
859 	return(0);
860 }
861 
862 /*
863  * Main transmit routine.
864  */
865 void
866 rl_start(struct ifnet *ifp)
867 {
868 	struct rl_softc	*sc = ifp->if_softc;
869 	struct mbuf	*m_head = NULL;
870 	int		pkts = 0;
871 
872 	while (RL_CUR_TXMBUF(sc) == NULL) {
873 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
874 		if (m_head == NULL)
875 			break;
876 
877 		/* Pack the data into the descriptor. */
878 		if (rl_encap(sc, m_head))
879 			break;
880 		pkts++;
881 
882 #if NBPFILTER > 0
883 		/*
884 		 * If there's a BPF listener, bounce a copy of this frame
885 		 * to him.
886 		 */
887 		if (ifp->if_bpf)
888 			bpf_mtap(ifp->if_bpf, RL_CUR_TXMBUF(sc),
889 			    BPF_DIRECTION_OUT);
890 #endif
891 		/*
892 		 * Transmit the frame.
893 		 */
894 		CSR_WRITE_4(sc, RL_CUR_TXADDR(sc),
895 		    RL_CUR_TXMAP(sc)->dm_segs[0].ds_addr);
896 		CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc),
897 		    RL_TXTHRESH(sc->rl_txthresh) |
898 		    RL_CUR_TXMAP(sc)->dm_segs[0].ds_len);
899 
900 		RL_INC(sc->rl_cdata.cur_tx);
901 
902 		/*
903 		 * Set a timeout in case the chip goes out to lunch.
904 		 */
905 		ifp->if_timer = 5;
906 	}
907 
908 	if (pkts == 0)
909 		return;
910 
911 	/*
912 	 * We broke out of the loop because all our TX slots are
913 	 * full. Mark the NIC as busy until it drains some of the
914 	 * packets from the queue.
915 	 */
916 	if (RL_CUR_TXMBUF(sc) != NULL)
917 		ifp->if_flags |= IFF_OACTIVE;
918 }
919 
920 void
921 rl_init(void *xsc)
922 {
923 	struct rl_softc	*sc = xsc;
924 	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
925 	int		s;
926 
927 	s = splnet();
928 
929 	/*
930 	 * Cancel pending I/O and free all RX/TX buffers.
931 	 */
932 	rl_stop(sc);
933 
934 	/*
935 	 * Init our MAC address.  Even though the chipset
936 	 * documentation doesn't mention it, we need to enter "Config
937 	 * register write enable" mode to modify the ID registers.
938 	 */
939 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
940 	CSR_WRITE_RAW_4(sc, RL_IDR0,
941 	    (u_int8_t *)(&sc->sc_arpcom.ac_enaddr[0]));
942 	CSR_WRITE_RAW_4(sc, RL_IDR4,
943 	    (u_int8_t *)(&sc->sc_arpcom.ac_enaddr[4]));
944 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
945 
946 	/* Init the RX buffer pointer register. */
947 	CSR_WRITE_4(sc, RL_RXADDR, sc->rl_cdata.rl_rx_buf_pa);
948 
949 	/* Init TX descriptors. */
950 	rl_list_tx_init(sc);
951 
952 	/*
953 	 * Enable transmit and receive.
954 	 */
955 	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
956 
957 	/*
958 	 * Set the initial TX and RX configuration.
959 	 */
960 	CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
961 	CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);
962 
963 	/*
964 	 * Program promiscuous mode and multicast filters.
965 	 */
966 	rl_iff(sc);
967 
968 	/*
969 	 * Enable interrupts.
970 	 */
971 	CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
972 
973 	/* Set initial TX threshold */
974 	sc->rl_txthresh = RL_TX_THRESH_INIT;
975 
976 	/* Start RX/TX process. */
977 	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
978 
979 	/* Enable receiver and transmitter. */
980 	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
981 
982 	mii_mediachg(&sc->sc_mii);
983 
984 	CSR_WRITE_1(sc, RL_CFG1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX);
985 
986 	ifp->if_flags |= IFF_RUNNING;
987 	ifp->if_flags &= ~IFF_OACTIVE;
988 
989 	splx(s);
990 
991 	timeout_add_sec(&sc->sc_tick_tmo, 1);
992 }
993 
994 /*
995  * Set media options.
996  */
997 int
998 rl_ifmedia_upd(struct ifnet *ifp)
999 {
1000 	struct rl_softc *sc = (struct rl_softc *)ifp->if_softc;
1001 
1002 	mii_mediachg(&sc->sc_mii);
1003 	return (0);
1004 }
1005 
1006 /*
1007  * Report current media status.
1008  */
1009 void
1010 rl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1011 {
1012 	struct rl_softc *sc = ifp->if_softc;
1013 
1014 	mii_pollstat(&sc->sc_mii);
1015 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
1016 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
1017 }
1018 
1019 int
1020 rl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1021 {
1022 	struct rl_softc	*sc = ifp->if_softc;
1023 	struct ifreq	*ifr = (struct ifreq *) data;
1024 	struct ifaddr	*ifa = (struct ifaddr *) data;
1025 	int		s, error = 0;
1026 
1027 	s = splnet();
1028 
1029 	switch(command) {
1030 	case SIOCSIFADDR:
1031 		ifp->if_flags |= IFF_UP;
1032 		if (!(ifp->if_flags & IFF_RUNNING))
1033 			rl_init(sc);
1034 #ifdef INET
1035 		if (ifa->ifa_addr->sa_family == AF_INET)
1036 			arp_ifinit(&sc->sc_arpcom, ifa);
1037 #endif
1038 		break;
1039 
1040 	case SIOCSIFFLAGS:
1041 		if (ifp->if_flags & IFF_UP) {
1042 			if (ifp->if_flags & IFF_RUNNING)
1043 				error = ENETRESET;
1044 			else
1045 				rl_init(sc);
1046 		} else {
1047 			if (ifp->if_flags & IFF_RUNNING)
1048 				rl_stop(sc);
1049 		}
1050 		break;
1051 
1052 	case SIOCGIFMEDIA:
1053 	case SIOCSIFMEDIA:
1054 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
1055 		break;
1056 
1057 	default:
1058 		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
1059 	}
1060 
1061 	if (error == ENETRESET) {
1062 		if (ifp->if_flags & IFF_RUNNING)
1063 			rl_iff(sc);
1064 		error = 0;
1065 	}
1066 
1067 	splx(s);
1068 	return(error);
1069 }
1070 
1071 void
1072 rl_watchdog(struct ifnet *ifp)
1073 {
1074 	struct rl_softc	*sc = ifp->if_softc;
1075 
1076 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1077 	ifp->if_oerrors++;
1078 	rl_txeof(sc);
1079 	rl_rxeof(sc);
1080 	rl_init(sc);
1081 }
1082 
1083 /*
1084  * Stop the adapter and free any mbufs allocated to the
1085  * RX and TX lists.
1086  */
1087 void
1088 rl_stop(struct rl_softc *sc)
1089 {
1090 	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
1091 	int		i;
1092 
1093 	ifp->if_timer = 0;
1094 
1095 	timeout_del(&sc->sc_tick_tmo);
1096 
1097 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1098 
1099 	CSR_WRITE_1(sc, RL_COMMAND, 0x00);
1100 	CSR_WRITE_2(sc, RL_IMR, 0x0000);
1101 
1102 	/*
1103 	 * Free the TX list buffers.
1104 	 */
1105 	for (i = 0; i < RL_TX_LIST_CNT; i++) {
1106 		if (sc->rl_cdata.rl_tx_chain[i] != NULL) {
1107 			bus_dmamap_sync(sc->sc_dmat,
1108 			    sc->rl_cdata.rl_tx_dmamap[i], 0,
1109 			    sc->rl_cdata.rl_tx_dmamap[i]->dm_mapsize,
1110 			    BUS_DMASYNC_POSTWRITE);
1111 			bus_dmamap_unload(sc->sc_dmat,
1112 			    sc->rl_cdata.rl_tx_dmamap[i]);
1113 			m_freem(sc->rl_cdata.rl_tx_chain[i]);
1114 			sc->rl_cdata.rl_tx_chain[i] = NULL;
1115 			CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(u_int32_t)),
1116 				0x00000000);
1117 		}
1118 	}
1119 }
1120 
1121 int
1122 rl_attach(struct rl_softc *sc)
1123 {
1124 	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
1125 	int		rseg, i;
1126 	u_int16_t	rl_id;
1127 	caddr_t		kva;
1128 	int		addr_len;
1129 
1130 	rl_reset(sc);
1131 
1132 	/*
1133 	 * Check EEPROM type 9346 or 9356.
1134 	 */
1135 	rl_read_eeprom(sc, (caddr_t)&rl_id, RL_EE_ID, RL_EEADDR_LEN1, 1, 0);
1136 	if (rl_id == 0x8129)
1137 		addr_len = RL_EEADDR_LEN1;
1138 	else
1139 		addr_len = RL_EEADDR_LEN0;
1140 
1141 	/*
1142 	 * Get station address.
1143 	 */
1144 	rl_read_eeprom(sc, (caddr_t)sc->sc_arpcom.ac_enaddr, RL_EE_EADDR,
1145 	    addr_len, 3, 1);
1146 
1147 	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
1148 
1149 	if (bus_dmamem_alloc(sc->sc_dmat, RL_RXBUFLEN + 32, PAGE_SIZE, 0,
1150 	    &sc->sc_rx_seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
1151 		printf("\n%s: can't alloc rx buffers\n", sc->sc_dev.dv_xname);
1152 		return (1);
1153 	}
1154 	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_rx_seg, rseg,
1155 	    RL_RXBUFLEN + 32, &kva, BUS_DMA_NOWAIT)) {
1156 		printf("%s: can't map dma buffers (%d bytes)\n",
1157 		    sc->sc_dev.dv_xname, RL_RXBUFLEN + 32);
1158 		bus_dmamem_free(sc->sc_dmat, &sc->sc_rx_seg, rseg);
1159 		return (1);
1160 	}
1161 	if (bus_dmamap_create(sc->sc_dmat, RL_RXBUFLEN + 32, 1,
1162 	    RL_RXBUFLEN + 32, 0, BUS_DMA_NOWAIT, &sc->sc_rx_dmamap)) {
1163 		printf("%s: can't create dma map\n", sc->sc_dev.dv_xname);
1164 		bus_dmamem_unmap(sc->sc_dmat, kva, RL_RXBUFLEN + 32);
1165 		bus_dmamem_free(sc->sc_dmat, &sc->sc_rx_seg, rseg);
1166 		return (1);
1167 	}
1168 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_dmamap, kva,
1169 	    RL_RXBUFLEN + 32, NULL, BUS_DMA_NOWAIT)) {
1170 		printf("%s: can't load dma map\n", sc->sc_dev.dv_xname);
1171 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamap);
1172 		bus_dmamem_unmap(sc->sc_dmat, kva, RL_RXBUFLEN + 32);
1173 		bus_dmamem_free(sc->sc_dmat, &sc->sc_rx_seg, rseg);
1174 		return (1);
1175 	}
1176 	sc->rl_cdata.rl_rx_buf = kva;
1177 	sc->rl_cdata.rl_rx_buf_pa = sc->sc_rx_dmamap->dm_segs[0].ds_addr;
1178 
1179 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dmamap,
1180 	    0, sc->sc_rx_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1181 
1182 	for (i = 0; i < RL_TX_LIST_CNT; i++) {
1183 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
1184 		    BUS_DMA_NOWAIT, &sc->rl_cdata.rl_tx_dmamap[i]) != 0) {
1185 			printf("%s: can't create tx maps\n",
1186 			    sc->sc_dev.dv_xname);
1187 			/* XXX free any allocated... */
1188 			return (1);
1189 		}
1190 	}
1191 
1192 	/* Leave a few bytes before the start of the RX ring buffer. */
1193 	sc->rl_cdata.rl_rx_buf_ptr = sc->rl_cdata.rl_rx_buf;
1194 	sc->rl_cdata.rl_rx_buf += sizeof(u_int64_t);
1195 	sc->rl_cdata.rl_rx_buf_pa += sizeof(u_int64_t);
1196 
1197 	ifp->if_softc = sc;
1198 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1199 	ifp->if_ioctl = rl_ioctl;
1200 	ifp->if_start = rl_start;
1201 	ifp->if_watchdog = rl_watchdog;
1202 	IFQ_SET_READY(&ifp->if_snd);
1203 
1204 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1205 
1206 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1207 
1208 	timeout_set(&sc->sc_tick_tmo, rl_tick, sc);
1209 
1210 	/*
1211 	 * Initialize our media structures and probe the MII.
1212 	 */
1213 	sc->sc_mii.mii_ifp = ifp;
1214 	sc->sc_mii.mii_readreg = rl_miibus_readreg;
1215 	sc->sc_mii.mii_writereg = rl_miibus_writereg;
1216 	sc->sc_mii.mii_statchg = rl_miibus_statchg;
1217 	ifmedia_init(&sc->sc_mii.mii_media, 0, rl_ifmedia_upd, rl_ifmedia_sts);
1218 	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
1219 	    MII_OFFSET_ANY, 0);
1220 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
1221 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1222 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
1223 	} else
1224 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1225 
1226 	/*
1227 	 * Attach us everywhere
1228 	 */
1229 	if_attach(ifp);
1230 	ether_ifattach(ifp);
1231 
1232 	return (0);
1233 }
1234 
1235 int
1236 rl_activate(struct device *self, int act)
1237 {
1238 	struct rl_softc	*sc = (struct rl_softc *)self;
1239 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1240 	int rv = 0;
1241 
1242 	switch (act) {
1243 	case DVACT_SUSPEND:
1244 		if (ifp->if_flags & IFF_RUNNING)
1245 			rl_stop(sc);
1246 		rv = config_activate_children(self, act);
1247 		break;
1248 	case DVACT_RESUME:
1249 		if (ifp->if_flags & IFF_UP)
1250 			rl_init(sc);
1251 		break;
1252 	default:
1253 		rv = config_activate_children(self, act);
1254 		break;
1255 	}
1256 	return (rv);
1257 }
1258 
1259 int
1260 rl_miibus_readreg(struct device *self, int phy, int reg)
1261 {
1262 	struct rl_softc		*sc = (struct rl_softc *)self;
1263 	struct rl_mii_frame	frame;
1264 	u_int16_t		rl8139_reg;
1265 
1266 	if (sc->rl_type == RL_8139) {
1267 		/*
1268 		* The RTL8139 PHY is mapped into PCI registers, unfortunately
1269 		* it has no phyid, or phyaddr, so assume it is phyaddr 0.
1270 		*/
1271 		if (phy != 0)
1272 			return(0);
1273 
1274 		switch (reg) {
1275 		case MII_BMCR:
1276 			rl8139_reg = RL_BMCR;
1277 			break;
1278 		case MII_BMSR:
1279 			rl8139_reg = RL_BMSR;
1280 			break;
1281 		case MII_ANAR:
1282 			rl8139_reg = RL_ANAR;
1283 			break;
1284 		case MII_ANER:
1285 			rl8139_reg = RL_ANER;
1286 			break;
1287 		case MII_ANLPAR:
1288 			rl8139_reg = RL_LPAR;
1289 			break;
1290 		case RL_MEDIASTAT:
1291 			return (CSR_READ_1(sc, RL_MEDIASTAT));
1292 		case MII_PHYIDR1:
1293 		case MII_PHYIDR2:
1294 		default:
1295 			return (0);
1296 		}
1297 		return (CSR_READ_2(sc, rl8139_reg));
1298 	}
1299 
1300 	bzero(&frame, sizeof(frame));
1301 
1302 	frame.mii_phyaddr = phy;
1303 	frame.mii_regaddr = reg;
1304 	rl_mii_readreg(sc, &frame);
1305 
1306 	return(frame.mii_data);
1307 }
1308 
1309 void
1310 rl_miibus_writereg(struct device *self, int phy, int reg, int val)
1311 {
1312 	struct rl_softc		*sc = (struct rl_softc *)self;
1313 	struct rl_mii_frame	frame;
1314 	u_int16_t		rl8139_reg = 0;
1315 
1316 	if (sc->rl_type == RL_8139) {
1317 		if (phy)
1318 			return;
1319 
1320 		switch (reg) {
1321 		case MII_BMCR:
1322 			rl8139_reg = RL_BMCR;
1323 			break;
1324 		case MII_BMSR:
1325 			rl8139_reg = RL_BMSR;
1326 			break;
1327 		case MII_ANAR:
1328 			rl8139_reg = RL_ANAR;
1329 			break;
1330 		case MII_ANER:
1331 			rl8139_reg = RL_ANER;
1332 			break;
1333 		case MII_ANLPAR:
1334 			rl8139_reg = RL_LPAR;
1335 			break;
1336 		case MII_PHYIDR1:
1337 		case MII_PHYIDR2:
1338 			return;
1339 		}
1340 		CSR_WRITE_2(sc, rl8139_reg, val);
1341 		return;
1342 	}
1343 
1344 	bzero(&frame, sizeof(frame));
1345 	frame.mii_phyaddr = phy;
1346 	frame.mii_regaddr = reg;
1347 	frame.mii_data = val;
1348 	rl_mii_writereg(sc, &frame);
1349 }
1350 
1351 void
1352 rl_miibus_statchg(struct device *self)
1353 {
1354 }
1355 
1356 void
1357 rl_tick(void *v)
1358 {
1359 	struct rl_softc	*sc = v;
1360 	int		s;
1361 
1362 	s = splnet();
1363 	mii_tick(&sc->sc_mii);
1364 	splx(s);
1365 
1366 	timeout_add_sec(&sc->sc_tick_tmo, 1);
1367 }
1368 
1369 int
1370 rl_detach(struct rl_softc *sc)
1371 {
1372 	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
1373 
1374 	/* Unhook our tick handler. */
1375 	timeout_del(&sc->sc_tick_tmo);
1376 
1377 	/* Detach any PHYs we might have. */
1378 	if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL)
1379 		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
1380 
1381 	/* Delete any remaining media. */
1382 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
1383 
1384 	ether_ifdetach(ifp);
1385 	if_detach(ifp);
1386 
1387 	return (0);
1388 }
1389 
1390 struct cfdriver rl_cd = {
1391 	0, "rl", DV_IFNET
1392 };
1393