xref: /netbsd-src/sys/dev/ic/rtl81x9.c (revision 8b0f9554ff8762542c4defc4f70e1eb76fb508fa)
1 /*	$NetBSD: rtl81x9.c,v 1.79 2007/12/09 20:27:59 jmcneill Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  *	FreeBSD Id: if_rl.c,v 1.17 1999/06/19 20:17:37 wpaul Exp
35  */
36 
37 /*
38  * RealTek 8129/8139 PCI NIC driver
39  *
40  * Supports several extremely cheap PCI 10/100 adapters based on
41  * the RealTek chipset. Datasheets can be obtained from
42  * www.realtek.com.tw.
43  *
44  * Written by Bill Paul <wpaul@ctr.columbia.edu>
45  * Electrical Engineering Department
46  * Columbia University, New York City
47  */
48 
49 /*
50  * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is
51  * probably the worst PCI ethernet controller ever made, with the possible
52  * exception of the FEAST chip made by SMC. The 8139 supports bus-master
53  * DMA, but it has a terrible interface that nullifies any performance
54  * gains that bus-master DMA usually offers.
55  *
56  * For transmission, the chip offers a series of four TX descriptor
57  * registers. Each transmit frame must be in a contiguous buffer, aligned
58  * on a longword (32-bit) boundary. This means we almost always have to
59  * do mbuf copies in order to transmit a frame, except in the unlikely
60  * case where a) the packet fits into a single mbuf, and b) the packet
61  * is 32-bit aligned within the mbuf's data area. The presence of only
62  * four descriptor registers means that we can never have more than four
63  * packets queued for transmission at any one time.
64  *
65  * Reception is not much better. The driver has to allocate a single large
66  * buffer area (up to 64K in size) into which the chip will DMA received
67  * frames. Because we don't know where within this region received packets
68  * will begin or end, we have no choice but to copy data from the buffer
69  * area into mbufs in order to pass the packets up to the higher protocol
70  * levels.
71  *
72  * It's impossible given this rotten design to really achieve decent
73  * performance at 100Mbps, unless you happen to have a 400MHz PII or
74  * some equally overmuscled CPU to drive it.
75  *
76  * On the bright side, the 8139 does have a built-in PHY, although
77  * rather than using an MDIO serial interface like most other NICs, the
78  * PHY registers are directly accessible through the 8139's register
79  * space. The 8139 supports autonegotiation, as well as a 64-bit multicast
80  * filter.
81  *
82  * The 8129 chip is an older version of the 8139 that uses an external PHY
83  * chip. The 8129 has a serial MDIO interface for accessing the MII where
84  * the 8139 lets you directly access the on-board PHY registers. We need
85  * to select which interface to use depending on the chip type.
86  */
87 
88 #include <sys/cdefs.h>
89 __KERNEL_RCSID(0, "$NetBSD: rtl81x9.c,v 1.79 2007/12/09 20:27:59 jmcneill Exp $");
90 
91 #include "bpfilter.h"
92 #include "rnd.h"
93 
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/callout.h>
97 #include <sys/device.h>
98 #include <sys/sockio.h>
99 #include <sys/mbuf.h>
100 #include <sys/malloc.h>
101 #include <sys/kernel.h>
102 #include <sys/socket.h>
103 
104 #include <uvm/uvm_extern.h>
105 
106 #include <net/if.h>
107 #include <net/if_arp.h>
108 #include <net/if_ether.h>
109 #include <net/if_dl.h>
110 #include <net/if_media.h>
111 
112 #if NBPFILTER > 0
113 #include <net/bpf.h>
114 #endif
115 #if NRND > 0
116 #include <sys/rnd.h>
117 #endif
118 
119 #include <sys/bus.h>
120 #include <machine/endian.h>
121 
122 #include <dev/mii/mii.h>
123 #include <dev/mii/miivar.h>
124 
125 #include <dev/ic/rtl81x9reg.h>
126 #include <dev/ic/rtl81x9var.h>
127 
128 #if defined(DEBUG)
129 #define STATIC
130 #else
131 #define STATIC static
132 #endif
133 
134 STATIC void rtk_reset(struct rtk_softc *);
135 STATIC void rtk_rxeof(struct rtk_softc *);
136 STATIC void rtk_txeof(struct rtk_softc *);
137 STATIC void rtk_start(struct ifnet *);
138 STATIC int rtk_ioctl(struct ifnet *, u_long, void *);
139 STATIC int rtk_init(struct ifnet *);
140 STATIC void rtk_stop(struct ifnet *, int);
141 
142 STATIC void rtk_watchdog(struct ifnet *);
143 STATIC int rtk_ifmedia_upd(struct ifnet *);
144 STATIC void rtk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
145 
146 STATIC void rtk_eeprom_putbyte(struct rtk_softc *, int, int);
147 STATIC void rtk_mii_sync(struct rtk_softc *);
148 STATIC void rtk_mii_send(struct rtk_softc *, uint32_t, int);
149 STATIC int rtk_mii_readreg(struct rtk_softc *, struct rtk_mii_frame *);
150 STATIC int rtk_mii_writereg(struct rtk_softc *, struct rtk_mii_frame *);
151 
152 STATIC int rtk_phy_readreg(device_t, int, int);
153 STATIC void rtk_phy_writereg(device_t, int, int, int);
154 STATIC void rtk_phy_statchg(device_t);
155 STATIC void rtk_tick(void *);
156 
157 STATIC int rtk_enable(struct rtk_softc *);
158 STATIC void rtk_disable(struct rtk_softc *);
159 
160 STATIC void rtk_list_tx_init(struct rtk_softc *);
161 
162 #define EE_SET(x)					\
163 	CSR_WRITE_1(sc, RTK_EECMD,			\
164 		CSR_READ_1(sc, RTK_EECMD) | (x))
165 
166 #define EE_CLR(x)					\
167 	CSR_WRITE_1(sc, RTK_EECMD,			\
168 		CSR_READ_1(sc, RTK_EECMD) & ~(x))
169 
170 #define EE_DELAY()	DELAY(100)
171 
172 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
173 
174 /*
175  * Send a read command and address to the EEPROM, check for ACK.
176  */
177 STATIC void
178 rtk_eeprom_putbyte(struct rtk_softc *sc, int addr, int addr_len)
179 {
180 	int d, i;
181 
182 	d = (RTK_EECMD_READ << addr_len) | addr;
183 
184 	/*
185 	 * Feed in each bit and stobe the clock.
186 	 */
187 	for (i = RTK_EECMD_LEN + addr_len; i > 0; i--) {
188 		if (d & (1 << (i - 1))) {
189 			EE_SET(RTK_EE_DATAIN);
190 		} else {
191 			EE_CLR(RTK_EE_DATAIN);
192 		}
193 		EE_DELAY();
194 		EE_SET(RTK_EE_CLK);
195 		EE_DELAY();
196 		EE_CLR(RTK_EE_CLK);
197 		EE_DELAY();
198 	}
199 }
200 
201 /*
202  * Read a word of data stored in the EEPROM at address 'addr.'
203  */
204 uint16_t
205 rtk_read_eeprom(struct rtk_softc *sc, int addr, int addr_len)
206 {
207 	uint16_t word;
208 	int i;
209 
210 	/* Enter EEPROM access mode. */
211 	CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_PROGRAM);
212 	EE_DELAY();
213 	EE_SET(RTK_EE_SEL);
214 
215 	/*
216 	 * Send address of word we want to read.
217 	 */
218 	rtk_eeprom_putbyte(sc, addr, addr_len);
219 
220 	/*
221 	 * Start reading bits from EEPROM.
222 	 */
223 	word = 0;
224 	for (i = 16; i > 0; i--) {
225 		EE_SET(RTK_EE_CLK);
226 		EE_DELAY();
227 		if (CSR_READ_1(sc, RTK_EECMD) & RTK_EE_DATAOUT)
228 			word |= 1 << (i - 1);
229 		EE_CLR(RTK_EE_CLK);
230 		EE_DELAY();
231 	}
232 
233 	/* Turn off EEPROM access mode. */
234 	CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_OFF);
235 
236 	return word;
237 }
238 
239 /*
240  * MII access routines are provided for the 8129, which
241  * doesn't have a built-in PHY. For the 8139, we fake things
242  * up by diverting rtk_phy_readreg()/rtk_phy_writereg() to the
243  * direct access PHY registers.
244  */
245 #define MII_SET(x)					\
246 	CSR_WRITE_1(sc, RTK_MII,			\
247 		CSR_READ_1(sc, RTK_MII) | (x))
248 
249 #define MII_CLR(x)					\
250 	CSR_WRITE_1(sc, RTK_MII,			\
251 		CSR_READ_1(sc, RTK_MII) & ~(x))
252 
253 /*
254  * Sync the PHYs by setting data bit and strobing the clock 32 times.
255  */
256 STATIC void
257 rtk_mii_sync(struct rtk_softc *sc)
258 {
259 	int i;
260 
261 	MII_SET(RTK_MII_DIR|RTK_MII_DATAOUT);
262 
263 	for (i = 0; i < 32; i++) {
264 		MII_SET(RTK_MII_CLK);
265 		DELAY(1);
266 		MII_CLR(RTK_MII_CLK);
267 		DELAY(1);
268 	}
269 }
270 
271 /*
272  * Clock a series of bits through the MII.
273  */
274 STATIC void
275 rtk_mii_send(struct rtk_softc *sc, uint32_t bits, int cnt)
276 {
277 	int i;
278 
279 	MII_CLR(RTK_MII_CLK);
280 
281 	for (i = cnt; i > 0; i--) {
282 		if (bits & (1 << (i - 1))) {
283 			MII_SET(RTK_MII_DATAOUT);
284 		} else {
285 			MII_CLR(RTK_MII_DATAOUT);
286 		}
287 		DELAY(1);
288 		MII_CLR(RTK_MII_CLK);
289 		DELAY(1);
290 		MII_SET(RTK_MII_CLK);
291 	}
292 }
293 
294 /*
295  * Read an PHY register through the MII.
296  */
297 STATIC int
298 rtk_mii_readreg(struct rtk_softc *sc, struct rtk_mii_frame *frame)
299 {
300 	int i, ack, s;
301 
302 	s = splnet();
303 
304 	/*
305 	 * Set up frame for RX.
306 	 */
307 	frame->mii_stdelim = RTK_MII_STARTDELIM;
308 	frame->mii_opcode = RTK_MII_READOP;
309 	frame->mii_turnaround = 0;
310 	frame->mii_data = 0;
311 
312 	CSR_WRITE_2(sc, RTK_MII, 0);
313 
314 	/*
315 	 * Turn on data xmit.
316 	 */
317 	MII_SET(RTK_MII_DIR);
318 
319 	rtk_mii_sync(sc);
320 
321 	/*
322 	 * Send command/address info.
323 	 */
324 	rtk_mii_send(sc, frame->mii_stdelim, 2);
325 	rtk_mii_send(sc, frame->mii_opcode, 2);
326 	rtk_mii_send(sc, frame->mii_phyaddr, 5);
327 	rtk_mii_send(sc, frame->mii_regaddr, 5);
328 
329 	/* Idle bit */
330 	MII_CLR((RTK_MII_CLK|RTK_MII_DATAOUT));
331 	DELAY(1);
332 	MII_SET(RTK_MII_CLK);
333 	DELAY(1);
334 
335 	/* Turn off xmit. */
336 	MII_CLR(RTK_MII_DIR);
337 
338 	/* Check for ack */
339 	MII_CLR(RTK_MII_CLK);
340 	DELAY(1);
341 	ack = CSR_READ_2(sc, RTK_MII) & RTK_MII_DATAIN;
342 	MII_SET(RTK_MII_CLK);
343 	DELAY(1);
344 
345 	/*
346 	 * Now try reading data bits. If the ack failed, we still
347 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
348 	 */
349 	if (ack) {
350 		for (i = 0; i < 16; i++) {
351 			MII_CLR(RTK_MII_CLK);
352 			DELAY(1);
353 			MII_SET(RTK_MII_CLK);
354 			DELAY(1);
355 		}
356 		goto fail;
357 	}
358 
359 	for (i = 16; i > 0; i--) {
360 		MII_CLR(RTK_MII_CLK);
361 		DELAY(1);
362 		if (!ack) {
363 			if (CSR_READ_2(sc, RTK_MII) & RTK_MII_DATAIN)
364 				frame->mii_data |= 1 << (i - 1);
365 			DELAY(1);
366 		}
367 		MII_SET(RTK_MII_CLK);
368 		DELAY(1);
369 	}
370 
371  fail:
372 	MII_CLR(RTK_MII_CLK);
373 	DELAY(1);
374 	MII_SET(RTK_MII_CLK);
375 	DELAY(1);
376 
377 	splx(s);
378 
379 	if (ack)
380 		return 1;
381 	return 0;
382 }
383 
384 /*
385  * Write to a PHY register through the MII.
386  */
387 STATIC int
388 rtk_mii_writereg(struct rtk_softc *sc, struct rtk_mii_frame *frame)
389 {
390 	int s;
391 
392 	s = splnet();
393 	/*
394 	 * Set up frame for TX.
395 	 */
396 	frame->mii_stdelim = RTK_MII_STARTDELIM;
397 	frame->mii_opcode = RTK_MII_WRITEOP;
398 	frame->mii_turnaround = RTK_MII_TURNAROUND;
399 
400 	/*
401 	 * Turn on data output.
402 	 */
403 	MII_SET(RTK_MII_DIR);
404 
405 	rtk_mii_sync(sc);
406 
407 	rtk_mii_send(sc, frame->mii_stdelim, 2);
408 	rtk_mii_send(sc, frame->mii_opcode, 2);
409 	rtk_mii_send(sc, frame->mii_phyaddr, 5);
410 	rtk_mii_send(sc, frame->mii_regaddr, 5);
411 	rtk_mii_send(sc, frame->mii_turnaround, 2);
412 	rtk_mii_send(sc, frame->mii_data, 16);
413 
414 	/* Idle bit. */
415 	MII_SET(RTK_MII_CLK);
416 	DELAY(1);
417 	MII_CLR(RTK_MII_CLK);
418 	DELAY(1);
419 
420 	/*
421 	 * Turn off xmit.
422 	 */
423 	MII_CLR(RTK_MII_DIR);
424 
425 	splx(s);
426 
427 	return 0;
428 }
429 
430 STATIC int
431 rtk_phy_readreg(device_t self, int phy, int reg)
432 {
433 	struct rtk_softc *sc = device_private(self);
434 	struct rtk_mii_frame frame;
435 	int rval;
436 	int rtk8139_reg;
437 
438 	if ((sc->sc_quirk & RTKQ_8129) == 0) {
439 		if (phy != 7)
440 			return 0;
441 
442 		switch (reg) {
443 		case MII_BMCR:
444 			rtk8139_reg = RTK_BMCR;
445 			break;
446 		case MII_BMSR:
447 			rtk8139_reg = RTK_BMSR;
448 			break;
449 		case MII_ANAR:
450 			rtk8139_reg = RTK_ANAR;
451 			break;
452 		case MII_ANER:
453 			rtk8139_reg = RTK_ANER;
454 			break;
455 		case MII_ANLPAR:
456 			rtk8139_reg = RTK_LPAR;
457 			break;
458 		default:
459 #if 0
460 			printf("%s: bad phy register\n", device_xname(self));
461 #endif
462 			return 0;
463 		}
464 		rval = CSR_READ_2(sc, rtk8139_reg);
465 		return rval;
466 	}
467 
468 	memset((char *)&frame, 0, sizeof(frame));
469 
470 	frame.mii_phyaddr = phy;
471 	frame.mii_regaddr = reg;
472 	rtk_mii_readreg(sc, &frame);
473 
474 	return frame.mii_data;
475 }
476 
477 STATIC void
478 rtk_phy_writereg(device_t self, int phy, int reg, int data)
479 {
480 	struct rtk_softc *sc = device_private(self);
481 	struct rtk_mii_frame frame;
482 	int rtk8139_reg;
483 
484 	if ((sc->sc_quirk & RTKQ_8129) == 0) {
485 		if (phy != 7)
486 			return;
487 
488 		switch (reg) {
489 		case MII_BMCR:
490 			rtk8139_reg = RTK_BMCR;
491 			break;
492 		case MII_BMSR:
493 			rtk8139_reg = RTK_BMSR;
494 			break;
495 		case MII_ANAR:
496 			rtk8139_reg = RTK_ANAR;
497 			break;
498 		case MII_ANER:
499 			rtk8139_reg = RTK_ANER;
500 			break;
501 		case MII_ANLPAR:
502 			rtk8139_reg = RTK_LPAR;
503 			break;
504 		default:
505 #if 0
506 			printf("%s: bad phy register\n", device_xname(self));
507 #endif
508 			return;
509 		}
510 		CSR_WRITE_2(sc, rtk8139_reg, data);
511 		return;
512 	}
513 
514 	memset((char *)&frame, 0, sizeof(frame));
515 
516 	frame.mii_phyaddr = phy;
517 	frame.mii_regaddr = reg;
518 	frame.mii_data = data;
519 
520 	rtk_mii_writereg(sc, &frame);
521 }
522 
523 STATIC void
524 rtk_phy_statchg(device_t v)
525 {
526 
527 	/* Nothing to do. */
528 }
529 
530 #define	rtk_calchash(addr) \
531 	(ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
532 
533 /*
534  * Program the 64-bit multicast hash filter.
535  */
536 void
537 rtk_setmulti(struct rtk_softc *sc)
538 {
539 	struct ifnet *ifp;
540 	uint32_t hashes[2] = { 0, 0 };
541 	uint32_t rxfilt;
542 	struct ether_multi *enm;
543 	struct ether_multistep step;
544 	int h, mcnt;
545 
546 	ifp = &sc->ethercom.ec_if;
547 
548 	rxfilt = CSR_READ_4(sc, RTK_RXCFG);
549 
550 	if (ifp->if_flags & IFF_PROMISC) {
551  allmulti:
552 		ifp->if_flags |= IFF_ALLMULTI;
553 		rxfilt |= RTK_RXCFG_RX_MULTI;
554 		CSR_WRITE_4(sc, RTK_RXCFG, rxfilt);
555 		CSR_WRITE_4(sc, RTK_MAR0, 0xFFFFFFFF);
556 		CSR_WRITE_4(sc, RTK_MAR4, 0xFFFFFFFF);
557 		return;
558 	}
559 
560 	/* first, zot all the existing hash bits */
561 	CSR_WRITE_4(sc, RTK_MAR0, 0);
562 	CSR_WRITE_4(sc, RTK_MAR4, 0);
563 
564 	/* now program new ones */
565 	ETHER_FIRST_MULTI(step, &sc->ethercom, enm);
566 	mcnt = 0;
567 	while (enm != NULL) {
568 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
569 		    ETHER_ADDR_LEN) != 0)
570 			goto allmulti;
571 
572 		h = rtk_calchash(enm->enm_addrlo);
573 		if (h < 32)
574 			hashes[0] |= (1 << h);
575 		else
576 			hashes[1] |= (1 << (h - 32));
577 		mcnt++;
578 		ETHER_NEXT_MULTI(step, enm);
579 	}
580 
581 	ifp->if_flags &= ~IFF_ALLMULTI;
582 
583 	if (mcnt)
584 		rxfilt |= RTK_RXCFG_RX_MULTI;
585 	else
586 		rxfilt &= ~RTK_RXCFG_RX_MULTI;
587 
588 	CSR_WRITE_4(sc, RTK_RXCFG, rxfilt);
589 
590 	/*
591 	 * For some unfathomable reason, RealTek decided to reverse
592 	 * the order of the multicast hash registers in the PCI Express
593 	 * parts. This means we have to write the hash pattern in reverse
594 	 * order for those devices.
595 	 */
596 	if ((sc->sc_quirk & RTKQ_PCIE) != 0) {
597 		CSR_WRITE_4(sc, RTK_MAR0, bswap32(hashes[1]));
598 		CSR_WRITE_4(sc, RTK_MAR4, bswap32(hashes[0]));
599 	} else {
600 		CSR_WRITE_4(sc, RTK_MAR0, hashes[0]);
601 		CSR_WRITE_4(sc, RTK_MAR4, hashes[1]);
602 	}
603 }
604 
605 void
606 rtk_reset(struct rtk_softc *sc)
607 {
608 	int i;
609 
610 	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_RESET);
611 
612 	for (i = 0; i < RTK_TIMEOUT; i++) {
613 		DELAY(10);
614 		if ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_RESET) == 0)
615 			break;
616 	}
617 	if (i == RTK_TIMEOUT)
618 		printf("%s: reset never completed!\n", device_xname(&sc->sc_dev));
619 }
620 
621 /*
622  * Attach the interface. Allocate softc structures, do ifmedia
623  * setup and ethernet/BPF attach.
624  */
625 void
626 rtk_attach(struct rtk_softc *sc)
627 {
628 	device_t self = &sc->sc_dev;
629 	struct ifnet *ifp;
630 	struct rtk_tx_desc *txd;
631 	uint16_t val;
632 	uint8_t eaddr[ETHER_ADDR_LEN];
633 	int error;
634 	int i, addr_len;
635 
636 	callout_init(&sc->rtk_tick_ch, 0);
637 
638 	/*
639 	 * Check EEPROM type 9346 or 9356.
640 	 */
641 	if (rtk_read_eeprom(sc, RTK_EE_ID, RTK_EEADDR_LEN1) == 0x8129)
642 		addr_len = RTK_EEADDR_LEN1;
643 	else
644 		addr_len = RTK_EEADDR_LEN0;
645 
646 	/*
647 	 * Get station address.
648 	 */
649 	val = rtk_read_eeprom(sc, RTK_EE_EADDR0, addr_len);
650 	eaddr[0] = val & 0xff;
651 	eaddr[1] = val >> 8;
652 	val = rtk_read_eeprom(sc, RTK_EE_EADDR1, addr_len);
653 	eaddr[2] = val & 0xff;
654 	eaddr[3] = val >> 8;
655 	val = rtk_read_eeprom(sc, RTK_EE_EADDR2, addr_len);
656 	eaddr[4] = val & 0xff;
657 	eaddr[5] = val >> 8;
658 
659 	if ((error = bus_dmamem_alloc(sc->sc_dmat,
660 	    RTK_RXBUFLEN + 16, PAGE_SIZE, 0, &sc->sc_dmaseg, 1, &sc->sc_dmanseg,
661 	    BUS_DMA_NOWAIT)) != 0) {
662 		aprint_error_dev(self,
663 			"can't allocate recv buffer, error = %d\n", error);
664 		goto fail_0;
665 	}
666 
667 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg,
668 	    RTK_RXBUFLEN + 16, (void **)&sc->rtk_rx_buf,
669 	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
670 		aprint_error_dev(self,
671 			"can't map recv buffer, error = %d\n", error);
672 		goto fail_1;
673 	}
674 
675 	if ((error = bus_dmamap_create(sc->sc_dmat,
676 	    RTK_RXBUFLEN + 16, 1, RTK_RXBUFLEN + 16, 0, BUS_DMA_NOWAIT,
677 	    &sc->recv_dmamap)) != 0) {
678 		aprint_error_dev(self,
679 			"can't create recv buffer DMA map, error = %d\n", error);
680 		goto fail_2;
681 	}
682 
683 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->recv_dmamap,
684 	    sc->rtk_rx_buf, RTK_RXBUFLEN + 16,
685 	    NULL, BUS_DMA_READ|BUS_DMA_NOWAIT)) != 0) {
686 		aprint_error_dev(self,
687 			"can't load recv buffer DMA map, error = %d\n", error);
688 		goto fail_3;
689 	}
690 
691 	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
692 		txd = &sc->rtk_tx_descs[i];
693 		if ((error = bus_dmamap_create(sc->sc_dmat,
694 		    MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
695 		    &txd->txd_dmamap)) != 0) {
696 			aprint_error_dev(self,
697 				"can't create snd buffer DMA map,"
698 				" error = %d\n", error);
699 			goto fail_4;
700 		}
701 		txd->txd_txaddr = RTK_TXADDR0 + (i * 4);
702 		txd->txd_txstat = RTK_TXSTAT0 + (i * 4);
703 	}
704 	SIMPLEQ_INIT(&sc->rtk_tx_free);
705 	SIMPLEQ_INIT(&sc->rtk_tx_dirty);
706 
707 	/*
708 	 * From this point forward, the attachment cannot fail. A failure
709 	 * before this releases all resources thar may have been
710 	 * allocated.
711 	 */
712 	sc->sc_flags |= RTK_ATTACHED;
713 
714 	/* Reset the adapter. */
715 	rtk_reset(sc);
716 
717 	aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(eaddr));
718 
719 	ifp = &sc->ethercom.ec_if;
720 	ifp->if_softc = sc;
721 	strcpy(ifp->if_xname, device_xname(self));
722 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
723 	ifp->if_ioctl = rtk_ioctl;
724 	ifp->if_start = rtk_start;
725 	ifp->if_watchdog = rtk_watchdog;
726 	ifp->if_init = rtk_init;
727 	ifp->if_stop = rtk_stop;
728 	IFQ_SET_READY(&ifp->if_snd);
729 
730 	/*
731 	 * Do ifmedia setup.
732 	 */
733 	sc->mii.mii_ifp = ifp;
734 	sc->mii.mii_readreg = rtk_phy_readreg;
735 	sc->mii.mii_writereg = rtk_phy_writereg;
736 	sc->mii.mii_statchg = rtk_phy_statchg;
737 	ifmedia_init(&sc->mii.mii_media, IFM_IMASK, rtk_ifmedia_upd,
738 	    rtk_ifmedia_sts);
739 	mii_attach(self, &sc->mii, 0xffffffff,
740 	    MII_PHY_ANY, MII_OFFSET_ANY, 0);
741 
742 	/* Choose a default media. */
743 	if (LIST_FIRST(&sc->mii.mii_phys) == NULL) {
744 		ifmedia_add(&sc->mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
745 		ifmedia_set(&sc->mii.mii_media, IFM_ETHER|IFM_NONE);
746 	} else {
747 		ifmedia_set(&sc->mii.mii_media, IFM_ETHER|IFM_AUTO);
748 	}
749 
750 	/*
751 	 * Call MI attach routines.
752 	 */
753 	if_attach(ifp);
754 	ether_ifattach(ifp, eaddr);
755 
756 #if NRND > 0
757 	rnd_attach_source(&sc->rnd_source, device_xname(self),
758 	    RND_TYPE_NET, 0);
759 #endif
760 
761 	return;
762  fail_4:
763 	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
764 		txd = &sc->rtk_tx_descs[i];
765 		if (txd->txd_dmamap != NULL)
766 			bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmamap);
767 	}
768  fail_3:
769 	bus_dmamap_destroy(sc->sc_dmat, sc->recv_dmamap);
770  fail_2:
771 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->rtk_rx_buf,
772 	    RTK_RXBUFLEN + 16);
773  fail_1:
774 	bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg);
775  fail_0:
776 	return;
777 }
778 
779 /*
780  * Initialize the transmit descriptors.
781  */
782 STATIC void
783 rtk_list_tx_init(struct rtk_softc *sc)
784 {
785 	struct rtk_tx_desc *txd;
786 	int i;
787 
788 	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL)
789 		SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);
790 	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_free)) != NULL)
791 		SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_free, txd_q);
792 
793 	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
794 		txd = &sc->rtk_tx_descs[i];
795 		CSR_WRITE_4(sc, txd->txd_txaddr, 0);
796 		SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_free, txd, txd_q);
797 	}
798 }
799 
800 /*
801  * rtk_activate:
802  *     Handle device activation/deactivation requests.
803  */
804 int
805 rtk_activate(device_t self, enum devact act)
806 {
807 	struct rtk_softc *sc = device_private(self);
808 	int s, error;
809 
810 	error = 0;
811 	s = splnet();
812 	switch (act) {
813 	case DVACT_ACTIVATE:
814 		error = EOPNOTSUPP;
815 		break;
816 	case DVACT_DEACTIVATE:
817 		mii_activate(&sc->mii, act, MII_PHY_ANY, MII_OFFSET_ANY);
818 		if_deactivate(&sc->ethercom.ec_if);
819 		break;
820 	}
821 	splx(s);
822 
823 	return error;
824 }
825 
826 /*
827  * rtk_detach:
828  *     Detach a rtk interface.
829  */
830 int
831 rtk_detach(struct rtk_softc *sc)
832 {
833 	struct ifnet *ifp = &sc->ethercom.ec_if;
834 	struct rtk_tx_desc *txd;
835 	int i;
836 
837 	/*
838 	 * Succeed now if there isn't any work to do.
839 	 */
840 	if ((sc->sc_flags & RTK_ATTACHED) == 0)
841 		return 0;
842 
843 	/* Unhook our tick handler. */
844 	callout_stop(&sc->rtk_tick_ch);
845 
846 	/* Detach all PHYs. */
847 	mii_detach(&sc->mii, MII_PHY_ANY, MII_OFFSET_ANY);
848 
849 	/* Delete all remaining media. */
850 	ifmedia_delete_instance(&sc->mii.mii_media, IFM_INST_ANY);
851 
852 #if NRND > 0
853 	rnd_detach_source(&sc->rnd_source);
854 #endif
855 
856 	ether_ifdetach(ifp);
857 	if_detach(ifp);
858 
859 	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
860 		txd = &sc->rtk_tx_descs[i];
861 		if (txd->txd_dmamap != NULL)
862 			bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmamap);
863 	}
864 	bus_dmamap_destroy(sc->sc_dmat, sc->recv_dmamap);
865 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->rtk_rx_buf,
866 	    RTK_RXBUFLEN + 16);
867 	bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg);
868 
869 	return 0;
870 }
871 
872 /*
873  * rtk_enable:
874  *     Enable the RTL81X9 chip.
875  */
876 int
877 rtk_enable(struct rtk_softc *sc)
878 {
879 
880 	if (RTK_IS_ENABLED(sc) == 0 && sc->sc_enable != NULL) {
881 		if ((*sc->sc_enable)(sc) != 0) {
882 			printf("%s: device enable failed\n",
883 			    device_xname(&sc->sc_dev));
884 			return EIO;
885 		}
886 		sc->sc_flags |= RTK_ENABLED;
887 	}
888 	return 0;
889 }
890 
891 /*
892  * rtk_disable:
893  *     Disable the RTL81X9 chip.
894  */
895 void
896 rtk_disable(struct rtk_softc *sc)
897 {
898 
899 	if (RTK_IS_ENABLED(sc) && sc->sc_disable != NULL) {
900 		(*sc->sc_disable)(sc);
901 		sc->sc_flags &= ~RTK_ENABLED;
902 	}
903 }
904 
905 /*
906  * A frame has been uploaded: pass the resulting mbuf chain up to
907  * the higher level protocols.
908  *
909  * You know there's something wrong with a PCI bus-master chip design.
910  *
911  * The receive operation is badly documented in the datasheet, so I'll
912  * attempt to document it here. The driver provides a buffer area and
913  * places its base address in the RX buffer start address register.
914  * The chip then begins copying frames into the RX buffer. Each frame
915  * is preceded by a 32-bit RX status word which specifies the length
916  * of the frame and certain other status bits. Each frame (starting with
917  * the status word) is also 32-bit aligned. The frame length is in the
918  * first 16 bits of the status word; the lower 15 bits correspond with
919  * the 'rx status register' mentioned in the datasheet.
920  *
921  * Note: to make the Alpha happy, the frame payload needs to be aligned
922  * on a 32-bit boundary. To achieve this, we copy the data to mbuf
923  * shifted forward 2 bytes.
924  */
925 STATIC void
926 rtk_rxeof(struct rtk_softc *sc)
927 {
928 	struct mbuf *m;
929 	struct ifnet *ifp;
930 	char *rxbufpos, *dst;
931 	u_int total_len, wrap;
932 	uint32_t rxstat;
933 	uint16_t cur_rx, new_rx;
934 	uint16_t limit;
935 	uint16_t rx_bytes, max_bytes;
936 
937 	ifp = &sc->ethercom.ec_if;
938 
939 	cur_rx = (CSR_READ_2(sc, RTK_CURRXADDR) + 16) % RTK_RXBUFLEN;
940 
941 	/* Do not try to read past this point. */
942 	limit = CSR_READ_2(sc, RTK_CURRXBUF) % RTK_RXBUFLEN;
943 
944 	if (limit < cur_rx)
945 		max_bytes = (RTK_RXBUFLEN - cur_rx) + limit;
946 	else
947 		max_bytes = limit - cur_rx;
948 	rx_bytes = 0;
949 
950 	while ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_EMPTY_RXBUF) == 0) {
951 		rxbufpos = (char *)sc->rtk_rx_buf + cur_rx;
952 		bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, cur_rx,
953 		    RTK_RXSTAT_LEN, BUS_DMASYNC_POSTREAD);
954 		rxstat = le32toh(*(uint32_t *)rxbufpos);
955 		bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, cur_rx,
956 		    RTK_RXSTAT_LEN, BUS_DMASYNC_PREREAD);
957 
958 		/*
959 		 * Here's a totally undocumented fact for you. When the
960 		 * RealTek chip is in the process of copying a packet into
961 		 * RAM for you, the length will be 0xfff0. If you spot a
962 		 * packet header with this value, you need to stop. The
963 		 * datasheet makes absolutely no mention of this and
964 		 * RealTek should be shot for this.
965 		 */
966 		total_len = rxstat >> 16;
967 		if (total_len == RTK_RXSTAT_UNFINISHED)
968 			break;
969 
970 		if ((rxstat & RTK_RXSTAT_RXOK) == 0 ||
971 		    total_len < ETHER_MIN_LEN ||
972 		    total_len > (MCLBYTES - RTK_ETHER_ALIGN)) {
973 			ifp->if_ierrors++;
974 
975 			/*
976 			 * submitted by:[netbsd-pcmcia:00484]
977 			 *	Takahiro Kambe <taca@sky.yamashina.kyoto.jp>
978 			 * obtain from:
979 			 *     FreeBSD if_rl.c rev 1.24->1.25
980 			 *
981 			 */
982 #if 0
983 			if (rxstat & (RTK_RXSTAT_BADSYM|RTK_RXSTAT_RUNT|
984 			    RTK_RXSTAT_GIANT|RTK_RXSTAT_CRCERR|
985 			    RTK_RXSTAT_ALIGNERR)) {
986 				CSR_WRITE_2(sc, RTK_COMMAND, RTK_CMD_TX_ENB);
987 				CSR_WRITE_2(sc, RTK_COMMAND,
988 				    RTK_CMD_TX_ENB|RTK_CMD_RX_ENB);
989 				CSR_WRITE_4(sc, RTK_RXCFG, RTK_RXCFG_CONFIG);
990 				CSR_WRITE_4(sc, RTK_RXADDR,
991 				    sc->recv_dmamap->dm_segs[0].ds_addr);
992 				cur_rx = 0;
993 			}
994 			break;
995 #else
996 			rtk_init(ifp);
997 			return;
998 #endif
999 		}
1000 
1001 		/* No errors; receive the packet. */
1002 		rx_bytes += total_len + RTK_RXSTAT_LEN;
1003 
1004 		/*
1005 		 * Avoid trying to read more bytes than we know
1006 		 * the chip has prepared for us.
1007 		 */
1008 		if (rx_bytes > max_bytes)
1009 			break;
1010 
1011 		/*
1012 		 * Skip the status word, wrapping around to the beginning
1013 		 * of the Rx area, if necessary.
1014 		 */
1015 		cur_rx = (cur_rx + RTK_RXSTAT_LEN) % RTK_RXBUFLEN;
1016 		rxbufpos = (char *)sc->rtk_rx_buf + cur_rx;
1017 
1018 		/*
1019 		 * Compute the number of bytes at which the packet
1020 		 * will wrap to the beginning of the ring buffer.
1021 		 */
1022 		wrap = RTK_RXBUFLEN - cur_rx;
1023 
1024 		/*
1025 		 * Compute where the next pending packet is.
1026 		 */
1027 		if (total_len > wrap)
1028 			new_rx = total_len - wrap;
1029 		else
1030 			new_rx = cur_rx + total_len;
1031 		/* Round up to 32-bit boundary. */
1032 		new_rx = ((new_rx + 3) & ~3) % RTK_RXBUFLEN;
1033 
1034 		/*
1035 		 * The RealTek chip includes the CRC with every
1036 		 * incoming packet; trim it off here.
1037 		 */
1038 		total_len -= ETHER_CRC_LEN;
1039 
1040 		/*
1041 		 * Now allocate an mbuf (and possibly a cluster) to hold
1042 		 * the packet. Note we offset the packet 2 bytes so that
1043 		 * data after the Ethernet header will be 4-byte aligned.
1044 		 */
1045 		MGETHDR(m, M_DONTWAIT, MT_DATA);
1046 		if (m == NULL) {
1047 			printf("%s: unable to allocate Rx mbuf\n",
1048 			    device_xname(&sc->sc_dev));
1049 			ifp->if_ierrors++;
1050 			goto next_packet;
1051 		}
1052 		if (total_len > (MHLEN - RTK_ETHER_ALIGN)) {
1053 			MCLGET(m, M_DONTWAIT);
1054 			if ((m->m_flags & M_EXT) == 0) {
1055 				printf("%s: unable to allocate Rx cluster\n",
1056 				    device_xname(&sc->sc_dev));
1057 				ifp->if_ierrors++;
1058 				m_freem(m);
1059 				m = NULL;
1060 				goto next_packet;
1061 			}
1062 		}
1063 		m->m_data += RTK_ETHER_ALIGN;	/* for alignment */
1064 		m->m_pkthdr.rcvif = ifp;
1065 		m->m_pkthdr.len = m->m_len = total_len;
1066 		dst = mtod(m, void *);
1067 
1068 		/*
1069 		 * If the packet wraps, copy up to the wrapping point.
1070 		 */
1071 		if (total_len > wrap) {
1072 			bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
1073 			    cur_rx, wrap, BUS_DMASYNC_POSTREAD);
1074 			memcpy(dst, rxbufpos, wrap);
1075 			bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
1076 			    cur_rx, wrap, BUS_DMASYNC_PREREAD);
1077 			cur_rx = 0;
1078 			rxbufpos = sc->rtk_rx_buf;
1079 			total_len -= wrap;
1080 			dst += wrap;
1081 		}
1082 
1083 		/*
1084 		 * ...and now the rest.
1085 		 */
1086 		bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
1087 		    cur_rx, total_len, BUS_DMASYNC_POSTREAD);
1088 		memcpy(dst, rxbufpos, total_len);
1089 		bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
1090 		    cur_rx, total_len, BUS_DMASYNC_PREREAD);
1091 
1092  next_packet:
1093 		CSR_WRITE_2(sc, RTK_CURRXADDR, (new_rx - 16) % RTK_RXBUFLEN);
1094 		cur_rx = new_rx;
1095 
1096 		if (m == NULL)
1097 			continue;
1098 
1099 		ifp->if_ipackets++;
1100 
1101 #if NBPFILTER > 0
1102 		if (ifp->if_bpf)
1103 			bpf_mtap(ifp->if_bpf, m);
1104 #endif
1105 		/* pass it on. */
1106 		(*ifp->if_input)(ifp, m);
1107 	}
1108 }
1109 
1110 /*
1111  * A frame was downloaded to the chip. It's safe for us to clean up
1112  * the list buffers.
1113  */
1114 STATIC void
1115 rtk_txeof(struct rtk_softc *sc)
1116 {
1117 	struct ifnet *ifp;
1118 	struct rtk_tx_desc *txd;
1119 	uint32_t txstat;
1120 
1121 	ifp = &sc->ethercom.ec_if;
1122 
1123 	/*
1124 	 * Go through our tx list and free mbufs for those
1125 	 * frames that have been uploaded.
1126 	 */
1127 	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL) {
1128 		txstat = CSR_READ_4(sc, txd->txd_txstat);
1129 		if ((txstat & (RTK_TXSTAT_TX_OK|
1130 		    RTK_TXSTAT_TX_UNDERRUN|RTK_TXSTAT_TXABRT)) == 0)
1131 			break;
1132 
1133 		SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);
1134 
1135 		bus_dmamap_sync(sc->sc_dmat, txd->txd_dmamap, 0,
1136 		    txd->txd_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1137 		bus_dmamap_unload(sc->sc_dmat, txd->txd_dmamap);
1138 		m_freem(txd->txd_mbuf);
1139 		txd->txd_mbuf = NULL;
1140 
1141 		ifp->if_collisions += (txstat & RTK_TXSTAT_COLLCNT) >> 24;
1142 
1143 		if (txstat & RTK_TXSTAT_TX_OK)
1144 			ifp->if_opackets++;
1145 		else {
1146 			ifp->if_oerrors++;
1147 
1148 			/*
1149 			 * Increase Early TX threshold if underrun occurred.
1150 			 * Increase step 64 bytes.
1151 			 */
1152 			if (txstat & RTK_TXSTAT_TX_UNDERRUN) {
1153 #ifdef DEBUG
1154 				printf("%s: transmit underrun;",
1155 				    device_xname(&sc->sc_dev));
1156 #endif
1157 				if (sc->sc_txthresh < RTK_TXTH_MAX) {
1158 					sc->sc_txthresh += 2;
1159 #ifdef DEBUG
1160 					printf(" new threshold: %d bytes",
1161 					    sc->sc_txthresh * 32);
1162 #endif
1163 				}
1164 				printf("\n");
1165 			}
1166 			if (txstat & (RTK_TXSTAT_TXABRT|RTK_TXSTAT_OUTOFWIN))
1167 				CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG);
1168 		}
1169 		SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_free, txd, txd_q);
1170 		ifp->if_flags &= ~IFF_OACTIVE;
1171 	}
1172 
1173 	/* Clear the timeout timer if there is no pending packet. */
1174 	if (SIMPLEQ_EMPTY(&sc->rtk_tx_dirty))
1175 		ifp->if_timer = 0;
1176 
1177 }
1178 
1179 int
1180 rtk_intr(void *arg)
1181 {
1182 	struct rtk_softc *sc;
1183 	struct ifnet *ifp;
1184 	uint16_t status;
1185 	int handled;
1186 
1187 	sc = arg;
1188 	ifp = &sc->ethercom.ec_if;
1189 
1190 	/* Disable interrupts. */
1191 	CSR_WRITE_2(sc, RTK_IMR, 0x0000);
1192 
1193 	handled = 0;
1194 	for (;;) {
1195 
1196 		status = CSR_READ_2(sc, RTK_ISR);
1197 
1198 		if (status == 0xffff)
1199 			break; /* Card is gone... */
1200 
1201 		if (status)
1202 			CSR_WRITE_2(sc, RTK_ISR, status);
1203 
1204 		if ((status & RTK_INTRS) == 0)
1205 			break;
1206 
1207 		handled = 1;
1208 
1209 		if (status & RTK_ISR_RX_OK)
1210 			rtk_rxeof(sc);
1211 
1212 		if (status & RTK_ISR_RX_ERR)
1213 			rtk_rxeof(sc);
1214 
1215 		if (status & (RTK_ISR_TX_OK|RTK_ISR_TX_ERR))
1216 			rtk_txeof(sc);
1217 
1218 		if (status & RTK_ISR_SYSTEM_ERR) {
1219 			rtk_reset(sc);
1220 			rtk_init(ifp);
1221 		}
1222 	}
1223 
1224 	/* Re-enable interrupts. */
1225 	CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS);
1226 
1227 	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
1228 		rtk_start(ifp);
1229 
1230 #if NRND > 0
1231 	if (RND_ENABLED(&sc->rnd_source))
1232 		rnd_add_uint32(&sc->rnd_source, status);
1233 #endif
1234 
1235 	return handled;
1236 }
1237 
1238 /*
1239  * Main transmit routine.
1240  */
1241 
1242 STATIC void
1243 rtk_start(struct ifnet *ifp)
1244 {
1245 	struct rtk_softc *sc;
1246 	struct rtk_tx_desc *txd;
1247 	struct mbuf *m_head, *m_new;
1248 	int error, len;
1249 
1250 	sc = ifp->if_softc;
1251 
1252 	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_free)) != NULL) {
1253 		IFQ_POLL(&ifp->if_snd, m_head);
1254 		if (m_head == NULL)
1255 			break;
1256 		m_new = NULL;
1257 
1258 		/*
1259 		 * Load the DMA map.  If this fails, the packet didn't
1260 		 * fit in one DMA segment, and we need to copy.  Note,
1261 		 * the packet must also be aligned.
1262 		 * if the packet is too small, copy it too, so we're sure
1263 		 * so have enouth room for the pad buffer.
1264 		 */
1265 		if ((mtod(m_head, uintptr_t) & 3) != 0 ||
1266 		    m_head->m_pkthdr.len < ETHER_PAD_LEN ||
1267 		    bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmamap,
1268 			m_head, BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
1269 			MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1270 			if (m_new == NULL) {
1271 				printf("%s: unable to allocate Tx mbuf\n",
1272 				    device_xname(&sc->sc_dev));
1273 				break;
1274 			}
1275 			if (m_head->m_pkthdr.len > MHLEN) {
1276 				MCLGET(m_new, M_DONTWAIT);
1277 				if ((m_new->m_flags & M_EXT) == 0) {
1278 					printf("%s: unable to allocate Tx "
1279 					    "cluster\n", device_xname(&sc->sc_dev));
1280 					m_freem(m_new);
1281 					break;
1282 				}
1283 			}
1284 			m_copydata(m_head, 0, m_head->m_pkthdr.len,
1285 			    mtod(m_new, void *));
1286 			m_new->m_pkthdr.len = m_new->m_len =
1287 			    m_head->m_pkthdr.len;
1288 			if (m_head->m_pkthdr.len < ETHER_PAD_LEN) {
1289 				memset(
1290 				    mtod(m_new, char *) + m_head->m_pkthdr.len,
1291 				    0, ETHER_PAD_LEN - m_head->m_pkthdr.len);
1292 				m_new->m_pkthdr.len = m_new->m_len =
1293 				    ETHER_PAD_LEN;
1294 			}
1295 			error = bus_dmamap_load_mbuf(sc->sc_dmat,
1296 			    txd->txd_dmamap, m_new,
1297 			    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1298 			if (error) {
1299 				printf("%s: unable to load Tx buffer, "
1300 				    "error = %d\n", device_xname(&sc->sc_dev), error);
1301 				break;
1302 			}
1303 		}
1304 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1305 #if NBPFILTER > 0
1306 		/*
1307 		 * If there's a BPF listener, bounce a copy of this frame
1308 		 * to him.
1309 		 */
1310 		if (ifp->if_bpf)
1311 			bpf_mtap(ifp->if_bpf, m_head);
1312 #endif
1313 		if (m_new != NULL) {
1314 			m_freem(m_head);
1315 			m_head = m_new;
1316 		}
1317 		txd->txd_mbuf = m_head;
1318 
1319 		SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_free, txd_q);
1320 		SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_dirty, txd, txd_q);
1321 
1322 		/*
1323 		 * Transmit the frame.
1324 		 */
1325 		bus_dmamap_sync(sc->sc_dmat,
1326 		    txd->txd_dmamap, 0, txd->txd_dmamap->dm_mapsize,
1327 		    BUS_DMASYNC_PREWRITE);
1328 
1329 		len = txd->txd_dmamap->dm_segs[0].ds_len;
1330 
1331 		CSR_WRITE_4(sc, txd->txd_txaddr,
1332 		    txd->txd_dmamap->dm_segs[0].ds_addr);
1333 		CSR_WRITE_4(sc, txd->txd_txstat,
1334 		    RTK_TXSTAT_THRESH(sc->sc_txthresh) | len);
1335 
1336 		/*
1337 		 * Set a timeout in case the chip goes out to lunch.
1338 		 */
1339 		ifp->if_timer = 5;
1340 	}
1341 
1342 	/*
1343 	 * We broke out of the loop because all our TX slots are
1344 	 * full. Mark the NIC as busy until it drains some of the
1345 	 * packets from the queue.
1346 	 */
1347 	if (SIMPLEQ_EMPTY(&sc->rtk_tx_free))
1348 		ifp->if_flags |= IFF_OACTIVE;
1349 }
1350 
1351 STATIC int
1352 rtk_init(struct ifnet *ifp)
1353 {
1354 	struct rtk_softc *sc = ifp->if_softc;
1355 	int error, i;
1356 	uint32_t rxcfg;
1357 
1358 	if ((error = rtk_enable(sc)) != 0)
1359 		goto out;
1360 
1361 	/*
1362 	 * Cancel pending I/O.
1363 	 */
1364 	rtk_stop(ifp, 0);
1365 
1366 	/* Init our MAC address */
1367 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
1368 		CSR_WRITE_1(sc, RTK_IDR0 + i, CLLADDR(ifp->if_sadl)[i]);
1369 	}
1370 
1371 	/* Init the RX buffer pointer register. */
1372 	bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, 0,
1373 	    sc->recv_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1374 	CSR_WRITE_4(sc, RTK_RXADDR, sc->recv_dmamap->dm_segs[0].ds_addr);
1375 
1376 	/* Init TX descriptors. */
1377 	rtk_list_tx_init(sc);
1378 
1379 	/* Init Early TX threshold. */
1380 	sc->sc_txthresh = RTK_TXTH_256;
1381 	/*
1382 	 * Enable transmit and receive.
1383 	 */
1384 	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB|RTK_CMD_RX_ENB);
1385 
1386 	/*
1387 	 * Set the initial TX and RX configuration.
1388 	 */
1389 	CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG);
1390 	CSR_WRITE_4(sc, RTK_RXCFG, RTK_RXCFG_CONFIG);
1391 
1392 	/* Set the individual bit to receive frames for this host only. */
1393 	rxcfg = CSR_READ_4(sc, RTK_RXCFG);
1394 	rxcfg |= RTK_RXCFG_RX_INDIV;
1395 
1396 	/* If we want promiscuous mode, set the allframes bit. */
1397 	if (ifp->if_flags & IFF_PROMISC) {
1398 		rxcfg |= RTK_RXCFG_RX_ALLPHYS;
1399 		CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1400 	} else {
1401 		rxcfg &= ~RTK_RXCFG_RX_ALLPHYS;
1402 		CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1403 	}
1404 
1405 	/*
1406 	 * Set capture broadcast bit to capture broadcast frames.
1407 	 */
1408 	if (ifp->if_flags & IFF_BROADCAST) {
1409 		rxcfg |= RTK_RXCFG_RX_BROAD;
1410 		CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1411 	} else {
1412 		rxcfg &= ~RTK_RXCFG_RX_BROAD;
1413 		CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1414 	}
1415 
1416 	/*
1417 	 * Program the multicast filter, if necessary.
1418 	 */
1419 	rtk_setmulti(sc);
1420 
1421 	/*
1422 	 * Enable interrupts.
1423 	 */
1424 	CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS);
1425 
1426 	/* Start RX/TX process. */
1427 	CSR_WRITE_4(sc, RTK_MISSEDPKT, 0);
1428 
1429 	/* Enable receiver and transmitter. */
1430 	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB|RTK_CMD_RX_ENB);
1431 
1432 	CSR_WRITE_1(sc, RTK_CFG1, RTK_CFG1_DRVLOAD|RTK_CFG1_FULLDUPLEX);
1433 
1434 	/*
1435 	 * Set current media.
1436 	 */
1437 	mii_mediachg(&sc->mii);
1438 
1439 	ifp->if_flags |= IFF_RUNNING;
1440 	ifp->if_flags &= ~IFF_OACTIVE;
1441 
1442 	callout_reset(&sc->rtk_tick_ch, hz, rtk_tick, sc);
1443 
1444  out:
1445 	if (error) {
1446 		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1447 		ifp->if_timer = 0;
1448 		printf("%s: interface not running\n", device_xname(&sc->sc_dev));
1449 	}
1450 	return error;
1451 }
1452 
1453 /*
1454  * Set media options.
1455  */
1456 STATIC int
1457 rtk_ifmedia_upd(struct ifnet *ifp)
1458 {
1459 	struct rtk_softc *sc;
1460 
1461 	sc = ifp->if_softc;
1462 
1463 	return mii_mediachg(&sc->mii);
1464 }
1465 
1466 /*
1467  * Report current media status.
1468  */
1469 STATIC void
1470 rtk_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1471 {
1472 	struct rtk_softc *sc;
1473 
1474 	sc = ifp->if_softc;
1475 
1476 	mii_pollstat(&sc->mii);
1477 	ifmr->ifm_status = sc->mii.mii_media_status;
1478 	ifmr->ifm_active = sc->mii.mii_media_active;
1479 }
1480 
1481 STATIC int
1482 rtk_ioctl(struct ifnet *ifp, u_long command, void *data)
1483 {
1484 	struct rtk_softc *sc = ifp->if_softc;
1485 	struct ifreq *ifr = (struct ifreq *)data;
1486 	int s, error;
1487 
1488 	s = splnet();
1489 
1490 	switch (command) {
1491 	case SIOCGIFMEDIA:
1492 	case SIOCSIFMEDIA:
1493 		error = ifmedia_ioctl(ifp, ifr, &sc->mii.mii_media, command);
1494 		break;
1495 
1496 	default:
1497 		error = ether_ioctl(ifp, command, data);
1498 		if (error == ENETRESET) {
1499 			if (ifp->if_flags & IFF_RUNNING) {
1500 				/*
1501 				 * Multicast list has changed.  Set the
1502 				 * hardware filter accordingly.
1503 				 */
1504 				rtk_setmulti(sc);
1505 			}
1506 			error = 0;
1507 		}
1508 		break;
1509 	}
1510 
1511 	splx(s);
1512 
1513 	return error;
1514 }
1515 
1516 STATIC void
1517 rtk_watchdog(struct ifnet *ifp)
1518 {
1519 	struct rtk_softc *sc;
1520 
1521 	sc = ifp->if_softc;
1522 
1523 	printf("%s: watchdog timeout\n", device_xname(&sc->sc_dev));
1524 	ifp->if_oerrors++;
1525 	rtk_txeof(sc);
1526 	rtk_rxeof(sc);
1527 	rtk_init(ifp);
1528 }
1529 
1530 /*
1531  * Stop the adapter and free any mbufs allocated to the
1532  * RX and TX lists.
1533  */
1534 STATIC void
1535 rtk_stop(struct ifnet *ifp, int disable)
1536 {
1537 	struct rtk_softc *sc = ifp->if_softc;
1538 	struct rtk_tx_desc *txd;
1539 
1540 	callout_stop(&sc->rtk_tick_ch);
1541 
1542 	mii_down(&sc->mii);
1543 
1544 	CSR_WRITE_1(sc, RTK_COMMAND, 0x00);
1545 	CSR_WRITE_2(sc, RTK_IMR, 0x0000);
1546 
1547 	/*
1548 	 * Free the TX list buffers.
1549 	 */
1550 	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL) {
1551 		SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);
1552 		bus_dmamap_unload(sc->sc_dmat, txd->txd_dmamap);
1553 		m_freem(txd->txd_mbuf);
1554 		txd->txd_mbuf = NULL;
1555 		CSR_WRITE_4(sc, txd->txd_txaddr, 0);
1556 	}
1557 
1558 	if (disable)
1559 		rtk_disable(sc);
1560 
1561 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1562 	ifp->if_timer = 0;
1563 }
1564 
1565 STATIC void
1566 rtk_tick(void *arg)
1567 {
1568 	struct rtk_softc *sc = arg;
1569 	int s;
1570 
1571 	s = splnet();
1572 	mii_tick(&sc->mii);
1573 	splx(s);
1574 
1575 	callout_reset(&sc->rtk_tick_ch, hz, rtk_tick, sc);
1576 }
1577