1 /*	$NetBSD: rtl81x9.c,v 1.86 2009/04/27 14:52:50 tsutsui Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  *	FreeBSD Id: if_rl.c,v 1.17 1999/06/19 20:17:37 wpaul Exp
35  */
36 
37 /*
38  * RealTek 8129/8139 PCI NIC driver
39  *
40  * Supports several extremely cheap PCI 10/100 adapters based on
41  * the RealTek chipset. Datasheets can be obtained from
42  * www.realtek.com.tw.
43  *
44  * Written by Bill Paul <wpaul@ctr.columbia.edu>
45  * Electrical Engineering Department
46  * Columbia University, New York City
47  */
48 
49 /*
50  * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is
51  * probably the worst PCI ethernet controller ever made, with the possible
52  * exception of the FEAST chip made by SMC. The 8139 supports bus-master
53  * DMA, but it has a terrible interface that nullifies any performance
54  * gains that bus-master DMA usually offers.
55  *
56  * For transmission, the chip offers a series of four TX descriptor
57  * registers. Each transmit frame must be in a contiguous buffer, aligned
58  * on a longword (32-bit) boundary. This means we almost always have to
59  * do mbuf copies in order to transmit a frame, except in the unlikely
60  * case where a) the packet fits into a single mbuf, and b) the packet
61  * is 32-bit aligned within the mbuf's data area. The presence of only
62  * four descriptor registers means that we can never have more than four
63  * packets queued for transmission at any one time.
64  *
65  * Reception is not much better. The driver has to allocate a single large
66  * buffer area (up to 64K in size) into which the chip will DMA received
67  * frames. Because we don't know where within this region received packets
68  * will begin or end, we have no choice but to copy data from the buffer
69  * area into mbufs in order to pass the packets up to the higher protocol
70  * levels.
71  *
72  * It's impossible given this rotten design to really achieve decent
73  * performance at 100Mbps, unless you happen to have a 400MHz PII or
74  * some equally overmuscled CPU to drive it.
75  *
76  * On the bright side, the 8139 does have a built-in PHY, although
77  * rather than using an MDIO serial interface like most other NICs, the
78  * PHY registers are directly accessible through the 8139's register
79  * space. The 8139 supports autonegotiation, as well as a 64-bit multicast
80  * filter.
81  *
82  * The 8129 chip is an older version of the 8139 that uses an external PHY
83  * chip. The 8129 has a serial MDIO interface for accessing the MII, whereas
84  * the 8139 lets you directly access the on-board PHY registers. We need
85  * to select which interface to use depending on the chip type.
86  */
87 
88 #include <sys/cdefs.h>
89 __KERNEL_RCSID(0, "$NetBSD: rtl81x9.c,v 1.86 2009/04/27 14:52:50 tsutsui Exp $");
90 
91 #include "bpfilter.h"
92 #include "rnd.h"
93 
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/callout.h>
97 #include <sys/device.h>
98 #include <sys/sockio.h>
99 #include <sys/mbuf.h>
100 #include <sys/malloc.h>
101 #include <sys/kernel.h>
102 #include <sys/socket.h>
103 
104 #include <uvm/uvm_extern.h>
105 
106 #include <net/if.h>
107 #include <net/if_arp.h>
108 #include <net/if_ether.h>
109 #include <net/if_dl.h>
110 #include <net/if_media.h>
111 
112 #if NBPFILTER > 0
113 #include <net/bpf.h>
114 #endif
115 #if NRND > 0
116 #include <sys/rnd.h>
117 #endif
118 
119 #include <sys/bus.h>
120 #include <machine/endian.h>
121 
122 #include <dev/mii/mii.h>
123 #include <dev/mii/miivar.h>
124 
125 #include <dev/ic/rtl81x9reg.h>
126 #include <dev/ic/rtl81x9var.h>
127 
128 static void rtk_reset(struct rtk_softc *);
129 static void rtk_rxeof(struct rtk_softc *);
130 static void rtk_txeof(struct rtk_softc *);
131 static void rtk_start(struct ifnet *);
132 static int rtk_ioctl(struct ifnet *, u_long, void *);
133 static int rtk_init(struct ifnet *);
134 static void rtk_stop(struct ifnet *, int);
135 
136 static void rtk_watchdog(struct ifnet *);
137 
138 static void rtk_eeprom_putbyte(struct rtk_softc *, int, int);
139 static void rtk_mii_sync(struct rtk_softc *);
140 static void rtk_mii_send(struct rtk_softc *, uint32_t, int);
141 static int rtk_mii_readreg(struct rtk_softc *, struct rtk_mii_frame *);
142 static int rtk_mii_writereg(struct rtk_softc *, struct rtk_mii_frame *);
143 
144 static int rtk_phy_readreg(device_t, int, int);
145 static void rtk_phy_writereg(device_t, int, int, int);
146 static void rtk_phy_statchg(device_t);
147 static void rtk_tick(void *);
148 
149 static int rtk_enable(struct rtk_softc *);
150 static void rtk_disable(struct rtk_softc *);
151 
152 static void rtk_list_tx_init(struct rtk_softc *);
153 
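/*
 * Helpers that bit-bang the serial EEPROM interface exposed through
 * the EECMD register.
 */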
154 #define EE_SET(x)					\
155 	CSR_WRITE_1(sc, RTK_EECMD,			\
156 		CSR_READ_1(sc, RTK_EECMD) | (x))
157 
158 #define EE_CLR(x)					\
159 	CSR_WRITE_1(sc, RTK_EECMD,			\
160 		CSR_READ_1(sc, RTK_EECMD) & ~(x))
161 
162 #define EE_DELAY()	DELAY(100)
163 
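/*
 * Minimum frame length (excluding the CRC) that short packets are
 * padded to before transmission.
 */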
164 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
165 
166 /*
167  * Send a read command and address to the EEPROM, check for ACK.
168  */
169 static void
170 rtk_eeprom_putbyte(struct rtk_softc *sc, int addr, int addr_len)
171 {
172 	int d, i;
173 
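	/*
	 * Combine the read opcode and the address into a single bit
	 * string, which is clocked out MSB first below.
	 */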
174 	d = (RTK_EECMD_READ << addr_len) | addr;
175 
176 	/*
177 	 * Feed in each bit and strobe the clock.
178 	 */
179 	for (i = RTK_EECMD_LEN + addr_len; i > 0; i--) {
180 		if (d & (1 << (i - 1))) {
181 			EE_SET(RTK_EE_DATAIN);
182 		} else {
183 			EE_CLR(RTK_EE_DATAIN);
184 		}
185 		EE_DELAY();
186 		EE_SET(RTK_EE_CLK);
187 		EE_DELAY();
188 		EE_CLR(RTK_EE_CLK);
189 		EE_DELAY();
190 	}
191 }
192 
193 /*
194  * Read a word of data stored in the EEPROM at address 'addr.'
195  */
196 uint16_t
197 rtk_read_eeprom(struct rtk_softc *sc, int addr, int addr_len)
198 {
199 	uint16_t word;
200 	int i;
201 
202 	/* Enter EEPROM access mode. */
203 	CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_PROGRAM);
204 	EE_DELAY();
205 	EE_SET(RTK_EE_SEL);
206 
207 	/*
208 	 * Send address of word we want to read.
209 	 */
210 	rtk_eeprom_putbyte(sc, addr, addr_len);
211 
212 	/*
213 	 * Start reading bits from EEPROM.
214 	 */
215 	word = 0;
216 	for (i = 16; i > 0; i--) {
217 		EE_SET(RTK_EE_CLK);
218 		EE_DELAY();
219 		if (CSR_READ_1(sc, RTK_EECMD) & RTK_EE_DATAOUT)
220 			word |= 1 << (i - 1);
221 		EE_CLR(RTK_EE_CLK);
222 		EE_DELAY();
223 	}
224 
225 	/* Turn off EEPROM access mode. */
226 	CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_OFF);
227 
228 	return word;
229 }
230 
231 /*
232  * MII access routines are provided for the 8129, which
233  * doesn't have a built-in PHY. For the 8139, we fake things
234  * up by diverting rtk_phy_readreg()/rtk_phy_writereg() to the
235  * direct access PHY registers.
236  */
237 #define MII_SET(x)					\
238 	CSR_WRITE_1(sc, RTK_MII,			\
239 		CSR_READ_1(sc, RTK_MII) | (x))
240 
241 #define MII_CLR(x)					\
242 	CSR_WRITE_1(sc, RTK_MII,			\
243 		CSR_READ_1(sc, RTK_MII) & ~(x))
244 
245 /*
246  * Sync the PHYs by setting the data bit and strobing the clock 32 times.
247  */
248 static void
249 rtk_mii_sync(struct rtk_softc *sc)
250 {
251 	int i;
252 
253 	MII_SET(RTK_MII_DIR|RTK_MII_DATAOUT);
254 
255 	for (i = 0; i < 32; i++) {
256 		MII_SET(RTK_MII_CLK);
257 		DELAY(1);
258 		MII_CLR(RTK_MII_CLK);
259 		DELAY(1);
260 	}
261 }
262 
263 /*
264  * Clock a series of bits through the MII.
265  */
266 static void
267 rtk_mii_send(struct rtk_softc *sc, uint32_t bits, int cnt)
268 {
269 	int i;
270 
271 	MII_CLR(RTK_MII_CLK);
272 
273 	for (i = cnt; i > 0; i--) {
274 		if (bits & (1 << (i - 1))) {
275 			MII_SET(RTK_MII_DATAOUT);
276 		} else {
277 			MII_CLR(RTK_MII_DATAOUT);
278 		}
279 		DELAY(1);
280 		MII_CLR(RTK_MII_CLK);
281 		DELAY(1);
282 		MII_SET(RTK_MII_CLK);
283 	}
284 }
285 
286 /*
287  * Read a PHY register through the MII.
288  */
289 static int
290 rtk_mii_readreg(struct rtk_softc *sc, struct rtk_mii_frame *frame)
291 {
292 	int i, ack, s;
293 
294 	s = splnet();
295 
296 	/*
297 	 * Set up frame for RX.
298 	 */
299 	frame->mii_stdelim = RTK_MII_STARTDELIM;
300 	frame->mii_opcode = RTK_MII_READOP;
301 	frame->mii_turnaround = 0;
302 	frame->mii_data = 0;
303 
304 	CSR_WRITE_2(sc, RTK_MII, 0);
305 
306 	/*
307 	 * Turn on data xmit.
308 	 */
309 	MII_SET(RTK_MII_DIR);
310 
311 	rtk_mii_sync(sc);
312 
313 	/*
314 	 * Send command/address info.
315 	 */
316 	rtk_mii_send(sc, frame->mii_stdelim, 2);
317 	rtk_mii_send(sc, frame->mii_opcode, 2);
318 	rtk_mii_send(sc, frame->mii_phyaddr, 5);
319 	rtk_mii_send(sc, frame->mii_regaddr, 5);
320 
321 	/* Idle bit */
322 	MII_CLR((RTK_MII_CLK|RTK_MII_DATAOUT));
323 	DELAY(1);
324 	MII_SET(RTK_MII_CLK);
325 	DELAY(1);
326 
327 	/* Turn off xmit. */
328 	MII_CLR(RTK_MII_DIR);
329 
330 	/* Check for ack */
331 	MII_CLR(RTK_MII_CLK);
332 	DELAY(1);
333 	ack = CSR_READ_2(sc, RTK_MII) & RTK_MII_DATAIN;
334 	MII_SET(RTK_MII_CLK);
335 	DELAY(1);
336 
337 	/*
338 	 * Now try reading data bits. If the ack failed, we still
339 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
340 	 */
341 	if (ack) {
342 		for (i = 0; i < 16; i++) {
343 			MII_CLR(RTK_MII_CLK);
344 			DELAY(1);
345 			MII_SET(RTK_MII_CLK);
346 			DELAY(1);
347 		}
348 		goto fail;
349 	}
350 
351 	for (i = 16; i > 0; i--) {
352 		MII_CLR(RTK_MII_CLK);
353 		DELAY(1);
354 		if (!ack) {
355 			if (CSR_READ_2(sc, RTK_MII) & RTK_MII_DATAIN)
356 				frame->mii_data |= 1 << (i - 1);
357 			DELAY(1);
358 		}
359 		MII_SET(RTK_MII_CLK);
360 		DELAY(1);
361 	}
362 
363  fail:
364 	MII_CLR(RTK_MII_CLK);
365 	DELAY(1);
366 	MII_SET(RTK_MII_CLK);
367 	DELAY(1);
368 
369 	splx(s);
370 
371 	if (ack)
372 		return 1;
373 	return 0;
374 }
375 
376 /*
377  * Write to a PHY register through the MII.
378  */
379 static int
380 rtk_mii_writereg(struct rtk_softc *sc, struct rtk_mii_frame *frame)
381 {
382 	int s;
383 
384 	s = splnet();
385 	/*
386 	 * Set up frame for TX.
387 	 */
388 	frame->mii_stdelim = RTK_MII_STARTDELIM;
389 	frame->mii_opcode = RTK_MII_WRITEOP;
390 	frame->mii_turnaround = RTK_MII_TURNAROUND;
391 
392 	/*
393 	 * Turn on data output.
394 	 */
395 	MII_SET(RTK_MII_DIR);
396 
397 	rtk_mii_sync(sc);
398 
399 	rtk_mii_send(sc, frame->mii_stdelim, 2);
400 	rtk_mii_send(sc, frame->mii_opcode, 2);
401 	rtk_mii_send(sc, frame->mii_phyaddr, 5);
402 	rtk_mii_send(sc, frame->mii_regaddr, 5);
403 	rtk_mii_send(sc, frame->mii_turnaround, 2);
404 	rtk_mii_send(sc, frame->mii_data, 16);
405 
406 	/* Idle bit. */
407 	MII_SET(RTK_MII_CLK);
408 	DELAY(1);
409 	MII_CLR(RTK_MII_CLK);
410 	DELAY(1);
411 
412 	/*
413 	 * Turn off xmit.
414 	 */
415 	MII_CLR(RTK_MII_DIR);
416 
417 	splx(s);
418 
419 	return 0;
420 }
421 
422 static int
423 rtk_phy_readreg(device_t self, int phy, int reg)
424 {
425 	struct rtk_softc *sc = device_private(self);
426 	struct rtk_mii_frame frame;
427 	int rval;
428 	int rtk8139_reg;
429 
430 	if ((sc->sc_quirk & RTKQ_8129) == 0) {
431 		if (phy != 7)
432 			return 0;
433 
434 		switch (reg) {
435 		case MII_BMCR:
436 			rtk8139_reg = RTK_BMCR;
437 			break;
438 		case MII_BMSR:
439 			rtk8139_reg = RTK_BMSR;
440 			break;
441 		case MII_ANAR:
442 			rtk8139_reg = RTK_ANAR;
443 			break;
444 		case MII_ANER:
445 			rtk8139_reg = RTK_ANER;
446 			break;
447 		case MII_ANLPAR:
448 			rtk8139_reg = RTK_LPAR;
449 			break;
450 		default:
451 #if 0
452 			printf("%s: bad phy register\n", device_xname(self));
453 #endif
454 			return 0;
455 		}
456 		rval = CSR_READ_2(sc, rtk8139_reg);
457 		return rval;
458 	}
459 
460 	memset(&frame, 0, sizeof(frame));
461 
462 	frame.mii_phyaddr = phy;
463 	frame.mii_regaddr = reg;
464 	rtk_mii_readreg(sc, &frame);
465 
466 	return frame.mii_data;
467 }
468 
469 static void
470 rtk_phy_writereg(device_t self, int phy, int reg, int data)
471 {
472 	struct rtk_softc *sc = device_private(self);
473 	struct rtk_mii_frame frame;
474 	int rtk8139_reg;
475 
476 	if ((sc->sc_quirk & RTKQ_8129) == 0) {
477 		if (phy != 7)
478 			return;
479 
480 		switch (reg) {
481 		case MII_BMCR:
482 			rtk8139_reg = RTK_BMCR;
483 			break;
484 		case MII_BMSR:
485 			rtk8139_reg = RTK_BMSR;
486 			break;
487 		case MII_ANAR:
488 			rtk8139_reg = RTK_ANAR;
489 			break;
490 		case MII_ANER:
491 			rtk8139_reg = RTK_ANER;
492 			break;
493 		case MII_ANLPAR:
494 			rtk8139_reg = RTK_LPAR;
495 			break;
496 		default:
497 #if 0
498 			printf("%s: bad phy register\n", device_xname(self));
499 #endif
500 			return;
501 		}
502 		CSR_WRITE_2(sc, rtk8139_reg, data);
503 		return;
504 	}
505 
506 	memset(&frame, 0, sizeof(frame));
507 
508 	frame.mii_phyaddr = phy;
509 	frame.mii_regaddr = reg;
510 	frame.mii_data = data;
511 
512 	rtk_mii_writereg(sc, &frame);
513 }
514 
515 static void
516 rtk_phy_statchg(device_t v)
517 {
518 
519 	/* Nothing to do. */
520 }
521 
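/*
 * The multicast hash bucket is the top 6 bits of the big-endian CRC32
 * of the station address.
 */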
522 #define	rtk_calchash(addr) \
523 	(ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
524 
525 /*
526  * Program the 64-bit multicast hash filter.
527  */
528 void
529 rtk_setmulti(struct rtk_softc *sc)
530 {
531 	struct ifnet *ifp;
532 	uint32_t hashes[2] = { 0, 0 };
533 	uint32_t rxfilt;
534 	struct ether_multi *enm;
535 	struct ether_multistep step;
536 	int h, mcnt;
537 
538 	ifp = &sc->ethercom.ec_if;
539 
540 	rxfilt = CSR_READ_4(sc, RTK_RXCFG);
541 
542 	if (ifp->if_flags & IFF_PROMISC) {
543  allmulti:
544 		ifp->if_flags |= IFF_ALLMULTI;
545 		rxfilt |= RTK_RXCFG_RX_MULTI;
546 		CSR_WRITE_4(sc, RTK_RXCFG, rxfilt);
547 		CSR_WRITE_4(sc, RTK_MAR0, 0xFFFFFFFF);
548 		CSR_WRITE_4(sc, RTK_MAR4, 0xFFFFFFFF);
549 		return;
550 	}
551 
552 	/* first, zot all the existing hash bits */
553 	CSR_WRITE_4(sc, RTK_MAR0, 0);
554 	CSR_WRITE_4(sc, RTK_MAR4, 0);
555 
556 	/* now program new ones */
557 	ETHER_FIRST_MULTI(step, &sc->ethercom, enm);
558 	mcnt = 0;
559 	while (enm != NULL) {
560 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
561 		    ETHER_ADDR_LEN) != 0)
562 			goto allmulti;
563 
564 		h = rtk_calchash(enm->enm_addrlo);
565 		if (h < 32)
566 			hashes[0] |= (1 << h);
567 		else
568 			hashes[1] |= (1 << (h - 32));
569 		mcnt++;
570 		ETHER_NEXT_MULTI(step, enm);
571 	}
572 
573 	ifp->if_flags &= ~IFF_ALLMULTI;
574 
575 	if (mcnt)
576 		rxfilt |= RTK_RXCFG_RX_MULTI;
577 	else
578 		rxfilt &= ~RTK_RXCFG_RX_MULTI;
579 
580 	CSR_WRITE_4(sc, RTK_RXCFG, rxfilt);
581 
582 	/*
583 	 * For some unfathomable reason, RealTek decided to reverse
584 	 * the order of the multicast hash registers in the PCI Express
585 	 * parts. This means we have to write the hash pattern in reverse
586 	 * order for those devices.
587 	 */
588 	if ((sc->sc_quirk & RTKQ_PCIE) != 0) {
589 		CSR_WRITE_4(sc, RTK_MAR0, bswap32(hashes[1]));
590 		CSR_WRITE_4(sc, RTK_MAR4, bswap32(hashes[0]));
591 	} else {
592 		CSR_WRITE_4(sc, RTK_MAR0, hashes[0]);
593 		CSR_WRITE_4(sc, RTK_MAR4, hashes[1]);
594 	}
595 }
596 
597 void
598 rtk_reset(struct rtk_softc *sc)
599 {
600 	int i;
601 
602 	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_RESET);
603 
604 	for (i = 0; i < RTK_TIMEOUT; i++) {
605 		DELAY(10);
606 		if ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_RESET) == 0)
607 			break;
608 	}
609 	if (i == RTK_TIMEOUT)
610 		printf("%s: reset never completed!\n",
611 		    device_xname(sc->sc_dev));
612 }
613 
614 /*
615  * Attach the interface. Allocate softc structures, do ifmedia
616  * setup and ethernet/BPF attach.
617  */
618 void
619 rtk_attach(struct rtk_softc *sc)
620 {
621 	device_t self = sc->sc_dev;
622 	struct ifnet *ifp;
623 	struct rtk_tx_desc *txd;
624 	uint16_t val;
625 	uint8_t eaddr[ETHER_ADDR_LEN];
626 	int error;
627 	int i, addr_len;
628 
629 	callout_init(&sc->rtk_tick_ch, 0);
630 
631 	/*
632 	 * Check the EEPROM type (93C46 or 93C56) to get the address length.
633 	 */
634 	if (rtk_read_eeprom(sc, RTK_EE_ID, RTK_EEADDR_LEN1) == 0x8129)
635 		addr_len = RTK_EEADDR_LEN1;
636 	else
637 		addr_len = RTK_EEADDR_LEN0;
638 
639 	/*
640 	 * Get station address.
641 	 */
642 	val = rtk_read_eeprom(sc, RTK_EE_EADDR0, addr_len);
643 	eaddr[0] = val & 0xff;
644 	eaddr[1] = val >> 8;
645 	val = rtk_read_eeprom(sc, RTK_EE_EADDR1, addr_len);
646 	eaddr[2] = val & 0xff;
647 	eaddr[3] = val >> 8;
648 	val = rtk_read_eeprom(sc, RTK_EE_EADDR2, addr_len);
649 	eaddr[4] = val & 0xff;
650 	eaddr[5] = val >> 8;
651 
652 	if ((error = bus_dmamem_alloc(sc->sc_dmat,
653 	    RTK_RXBUFLEN + 16, PAGE_SIZE, 0, &sc->sc_dmaseg, 1, &sc->sc_dmanseg,
654 	    BUS_DMA_NOWAIT)) != 0) {
655 		aprint_error_dev(self,
656 		    "can't allocate recv buffer, error = %d\n", error);
657 		goto fail_0;
658 	}
659 
660 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg,
661 	    RTK_RXBUFLEN + 16, (void **)&sc->rtk_rx_buf,
662 	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
663 		aprint_error_dev(self,
664 		    "can't map recv buffer, error = %d\n", error);
665 		goto fail_1;
666 	}
667 
668 	if ((error = bus_dmamap_create(sc->sc_dmat,
669 	    RTK_RXBUFLEN + 16, 1, RTK_RXBUFLEN + 16, 0, BUS_DMA_NOWAIT,
670 	    &sc->recv_dmamap)) != 0) {
671 		aprint_error_dev(self,
672 		    "can't create recv buffer DMA map, error = %d\n", error);
673 		goto fail_2;
674 	}
675 
676 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->recv_dmamap,
677 	    sc->rtk_rx_buf, RTK_RXBUFLEN + 16,
678 	    NULL, BUS_DMA_READ|BUS_DMA_NOWAIT)) != 0) {
679 		aprint_error_dev(self,
680 		    "can't load recv buffer DMA map, error = %d\n", error);
681 		goto fail_3;
682 	}
683 
684 	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
685 		txd = &sc->rtk_tx_descs[i];
686 		if ((error = bus_dmamap_create(sc->sc_dmat,
687 		    MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
688 		    &txd->txd_dmamap)) != 0) {
689 			aprint_error_dev(self,
690 			    "can't create snd buffer DMA map, error = %d\n",
691 			    error);
692 			goto fail_4;
693 		}
694 		txd->txd_txaddr = RTK_TXADDR0 + (i * 4);
695 		txd->txd_txstat = RTK_TXSTAT0 + (i * 4);
696 	}
697 	SIMPLEQ_INIT(&sc->rtk_tx_free);
698 	SIMPLEQ_INIT(&sc->rtk_tx_dirty);
699 
700 	/*
701 	 * From this point forward, the attachment cannot fail. A failure
702 	 * before this point releases any resources that may have been
703 	 * allocated.
704 	 */
705 	sc->sc_flags |= RTK_ATTACHED;
706 
707 	/* Reset the adapter. */
708 	rtk_reset(sc);
709 
710 	aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(eaddr));
711 
712 	ifp = &sc->ethercom.ec_if;
713 	ifp->if_softc = sc;
714 	strcpy(ifp->if_xname, device_xname(self));
715 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
716 	ifp->if_ioctl = rtk_ioctl;
717 	ifp->if_start = rtk_start;
718 	ifp->if_watchdog = rtk_watchdog;
719 	ifp->if_init = rtk_init;
720 	ifp->if_stop = rtk_stop;
721 	IFQ_SET_READY(&ifp->if_snd);
722 
723 	/*
724 	 * Do ifmedia setup.
725 	 */
726 	sc->mii.mii_ifp = ifp;
727 	sc->mii.mii_readreg = rtk_phy_readreg;
728 	sc->mii.mii_writereg = rtk_phy_writereg;
729 	sc->mii.mii_statchg = rtk_phy_statchg;
730 	sc->ethercom.ec_mii = &sc->mii;
731 	ifmedia_init(&sc->mii.mii_media, IFM_IMASK, ether_mediachange,
732 	    ether_mediastatus);
733 	mii_attach(self, &sc->mii, 0xffffffff,
734 	    MII_PHY_ANY, MII_OFFSET_ANY, 0);
735 
736 	/* Choose a default media. */
737 	if (LIST_FIRST(&sc->mii.mii_phys) == NULL) {
738 		ifmedia_add(&sc->mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
739 		ifmedia_set(&sc->mii.mii_media, IFM_ETHER|IFM_NONE);
740 	} else {
741 		ifmedia_set(&sc->mii.mii_media, IFM_ETHER|IFM_AUTO);
742 	}
743 
744 	/*
745 	 * Call MI attach routines.
746 	 */
747 	if_attach(ifp);
748 	ether_ifattach(ifp, eaddr);
749 
750 #if NRND > 0
751 	rnd_attach_source(&sc->rnd_source, device_xname(self),
752 	    RND_TYPE_NET, 0);
753 #endif
754 
755 	return;
756  fail_4:
757 	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
758 		txd = &sc->rtk_tx_descs[i];
759 		if (txd->txd_dmamap != NULL)
760 			bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmamap);
761 	}
762  fail_3:
763 	bus_dmamap_destroy(sc->sc_dmat, sc->recv_dmamap);
764  fail_2:
765 	bus_dmamem_unmap(sc->sc_dmat, sc->rtk_rx_buf,
766 	    RTK_RXBUFLEN + 16);
767  fail_1:
768 	bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg);
769  fail_0:
770 	return;
771 }
772 
773 /*
774  * Initialize the transmit descriptors.
775  */
776 static void
777 rtk_list_tx_init(struct rtk_softc *sc)
778 {
779 	struct rtk_tx_desc *txd;
780 	int i;
781 
782 	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL)
783 		SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);
784 	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_free)) != NULL)
785 		SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_free, txd_q);
786 
787 	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
788 		txd = &sc->rtk_tx_descs[i];
789 		CSR_WRITE_4(sc, txd->txd_txaddr, 0);
790 		SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_free, txd, txd_q);
791 	}
792 }
793 
794 /*
795  * rtk_activate:
796  *     Handle device activation/deactivation requests.
797  */
798 int
799 rtk_activate(device_t self, enum devact act)
800 {
801 	struct rtk_softc *sc = device_private(self);
802 	int s, error;
803 
804 	error = 0;
805 	s = splnet();
806 	switch (act) {
807 	case DVACT_ACTIVATE:
808 		error = EOPNOTSUPP;
809 		break;
810 	case DVACT_DEACTIVATE:
811 		mii_activate(&sc->mii, act, MII_PHY_ANY, MII_OFFSET_ANY);
812 		if_deactivate(&sc->ethercom.ec_if);
813 		break;
814 	}
815 	splx(s);
816 
817 	return error;
818 }
819 
820 /*
821  * rtk_detach:
822  *     Detach a rtk interface.
823  */
824 int
825 rtk_detach(struct rtk_softc *sc)
826 {
827 	struct ifnet *ifp = &sc->ethercom.ec_if;
828 	struct rtk_tx_desc *txd;
829 	int i;
830 
831 	/*
832 	 * Succeed now if there isn't any work to do.
833 	 */
834 	if ((sc->sc_flags & RTK_ATTACHED) == 0)
835 		return 0;
836 
837 	/* Unhook our tick handler. */
838 	callout_stop(&sc->rtk_tick_ch);
839 
840 	/* Detach all PHYs. */
841 	mii_detach(&sc->mii, MII_PHY_ANY, MII_OFFSET_ANY);
842 
843 	/* Delete all remaining media. */
844 	ifmedia_delete_instance(&sc->mii.mii_media, IFM_INST_ANY);
845 
846 #if NRND > 0
847 	rnd_detach_source(&sc->rnd_source);
848 #endif
849 
850 	ether_ifdetach(ifp);
851 	if_detach(ifp);
852 
853 	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
854 		txd = &sc->rtk_tx_descs[i];
855 		if (txd->txd_dmamap != NULL)
856 			bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmamap);
857 	}
858 	bus_dmamap_destroy(sc->sc_dmat, sc->recv_dmamap);
859 	bus_dmamem_unmap(sc->sc_dmat, sc->rtk_rx_buf,
860 	    RTK_RXBUFLEN + 16);
861 	bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg);
862 
863 	return 0;
864 }
865 
866 /*
867  * rtk_enable:
868  *     Enable the RTL81X9 chip.
869  */
870 int
871 rtk_enable(struct rtk_softc *sc)
872 {
873 
874 	if (RTK_IS_ENABLED(sc) == 0 && sc->sc_enable != NULL) {
875 		if ((*sc->sc_enable)(sc) != 0) {
876 			printf("%s: device enable failed\n",
877 			    device_xname(sc->sc_dev));
878 			return EIO;
879 		}
880 		sc->sc_flags |= RTK_ENABLED;
881 	}
882 	return 0;
883 }
884 
885 /*
886  * rtk_disable:
887  *     Disable the RTL81X9 chip.
888  */
889 void
890 rtk_disable(struct rtk_softc *sc)
891 {
892 
893 	if (RTK_IS_ENABLED(sc) && sc->sc_disable != NULL) {
894 		(*sc->sc_disable)(sc);
895 		sc->sc_flags &= ~RTK_ENABLED;
896 	}
897 }
898 
899 /*
900  * A frame has been uploaded: pass the resulting mbuf chain up to
901  * the higher level protocols.
902  *
903  * You know there's something wrong with a PCI bus-master chip design.
904  *
905  * The receive operation is badly documented in the datasheet, so I'll
906  * attempt to document it here. The driver provides a buffer area and
907  * places its base address in the RX buffer start address register.
908  * The chip then begins copying frames into the RX buffer. Each frame
909  * is preceded by a 32-bit RX status word which specifies the length
910  * of the frame and certain other status bits. Each frame (starting with
911  * the status word) is also 32-bit aligned. The frame length is in the
912  * upper 16 bits of the status word; the lower 15 bits correspond with
913  * the 'rx status register' mentioned in the datasheet.
914  *
915  * Note: to make the Alpha happy, the frame payload needs to be aligned
916  * on a 32-bit boundary. To achieve this, we copy the data into the mbuf
917  * shifted forward by 2 bytes.
918  */
919 static void
920 rtk_rxeof(struct rtk_softc *sc)
921 {
922 	struct mbuf *m;
923 	struct ifnet *ifp;
924 	uint8_t *rxbufpos, *dst;
925 	u_int total_len, wrap;
926 	uint32_t rxstat;
927 	uint16_t cur_rx, new_rx;
928 	uint16_t limit;
929 	uint16_t rx_bytes, max_bytes;
930 
931 	ifp = &sc->ethercom.ec_if;
932 
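	/*
	 * The chip keeps RTK_CURRXADDR 16 bytes behind the actual read
	 * offset, hence the +16 here and the -16 when it is written back
	 * in the loop below.
	 */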
933 	cur_rx = (CSR_READ_2(sc, RTK_CURRXADDR) + 16) % RTK_RXBUFLEN;
934 
935 	/* Do not try to read past this point. */
936 	limit = CSR_READ_2(sc, RTK_CURRXBUF) % RTK_RXBUFLEN;
937 
938 	if (limit < cur_rx)
939 		max_bytes = (RTK_RXBUFLEN - cur_rx) + limit;
940 	else
941 		max_bytes = limit - cur_rx;
942 	rx_bytes = 0;
943 
944 	while ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_EMPTY_RXBUF) == 0) {
945 		rxbufpos = sc->rtk_rx_buf + cur_rx;
946 		bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, cur_rx,
947 		    RTK_RXSTAT_LEN, BUS_DMASYNC_POSTREAD);
948 		rxstat = le32toh(*(uint32_t *)rxbufpos);
949 		bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, cur_rx,
950 		    RTK_RXSTAT_LEN, BUS_DMASYNC_PREREAD);
951 
952 		/*
953 		 * Here's a totally undocumented fact for you. When the
954 		 * RealTek chip is in the process of copying a packet into
955 		 * RAM for you, the length will be 0xfff0. If you spot a
956 		 * packet header with this value, you need to stop. The
957 		 * datasheet makes absolutely no mention of this and
958 		 * RealTek should be shot for this.
959 		 */
960 		total_len = rxstat >> 16;
961 		if (total_len == RTK_RXSTAT_UNFINISHED)
962 			break;
963 
964 		if ((rxstat & RTK_RXSTAT_RXOK) == 0 ||
965 		    total_len < ETHER_MIN_LEN ||
966 		    total_len > (MCLBYTES - RTK_ETHER_ALIGN)) {
967 			ifp->if_ierrors++;
968 
969 			/*
970 			 * submitted by:[netbsd-pcmcia:00484]
971 			 *	Takahiro Kambe <taca@sky.yamashina.kyoto.jp>
972 			 * obtained from:
973 			 *     FreeBSD if_rl.c rev 1.24->1.25
974 			 *
975 			 */
976 #if 0
977 			if (rxstat & (RTK_RXSTAT_BADSYM|RTK_RXSTAT_RUNT|
978 			    RTK_RXSTAT_GIANT|RTK_RXSTAT_CRCERR|
979 			    RTK_RXSTAT_ALIGNERR)) {
980 				CSR_WRITE_2(sc, RTK_COMMAND, RTK_CMD_TX_ENB);
981 				CSR_WRITE_2(sc, RTK_COMMAND,
982 				    RTK_CMD_TX_ENB|RTK_CMD_RX_ENB);
983 				CSR_WRITE_4(sc, RTK_RXCFG, RTK_RXCFG_CONFIG);
984 				CSR_WRITE_4(sc, RTK_RXADDR,
985 				    sc->recv_dmamap->dm_segs[0].ds_addr);
986 				cur_rx = 0;
987 			}
988 			break;
989 #else
990 			rtk_init(ifp);
991 			return;
992 #endif
993 		}
994 
995 		/* No errors; receive the packet. */
996 		rx_bytes += total_len + RTK_RXSTAT_LEN;
997 
998 		/*
999 		 * Avoid trying to read more bytes than we know
1000 		 * the chip has prepared for us.
1001 		 */
1002 		if (rx_bytes > max_bytes)
1003 			break;
1004 
1005 		/*
1006 		 * Skip the status word, wrapping around to the beginning
1007 		 * of the Rx area, if necessary.
1008 		 */
1009 		cur_rx = (cur_rx + RTK_RXSTAT_LEN) % RTK_RXBUFLEN;
1010 		rxbufpos = sc->rtk_rx_buf + cur_rx;
1011 
1012 		/*
1013 		 * Compute the number of bytes at which the packet
1014 		 * will wrap to the beginning of the ring buffer.
1015 		 */
1016 		wrap = RTK_RXBUFLEN - cur_rx;
1017 
1018 		/*
1019 		 * Compute where the next pending packet is.
1020 		 */
1021 		if (total_len > wrap)
1022 			new_rx = total_len - wrap;
1023 		else
1024 			new_rx = cur_rx + total_len;
1025 		/* Round up to 32-bit boundary. */
1026 		new_rx = roundup2(new_rx, sizeof(uint32_t)) % RTK_RXBUFLEN;
1027 
1028 		/*
1029 		 * The RealTek chip includes the CRC with every
1030 		 * incoming packet; trim it off here.
1031 		 */
1032 		total_len -= ETHER_CRC_LEN;
1033 
1034 		/*
1035 		 * Now allocate an mbuf (and possibly a cluster) to hold
1036 		 * the packet. Note we offset the packet 2 bytes so that
1037 		 * data after the Ethernet header will be 4-byte aligned.
1038 		 */
1039 		MGETHDR(m, M_DONTWAIT, MT_DATA);
1040 		if (m == NULL) {
1041 			printf("%s: unable to allocate Rx mbuf\n",
1042 			    device_xname(sc->sc_dev));
1043 			ifp->if_ierrors++;
1044 			goto next_packet;
1045 		}
1046 		if (total_len > (MHLEN - RTK_ETHER_ALIGN)) {
1047 			MCLGET(m, M_DONTWAIT);
1048 			if ((m->m_flags & M_EXT) == 0) {
1049 				printf("%s: unable to allocate Rx cluster\n",
1050 				    device_xname(sc->sc_dev));
1051 				ifp->if_ierrors++;
1052 				m_freem(m);
1053 				m = NULL;
1054 				goto next_packet;
1055 			}
1056 		}
1057 		m->m_data += RTK_ETHER_ALIGN;	/* for alignment */
1058 		m->m_pkthdr.rcvif = ifp;
1059 		m->m_pkthdr.len = m->m_len = total_len;
1060 		dst = mtod(m, void *);
1061 
1062 		/*
1063 		 * If the packet wraps, copy up to the wrapping point.
1064 		 */
1065 		if (total_len > wrap) {
1066 			bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
1067 			    cur_rx, wrap, BUS_DMASYNC_POSTREAD);
1068 			memcpy(dst, rxbufpos, wrap);
1069 			bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
1070 			    cur_rx, wrap, BUS_DMASYNC_PREREAD);
1071 			cur_rx = 0;
1072 			rxbufpos = sc->rtk_rx_buf;
1073 			total_len -= wrap;
1074 			dst += wrap;
1075 		}
1076 
1077 		/*
1078 		 * ...and now the rest.
1079 		 */
1080 		bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
1081 		    cur_rx, total_len, BUS_DMASYNC_POSTREAD);
1082 		memcpy(dst, rxbufpos, total_len);
1083 		bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
1084 		    cur_rx, total_len, BUS_DMASYNC_PREREAD);
1085 
1086  next_packet:
1087 		CSR_WRITE_2(sc, RTK_CURRXADDR, (new_rx - 16) % RTK_RXBUFLEN);
1088 		cur_rx = new_rx;
1089 
1090 		if (m == NULL)
1091 			continue;
1092 
1093 		ifp->if_ipackets++;
1094 
1095 #if NBPFILTER > 0
1096 		if (ifp->if_bpf)
1097 			bpf_mtap(ifp->if_bpf, m);
1098 #endif
1099 		/* pass it on. */
1100 		(*ifp->if_input)(ifp, m);
1101 	}
1102 }
1103 
1104 /*
1105  * A frame was downloaded to the chip. It's safe for us to clean up
1106  * the list buffers.
1107  */
1108 static void
1109 rtk_txeof(struct rtk_softc *sc)
1110 {
1111 	struct ifnet *ifp;
1112 	struct rtk_tx_desc *txd;
1113 	uint32_t txstat;
1114 
1115 	ifp = &sc->ethercom.ec_if;
1116 
1117 	/*
1118 	 * Go through our tx list and free mbufs for those
1119 	 * frames that have been uploaded.
1120 	 */
1121 	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL) {
1122 		txstat = CSR_READ_4(sc, txd->txd_txstat);
1123 		if ((txstat & (RTK_TXSTAT_TX_OK|
1124 		    RTK_TXSTAT_TX_UNDERRUN|RTK_TXSTAT_TXABRT)) == 0)
1125 			break;
1126 
1127 		SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);
1128 
1129 		bus_dmamap_sync(sc->sc_dmat, txd->txd_dmamap, 0,
1130 		    txd->txd_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1131 		bus_dmamap_unload(sc->sc_dmat, txd->txd_dmamap);
1132 		m_freem(txd->txd_mbuf);
1133 		txd->txd_mbuf = NULL;
1134 
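		/* Accumulate the per-frame collision count from the TX status. */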
1135 		ifp->if_collisions += (txstat & RTK_TXSTAT_COLLCNT) >> 24;
1136 
1137 		if (txstat & RTK_TXSTAT_TX_OK)
1138 			ifp->if_opackets++;
1139 		else {
1140 			ifp->if_oerrors++;
1141 
1142 			/*
1143 			 * Increase the early TX threshold if an underrun
1144 			 * occurred, stepping up by 64 bytes each time.
1145 			 */
1146 			if (txstat & RTK_TXSTAT_TX_UNDERRUN) {
1147 #ifdef DEBUG
1148 				printf("%s: transmit underrun;",
1149 				    device_xname(sc->sc_dev));
1150 #endif
1151 				if (sc->sc_txthresh < RTK_TXTH_MAX) {
1152 					sc->sc_txthresh += 2;
1153 #ifdef DEBUG
1154 					printf(" new threshold: %d bytes",
1155 					    sc->sc_txthresh * 32);
1156 #endif
1157 				}
1158 #ifdef DEBUG
1159 				printf("\n");
1160 #endif
1161 			}
1162 			if (txstat & (RTK_TXSTAT_TXABRT|RTK_TXSTAT_OUTOFWIN))
1163 				CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG);
1164 		}
1165 		SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_free, txd, txd_q);
1166 		ifp->if_flags &= ~IFF_OACTIVE;
1167 	}
1168 
1169 	/* Clear the timeout timer if there is no pending packet. */
1170 	if (SIMPLEQ_EMPTY(&sc->rtk_tx_dirty))
1171 		ifp->if_timer = 0;
1172 
1173 }
1174 
1175 int
1176 rtk_intr(void *arg)
1177 {
1178 	struct rtk_softc *sc;
1179 	struct ifnet *ifp;
1180 	uint16_t status;
1181 	int handled;
1182 
1183 	sc = arg;
1184 	ifp = &sc->ethercom.ec_if;
1185 
1186 	if (!device_has_power(sc->sc_dev))
1187 		return 0;
1188 
1189 	/* Disable interrupts. */
1190 	CSR_WRITE_2(sc, RTK_IMR, 0x0000);
1191 
1192 	handled = 0;
1193 	for (;;) {
1194 
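		/*
		 * Read and acknowledge the interrupt status, and keep
		 * looping until no interesting bits remain set.
		 */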
1195 		status = CSR_READ_2(sc, RTK_ISR);
1196 
1197 		if (status == 0xffff)
1198 			break; /* Card is gone... */
1199 
1200 		if (status)
1201 			CSR_WRITE_2(sc, RTK_ISR, status);
1202 
1203 		if ((status & RTK_INTRS) == 0)
1204 			break;
1205 
1206 		handled = 1;
1207 
1208 		if (status & RTK_ISR_RX_OK)
1209 			rtk_rxeof(sc);
1210 
1211 		if (status & RTK_ISR_RX_ERR)
1212 			rtk_rxeof(sc);
1213 
1214 		if (status & (RTK_ISR_TX_OK|RTK_ISR_TX_ERR))
1215 			rtk_txeof(sc);
1216 
1217 		if (status & RTK_ISR_SYSTEM_ERR) {
1218 			rtk_reset(sc);
1219 			rtk_init(ifp);
1220 		}
1221 	}
1222 
1223 	/* Re-enable interrupts. */
1224 	CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS);
1225 
1226 	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
1227 		rtk_start(ifp);
1228 
1229 #if NRND > 0
1230 	if (RND_ENABLED(&sc->rnd_source))
1231 		rnd_add_uint32(&sc->rnd_source, status);
1232 #endif
1233 
1234 	return handled;
1235 }
1236 
1237 /*
1238  * Main transmit routine.
1239  */
1240 
1241 static void
1242 rtk_start(struct ifnet *ifp)
1243 {
1244 	struct rtk_softc *sc;
1245 	struct rtk_tx_desc *txd;
1246 	struct mbuf *m_head, *m_new;
1247 	int error, len;
1248 
1249 	sc = ifp->if_softc;
1250 
1251 	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_free)) != NULL) {
1252 		IFQ_POLL(&ifp->if_snd, m_head);
1253 		if (m_head == NULL)
1254 			break;
1255 		m_new = NULL;
1256 
1257 		/*
1258 		 * Load the DMA map.  If this fails, the packet didn't
1259 		 * fit in one DMA segment, and we need to copy.  Note,
1260 		 * the packet must also be aligned.
1261 		 * If the packet is too small, copy it too, so we're sure
1262 		 * to have enough room for the padding.
1263 		 */
1264 		if ((mtod(m_head, uintptr_t) & 3) != 0 ||
1265 		    m_head->m_pkthdr.len < ETHER_PAD_LEN ||
1266 		    bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmamap,
1267 			m_head, BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
1268 			MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1269 			if (m_new == NULL) {
1270 				printf("%s: unable to allocate Tx mbuf\n",
1271 				    device_xname(sc->sc_dev));
1272 				break;
1273 			}
1274 			if (m_head->m_pkthdr.len > MHLEN) {
1275 				MCLGET(m_new, M_DONTWAIT);
1276 				if ((m_new->m_flags & M_EXT) == 0) {
1277 					printf("%s: unable to allocate Tx "
1278 					    "cluster\n",
1279 					    device_xname(sc->sc_dev));
1280 					m_freem(m_new);
1281 					break;
1282 				}
1283 			}
1284 			m_copydata(m_head, 0, m_head->m_pkthdr.len,
1285 			    mtod(m_new, void *));
1286 			m_new->m_pkthdr.len = m_new->m_len =
1287 			    m_head->m_pkthdr.len;
1288 			if (m_head->m_pkthdr.len < ETHER_PAD_LEN) {
1289 				memset(
1290 				    mtod(m_new, char *) + m_head->m_pkthdr.len,
1291 				    0, ETHER_PAD_LEN - m_head->m_pkthdr.len);
1292 				m_new->m_pkthdr.len = m_new->m_len =
1293 				    ETHER_PAD_LEN;
1294 			}
1295 			error = bus_dmamap_load_mbuf(sc->sc_dmat,
1296 			    txd->txd_dmamap, m_new,
1297 			    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1298 			if (error) {
1299 				printf("%s: unable to load Tx buffer, "
1300 				    "error = %d\n",
1301 				    device_xname(sc->sc_dev), error);
1302 				break;
1303 			}
1304 		}
1305 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1306 #if NBPFILTER > 0
1307 		/*
1308 		 * If there's a BPF listener, bounce a copy of this frame
1309 		 * to him.
1310 		 */
1311 		if (ifp->if_bpf)
1312 			bpf_mtap(ifp->if_bpf, m_head);
1313 #endif
1314 		if (m_new != NULL) {
1315 			m_freem(m_head);
1316 			m_head = m_new;
1317 		}
1318 		txd->txd_mbuf = m_head;
1319 
1320 		SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_free, txd_q);
1321 		SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_dirty, txd, txd_q);
1322 
1323 		/*
1324 		 * Transmit the frame.
1325 		 */
1326 		bus_dmamap_sync(sc->sc_dmat,
1327 		    txd->txd_dmamap, 0, txd->txd_dmamap->dm_mapsize,
1328 		    BUS_DMASYNC_PREWRITE);
1329 
1330 		len = txd->txd_dmamap->dm_segs[0].ds_len;
1331 
1332 		CSR_WRITE_4(sc, txd->txd_txaddr,
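		/*
		 * Writing the buffer address, then the length and early-TX
		 * threshold to the descriptor's status register hands the
		 * buffer to the chip and starts the transmission.
		 */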
1333 		    txd->txd_dmamap->dm_segs[0].ds_addr);
1334 		CSR_WRITE_4(sc, txd->txd_txstat,
1335 		    RTK_TXSTAT_THRESH(sc->sc_txthresh) | len);
1336 
1337 		/*
1338 		 * Set a timeout in case the chip goes out to lunch.
1339 		 */
1340 		ifp->if_timer = 5;
1341 	}
1342 
1343 	/*
1344 	 * If we've used up all of our TX slots, mark the NIC as busy
1345 	 * until it drains some of the packets from the queue.
1347 	 */
1348 	if (SIMPLEQ_EMPTY(&sc->rtk_tx_free))
1349 		ifp->if_flags |= IFF_OACTIVE;
1350 }
1351 
1352 static int
1353 rtk_init(struct ifnet *ifp)
1354 {
1355 	struct rtk_softc *sc = ifp->if_softc;
1356 	int error, i;
1357 	uint32_t rxcfg;
1358 
1359 	if ((error = rtk_enable(sc)) != 0)
1360 		goto out;
1361 
1362 	/*
1363 	 * Cancel pending I/O.
1364 	 */
1365 	rtk_stop(ifp, 0);
1366 
1367 	/* Init our MAC address */
1368 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
1369 		CSR_WRITE_1(sc, RTK_IDR0 + i, CLLADDR(ifp->if_sadl)[i]);
1370 	}
1371 
1372 	/* Init the RX buffer pointer register. */
1373 	bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, 0,
1374 	    sc->recv_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1375 	CSR_WRITE_4(sc, RTK_RXADDR, sc->recv_dmamap->dm_segs[0].ds_addr);
1376 
1377 	/* Init TX descriptors. */
1378 	rtk_list_tx_init(sc);
1379 
1380 	/* Init Early TX threshold. */
1381 	sc->sc_txthresh = RTK_TXTH_256;
1382 	/*
1383 	 * Enable transmit and receive.
1384 	 */
1385 	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB|RTK_CMD_RX_ENB);
1386 
1387 	/*
1388 	 * Set the initial TX and RX configuration.
1389 	 */
1390 	CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG);
1391 	CSR_WRITE_4(sc, RTK_RXCFG, RTK_RXCFG_CONFIG);
1392 
1393 	/* Set the individual bit to receive frames for this host only. */
1394 	rxcfg = CSR_READ_4(sc, RTK_RXCFG);
1395 	rxcfg |= RTK_RXCFG_RX_INDIV;
1396 
1397 	/* If we want promiscuous mode, set the allframes bit. */
1398 	if (ifp->if_flags & IFF_PROMISC) {
1399 		rxcfg |= RTK_RXCFG_RX_ALLPHYS;
1400 		CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1401 	} else {
1402 		rxcfg &= ~RTK_RXCFG_RX_ALLPHYS;
1403 		CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1404 	}
1405 
1406 	/*
1407 	 * Set capture broadcast bit to capture broadcast frames.
1408 	 */
1409 	if (ifp->if_flags & IFF_BROADCAST) {
1410 		rxcfg |= RTK_RXCFG_RX_BROAD;
1411 		CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1412 	} else {
1413 		rxcfg &= ~RTK_RXCFG_RX_BROAD;
1414 		CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1415 	}
1416 
1417 	/*
1418 	 * Program the multicast filter, if necessary.
1419 	 */
1420 	rtk_setmulti(sc);
1421 
1422 	/*
1423 	 * Enable interrupts.
1424 	 */
1425 	CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS);
1426 
1427 	/* Clear the missed packet counter. */
1428 	CSR_WRITE_4(sc, RTK_MISSEDPKT, 0);
1429 
1430 	/* Enable receiver and transmitter. */
1431 	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB|RTK_CMD_RX_ENB);
1432 
1433 	CSR_WRITE_1(sc, RTK_CFG1, RTK_CFG1_DRVLOAD|RTK_CFG1_FULLDUPLEX);
1434 
1435 	/*
1436 	 * Set current media.
1437 	 */
1438 	if ((error = ether_mediachange(ifp)) != 0)
1439 		goto out;
1440 
1441 	ifp->if_flags |= IFF_RUNNING;
1442 	ifp->if_flags &= ~IFF_OACTIVE;
1443 
1444 	callout_reset(&sc->rtk_tick_ch, hz, rtk_tick, sc);
1445 
1446  out:
1447 	if (error) {
1448 		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1449 		ifp->if_timer = 0;
1450 		printf("%s: interface not running\n", device_xname(sc->sc_dev));
1451 	}
1452 	return error;
1453 }
1454 
1455 static int
1456 rtk_ioctl(struct ifnet *ifp, u_long command, void *data)
1457 {
1458 	struct rtk_softc *sc = ifp->if_softc;
1459 	int s, error;
1460 
1461 	s = splnet();
1462 	error = ether_ioctl(ifp, command, data);
1463 	if (error == ENETRESET) {
1464 		if (ifp->if_flags & IFF_RUNNING) {
1465 			/*
1466 			 * Multicast list has changed.  Set the
1467 			 * hardware filter accordingly.
1468 			 */
1469 			rtk_setmulti(sc);
1470 		}
1471 		error = 0;
1472 	}
1473 	splx(s);
1474 
1475 	return error;
1476 }
1477 
1478 static void
1479 rtk_watchdog(struct ifnet *ifp)
1480 {
1481 	struct rtk_softc *sc;
1482 
1483 	sc = ifp->if_softc;
1484 
1485 	printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
1486 	ifp->if_oerrors++;
1487 	rtk_txeof(sc);
1488 	rtk_rxeof(sc);
1489 	rtk_init(ifp);
1490 }
1491 
1492 /*
1493  * Stop the adapter and free any mbufs allocated to the
1494  * RX and TX lists.
1495  */
1496 static void
1497 rtk_stop(struct ifnet *ifp, int disable)
1498 {
1499 	struct rtk_softc *sc = ifp->if_softc;
1500 	struct rtk_tx_desc *txd;
1501 
1502 	callout_stop(&sc->rtk_tick_ch);
1503 
1504 	mii_down(&sc->mii);
1505 
1506 	CSR_WRITE_1(sc, RTK_COMMAND, 0x00);
1507 	CSR_WRITE_2(sc, RTK_IMR, 0x0000);
1508 
1509 	/*
1510 	 * Free the TX list buffers.
1511 	 */
1512 	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL) {
1513 		SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);
1514 		bus_dmamap_unload(sc->sc_dmat, txd->txd_dmamap);
1515 		m_freem(txd->txd_mbuf);
1516 		txd->txd_mbuf = NULL;
1517 		CSR_WRITE_4(sc, txd->txd_txaddr, 0);
1518 	}
1519 
1520 	if (disable)
1521 		rtk_disable(sc);
1522 
1523 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1524 	ifp->if_timer = 0;
1525 }
1526 
1527 static void
1528 rtk_tick(void *arg)
1529 {
1530 	struct rtk_softc *sc = arg;
1531 	int s;
1532 
1533 	s = splnet();
1534 	mii_tick(&sc->mii);
1535 	splx(s);
1536 
1537 	callout_reset(&sc->rtk_tick_ch, hz, rtk_tick, sc);
1538 }
1539