xref: /netbsd-src/sys/dev/ic/rtl81x9.c (revision b1c86f5f087524e68db12794ee9c3e3da1ab17a0)
1 /*	$NetBSD: rtl81x9.c,v 1.91 2010/07/27 21:02:00 jakllsch Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  *	FreeBSD Id: if_rl.c,v 1.17 1999/06/19 20:17:37 wpaul Exp
35  */
36 
37 /*
38  * RealTek 8129/8139 PCI NIC driver
39  *
40  * Supports several extremely cheap PCI 10/100 adapters based on
41  * the RealTek chipset. Datasheets can be obtained from
42  * www.realtek.com.tw.
43  *
44  * Written by Bill Paul <wpaul@ctr.columbia.edu>
45  * Electrical Engineering Department
46  * Columbia University, New York City
47  */
48 
49 /*
50  * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is
51  * probably the worst PCI ethernet controller ever made, with the possible
52  * exception of the FEAST chip made by SMC. The 8139 supports bus-master
53  * DMA, but it has a terrible interface that nullifies any performance
54  * gains that bus-master DMA usually offers.
55  *
56  * For transmission, the chip offers a series of four TX descriptor
57  * registers. Each transmit frame must be in a contiguous buffer, aligned
58  * on a longword (32-bit) boundary. This means we almost always have to
59  * do mbuf copies in order to transmit a frame, except in the unlikely
60  * case where a) the packet fits into a single mbuf, and b) the packet
61  * is 32-bit aligned within the mbuf's data area. The presence of only
62  * four descriptor registers means that we can never have more than four
63  * packets queued for transmission at any one time.
64  *
65  * Reception is not much better. The driver has to allocate a single large
66  * buffer area (up to 64K in size) into which the chip will DMA received
67  * frames. Because we don't know where within this region received packets
68  * will begin or end, we have no choice but to copy data from the buffer
69  * area into mbufs in order to pass the packets up to the higher protocol
70  * levels.
71  *
72  * It's impossible given this rotten design to really achieve decent
73  * performance at 100Mbps, unless you happen to have a 400MHz PII or
74  * some equally overmuscled CPU to drive it.
75  *
76  * On the bright side, the 8139 does have a built-in PHY, although
77  * rather than using an MDIO serial interface like most other NICs, the
78  * PHY registers are directly accessible through the 8139's register
79  * space. The 8139 supports autonegotiation, as well as a 64-bit multicast
80  * filter.
81  *
82  * The 8129 chip is an older version of the 8139 that uses an external PHY
83  * chip. The 8129 has a serial MDIO interface for accessing the MII where
84  * the 8139 lets you directly access the on-board PHY registers. We need
85  * to select which interface to use depending on the chip type.
86  */
87 
88 #include <sys/cdefs.h>
89 __KERNEL_RCSID(0, "$NetBSD: rtl81x9.c,v 1.91 2010/07/27 21:02:00 jakllsch Exp $");
90 
91 #include "rnd.h"
92 
93 #include <sys/param.h>
94 #include <sys/systm.h>
95 #include <sys/callout.h>
96 #include <sys/device.h>
97 #include <sys/sockio.h>
98 #include <sys/mbuf.h>
99 #include <sys/malloc.h>
100 #include <sys/kernel.h>
101 #include <sys/socket.h>
102 
103 #include <uvm/uvm_extern.h>
104 
105 #include <net/if.h>
106 #include <net/if_arp.h>
107 #include <net/if_ether.h>
108 #include <net/if_dl.h>
109 #include <net/if_media.h>
110 
111 #include <net/bpf.h>
112 #if NRND > 0
113 #include <sys/rnd.h>
114 #endif
115 
116 #include <sys/bus.h>
117 #include <machine/endian.h>
118 
119 #include <dev/mii/mii.h>
120 #include <dev/mii/miivar.h>
121 
122 #include <dev/ic/rtl81x9reg.h>
123 #include <dev/ic/rtl81x9var.h>
124 
125 static void rtk_reset(struct rtk_softc *);
126 static void rtk_rxeof(struct rtk_softc *);
127 static void rtk_txeof(struct rtk_softc *);
128 static void rtk_start(struct ifnet *);
129 static int rtk_ioctl(struct ifnet *, u_long, void *);
130 static int rtk_init(struct ifnet *);
131 static void rtk_stop(struct ifnet *, int);
132 
133 static void rtk_watchdog(struct ifnet *);
134 
135 static void rtk_eeprom_putbyte(struct rtk_softc *, int, int);
136 static void rtk_mii_sync(struct rtk_softc *);
137 static void rtk_mii_send(struct rtk_softc *, uint32_t, int);
138 static int rtk_mii_readreg(struct rtk_softc *, struct rtk_mii_frame *);
139 static int rtk_mii_writereg(struct rtk_softc *, struct rtk_mii_frame *);
140 
141 static int rtk_phy_readreg(device_t, int, int);
142 static void rtk_phy_writereg(device_t, int, int, int);
143 static void rtk_phy_statchg(device_t);
144 static void rtk_tick(void *);
145 
146 static int rtk_enable(struct rtk_softc *);
147 static void rtk_disable(struct rtk_softc *);
148 
149 static void rtk_list_tx_init(struct rtk_softc *);
150 
151 #define EE_SET(x)					\
152 	CSR_WRITE_1(sc, RTK_EECMD,			\
153 		CSR_READ_1(sc, RTK_EECMD) | (x))
154 
155 #define EE_CLR(x)					\
156 	CSR_WRITE_1(sc, RTK_EECMD,			\
157 		CSR_READ_1(sc, RTK_EECMD) & ~(x))
158 
159 #define EE_DELAY()	DELAY(100)
160 
161 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
162 
163 /*
164  * Send a read command and address to the EEPROM, check for ACK.
165  */
166 static void
167 rtk_eeprom_putbyte(struct rtk_softc *sc, int addr, int addr_len)
168 {
169 	int d, i;
170 
171 	d = (RTK_EECMD_READ << addr_len) | addr;
172 
173 	/*
174 	 * Feed in each bit and stobe the clock.
175 	 */
176 	for (i = RTK_EECMD_LEN + addr_len; i > 0; i--) {
177 		if (d & (1 << (i - 1))) {
178 			EE_SET(RTK_EE_DATAIN);
179 		} else {
180 			EE_CLR(RTK_EE_DATAIN);
181 		}
182 		EE_DELAY();
183 		EE_SET(RTK_EE_CLK);
184 		EE_DELAY();
185 		EE_CLR(RTK_EE_CLK);
186 		EE_DELAY();
187 	}
188 }
189 
190 /*
191  * Read a word of data stored in the EEPROM at address 'addr.'
192  */
193 uint16_t
194 rtk_read_eeprom(struct rtk_softc *sc, int addr, int addr_len)
195 {
196 	uint16_t word;
197 	int i;
198 
199 	/* Enter EEPROM access mode. */
200 	CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_PROGRAM);
201 	EE_DELAY();
202 	EE_SET(RTK_EE_SEL);
203 
204 	/*
205 	 * Send address of word we want to read.
206 	 */
207 	rtk_eeprom_putbyte(sc, addr, addr_len);
208 
209 	/*
210 	 * Start reading bits from EEPROM.
211 	 */
212 	word = 0;
213 	for (i = 16; i > 0; i--) {
214 		EE_SET(RTK_EE_CLK);
215 		EE_DELAY();
216 		if (CSR_READ_1(sc, RTK_EECMD) & RTK_EE_DATAOUT)
217 			word |= 1 << (i - 1);
218 		EE_CLR(RTK_EE_CLK);
219 		EE_DELAY();
220 	}
221 
222 	/* Turn off EEPROM access mode. */
223 	CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_OFF);
224 
225 	return word;
226 }
227 
228 /*
229  * MII access routines are provided for the 8129, which
230  * doesn't have a built-in PHY. For the 8139, we fake things
231  * up by diverting rtk_phy_readreg()/rtk_phy_writereg() to the
232  * direct access PHY registers.
233  */
234 #define MII_SET(x)					\
235 	CSR_WRITE_1(sc, RTK_MII,			\
236 		CSR_READ_1(sc, RTK_MII) | (x))
237 
238 #define MII_CLR(x)					\
239 	CSR_WRITE_1(sc, RTK_MII,			\
240 		CSR_READ_1(sc, RTK_MII) & ~(x))
241 
242 /*
243  * Sync the PHYs by setting data bit and strobing the clock 32 times.
244  */
245 static void
246 rtk_mii_sync(struct rtk_softc *sc)
247 {
248 	int i;
249 
250 	MII_SET(RTK_MII_DIR|RTK_MII_DATAOUT);
251 
252 	for (i = 0; i < 32; i++) {
253 		MII_SET(RTK_MII_CLK);
254 		DELAY(1);
255 		MII_CLR(RTK_MII_CLK);
256 		DELAY(1);
257 	}
258 }
259 
260 /*
261  * Clock a series of bits through the MII.
262  */
263 static void
264 rtk_mii_send(struct rtk_softc *sc, uint32_t bits, int cnt)
265 {
266 	int i;
267 
268 	MII_CLR(RTK_MII_CLK);
269 
270 	for (i = cnt; i > 0; i--) {
271 		if (bits & (1 << (i - 1))) {
272 			MII_SET(RTK_MII_DATAOUT);
273 		} else {
274 			MII_CLR(RTK_MII_DATAOUT);
275 		}
276 		DELAY(1);
277 		MII_CLR(RTK_MII_CLK);
278 		DELAY(1);
279 		MII_SET(RTK_MII_CLK);
280 	}
281 }
282 
/*
 * Read a PHY register through the bit-banged MDIO interface (8129 only;
 * the 8139's built-in PHY is reached directly via chip registers, see
 * rtk_phy_readreg()).  On success the result is left in frame->mii_data
 * and 0 is returned; if the PHY fails to drive the ack bit, 1 is
 * returned and frame->mii_data is 0.
 */
static int
rtk_mii_readreg(struct rtk_softc *sc, struct rtk_mii_frame *frame)
{
	int i, ack, s;

	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = RTK_MII_STARTDELIM;
	frame->mii_opcode = RTK_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_2(sc, RTK_MII, 0);

	/*
	 * Turn on data xmit.
	 */
	MII_SET(RTK_MII_DIR);

	rtk_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	rtk_mii_send(sc, frame->mii_stdelim, 2);
	rtk_mii_send(sc, frame->mii_opcode, 2);
	rtk_mii_send(sc, frame->mii_phyaddr, 5);
	rtk_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	MII_CLR((RTK_MII_CLK|RTK_MII_DATAOUT));
	DELAY(1);
	MII_SET(RTK_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	MII_CLR(RTK_MII_DIR);

	/* Check for ack: the PHY pulls the data line low to acknowledge. */
	MII_CLR(RTK_MII_CLK);
	DELAY(1);
	ack = CSR_READ_2(sc, RTK_MII) & RTK_MII_DATAIN;
	MII_SET(RTK_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			MII_CLR(RTK_MII_CLK);
			DELAY(1);
			MII_SET(RTK_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 16; i > 0; i--) {
		MII_CLR(RTK_MII_CLK);
		DELAY(1);
		if (!ack) {	/* always true here; ack case exits above */
			if (CSR_READ_2(sc, RTK_MII) & RTK_MII_DATAIN)
				frame->mii_data |= 1 << (i - 1);
			DELAY(1);
		}
		MII_SET(RTK_MII_CLK);
		DELAY(1);
	}

 fail:
	/* One final clock cycle terminates the transaction. */
	MII_CLR(RTK_MII_CLK);
	DELAY(1);
	MII_SET(RTK_MII_CLK);
	DELAY(1);

	splx(s);

	if (ack)
		return 1;
	return 0;
}
372 
/*
 * Write to a PHY register through the bit-banged MDIO interface
 * (8129 only; see rtk_phy_writereg() for the 8139 path).  The write
 * is fire-and-forget: no ack is checked and 0 is always returned.
 */
static int
rtk_mii_writereg(struct rtk_softc *sc, struct rtk_mii_frame *frame)
{
	int s;

	s = splnet();
	/*
	 * Set up frame for TX.
	 */
	frame->mii_stdelim = RTK_MII_STARTDELIM;
	frame->mii_opcode = RTK_MII_WRITEOP;
	frame->mii_turnaround = RTK_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	MII_SET(RTK_MII_DIR);

	rtk_mii_sync(sc);

	/* Shift out delimiter, opcode, addresses, turnaround, data. */
	rtk_mii_send(sc, frame->mii_stdelim, 2);
	rtk_mii_send(sc, frame->mii_opcode, 2);
	rtk_mii_send(sc, frame->mii_phyaddr, 5);
	rtk_mii_send(sc, frame->mii_regaddr, 5);
	rtk_mii_send(sc, frame->mii_turnaround, 2);
	rtk_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(RTK_MII_CLK);
	DELAY(1);
	MII_CLR(RTK_MII_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(RTK_MII_DIR);

	splx(s);

	return 0;
}
418 
419 static int
420 rtk_phy_readreg(device_t self, int phy, int reg)
421 {
422 	struct rtk_softc *sc = device_private(self);
423 	struct rtk_mii_frame frame;
424 	int rval;
425 	int rtk8139_reg;
426 
427 	if ((sc->sc_quirk & RTKQ_8129) == 0) {
428 		if (phy != 7)
429 			return 0;
430 
431 		switch (reg) {
432 		case MII_BMCR:
433 			rtk8139_reg = RTK_BMCR;
434 			break;
435 		case MII_BMSR:
436 			rtk8139_reg = RTK_BMSR;
437 			break;
438 		case MII_ANAR:
439 			rtk8139_reg = RTK_ANAR;
440 			break;
441 		case MII_ANER:
442 			rtk8139_reg = RTK_ANER;
443 			break;
444 		case MII_ANLPAR:
445 			rtk8139_reg = RTK_LPAR;
446 			break;
447 		default:
448 #if 0
449 			printf("%s: bad phy register\n", device_xname(self));
450 #endif
451 			return 0;
452 		}
453 		rval = CSR_READ_2(sc, rtk8139_reg);
454 		return rval;
455 	}
456 
457 	memset(&frame, 0, sizeof(frame));
458 
459 	frame.mii_phyaddr = phy;
460 	frame.mii_regaddr = reg;
461 	rtk_mii_readreg(sc, &frame);
462 
463 	return frame.mii_data;
464 }
465 
466 static void
467 rtk_phy_writereg(device_t self, int phy, int reg, int data)
468 {
469 	struct rtk_softc *sc = device_private(self);
470 	struct rtk_mii_frame frame;
471 	int rtk8139_reg;
472 
473 	if ((sc->sc_quirk & RTKQ_8129) == 0) {
474 		if (phy != 7)
475 			return;
476 
477 		switch (reg) {
478 		case MII_BMCR:
479 			rtk8139_reg = RTK_BMCR;
480 			break;
481 		case MII_BMSR:
482 			rtk8139_reg = RTK_BMSR;
483 			break;
484 		case MII_ANAR:
485 			rtk8139_reg = RTK_ANAR;
486 			break;
487 		case MII_ANER:
488 			rtk8139_reg = RTK_ANER;
489 			break;
490 		case MII_ANLPAR:
491 			rtk8139_reg = RTK_LPAR;
492 			break;
493 		default:
494 #if 0
495 			printf("%s: bad phy register\n", device_xname(self));
496 #endif
497 			return;
498 		}
499 		CSR_WRITE_2(sc, rtk8139_reg, data);
500 		return;
501 	}
502 
503 	memset(&frame, 0, sizeof(frame));
504 
505 	frame.mii_phyaddr = phy;
506 	frame.mii_regaddr = reg;
507 	frame.mii_data = data;
508 
509 	rtk_mii_writereg(sc, &frame);
510 }
511 
/*
 * mii_statchg callback.  The 8129/8139 needs no register updates when
 * the negotiated media changes, so this is deliberately a no-op.
 */
static void
rtk_phy_statchg(device_t v)
{

	/* Nothing to do. */
}
518 
519 #define	rtk_calchash(addr) \
520 	(ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
521 
522 /*
523  * Program the 64-bit multicast hash filter.
524  */
525 void
526 rtk_setmulti(struct rtk_softc *sc)
527 {
528 	struct ifnet *ifp;
529 	uint32_t hashes[2] = { 0, 0 };
530 	uint32_t rxfilt;
531 	struct ether_multi *enm;
532 	struct ether_multistep step;
533 	int h, mcnt;
534 
535 	ifp = &sc->ethercom.ec_if;
536 
537 	rxfilt = CSR_READ_4(sc, RTK_RXCFG);
538 
539 	if (ifp->if_flags & IFF_PROMISC) {
540  allmulti:
541 		ifp->if_flags |= IFF_ALLMULTI;
542 		rxfilt |= RTK_RXCFG_RX_MULTI;
543 		CSR_WRITE_4(sc, RTK_RXCFG, rxfilt);
544 		CSR_WRITE_4(sc, RTK_MAR0, 0xFFFFFFFF);
545 		CSR_WRITE_4(sc, RTK_MAR4, 0xFFFFFFFF);
546 		return;
547 	}
548 
549 	/* first, zot all the existing hash bits */
550 	CSR_WRITE_4(sc, RTK_MAR0, 0);
551 	CSR_WRITE_4(sc, RTK_MAR4, 0);
552 
553 	/* now program new ones */
554 	ETHER_FIRST_MULTI(step, &sc->ethercom, enm);
555 	mcnt = 0;
556 	while (enm != NULL) {
557 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
558 		    ETHER_ADDR_LEN) != 0)
559 			goto allmulti;
560 
561 		h = rtk_calchash(enm->enm_addrlo);
562 		if (h < 32)
563 			hashes[0] |= (1 << h);
564 		else
565 			hashes[1] |= (1 << (h - 32));
566 		mcnt++;
567 		ETHER_NEXT_MULTI(step, enm);
568 	}
569 
570 	ifp->if_flags &= ~IFF_ALLMULTI;
571 
572 	if (mcnt)
573 		rxfilt |= RTK_RXCFG_RX_MULTI;
574 	else
575 		rxfilt &= ~RTK_RXCFG_RX_MULTI;
576 
577 	CSR_WRITE_4(sc, RTK_RXCFG, rxfilt);
578 
579 	/*
580 	 * For some unfathomable reason, RealTek decided to reverse
581 	 * the order of the multicast hash registers in the PCI Express
582 	 * parts. This means we have to write the hash pattern in reverse
583 	 * order for those devices.
584 	 */
585 	if ((sc->sc_quirk & RTKQ_PCIE) != 0) {
586 		CSR_WRITE_4(sc, RTK_MAR0, bswap32(hashes[1]));
587 		CSR_WRITE_4(sc, RTK_MAR4, bswap32(hashes[0]));
588 	} else {
589 		CSR_WRITE_4(sc, RTK_MAR0, hashes[0]);
590 		CSR_WRITE_4(sc, RTK_MAR4, hashes[1]);
591 	}
592 }
593 
594 void
595 rtk_reset(struct rtk_softc *sc)
596 {
597 	int i;
598 
599 	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_RESET);
600 
601 	for (i = 0; i < RTK_TIMEOUT; i++) {
602 		DELAY(10);
603 		if ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_RESET) == 0)
604 			break;
605 	}
606 	if (i == RTK_TIMEOUT)
607 		printf("%s: reset never completed!\n",
608 		    device_xname(sc->sc_dev));
609 }
610 
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.  Called by the bus front-end after
 * register and DMA tags have been set up in the softc.
 */
void
rtk_attach(struct rtk_softc *sc)
{
	device_t self = sc->sc_dev;
	struct ifnet *ifp;
	struct rtk_tx_desc *txd;
	uint16_t val;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error;
	int i, addr_len;

	callout_init(&sc->rtk_tick_ch, 0);

	/*
	 * Check EEPROM type 9346 or 9356 by probing the ID word;
	 * this determines the address width used for later reads.
	 */
	if (rtk_read_eeprom(sc, RTK_EE_ID, RTK_EEADDR_LEN1) == 0x8129)
		addr_len = RTK_EEADDR_LEN1;
	else
		addr_len = RTK_EEADDR_LEN0;

	/*
	 * Get station address: three 16-bit EEPROM words,
	 * low byte first.
	 */
	val = rtk_read_eeprom(sc, RTK_EE_EADDR0, addr_len);
	eaddr[0] = val & 0xff;
	eaddr[1] = val >> 8;
	val = rtk_read_eeprom(sc, RTK_EE_EADDR1, addr_len);
	eaddr[2] = val & 0xff;
	eaddr[3] = val >> 8;
	val = rtk_read_eeprom(sc, RTK_EE_EADDR2, addr_len);
	eaddr[4] = val & 0xff;
	eaddr[5] = val >> 8;

	/* Allocate DMA-safe memory for the single large RX ring buffer. */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    RTK_RXBUFLEN + 16, PAGE_SIZE, 0, &sc->sc_dmaseg, 1, &sc->sc_dmanseg,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self,
		    "can't allocate recv buffer, error = %d\n", error);
		goto fail_0;
	}

	/* Map it into kernel virtual address space. */
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg,
	    RTK_RXBUFLEN + 16, (void **)&sc->rtk_rx_buf,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(self,
		    "can't map recv buffer, error = %d\n", error);
		goto fail_1;
	}

	/* Create and load a DMA map covering the whole RX buffer. */
	if ((error = bus_dmamap_create(sc->sc_dmat,
	    RTK_RXBUFLEN + 16, 1, RTK_RXBUFLEN + 16, 0, BUS_DMA_NOWAIT,
	    &sc->recv_dmamap)) != 0) {
		aprint_error_dev(self,
		    "can't create recv buffer DMA map, error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->recv_dmamap,
	    sc->rtk_rx_buf, RTK_RXBUFLEN + 16,
	    NULL, BUS_DMA_READ|BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self,
		    "can't load recv buffer DMA map, error = %d\n", error);
		goto fail_3;
	}

	/*
	 * One DMA map per TX slot; also record each slot's start-address
	 * and status register offsets.
	 */
	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
		txd = &sc->rtk_tx_descs[i];
		if ((error = bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &txd->txd_dmamap)) != 0) {
			aprint_error_dev(self,
			    "can't create snd buffer DMA map, error = %d\n",
			    error);
			goto fail_4;
		}
		txd->txd_txaddr = RTK_TXADDR0 + (i * 4);
		txd->txd_txstat = RTK_TXSTAT0 + (i * 4);
	}
	SIMPLEQ_INIT(&sc->rtk_tx_free);
	SIMPLEQ_INIT(&sc->rtk_tx_dirty);

	/*
	 * From this point forward, the attachment cannot fail. A failure
	 * before this releases all resources that may have been
	 * allocated.
	 */
	sc->sc_flags |= RTK_ATTACHED;

	/* Reset the adapter. */
	rtk_reset(sc);

	aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(eaddr));

	/* Fill in the ifnet callbacks and mark the queue ready. */
	ifp = &sc->ethercom.ec_if;
	ifp->if_softc = sc;
	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = rtk_ioctl;
	ifp->if_start = rtk_start;
	ifp->if_watchdog = rtk_watchdog;
	ifp->if_init = rtk_init;
	ifp->if_stop = rtk_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Do ifmedia setup.
	 */
	sc->mii.mii_ifp = ifp;
	sc->mii.mii_readreg = rtk_phy_readreg;
	sc->mii.mii_writereg = rtk_phy_writereg;
	sc->mii.mii_statchg = rtk_phy_statchg;
	sc->ethercom.ec_mii = &sc->mii;
	ifmedia_init(&sc->mii.mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(self, &sc->mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, 0);

	/* Choose a default media; IFM_NONE if no PHY was found. */
	if (LIST_FIRST(&sc->mii.mii_phys) == NULL) {
		ifmedia_add(&sc->mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->mii.mii_media, IFM_ETHER|IFM_NONE);
	} else {
		ifmedia_set(&sc->mii.mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, eaddr);

#if NRND > 0
	rnd_attach_source(&sc->rnd_source, device_xname(self),
	    RND_TYPE_NET, 0);
#endif

	return;
	/* Failure unwinding: release in the reverse order of acquisition. */
 fail_4:
	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
		txd = &sc->rtk_tx_descs[i];
		if (txd->txd_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmamap);
	}
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->recv_dmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, sc->rtk_rx_buf,
	    RTK_RXBUFLEN + 16);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg);
 fail_0:
	return;
}
769 
770 /*
771  * Initialize the transmit descriptors.
772  */
773 static void
774 rtk_list_tx_init(struct rtk_softc *sc)
775 {
776 	struct rtk_tx_desc *txd;
777 	int i;
778 
779 	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL)
780 		SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);
781 	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_free)) != NULL)
782 		SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_free, txd_q);
783 
784 	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
785 		txd = &sc->rtk_tx_descs[i];
786 		CSR_WRITE_4(sc, txd->txd_txaddr, 0);
787 		SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_free, txd, txd_q);
788 	}
789 }
790 
791 /*
792  * rtk_activate:
793  *     Handle device activation/deactivation requests.
794  */
795 int
796 rtk_activate(device_t self, enum devact act)
797 {
798 	struct rtk_softc *sc = device_private(self);
799 
800 	switch (act) {
801 	case DVACT_DEACTIVATE:
802 		if_deactivate(&sc->ethercom.ec_if);
803 		return 0;
804 	default:
805 		return EOPNOTSUPP;
806 	}
807 }
808 
/*
 * rtk_detach:
 *     Detach a rtk interface.  Tears down everything rtk_attach()
 *     set up, in reverse order, and clears RTK_ATTACHED so a second
 *     call is a no-op.  Always returns 0.
 */
int
rtk_detach(struct rtk_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct rtk_tx_desc *txd;
	int i;

	/*
	 * Succeed now if there isn't any work to do.
	 */
	if ((sc->sc_flags & RTK_ATTACHED) == 0)
		return 0;

	/* Unhook our tick handler. */
	callout_stop(&sc->rtk_tick_ch);

	/* Detach all PHYs. */
	mii_detach(&sc->mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->mii.mii_media, IFM_INST_ANY);

#if NRND > 0
	rnd_detach_source(&sc->rnd_source);
#endif

	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Release the DMA resources allocated in rtk_attach(). */
	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
		txd = &sc->rtk_tx_descs[i];
		if (txd->txd_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmamap);
	}
	bus_dmamap_destroy(sc->sc_dmat, sc->recv_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, sc->rtk_rx_buf,
	    RTK_RXBUFLEN + 16);
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg);

	/* we don't want to run again */
	sc->sc_flags &= ~RTK_ATTACHED;

	return 0;
}
857 
858 /*
859  * rtk_enable:
860  *     Enable the RTL81X9 chip.
861  */
862 int
863 rtk_enable(struct rtk_softc *sc)
864 {
865 
866 	if (RTK_IS_ENABLED(sc) == 0 && sc->sc_enable != NULL) {
867 		if ((*sc->sc_enable)(sc) != 0) {
868 			printf("%s: device enable failed\n",
869 			    device_xname(sc->sc_dev));
870 			return EIO;
871 		}
872 		sc->sc_flags |= RTK_ENABLED;
873 	}
874 	return 0;
875 }
876 
877 /*
878  * rtk_disable:
879  *     Disable the RTL81X9 chip.
880  */
881 void
882 rtk_disable(struct rtk_softc *sc)
883 {
884 
885 	if (RTK_IS_ENABLED(sc) && sc->sc_disable != NULL) {
886 		(*sc->sc_disable)(sc);
887 		sc->sc_flags &= ~RTK_ENABLED;
888 	}
889 }
890 
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 *
 * You know there's something wrong with a PCI bus-master chip design.
 *
 * The receive operation is badly documented in the datasheet, so I'll
 * attempt to document it here. The driver provides a buffer area and
 * places its base address in the RX buffer start address register.
 * The chip then begins copying frames into the RX buffer. Each frame
 * is preceded by a 32-bit RX status word which specifies the length
 * of the frame and certain other status bits. Each frame (starting with
 * the status word) is also 32-bit aligned. The frame length is in the
 * first 16 bits of the status word; the lower 15 bits correspond with
 * the 'rx status register' mentioned in the datasheet.
 *
 * Note: to make the Alpha happy, the frame payload needs to be aligned
 * on a 32-bit boundary. To achieve this, we copy the data to mbuf
 * shifted forward 2 bytes.
 */
static void
rtk_rxeof(struct rtk_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	uint8_t *rxbufpos, *dst;
	u_int total_len, wrap;
	uint32_t rxstat;
	uint16_t cur_rx, new_rx;
	uint16_t limit;
	uint16_t rx_bytes, max_bytes;

	ifp = &sc->ethercom.ec_if;

	/* Current read pointer; the +16 undoes the -16 written below. */
	cur_rx = (CSR_READ_2(sc, RTK_CURRXADDR) + 16) % RTK_RXBUFLEN;

	/* Do not try to read past this point. */
	limit = CSR_READ_2(sc, RTK_CURRXBUF) % RTK_RXBUFLEN;

	if (limit < cur_rx)
		max_bytes = (RTK_RXBUFLEN - cur_rx) + limit;
	else
		max_bytes = limit - cur_rx;
	rx_bytes = 0;

	while ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_EMPTY_RXBUF) == 0) {
		rxbufpos = sc->rtk_rx_buf + cur_rx;
		/* Sync just the 32-bit status word before peeking at it. */
		bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, cur_rx,
		    RTK_RXSTAT_LEN, BUS_DMASYNC_POSTREAD);
		rxstat = le32toh(*(uint32_t *)rxbufpos);
		bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, cur_rx,
		    RTK_RXSTAT_LEN, BUS_DMASYNC_PREREAD);

		/*
		 * Here's a totally undocumented fact for you. When the
		 * RealTek chip is in the process of copying a packet into
		 * RAM for you, the length will be 0xfff0. If you spot a
		 * packet header with this value, you need to stop. The
		 * datasheet makes absolutely no mention of this and
		 * RealTek should be shot for this.
		 */
		total_len = rxstat >> 16;
		if (total_len == RTK_RXSTAT_UNFINISHED)
			break;

		/* Reject errored, runt, or oversized frames. */
		if ((rxstat & RTK_RXSTAT_RXOK) == 0 ||
		    total_len < ETHER_MIN_LEN ||
		    total_len > (MCLBYTES - RTK_ETHER_ALIGN)) {
			ifp->if_ierrors++;

			/*
			 * submitted by:[netbsd-pcmcia:00484]
			 *	Takahiro Kambe <taca@sky.yamashina.kyoto.jp>
			 * obtain from:
			 *     FreeBSD if_rl.c rev 1.24->1.25
			 *
			 */
#if 0
			if (rxstat & (RTK_RXSTAT_BADSYM|RTK_RXSTAT_RUNT|
			    RTK_RXSTAT_GIANT|RTK_RXSTAT_CRCERR|
			    RTK_RXSTAT_ALIGNERR)) {
				CSR_WRITE_2(sc, RTK_COMMAND, RTK_CMD_TX_ENB);
				CSR_WRITE_2(sc, RTK_COMMAND,
				    RTK_CMD_TX_ENB|RTK_CMD_RX_ENB);
				CSR_WRITE_4(sc, RTK_RXCFG, RTK_RXCFG_CONFIG);
				CSR_WRITE_4(sc, RTK_RXADDR,
				    sc->recv_dmamap->dm_segs[0].ds_addr);
				cur_rx = 0;
			}
			break;
#else
			/* On RX error, reinitialize the whole chip. */
			rtk_init(ifp);
			return;
#endif
		}

		/* No errors; receive the packet. */
		rx_bytes += total_len + RTK_RXSTAT_LEN;

		/*
		 * Avoid trying to read more bytes than we know
		 * the chip has prepared for us.
		 */
		if (rx_bytes > max_bytes)
			break;

		/*
		 * Skip the status word, wrapping around to the beginning
		 * of the Rx area, if necessary.
		 */
		cur_rx = (cur_rx + RTK_RXSTAT_LEN) % RTK_RXBUFLEN;
		rxbufpos = sc->rtk_rx_buf + cur_rx;

		/*
		 * Compute the number of bytes at which the packet
		 * will wrap to the beginning of the ring buffer.
		 */
		wrap = RTK_RXBUFLEN - cur_rx;

		/*
		 * Compute where the next pending packet is.
		 */
		if (total_len > wrap)
			new_rx = total_len - wrap;
		else
			new_rx = cur_rx + total_len;
		/* Round up to 32-bit boundary. */
		new_rx = roundup2(new_rx, sizeof(uint32_t)) % RTK_RXBUFLEN;

		/*
		 * The RealTek chip includes the CRC with every
		 * incoming packet; trim it off here.
		 */
		total_len -= ETHER_CRC_LEN;

		/*
		 * Now allocate an mbuf (and possibly a cluster) to hold
		 * the packet. Note we offset the packet 2 bytes so that
		 * data after the Ethernet header will be 4-byte aligned.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			printf("%s: unable to allocate Rx mbuf\n",
			    device_xname(sc->sc_dev));
			ifp->if_ierrors++;
			goto next_packet;
		}
		if (total_len > (MHLEN - RTK_ETHER_ALIGN)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				printf("%s: unable to allocate Rx cluster\n",
				    device_xname(sc->sc_dev));
				ifp->if_ierrors++;
				m_freem(m);
				m = NULL;
				goto next_packet;
			}
		}
		m->m_data += RTK_ETHER_ALIGN;	/* for alignment */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
		dst = mtod(m, void *);

		/*
		 * If the packet wraps, copy up to the wrapping point.
		 */
		if (total_len > wrap) {
			bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
			    cur_rx, wrap, BUS_DMASYNC_POSTREAD);
			memcpy(dst, rxbufpos, wrap);
			bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
			    cur_rx, wrap, BUS_DMASYNC_PREREAD);
			cur_rx = 0;
			rxbufpos = sc->rtk_rx_buf;
			total_len -= wrap;
			dst += wrap;
		}

		/*
		 * ...and now the rest.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
		    cur_rx, total_len, BUS_DMASYNC_POSTREAD);
		memcpy(dst, rxbufpos, total_len);
		bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
		    cur_rx, total_len, BUS_DMASYNC_PREREAD);

 next_packet:
		/* Advance the chip's read pointer (offset by -16). */
		CSR_WRITE_2(sc, RTK_CURRXADDR, (new_rx - 16) % RTK_RXBUFLEN);
		cur_rx = new_rx;

		if (m == NULL)
			continue;

		ifp->if_ipackets++;

		bpf_mtap(ifp, m);
		/* pass it on. */
		(*ifp->if_input)(ifp, m);
	}
}
1092 
/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.  Completed descriptors move from the dirty queue
 * back to the free queue; statistics and the early-TX threshold are
 * updated along the way.
 */
static void
rtk_txeof(struct rtk_softc *sc)
{
	struct ifnet *ifp;
	struct rtk_tx_desc *txd;
	uint32_t txstat;

	ifp = &sc->ethercom.ec_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been uploaded.
	 */
	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL) {
		txstat = CSR_READ_4(sc, txd->txd_txstat);
		/* Stop at the first descriptor the chip hasn't finished. */
		if ((txstat & (RTK_TXSTAT_TX_OK|
		    RTK_TXSTAT_TX_UNDERRUN|RTK_TXSTAT_TXABRT)) == 0)
			break;

		SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);

		/* Release the DMA mapping and the transmitted mbuf. */
		bus_dmamap_sync(sc->sc_dmat, txd->txd_dmamap, 0,
		    txd->txd_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txd->txd_dmamap);
		m_freem(txd->txd_mbuf);
		txd->txd_mbuf = NULL;

		/* Collision count lives in bits 24-27 of the status word. */
		ifp->if_collisions += (txstat & RTK_TXSTAT_COLLCNT) >> 24;

		if (txstat & RTK_TXSTAT_TX_OK)
			ifp->if_opackets++;
		else {
			ifp->if_oerrors++;

			/*
			 * Increase Early TX threshold if underrun occurred.
			 * Increase step 64 bytes.
			 */
			if (txstat & RTK_TXSTAT_TX_UNDERRUN) {
#ifdef DEBUG
				printf("%s: transmit underrun;",
				    device_xname(sc->sc_dev));
#endif
				if (sc->sc_txthresh < RTK_TXTH_MAX) {
					sc->sc_txthresh += 2;
#ifdef DEBUG
					printf(" new threshold: %d bytes",
					    sc->sc_txthresh * 32);
#endif
				}
#ifdef DEBUG
				printf("\n");
#endif
			}
			/* Aborts/out-of-window require a TXCFG rewrite. */
			if (txstat & (RTK_TXSTAT_TXABRT|RTK_TXSTAT_OUTOFWIN))
				CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG);
		}
		SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_free, txd, txd_q);
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	/* Clear the timeout timer if there is no pending packet. */
	if (SIMPLEQ_EMPTY(&sc->rtk_tx_dirty))
		ifp->if_timer = 0;

}
1163 
/*
 * Interrupt handler: ack and dispatch all pending chip events.
 * Returns nonzero if any interrupt from this device was serviced.
 */
int
rtk_intr(void *arg)
{
	struct rtk_softc *sc;
	struct ifnet *ifp;
	uint16_t status;
	int handled;

	sc = arg;
	ifp = &sc->ethercom.ec_if;

	/* Don't touch the chip while it is powered down. */
	if (!device_has_power(sc->sc_dev))
		return 0;

	/* Disable interrupts. */
	CSR_WRITE_2(sc, RTK_IMR, 0x0000);

	handled = 0;
	for (;;) {

		status = CSR_READ_2(sc, RTK_ISR);

		/* Reading all-ones means the device has been removed. */
		if (status == 0xffff)
			break; /* Card is gone... */

		/* Ack the pending events by writing the bits back. */
		if (status)
			CSR_WRITE_2(sc, RTK_ISR, status);

		if ((status & RTK_INTRS) == 0)
			break;

		handled = 1;

		if (status & RTK_ISR_RX_OK)
			rtk_rxeof(sc);

		/* RX errors are drained through the same ring scan. */
		if (status & RTK_ISR_RX_ERR)
			rtk_rxeof(sc);

		if (status & (RTK_ISR_TX_OK|RTK_ISR_TX_ERR))
			rtk_txeof(sc);

		/* PCI/system error: full reset and reinitialization. */
		if (status & RTK_ISR_SYSTEM_ERR) {
			rtk_reset(sc);
			rtk_init(ifp);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS);

	/* Kick the transmitter in case txeof freed up descriptors. */
	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		rtk_start(ifp);

#if NRND > 0
	if (RND_ENABLED(&sc->rnd_source))
		rnd_add_uint32(&sc->rnd_source, status);
#endif

	return handled;
}
1225 
1226 /*
1227  * Main transmit routine.
1228  */
1229 
1230 static void
1231 rtk_start(struct ifnet *ifp)
1232 {
1233 	struct rtk_softc *sc;
1234 	struct rtk_tx_desc *txd;
1235 	struct mbuf *m_head, *m_new;
1236 	int error, len;
1237 
1238 	sc = ifp->if_softc;
1239 
1240 	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_free)) != NULL) {
1241 		IFQ_POLL(&ifp->if_snd, m_head);
1242 		if (m_head == NULL)
1243 			break;
1244 		m_new = NULL;
1245 
1246 		/*
1247 		 * Load the DMA map.  If this fails, the packet didn't
1248 		 * fit in one DMA segment, and we need to copy.  Note,
1249 		 * the packet must also be aligned.
1250 		 * if the packet is too small, copy it too, so we're sure
1251 		 * so have enough room for the pad buffer.
1252 		 */
1253 		if ((mtod(m_head, uintptr_t) & 3) != 0 ||
1254 		    m_head->m_pkthdr.len < ETHER_PAD_LEN ||
1255 		    bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmamap,
1256 			m_head, BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
1257 			MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1258 			if (m_new == NULL) {
1259 				printf("%s: unable to allocate Tx mbuf\n",
1260 				    device_xname(sc->sc_dev));
1261 				break;
1262 			}
1263 			if (m_head->m_pkthdr.len > MHLEN) {
1264 				MCLGET(m_new, M_DONTWAIT);
1265 				if ((m_new->m_flags & M_EXT) == 0) {
1266 					printf("%s: unable to allocate Tx "
1267 					    "cluster\n",
1268 					    device_xname(sc->sc_dev));
1269 					m_freem(m_new);
1270 					break;
1271 				}
1272 			}
1273 			m_copydata(m_head, 0, m_head->m_pkthdr.len,
1274 			    mtod(m_new, void *));
1275 			m_new->m_pkthdr.len = m_new->m_len =
1276 			    m_head->m_pkthdr.len;
1277 			if (m_head->m_pkthdr.len < ETHER_PAD_LEN) {
1278 				memset(
1279 				    mtod(m_new, char *) + m_head->m_pkthdr.len,
1280 				    0, ETHER_PAD_LEN - m_head->m_pkthdr.len);
1281 				m_new->m_pkthdr.len = m_new->m_len =
1282 				    ETHER_PAD_LEN;
1283 			}
1284 			error = bus_dmamap_load_mbuf(sc->sc_dmat,
1285 			    txd->txd_dmamap, m_new,
1286 			    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1287 			if (error) {
1288 				printf("%s: unable to load Tx buffer, "
1289 				    "error = %d\n",
1290 				    device_xname(sc->sc_dev), error);
1291 				break;
1292 			}
1293 		}
1294 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1295 		/*
1296 		 * If there's a BPF listener, bounce a copy of this frame
1297 		 * to him.
1298 		 */
1299 		bpf_mtap(ifp, m_head);
1300 		if (m_new != NULL) {
1301 			m_freem(m_head);
1302 			m_head = m_new;
1303 		}
1304 		txd->txd_mbuf = m_head;
1305 
1306 		SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_free, txd_q);
1307 		SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_dirty, txd, txd_q);
1308 
1309 		/*
1310 		 * Transmit the frame.
1311 		 */
1312 		bus_dmamap_sync(sc->sc_dmat,
1313 		    txd->txd_dmamap, 0, txd->txd_dmamap->dm_mapsize,
1314 		    BUS_DMASYNC_PREWRITE);
1315 
1316 		len = txd->txd_dmamap->dm_segs[0].ds_len;
1317 
1318 		CSR_WRITE_4(sc, txd->txd_txaddr,
1319 		    txd->txd_dmamap->dm_segs[0].ds_addr);
1320 		CSR_WRITE_4(sc, txd->txd_txstat,
1321 		    RTK_TXSTAT_THRESH(sc->sc_txthresh) | len);
1322 
1323 		/*
1324 		 * Set a timeout in case the chip goes out to lunch.
1325 		 */
1326 		ifp->if_timer = 5;
1327 	}
1328 
1329 	/*
1330 	 * We broke out of the loop because all our TX slots are
1331 	 * full. Mark the NIC as busy until it drains some of the
1332 	 * packets from the queue.
1333 	 */
1334 	if (SIMPLEQ_EMPTY(&sc->rtk_tx_free))
1335 		ifp->if_flags |= IFF_OACTIVE;
1336 }
1337 
/*
 * Initialize the hardware and mark the interface running.
 * Called with the init/ioctl path; returns 0 or an errno.
 */
static int
rtk_init(struct ifnet *ifp)
{
	struct rtk_softc *sc = ifp->if_softc;
	int error, i;
	uint32_t rxcfg;

	if ((error = rtk_enable(sc)) != 0)
		goto out;

	/*
	 * Cancel pending I/O.
	 */
	rtk_stop(ifp, 0);

	/* Init our MAC address */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, RTK_IDR0 + i, CLLADDR(ifp->if_sadl)[i]);
	}

	/* Init the RX buffer pointer register. */
	bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, 0,
	    sc->recv_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
	CSR_WRITE_4(sc, RTK_RXADDR, sc->recv_dmamap->dm_segs[0].ds_addr);

	/* Init TX descriptors. */
	rtk_list_tx_init(sc);

	/* Init Early TX threshold. */
	sc->sc_txthresh = RTK_TXTH_256;
	/*
	 * Enable transmit and receive.
	 */
	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB|RTK_CMD_RX_ENB);

	/*
	 * Set the initial TX and RX configuration.
	 */
	CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG);
	CSR_WRITE_4(sc, RTK_RXCFG, RTK_RXCFG_CONFIG);

	/* Set the individual bit to receive frames for this host only. */
	rxcfg = CSR_READ_4(sc, RTK_RXCFG);
	rxcfg |= RTK_RXCFG_RX_INDIV;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		rxcfg |= RTK_RXCFG_RX_ALLPHYS;
		CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
	} else {
		rxcfg &= ~RTK_RXCFG_RX_ALLPHYS;
		CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
	}

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		rxcfg |= RTK_RXCFG_RX_BROAD;
		CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
	} else {
		rxcfg &= ~RTK_RXCFG_RX_BROAD;
		CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
	}

	/*
	 * Program the multicast filter, if necessary.
	 */
	rtk_setmulti(sc);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS);

	/* Start RX/TX process. */
	CSR_WRITE_4(sc, RTK_MISSEDPKT, 0);

	/* Enable receiver and transmitter. */
	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB|RTK_CMD_RX_ENB);

	CSR_WRITE_1(sc, RTK_CFG1, RTK_CFG1_DRVLOAD|RTK_CFG1_FULLDUPLEX);

	/*
	 * Set current media.
	 */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start the one-second MII tick callout. */
	callout_reset(&sc->rtk_tick_ch, hz, rtk_tick, sc);

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", device_xname(sc->sc_dev));
	}
	return error;
}
1440 
1441 static int
1442 rtk_ioctl(struct ifnet *ifp, u_long command, void *data)
1443 {
1444 	struct rtk_softc *sc = ifp->if_softc;
1445 	int s, error;
1446 
1447 	s = splnet();
1448 	error = ether_ioctl(ifp, command, data);
1449 	if (error == ENETRESET) {
1450 		if (ifp->if_flags & IFF_RUNNING) {
1451 			/*
1452 			 * Multicast list has changed.  Set the
1453 			 * hardware filter accordingly.
1454 			 */
1455 			rtk_setmulti(sc);
1456 		}
1457 		error = 0;
1458 	}
1459 	splx(s);
1460 
1461 	return error;
1462 }
1463 
1464 static void
1465 rtk_watchdog(struct ifnet *ifp)
1466 {
1467 	struct rtk_softc *sc;
1468 
1469 	sc = ifp->if_softc;
1470 
1471 	printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
1472 	ifp->if_oerrors++;
1473 	rtk_txeof(sc);
1474 	rtk_rxeof(sc);
1475 	rtk_init(ifp);
1476 }
1477 
1478 /*
1479  * Stop the adapter and free any mbufs allocated to the
1480  * RX and TX lists.
1481  */
static void
rtk_stop(struct ifnet *ifp, int disable)
{
	struct rtk_softc *sc = ifp->if_softc;
	struct rtk_tx_desc *txd;

	/* Stop the periodic MII tick first. */
	callout_stop(&sc->rtk_tick_ch);

	mii_down(&sc->mii);

	/* Halt the chip and mask all interrupts. */
	CSR_WRITE_1(sc, RTK_COMMAND, 0x00);
	CSR_WRITE_2(sc, RTK_IMR, 0x0000);

	/*
	 * Free the TX list buffers.
	 */
	while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);
		bus_dmamap_unload(sc->sc_dmat, txd->txd_dmamap);
		m_freem(txd->txd_mbuf);
		txd->txd_mbuf = NULL;
		/* Clear the descriptor's buffer address in the chip. */
		CSR_WRITE_4(sc, txd->txd_txaddr, 0);
	}

	if (disable)
		rtk_disable(sc);

	/* Mark the interface down and cancel the watchdog. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
1512 
1513 static void
1514 rtk_tick(void *arg)
1515 {
1516 	struct rtk_softc *sc = arg;
1517 	int s;
1518 
1519 	s = splnet();
1520 	mii_tick(&sc->mii);
1521 	splx(s);
1522 
1523 	callout_reset(&sc->rtk_tick_ch, hz, rtk_tick, sc);
1524 }
1525