xref: /netbsd-src/sys/dev/ic/rtl8169.c (revision fd5cb0acea84d278e04e640d37ca2398f894991f)
1 /*	$NetBSD: rtl8169.c,v 1.6 2005/01/13 14:24:24 kanaoka Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998-2003
5  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 /* $FreeBSD: /repoman/r/ncvs/src/sys/dev/re/if_re.c,v 1.20 2004/04/11 20:34:08 ru Exp $ */
37 
38 /*
39  * RealTek 8139C+/8169/8169S/8110S PCI NIC driver
40  *
41  * Written by Bill Paul <wpaul@windriver.com>
42  * Senior Networking Software Engineer
43  * Wind River Systems
44  */
45 
46 /*
47  * This driver is designed to support RealTek's next generation of
48  * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
49  * four devices in this family: the RTL8139C+, the RTL8169, the RTL8169S
50  * and the RTL8110S.
51  *
52  * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
53  * with the older 8139 family, however it also supports a special
54  * C+ mode of operation that provides several new performance enhancing
55  * features. These include:
56  *
57  *	o Descriptor based DMA mechanism. Each descriptor represents
58  *	  a single packet fragment. Data buffers may be aligned on
59  *	  any byte boundary.
60  *
61  *	o 64-bit DMA
62  *
63  *	o TCP/IP checksum offload for both RX and TX
64  *
65  *	o High and normal priority transmit DMA rings
66  *
67  *	o VLAN tag insertion and extraction
68  *
69  *	o TCP large send (segmentation offload)
70  *
71  * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
72  * programming API is fairly straightforward. The RX filtering, EEPROM
73  * access and PHY access is the same as it is on the older 8139 series
74  * chips.
75  *
76  * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
77  * same programming API and feature set as the 8139C+ with the following
78  * differences and additions:
79  *
80  *	o 1000Mbps mode
81  *
82  *	o Jumbo frames
83  *
84  * 	o GMII and TBI ports/registers for interfacing with copper
85  *	  or fiber PHYs
86  *
87  *      o RX and TX DMA rings can have up to 1024 descriptors
88  *        (the 8139C+ allows a maximum of 64)
89  *
90  *	o Slight differences in register layout from the 8139C+
91  *
92  * The TX start and timer interrupt registers are at different locations
93  * on the 8169 than they are on the 8139C+. Also, the status word in the
94  * RX descriptor has a slightly different bit layout. The 8169 does not
95  * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
96  * copper gigE PHY.
97  *
98  * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
99  * (the 'S' stands for 'single-chip'). These devices have the same
100  * programming API as the older 8169, but also have some vendor-specific
101  * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
102  * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
103  *
104  * This driver takes advantage of the RX and TX checksum offload and
105  * VLAN tag insertion/extraction features. It also implements TX
106  * interrupt moderation using the timer interrupt registers, which
107  * significantly reduces TX interrupt load. There is also support
108  * for jumbo frames, however the 8169/8169S/8110S can not transmit
109  * jumbo frames larger than 7.5K, so the max MTU possible with this
110  * driver is 7500 bytes.
111  */
112 
113 #include "bpfilter.h"
114 #include "vlan.h"
115 
116 #include <sys/param.h>
117 #include <sys/endian.h>
118 #include <sys/systm.h>
119 #include <sys/sockio.h>
120 #include <sys/mbuf.h>
121 #include <sys/malloc.h>
122 #include <sys/kernel.h>
123 #include <sys/socket.h>
124 #include <sys/device.h>
125 
126 #include <net/if.h>
127 #include <net/if_arp.h>
128 #include <net/if_dl.h>
129 #include <net/if_ether.h>
130 #include <net/if_media.h>
131 #include <net/if_vlanvar.h>
132 
133 #if NBPFILTER > 0
134 #include <net/bpf.h>
135 #endif
136 
137 #include <machine/bus.h>
138 
139 #include <dev/mii/mii.h>
140 #include <dev/mii/miivar.h>
141 
142 #include <dev/pci/pcireg.h>
143 #include <dev/pci/pcivar.h>
144 #include <dev/pci/pcidevs.h>
145 
146 /*
147  * Default to using PIO access for this driver.
148  */
149 #define RE_USEIOSPACE
150 
151 #include <dev/ic/rtl81x9reg.h>
152 #include <dev/ic/rtl81x9var.h>
153 
154 #include <dev/ic/rtl8169var.h>
155 
156 
157 static int re_encap(struct rtk_softc *, struct mbuf *, int *);
158 
159 static int re_newbuf(struct rtk_softc *, int, struct mbuf *);
160 static int re_rx_list_init(struct rtk_softc *);
161 static int re_tx_list_init(struct rtk_softc *);
162 static void re_rxeof(struct rtk_softc *);
163 static void re_txeof(struct rtk_softc *);
164 static void re_tick(void *);
165 static void re_start(struct ifnet *);
166 static int re_ioctl(struct ifnet *, u_long, caddr_t);
167 static int re_init(struct ifnet *);
168 static void re_stop(struct ifnet *, int);
169 static void re_watchdog(struct ifnet *);
170 
171 static void re_shutdown(void *);
172 static int re_enable(struct rtk_softc *);
173 static void re_disable(struct rtk_softc *);
174 static void re_power(int, void *);
175 
176 static int re_ifmedia_upd(struct ifnet *);
177 static void re_ifmedia_sts(struct ifnet *, struct ifmediareq *);
178 
179 static int re_gmii_readreg(struct device *, int, int);
180 static void re_gmii_writereg(struct device *, int, int, int);
181 
182 static int re_miibus_readreg(struct device *, int, int);
183 static void re_miibus_writereg(struct device *, int, int, int);
184 static void re_miibus_statchg(struct device *);
185 
186 static void re_reset(struct rtk_softc *);
187 
188 
189 #ifdef RE_USEIOSPACE
190 #define RTK_RES			SYS_RES_IOPORT
191 #define RTK_RID			RTK_PCI_LOIO
192 #else
193 #define RTK_RES			SYS_RES_MEMORY
194 #define RTK_RID			RTK_PCI_LOMEM
195 #endif
196 
197 #define EE_SET(x)					\
198 	CSR_WRITE_1(sc, RTK_EECMD,			\
199 		CSR_READ_1(sc, RTK_EECMD) | x)
200 
201 #define EE_CLR(x)					\
202 	CSR_WRITE_1(sc, RTK_EECMD,			\
203 		CSR_READ_1(sc, RTK_EECMD) & ~x)
204 
205 static int
206 re_gmii_readreg(struct device *self, int phy, int reg)
207 {
208 	struct rtk_softc	*sc = (void *)self;
209 	u_int32_t		rval;
210 	int			i;
211 
212 	if (phy != 7)
213 		return 0;
214 
215 	/* Let the rgephy driver read the GMEDIASTAT register */
216 
217 	if (reg == RTK_GMEDIASTAT) {
218 		rval = CSR_READ_1(sc, RTK_GMEDIASTAT);
219 		return rval;
220 	}
221 
222 	CSR_WRITE_4(sc, RTK_PHYAR, reg << 16);
223 	DELAY(1000);
224 
225 	for (i = 0; i < RTK_TIMEOUT; i++) {
226 		rval = CSR_READ_4(sc, RTK_PHYAR);
227 		if (rval & RTK_PHYAR_BUSY)
228 			break;
229 		DELAY(100);
230 	}
231 
232 	if (i == RTK_TIMEOUT) {
233 		aprint_error("%s: PHY read failed\n", sc->sc_dev.dv_xname);
234 		return 0;
235 	}
236 
237 	return rval & RTK_PHYAR_PHYDATA;
238 }
239 
240 static void
241 re_gmii_writereg(struct device *dev, int phy, int reg, int data)
242 {
243 	struct rtk_softc	*sc = (void *)dev;
244 	u_int32_t		rval;
245 	int			i;
246 
247 	CSR_WRITE_4(sc, RTK_PHYAR, (reg << 16) |
248 	    (data & RTK_PHYAR_PHYDATA) | RTK_PHYAR_BUSY);
249 	DELAY(1000);
250 
251 	for (i = 0; i < RTK_TIMEOUT; i++) {
252 		rval = CSR_READ_4(sc, RTK_PHYAR);
253 		if (!(rval & RTK_PHYAR_BUSY))
254 			break;
255 		DELAY(100);
256 	}
257 
258 	if (i == RTK_TIMEOUT) {
259 		aprint_error("%s: PHY write reg %x <- %x failed\n",
260 		    sc->sc_dev.dv_xname, reg, data);
261 		return;
262 	}
263 
264 	return;
265 }
266 
267 static int
268 re_miibus_readreg(struct device *dev, int phy, int reg)
269 {
270 	struct rtk_softc	*sc = (void *)dev;
271 	u_int16_t		rval = 0;
272 	u_int16_t		re8139_reg = 0;
273 	int			s;
274 
275 	s = splnet();
276 
277 	if (sc->rtk_type == RTK_8169) {
278 		rval = re_gmii_readreg(dev, phy, reg);
279 		splx(s);
280 		return rval;
281 	}
282 
283 	/* Pretend the internal PHY is only at address 0 */
284 	if (phy) {
285 		splx(s);
286 		return 0;
287 	}
288 	switch (reg) {
289 	case MII_BMCR:
290 		re8139_reg = RTK_BMCR;
291 		break;
292 	case MII_BMSR:
293 		re8139_reg = RTK_BMSR;
294 		break;
295 	case MII_ANAR:
296 		re8139_reg = RTK_ANAR;
297 		break;
298 	case MII_ANER:
299 		re8139_reg = RTK_ANER;
300 		break;
301 	case MII_ANLPAR:
302 		re8139_reg = RTK_LPAR;
303 		break;
304 	case MII_PHYIDR1:
305 	case MII_PHYIDR2:
306 		splx(s);
307 		return 0;
308 	/*
309 	 * Allow the rlphy driver to read the media status
310 	 * register. If we have a link partner which does not
311 	 * support NWAY, this is the register which will tell
312 	 * us the results of parallel detection.
313 	 */
314 	case RTK_MEDIASTAT:
315 		rval = CSR_READ_1(sc, RTK_MEDIASTAT);
316 		splx(s);
317 		return rval;
318 	default:
319 		aprint_error("%s: bad phy register\n", sc->sc_dev.dv_xname);
320 		splx(s);
321 		return 0;
322 	}
323 	rval = CSR_READ_2(sc, re8139_reg);
324 	splx(s);
325 	return rval;
326 }
327 
328 static void
329 re_miibus_writereg(struct device *dev, int phy, int reg, int data)
330 {
331 	struct rtk_softc	*sc = (void *)dev;
332 	u_int16_t		re8139_reg = 0;
333 	int			s;
334 
335 	s = splnet();
336 
337 	if (sc->rtk_type == RTK_8169) {
338 		re_gmii_writereg(dev, phy, reg, data);
339 		splx(s);
340 		return;
341 	}
342 
343 	/* Pretend the internal PHY is only at address 0 */
344 	if (phy) {
345 		splx(s);
346 		return;
347 	}
348 	switch (reg) {
349 	case MII_BMCR:
350 		re8139_reg = RTK_BMCR;
351 		break;
352 	case MII_BMSR:
353 		re8139_reg = RTK_BMSR;
354 		break;
355 	case MII_ANAR:
356 		re8139_reg = RTK_ANAR;
357 		break;
358 	case MII_ANER:
359 		re8139_reg = RTK_ANER;
360 		break;
361 	case MII_ANLPAR:
362 		re8139_reg = RTK_LPAR;
363 		break;
364 	case MII_PHYIDR1:
365 	case MII_PHYIDR2:
366 		splx(s);
367 		return;
368 		break;
369 	default:
370 		aprint_error("%s: bad phy register\n", sc->sc_dev.dv_xname);
371 		splx(s);
372 		return;
373 	}
374 	CSR_WRITE_2(sc, re8139_reg, data);
375 	splx(s);
376 	return;
377 }
378 
static void
re_miibus_statchg(struct device *dev)
{

	/* Media status changes require no MAC-side action on this chip. */
}
385 
386 static void
387 re_reset(struct rtk_softc *sc)
388 {
389 	register int		i;
390 
391 	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_RESET);
392 
393 	for (i = 0; i < RTK_TIMEOUT; i++) {
394 		DELAY(10);
395 		if (!(CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_RESET))
396 			break;
397 	}
398 	if (i == RTK_TIMEOUT)
399 		aprint_error("%s: reset never completed!\n",
400 		    sc->sc_dev.dv_xname);
401 
402 	/*
403 	 * NB: Realtek-supplied Linux driver does this only for
404 	 * MCFG_METHOD_2, which corresponds to sc->sc_rev == 2.
405 	 */
406 	if (1) /* XXX check softc flag for 8169s version */
407 		CSR_WRITE_1(sc, 0x82, 1);
408 
409 	return;
410 }
411 
412 /*
413  * The following routine is designed to test for a defect on some
414  * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
415  * lines connected to the bus, however for a 32-bit only card, they
416  * should be pulled high. The result of this defect is that the
417  * NIC will not work right if you plug it into a 64-bit slot: DMA
418  * operations will be done with 64-bit transfers, which will fail
419  * because the 64-bit data lines aren't connected.
420  *
421  * There's no way to work around this (short of talking a soldering
422  * iron to the board), however we can detect it. The method we use
423  * here is to put the NIC into digital loopback mode, set the receiver
424  * to promiscuous mode, and then try to send a frame. We then compare
425  * the frame data we sent to what was received. If the data matches,
426  * then the NIC is working correctly, otherwise we know the user has
427  * a defective NIC which has been mistakenly plugged into a 64-bit PCI
428  * slot. In the latter case, there's no way the NIC can work correctly,
429  * so we print out a message on the console and abort the device attach.
430  */
431 
/*
 * Loopback self-test used at attach time to detect defective 32-bit
 * boards plugged into 64-bit slots (see the comment above).  Returns 0
 * on success or an errno (ENOBUFS/EIO) on failure.
 */
int
re_diag(struct rtk_softc *sc)
{
	struct ifnet		*ifp = &sc->ethercom.ec_if;
	struct mbuf		*m0;
	struct ether_header	*eh;
	struct rtk_desc		*cur_rx;
	bus_dmamap_t		dmamap;
	u_int16_t		status;
	u_int32_t		rxstat;
	int			total_len, i, s, error = 0;
	/* Recognizable dummy MAC addresses used to verify the payload. */
	u_int8_t		dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
	u_int8_t		src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };

	/* Allocate a single mbuf */

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return ENOBUFS;

	/*
	 * Initialize the NIC in test mode. This sets the chip up
	 * so that it can send and receive frames, but performs the
	 * following special functions:
	 * - Puts receiver in promiscuous mode
	 * - Enables digital loopback mode
	 * - Leaves interrupts turned off
	 */

	ifp->if_flags |= IFF_PROMISC;
	sc->rtk_testmode = 1;
	/* Init/stop/init cycle settles the chip into loopback mode. */
	re_init(ifp);
	re_stop(ifp, 0);
	DELAY(100000);
	re_init(ifp);

	/* Put some data in the mbuf */

	eh = mtod(m0, struct ether_header *);
	bcopy((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
	bcopy((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
	eh->ether_type = htons(ETHERTYPE_IP);
	m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;

	/*
	 * Queue the packet, start transmission.
	 */

	/* Clear any stale interrupt status before sending. */
	CSR_WRITE_2(sc, RTK_ISR, 0xFFFF);
	s = splnet();
	IF_ENQUEUE(&ifp->if_snd, m0);
	re_start(ifp);
	splx(s);
	/* Ownership of m0 passed to the send queue. */
	m0 = NULL;

	/* Wait for it to propagate through the chip */

	DELAY(100000);
	for (i = 0; i < RTK_TIMEOUT; i++) {
		status = CSR_READ_2(sc, RTK_ISR);
		if ((status & (RTK_ISR_TIMEOUT_EXPIRED | RTK_ISR_RX_OK)) ==
		    (RTK_ISR_TIMEOUT_EXPIRED | RTK_ISR_RX_OK))
			break;
		DELAY(10);
	}
	if (i == RTK_TIMEOUT) {
		aprint_error("%s: diagnostic failed, failed to receive packet "
		    "in loopback mode\n", sc->sc_dev.dv_xname);
		error = EIO;
		goto done;
	}

	/*
	 * The packet should have been dumped into the first
	 * entry in the RX DMA ring. Grab it from there.
	 */

	dmamap = sc->rtk_ldata.rtk_rx_list_map;
	bus_dmamap_sync(sc->sc_dmat,
	    dmamap, 0, dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	dmamap = sc->rtk_ldata.rtk_rx_dmamap[0];
	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat,
	    sc->rtk_ldata.rtk_rx_dmamap[0]);

	/* Take over the received mbuf from the RX ring. */
	m0 = sc->rtk_ldata.rtk_rx_mbuf[0];
	sc->rtk_ldata.rtk_rx_mbuf[0] = NULL;
	eh = mtod(m0, struct ether_header *);

	cur_rx = &sc->rtk_ldata.rtk_rx_list[0];
	total_len = RTK_RXBYTES(cur_rx);
	rxstat = le32toh(cur_rx->rtk_cmdstat);

	if (total_len != ETHER_MIN_LEN) {
		aprint_error("%s: diagnostic failed, received short packet\n",
		    sc->sc_dev.dv_xname);
		error = EIO;
		goto done;
	}

	/* Test that the received packet data matches what we sent. */

	if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
	    bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
	    ntohs(eh->ether_type) != ETHERTYPE_IP) {
		aprint_error("%s: WARNING, DMA FAILURE!\n",
		    sc->sc_dev.dv_xname);
		aprint_error("%s: expected TX data: %s",
		    sc->sc_dev.dv_xname, ether_sprintf(dst));
		aprint_error("/%s/0x%x\n", ether_sprintf(src), ETHERTYPE_IP);
		aprint_error("%s: received RX data: %s",
		    sc->sc_dev.dv_xname,
		    ether_sprintf(eh->ether_dhost));
		aprint_error("/%s/0x%x\n", ether_sprintf(eh->ether_shost),
		    ntohs(eh->ether_type));
		aprint_error("%s: You may have a defective 32-bit NIC plugged "
		    "into a 64-bit PCI slot.\n", sc->sc_dev.dv_xname);
		aprint_error("%s: Please re-install the NIC in a 32-bit slot "
		    "for proper operation.\n", sc->sc_dev.dv_xname);
		aprint_error("%s: Read the re(4) man page for more details.\n",
		    sc->sc_dev.dv_xname);
		error = EIO;
	}

done:
	/* Turn interface off, release resources */

	sc->rtk_testmode = 0;
	ifp->if_flags &= ~IFF_PROMISC;
	re_stop(ifp, 0);
	if (m0 != NULL)
		m_freem(m0);

	return error;
}
568 
569 
570 /*
571  * Attach the interface. Allocate softc structures, do ifmedia
572  * setup and ethernet/BPF attach.
573  */
574 void
575 re_attach(struct rtk_softc *sc)
576 {
577 	u_char			eaddr[ETHER_ADDR_LEN];
578 	u_int16_t		val;
579 	struct ifnet		*ifp;
580 	int			error = 0, i, addr_len;
581 
582 
583 	/* XXX JRS: bus-attach-independent code begins approximately here */
584 
585 	/* Reset the adapter. */
586 	re_reset(sc);
587 
588 	if (sc->rtk_type == RTK_8169) {
589 		uint32_t hwrev;
590 
591 		/* Revision of 8169/8169S/8110s in bits 30..26, 23 */
592 		hwrev = CSR_READ_4(sc, RTK_TXCFG) & 0x7c800000;
593 		if (hwrev == (0x1 << 28)) {
594 			sc->sc_rev = 4;
595 		} else if (hwrev == (0x1 << 26)) {
596 			sc->sc_rev = 3;
597 		} else if (hwrev == (0x1 << 23)) {
598 			sc->sc_rev = 2;
599 		} else
600 			sc->sc_rev = 1;
601 #if defined(DEBUG) || 1
602 		aprint_normal("re_attach: MAC chip hwrev 0x%x softc %d\n",
603 		    hwrev, sc->sc_rev);
604 #endif
605 
606 		/* Set RX length mask */
607 
608 		sc->rtk_rxlenmask = RTK_RDESC_STAT_GFRAGLEN;
609 
610 		/* Force station address autoload from the EEPROM */
611 
612 		CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_AUTOLOAD);
613 		for (i = 0; i < RTK_TIMEOUT; i++) {
614 			if (!(CSR_READ_1(sc, RTK_EECMD) & RTK_EEMODE_AUTOLOAD))
615 				break;
616 			DELAY(100);
617 		}
618 		if (i == RTK_TIMEOUT)
619 			aprint_error("%s: eeprom autoload timed out\n",
620 			    sc->sc_dev.dv_xname);
621 
622 		for (i = 0; i < ETHER_ADDR_LEN; i++)
623 			eaddr[i] = CSR_READ_1(sc, RTK_IDR0 + i);
624 	} else {
625 
626 		/* Set RX length mask */
627 
628 		sc->rtk_rxlenmask = RTK_RDESC_STAT_FRAGLEN;
629 
630 		if (rtk_read_eeprom(sc, RTK_EE_ID, RTK_EEADDR_LEN1) == 0x8129)
631 			addr_len = RTK_EEADDR_LEN1;
632 		else
633 			addr_len = RTK_EEADDR_LEN0;
634 
635 		/*
636 		 * Get station address from the EEPROM.
637 		 */
638 		for (i = 0; i < 3; i++) {
639 			val = rtk_read_eeprom(sc, RTK_EE_EADDR0 + i, addr_len);
640 			eaddr[(i * 2) + 0] = val & 0xff;
641 			eaddr[(i * 2) + 1] = val >> 8;
642 		}
643 	}
644 
645 	aprint_normal("%s: Ethernet address %s\n",
646 	    sc->sc_dev.dv_xname, ether_sprintf(eaddr));
647 
648 
649 	/* Allocate DMA'able memory for the TX ring */
650 	if ((error = bus_dmamem_alloc(sc->sc_dmat, RTK_TX_LIST_SZ,
651 		    RTK_ETHER_ALIGN, 0, &sc->rtk_ldata.rtk_tx_listseg,
652 		    1, &sc->rtk_ldata.rtk_tx_listnseg, BUS_DMA_NOWAIT)) != 0) {
653 		aprint_error("%s: can't allocate tx listseg, error = %d\n",
654 		    sc->sc_dev.dv_xname, error);
655 		goto fail_0;
656 	}
657 
658 	/* Load the map for the TX ring. */
659 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rtk_ldata.rtk_tx_listseg,
660 		    sc->rtk_ldata.rtk_tx_listnseg, RTK_TX_LIST_SZ,
661 		    (caddr_t *)&sc->rtk_ldata.rtk_tx_list,
662 		    BUS_DMA_NOWAIT)) != 0) {
663 		aprint_error("%s: can't map tx list, error = %d\n",
664 		    sc->sc_dev.dv_xname, error);
665 	  	goto fail_1;
666 	}
667 	memset(sc->rtk_ldata.rtk_tx_list, 0, RTK_TX_LIST_SZ);
668 
669 	if ((error = bus_dmamap_create(sc->sc_dmat, RTK_TX_LIST_SZ, 1,
670 		    RTK_TX_LIST_SZ, 0, BUS_DMA_ALLOCNOW,
671 		    &sc->rtk_ldata.rtk_tx_list_map)) != 0) {
672 		aprint_error("%s: can't create tx list map, error = %d\n",
673 		    sc->sc_dev.dv_xname, error);
674 		goto fail_2;
675 	}
676 
677 
678 	if ((error = bus_dmamap_load(sc->sc_dmat,
679 		    sc->rtk_ldata.rtk_tx_list_map, sc->rtk_ldata.rtk_tx_list,
680 		    RTK_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT)) != 0) {
681 		aprint_error("%s: can't load tx list, error = %d\n",
682 		    sc->sc_dev.dv_xname, error);
683 		goto fail_3;
684 	}
685 
686 	/* Create DMA maps for TX buffers */
687 	for (i = 0; i < RTK_TX_DESC_CNT; i++) {
688 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES * RTK_NTXSEGS,
689 		    RTK_NTXSEGS, MCLBYTES, 0, BUS_DMA_ALLOCNOW,
690 		    &sc->rtk_ldata.rtk_tx_dmamap[i]);
691 		if (error) {
692 			aprint_error("%s: can't create DMA map for TX\n",
693 			    sc->sc_dev.dv_xname);
694 			goto fail_4;
695 		}
696 	}
697 
698 	/* Allocate DMA'able memory for the RX ring */
699         if ((error = bus_dmamem_alloc(sc->sc_dmat, RTK_RX_LIST_SZ,
700 		    RTK_RING_ALIGN, 0, &sc->rtk_ldata.rtk_rx_listseg, 1,
701 		    &sc->rtk_ldata.rtk_rx_listnseg, BUS_DMA_NOWAIT)) != 0) {
702 		aprint_error("%s: can't allocate rx listseg, error = %d\n",
703 		    sc->sc_dev.dv_xname, error);
704 		goto fail_4;
705 	}
706 
707 	/* Load the map for the RX ring. */
708 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rtk_ldata.rtk_rx_listseg,
709 		    sc->rtk_ldata.rtk_rx_listnseg, RTK_RX_LIST_SZ,
710 		    (caddr_t *)&sc->rtk_ldata.rtk_rx_list,
711 		    BUS_DMA_NOWAIT)) != 0) {
712 		aprint_error("%s: can't map rx list, error = %d\n",
713 		    sc->sc_dev.dv_xname, error);
714 		goto fail_5;
715 	}
716 	memset(sc->rtk_ldata.rtk_rx_list, 0, RTK_TX_LIST_SZ);
717 
718 	if ((error = bus_dmamap_create(sc->sc_dmat, RTK_RX_LIST_SZ, 1,
719 		    RTK_RX_LIST_SZ, 0, BUS_DMA_ALLOCNOW,
720 		    &sc->rtk_ldata.rtk_rx_list_map)) != 0) {
721 		aprint_error("%s: can't create rx list map, error = %d\n",
722 		    sc->sc_dev.dv_xname, error);
723 		goto fail_6;
724 	}
725 
726 	if ((error = bus_dmamap_load(sc->sc_dmat,
727 		    sc->rtk_ldata.rtk_rx_list_map, sc->rtk_ldata.rtk_rx_list,
728 		    RTK_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT)) != 0) {
729 		aprint_error("%s: can't load rx list, error = %d\n",
730 		    sc->sc_dev.dv_xname, error);
731 		goto fail_7;
732 	}
733 
734 	/* Create DMA maps for RX buffers */
735 	for (i = 0; i < RTK_RX_DESC_CNT; i++) {
736 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
737 		    0, BUS_DMA_ALLOCNOW, &sc->rtk_ldata.rtk_rx_dmamap[i]);
738 		if (error) {
739 			aprint_error("%s: can't create DMA map for RX\n",
740 			    sc->sc_dev.dv_xname);
741 			goto fail_8;
742 		}
743 	}
744 
745 	/*
746 	 * Record interface as attached. From here, we should not fail.
747 	 */
748 	sc->sc_flags |= RTK_ATTACHED;
749 
750 	ifp = &sc->ethercom.ec_if;
751 	ifp->if_softc = sc;
752 	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
753 	ifp->if_mtu = ETHERMTU;
754 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
755 	ifp->if_ioctl = re_ioctl;
756 	sc->ethercom.ec_capabilities |=
757 	    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
758 	ifp->if_start = re_start;
759 	ifp->if_stop = re_stop;
760 	ifp->if_capabilities |=
761 	    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
762 	ifp->if_watchdog = re_watchdog;
763 	ifp->if_init = re_init;
764 	if (sc->rtk_type == RTK_8169)
765 		ifp->if_baudrate = 1000000000;
766 	else
767 		ifp->if_baudrate = 100000000;
768 	ifp->if_snd.ifq_maxlen = RTK_IFQ_MAXLEN;
769 	ifp->if_capenable = ifp->if_capabilities;
770 	IFQ_SET_READY(&ifp->if_snd);
771 
772 	callout_init(&sc->rtk_tick_ch);
773 
774 	/* Do MII setup */
775 	sc->mii.mii_ifp = ifp;
776 	sc->mii.mii_readreg = re_miibus_readreg;
777 	sc->mii.mii_writereg = re_miibus_writereg;
778 	sc->mii.mii_statchg = re_miibus_statchg;
779 	ifmedia_init(&sc->mii.mii_media, IFM_IMASK, re_ifmedia_upd,
780 	    re_ifmedia_sts);
781 	mii_attach(&sc->sc_dev, &sc->mii, 0xffffffff, MII_PHY_ANY,
782 	    MII_OFFSET_ANY, 0);
783 	ifmedia_set(&sc->mii.mii_media, IFM_ETHER | IFM_AUTO);
784 
785 	/*
786 	 * Call MI attach routine.
787 	 */
788 	if_attach(ifp);
789 	ether_ifattach(ifp, eaddr);
790 
791 
792 	/*
793 	 * Make sure the interface is shutdown during reboot.
794 	 */
795 	sc->sc_sdhook = shutdownhook_establish(re_shutdown, sc);
796 	if (sc->sc_sdhook == NULL)
797 		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
798 		    sc->sc_dev.dv_xname);
799 	/*
800 	 * Add a suspend hook to make sure we come back up after a
801 	 * resume.
802 	 */
803 	sc->sc_powerhook = powerhook_establish(re_power, sc);
804 	if (sc->sc_powerhook == NULL)
805 		aprint_error("%s: WARNING: unable to establish power hook\n",
806 		    sc->sc_dev.dv_xname);
807 
808 
809 	return;
810 
811 fail_8:
812 	/* Destroy DMA maps for RX buffers. */
813 	for (i = 0; i < RTK_RX_DESC_CNT; i++)
814 		if (sc->rtk_ldata.rtk_rx_dmamap[i] != NULL)
815 			bus_dmamap_destroy(sc->sc_dmat,
816 			    sc->rtk_ldata.rtk_rx_dmamap[i]);
817 
818 	/* Free DMA'able memory for the RX ring. */
819 	bus_dmamap_unload(sc->sc_dmat, sc->rtk_ldata.rtk_rx_list_map);
820 fail_7:
821 	bus_dmamap_destroy(sc->sc_dmat, sc->rtk_ldata.rtk_rx_list_map);
822 fail_6:
823 	bus_dmamem_unmap(sc->sc_dmat,
824 	    (caddr_t)sc->rtk_ldata.rtk_rx_list, RTK_RX_LIST_SZ);
825 fail_5:
826 	bus_dmamem_free(sc->sc_dmat,
827 	    &sc->rtk_ldata.rtk_rx_listseg, sc->rtk_ldata.rtk_rx_listnseg);
828 
829 fail_4:
830 	/* Destroy DMA maps for TX buffers. */
831 	for (i = 0; i < RTK_TX_DESC_CNT; i++)
832 		if (sc->rtk_ldata.rtk_tx_dmamap[i] != NULL)
833 			bus_dmamap_destroy(sc->sc_dmat,
834 			    sc->rtk_ldata.rtk_tx_dmamap[i]);
835 
836 	/* Free DMA'able memory for the TX ring. */
837 	bus_dmamap_unload(sc->sc_dmat, sc->rtk_ldata.rtk_tx_list_map);
838 fail_3:
839 	bus_dmamap_destroy(sc->sc_dmat, sc->rtk_ldata.rtk_tx_list_map);
840 fail_2:
841 	bus_dmamem_unmap(sc->sc_dmat,
842 	    (caddr_t)sc->rtk_ldata.rtk_tx_list, RTK_TX_LIST_SZ);
843 fail_1:
844 	bus_dmamem_free(sc->sc_dmat,
845 	    &sc->rtk_ldata.rtk_tx_listseg, sc->rtk_ldata.rtk_tx_listnseg);
846 fail_0:
847 	return;
848 }
849 
850 
851 /*
852  * re_activate:
853  *     Handle device activation/deactivation requests.
854  */
855 int
856 re_activate(struct device *self, enum devact act)
857 {
858 	struct rtk_softc *sc = (void *) self;
859 	int s, error = 0;
860 
861 	s = splnet();
862 	switch (act) {
863 	case DVACT_ACTIVATE:
864 		error = EOPNOTSUPP;
865 		break;
866 	case DVACT_DEACTIVATE:
867 		mii_activate(&sc->mii, act, MII_PHY_ANY, MII_OFFSET_ANY);
868 		if_deactivate(&sc->ethercom.ec_if);
869 		break;
870 	}
871 	splx(s);
872 
873 	return error;
874 }
875 
876 /*
877  * re_detach:
878  *     Detach a rtk interface.
879  */
int
re_detach(struct rtk_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int i;

	/*
	 * Succeed now if there isn't any work to do.
	 */
	if ((sc->sc_flags & RTK_ATTACHED) == 0)
		return 0;

	/* Unhook our tick handler. */
	callout_stop(&sc->rtk_tick_ch);

	/* Detach all PHYs. */
	mii_detach(&sc->mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->mii.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);

	/* XXX undo re_allocmem() */

	/*
	 * Tear down DMA resources in reverse order of their creation
	 * in re_attach(): per-buffer maps first, then each ring's
	 * map/mapping/memory.
	 */

	/* Destroy DMA maps for RX buffers. */
	for (i = 0; i < RTK_RX_DESC_CNT; i++)
		if (sc->rtk_ldata.rtk_rx_dmamap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rtk_ldata.rtk_rx_dmamap[i]);

	/* Free DMA'able memory for the RX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->rtk_ldata.rtk_rx_list_map);
	bus_dmamap_destroy(sc->sc_dmat, sc->rtk_ldata.rtk_rx_list_map);
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rtk_ldata.rtk_rx_list, RTK_RX_LIST_SZ);
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rtk_ldata.rtk_rx_listseg, sc->rtk_ldata.rtk_rx_listnseg);

	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < RTK_TX_DESC_CNT; i++)
		if (sc->rtk_ldata.rtk_tx_dmamap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rtk_ldata.rtk_tx_dmamap[i]);

	/* Free DMA'able memory for the TX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->rtk_ldata.rtk_tx_list_map);
	bus_dmamap_destroy(sc->sc_dmat, sc->rtk_ldata.rtk_tx_list_map);
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rtk_ldata.rtk_tx_list, RTK_TX_LIST_SZ);
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rtk_ldata.rtk_tx_listseg, sc->rtk_ldata.rtk_tx_listnseg);


	/* Remove the shutdown and power hooks established at attach. */
	shutdownhook_disestablish(sc->sc_sdhook);
	powerhook_disestablish(sc->sc_powerhook);

	return 0;
}
940 
941 /*
942  * re_enable:
943  *     Enable the RTL81X9 chip.
944  */
945 static int
946 re_enable(struct rtk_softc *sc)
947 {
948 	if (RTK_IS_ENABLED(sc) == 0 && sc->sc_enable != NULL) {
949 		if ((*sc->sc_enable)(sc) != 0) {
950 			aprint_error("%s: device enable failed\n",
951 			    sc->sc_dev.dv_xname);
952 			return EIO;
953 		}
954 		sc->sc_flags |= RTK_ENABLED;
955 	}
956 	return 0;
957 }
958 
959 /*
960  * re_disable:
961  *     Disable the RTL81X9 chip.
962  */
963 static void
964 re_disable(struct rtk_softc *sc)
965 {
966 
967 	if (RTK_IS_ENABLED(sc) && sc->sc_disable != NULL) {
968 		(*sc->sc_disable)(sc);
969 		sc->sc_flags &= ~RTK_ENABLED;
970 	}
971 }
972 
973 /*
974  * re_power:
975  *     Power management (suspend/resume) hook.
976  */
977 void
978 re_power(int why, void *arg)
979 {
980 	struct rtk_softc *sc = (void *) arg;
981 	struct ifnet *ifp = &sc->ethercom.ec_if;
982 	int s;
983 
984 	s = splnet();
985 	switch (why) {
986 	case PWR_SUSPEND:
987 	case PWR_STANDBY:
988 		re_stop(ifp, 0);
989 		if (sc->sc_power != NULL)
990 			(*sc->sc_power)(sc, why);
991 		break;
992 	case PWR_RESUME:
993 		if (ifp->if_flags & IFF_UP) {
994 			if (sc->sc_power != NULL)
995 				(*sc->sc_power)(sc, why);
996 			re_init(ifp);
997 		}
998 		break;
999 	case PWR_SOFTSUSPEND:
1000 	case PWR_SOFTSTANDBY:
1001 	case PWR_SOFTRESUME:
1002 		break;
1003 	}
1004 	splx(s);
1005 }
1006 
1007 
/*
 * Attach an mbuf cluster to RX descriptor 'idx' and hand the
 * descriptor back to the chip.  If 'm' is NULL a fresh cluster mbuf is
 * allocated; otherwise the caller's mbuf is recycled in place.
 * Returns 0 on success, ENOBUFS/ENOMEM on failure.
 */
static int
re_newbuf(struct rtk_softc *sc, int idx, struct mbuf *m)
{
	struct mbuf		*n = NULL;
	bus_dmamap_t		map;
	struct rtk_desc		*d;
	u_int32_t		cmdstat;
	int			error;

	if (m == NULL) {
		MGETHDR(n, M_DONTWAIT, MT_DATA);
		if (n == NULL)
			return ENOBUFS;
		m = n;

		MCLGET(m, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_freem(m);
			return ENOBUFS;
		}
	} else
		/* Recycled mbuf: rewind data pointer to the cluster start. */
		m->m_data = m->m_ext.ext_buf;

	/*
	 * Initialize mbuf length fields and fixup
	 * alignment so that the frame payload is
	 * longword aligned.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, RTK_ETHER_ALIGN);

	map = sc->rtk_ldata.rtk_rx_dmamap[idx];
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);

	if (error)
		goto out;

	d = &sc->rtk_ldata.rtk_rx_list[idx];
	/* Descriptor must be owned by the host before we may rewrite it. */
	if (le32toh(d->rtk_cmdstat) & RTK_RDESC_STAT_OWN)
		goto out;

	/* Fill in buffer address and length, marking a one-fragment frame. */
	cmdstat = map->dm_segs[0].ds_len;
	d->rtk_bufaddr_lo = htole32(RTK_ADDR_LO(map->dm_segs[0].ds_addr));
	d->rtk_bufaddr_hi = htole32(RTK_ADDR_HI(map->dm_segs[0].ds_addr));
	cmdstat |= RTK_TDESC_CMD_SOF;
	if (idx == (RTK_RX_DESC_CNT - 1))
		/* Last slot: end-of-ring bit makes the chip wrap around. */
		cmdstat |= RTK_TDESC_CMD_EOR;
	d->rtk_cmdstat = htole32(cmdstat);

	d->rtk_cmdstat |= htole32(RTK_TDESC_CMD_EOF);


	/* Transfer ownership to the hardware last. */
	sc->rtk_ldata.rtk_rx_list[idx].rtk_cmdstat |=
	    htole32(RTK_RDESC_CMD_OWN);
	sc->rtk_ldata.rtk_rx_mbuf[idx] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->rtk_ldata.rtk_rx_dmamap[idx], 0,
	    sc->rtk_ldata.rtk_rx_dmamap[idx]->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	return 0;
out:
	/* Free the mbuf only if we allocated it here ourselves. */
	if (n != NULL)
		m_freem(n);
	return ENOMEM;
}
1074 
1075 static int
1076 re_tx_list_init(struct rtk_softc *sc)
1077 {
1078 	memset((char *)sc->rtk_ldata.rtk_tx_list, 0, RTK_TX_LIST_SZ);
1079 	memset((char *)&sc->rtk_ldata.rtk_tx_mbuf, 0,
1080 	    (RTK_TX_DESC_CNT * sizeof(struct mbuf *)));
1081 
1082 	bus_dmamap_sync(sc->sc_dmat,
1083 	    sc->rtk_ldata.rtk_tx_list_map, 0,
1084 	    sc->rtk_ldata.rtk_tx_list_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1085 	sc->rtk_ldata.rtk_tx_prodidx = 0;
1086 	sc->rtk_ldata.rtk_tx_considx = 0;
1087 	sc->rtk_ldata.rtk_tx_free = RTK_TX_DESC_CNT;
1088 
1089 	return 0;
1090 }
1091 
1092 static int
1093 re_rx_list_init(struct rtk_softc *sc)
1094 {
1095 	int			i;
1096 
1097 	memset((char *)sc->rtk_ldata.rtk_rx_list, 0, RTK_RX_LIST_SZ);
1098 	memset((char *)&sc->rtk_ldata.rtk_rx_mbuf, 0,
1099 	    (RTK_RX_DESC_CNT * sizeof(struct mbuf *)));
1100 
1101 	for (i = 0; i < RTK_RX_DESC_CNT; i++) {
1102 		if (re_newbuf(sc, i, NULL) == ENOBUFS)
1103 			return ENOBUFS;
1104 	}
1105 
1106 	/* Flush the RX descriptors */
1107 
1108 	bus_dmamap_sync(sc->sc_dmat,
1109 	    sc->rtk_ldata.rtk_rx_list_map,
1110 	    0, sc->rtk_ldata.rtk_rx_list_map->dm_mapsize,
1111 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1112 
1113 	sc->rtk_ldata.rtk_rx_prodidx = 0;
1114 	sc->rtk_head = sc->rtk_tail = NULL;
1115 
1116 	return 0;
1117 }
1118 
1119 /*
1120  * RX handler for C+ and 8169. For the gigE chips, we support
1121  * the reception of jumbo frames that have been fragmented
1122  * across multiple 2K mbuf cluster buffers.
1123  */
1124 static void
1125 re_rxeof(struct rtk_softc *sc)
1126 {
1127 	struct mbuf		*m;
1128 	struct ifnet		*ifp;
1129 	int			i, total_len;
1130 	struct rtk_desc		*cur_rx;
1131 	struct m_tag		*mtag;
1132 	u_int32_t		rxstat, rxvlan;
1133 
1134 	ifp = &sc->ethercom.ec_if;
1135 	i = sc->rtk_ldata.rtk_rx_prodidx;
1136 
1137 	/* Invalidate the descriptor memory */
1138 
1139 	bus_dmamap_sync(sc->sc_dmat,
1140 	    sc->rtk_ldata.rtk_rx_list_map,
1141 	    0, sc->rtk_ldata.rtk_rx_list_map->dm_mapsize,
1142 	    BUS_DMASYNC_POSTREAD);
1143 
1144 	while (!RTK_OWN(&sc->rtk_ldata.rtk_rx_list[i])) {
1145 
1146 		cur_rx = &sc->rtk_ldata.rtk_rx_list[i];
1147 		m = sc->rtk_ldata.rtk_rx_mbuf[i];
1148 		total_len = RTK_RXBYTES(cur_rx);
1149 		rxstat = le32toh(cur_rx->rtk_cmdstat);
1150 		rxvlan = le32toh(cur_rx->rtk_vlanctl);
1151 
1152 		/* Invalidate the RX mbuf and unload its map */
1153 
1154 		bus_dmamap_sync(sc->sc_dmat,
1155 		    sc->rtk_ldata.rtk_rx_dmamap[i],
1156 		    0, sc->rtk_ldata.rtk_rx_dmamap[i]->dm_mapsize,
1157 		    BUS_DMASYNC_POSTWRITE);
1158 		bus_dmamap_unload(sc->sc_dmat,
1159 		    sc->rtk_ldata.rtk_rx_dmamap[i]);
1160 
1161 		if (!(rxstat & RTK_RDESC_STAT_EOF)) {
1162 			m->m_len = MCLBYTES - RTK_ETHER_ALIGN;
1163 			if (sc->rtk_head == NULL)
1164 				sc->rtk_head = sc->rtk_tail = m;
1165 			else {
1166 				m->m_flags &= ~M_PKTHDR;
1167 				sc->rtk_tail->m_next = m;
1168 				sc->rtk_tail = m;
1169 			}
1170 			re_newbuf(sc, i, NULL);
1171 			RTK_DESC_INC(i);
1172 			continue;
1173 		}
1174 
1175 		/*
1176 		 * NOTE: for the 8139C+, the frame length field
1177 		 * is always 12 bits in size, but for the gigE chips,
1178 		 * it is 13 bits (since the max RX frame length is 16K).
1179 		 * Unfortunately, all 32 bits in the status word
1180 		 * were already used, so to make room for the extra
1181 		 * length bit, RealTek took out the 'frame alignment
1182 		 * error' bit and shifted the other status bits
1183 		 * over one slot. The OWN, EOR, FS and LS bits are
1184 		 * still in the same places. We have already extracted
1185 		 * the frame length and checked the OWN bit, so rather
1186 		 * than using an alternate bit mapping, we shift the
1187 		 * status bits one space to the right so we can evaluate
1188 		 * them using the 8169 status as though it was in the
1189 		 * same format as that of the 8139C+.
1190 		 */
1191 		if (sc->rtk_type == RTK_8169)
1192 			rxstat >>= 1;
1193 
1194 		if (rxstat & RTK_RDESC_STAT_RXERRSUM) {
1195 			ifp->if_ierrors++;
1196 			/*
1197 			 * If this is part of a multi-fragment packet,
1198 			 * discard all the pieces.
1199 			 */
1200 			if (sc->rtk_head != NULL) {
1201 				m_freem(sc->rtk_head);
1202 				sc->rtk_head = sc->rtk_tail = NULL;
1203 			}
1204 			re_newbuf(sc, i, m);
1205 			RTK_DESC_INC(i);
1206 			continue;
1207 		}
1208 
1209 		/*
1210 		 * If allocating a replacement mbuf fails,
1211 		 * reload the current one.
1212 		 */
1213 
1214 		if (re_newbuf(sc, i, NULL)) {
1215 			ifp->if_ierrors++;
1216 			if (sc->rtk_head != NULL) {
1217 				m_freem(sc->rtk_head);
1218 				sc->rtk_head = sc->rtk_tail = NULL;
1219 			}
1220 			re_newbuf(sc, i, m);
1221 			RTK_DESC_INC(i);
1222 			continue;
1223 		}
1224 
1225 		RTK_DESC_INC(i);
1226 
1227 		if (sc->rtk_head != NULL) {
1228 			m->m_len = total_len % (MCLBYTES - RTK_ETHER_ALIGN);
1229 			/*
1230 			 * Special case: if there's 4 bytes or less
1231 			 * in this buffer, the mbuf can be discarded:
1232 			 * the last 4 bytes is the CRC, which we don't
1233 			 * care about anyway.
1234 			 */
1235 			if (m->m_len <= ETHER_CRC_LEN) {
1236 				sc->rtk_tail->m_len -=
1237 				    (ETHER_CRC_LEN - m->m_len);
1238 				m_freem(m);
1239 			} else {
1240 				m->m_len -= ETHER_CRC_LEN;
1241 				m->m_flags &= ~M_PKTHDR;
1242 				sc->rtk_tail->m_next = m;
1243 			}
1244 			m = sc->rtk_head;
1245 			sc->rtk_head = sc->rtk_tail = NULL;
1246 			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1247 		} else
1248 			m->m_pkthdr.len = m->m_len =
1249 			    (total_len - ETHER_CRC_LEN);
1250 
1251 		ifp->if_ipackets++;
1252 		m->m_pkthdr.rcvif = ifp;
1253 
1254 		/* Do RX checksumming if enabled */
1255 
1256 		if (ifp->if_capenable & IFCAP_CSUM_IPv4) {
1257 
1258 			/* Check IP header checksum */
1259 			if (rxstat & RTK_RDESC_STAT_PROTOID)
1260 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;;
1261 			if (rxstat & RTK_RDESC_STAT_IPSUMBAD)
1262 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1263 		}
1264 
1265 		/* Check TCP/UDP checksum */
1266 		if (RTK_TCPPKT(rxstat) &&
1267 		    (ifp->if_capenable & IFCAP_CSUM_TCPv4)) {
1268 			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1269 			if (rxstat & RTK_RDESC_STAT_TCPSUMBAD)
1270 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1271 		}
1272 		if (RTK_UDPPKT(rxstat) &&
1273 		    (ifp->if_capenable & IFCAP_CSUM_UDPv4)) {
1274 			m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1275 			if (rxstat & RTK_RDESC_STAT_UDPSUMBAD)
1276 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1277 		}
1278 
1279 		if (rxvlan & RTK_RDESC_VLANCTL_TAG) {
1280 			mtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
1281 			    M_NOWAIT);
1282 			if (mtag == NULL) {
1283 				ifp->if_ierrors++;
1284 				m_freem(m);
1285 				continue;
1286 			}
1287 			*(u_int *)(mtag + 1) =
1288 			    be16toh(rxvlan & RTK_RDESC_VLANCTL_DATA);
1289 			m_tag_prepend(m, mtag);
1290 		}
1291 #if NBPFILTER > 0
1292 		if (ifp->if_bpf)
1293 			bpf_mtap(ifp->if_bpf, m);
1294 #endif
1295 		(*ifp->if_input)(ifp, m);
1296 	}
1297 
1298 	/* Flush the RX DMA ring */
1299 
1300 	bus_dmamap_sync(sc->sc_dmat,
1301 	    sc->rtk_ldata.rtk_rx_list_map,
1302 	    0, sc->rtk_ldata.rtk_rx_list_map->dm_mapsize,
1303 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1304 
1305 	sc->rtk_ldata.rtk_rx_prodidx = i;
1306 
1307 	return;
1308 }
1309 
/*
 * re_txeof:
 *     Reclaim completed TX descriptors, free their mbufs, and update
 *     error/packet counters.  Walks from the consumer index toward
 *     the producer index, stopping at the first descriptor the chip
 *     still owns.
 */
static void
re_txeof(struct rtk_softc *sc)
{
	struct ifnet		*ifp;
	u_int32_t		txstat;
	int			idx;

	ifp = &sc->ethercom.ec_if;
	idx = sc->rtk_ldata.rtk_tx_considx;

	/* Invalidate the TX descriptor list */

	bus_dmamap_sync(sc->sc_dmat,
	    sc->rtk_ldata.rtk_tx_list_map,
	    0, sc->rtk_ldata.rtk_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	while (idx != sc->rtk_ldata.rtk_tx_prodidx) {

		txstat = le32toh(sc->rtk_ldata.rtk_tx_list[idx].rtk_cmdstat);
		/* Chip still owns this descriptor: nothing more to reap. */
		if (txstat & RTK_TDESC_CMD_OWN)
			break;

		/*
		 * We only stash mbufs in the last descriptor
		 * in a fragment chain, which also happens to
		 * be the only place where the TX status bits
		 * are valid.
		 */

		if (txstat & RTK_TDESC_CMD_EOF) {
			m_freem(sc->rtk_ldata.rtk_tx_mbuf[idx]);
			sc->rtk_ldata.rtk_tx_mbuf[idx] = NULL;
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rtk_ldata.rtk_tx_dmamap[idx]);
			if (txstat & (RTK_TDESC_STAT_EXCESSCOL |
			    RTK_TDESC_STAT_COLCNT))
				ifp->if_collisions++;
			if (txstat & RTK_TDESC_STAT_TXERRSUM)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
		}
		sc->rtk_ldata.rtk_tx_free++;
		RTK_DESC_INC(idx);
	}

	/* No changes made to the TX ring, so no flush needed */

	if (idx != sc->rtk_ldata.rtk_tx_considx) {
		sc->rtk_ldata.rtk_tx_considx = idx;
		/* Descriptors were freed: clear OACTIVE and the watchdog. */
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_timer = 0;
	}

	/*
	 * If not all descriptors have been reaped yet,
	 * reload the timer so that we will eventually get another
	 * interrupt that will cause us to re-enter this routine.
	 * This is done in case the transmitter has gone idle.
	 */
	if (sc->rtk_ldata.rtk_tx_free != RTK_TX_DESC_CNT)
		CSR_WRITE_4(sc, RTK_TIMERCNT, 1);

	return;
}
1376 
1377 /*
1378  * Stop all chip I/O so that the kernel's probe routines don't
1379  * get confused by errant DMAs when rebooting.
1380  */
1381 static void
1382 re_shutdown(void *vsc)
1383 
1384 {
1385 	struct rtk_softc	*sc = (struct rtk_softc *)vsc;
1386 
1387 	re_stop(&sc->ethercom.ec_if, 0);
1388 }
1389 
1390 
1391 static void
1392 re_tick(void *xsc)
1393 {
1394 	struct rtk_softc	*sc = xsc;
1395 	int s;
1396 
1397 	/*XXX: just return for 8169S/8110S with rev 2 or newer phy */
1398 	s = splnet();
1399 
1400 	mii_tick(&sc->mii);
1401 	splx(s);
1402 
1403 	callout_reset(&sc->rtk_tick_ch, hz, re_tick, sc);
1404 }
1405 
1406 #ifdef DEVICE_POLLING
1407 static void
1408 re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1409 {
1410 	struct rtk_softc *sc = ifp->if_softc;
1411 
1412 	RTK_LOCK(sc);
1413 	if (!(ifp->if_capenable & IFCAP_POLLING)) {
1414 		ether_poll_deregister(ifp);
1415 		cmd = POLL_DEREGISTER;
1416 	}
1417 	if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
1418 		CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS_CPLUS);
1419 		goto done;
1420 	}
1421 
1422 	sc->rxcycles = count;
1423 	re_rxeof(sc);
1424 	re_txeof(sc);
1425 
1426 	if (ifp->if_snd.ifq_head != NULL)
1427 		(*ifp->if_start)(ifp);
1428 
1429 	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
1430 		u_int16_t       status;
1431 
1432 		status = CSR_READ_2(sc, RTK_ISR);
1433 		if (status == 0xffff)
1434 			goto done;
1435 		if (status)
1436 			CSR_WRITE_2(sc, RTK_ISR, status);
1437 
1438 		/*
1439 		 * XXX check behaviour on receiver stalls.
1440 		 */
1441 
1442 		if (status & RTK_ISR_SYSTEM_ERR) {
1443 			re_reset(sc);
1444 			re_init(sc);
1445 		}
1446 	}
1447 done:
1448 	RTK_UNLOCK(sc);
1449 }
1450 #endif /* DEVICE_POLLING */
1451 
/*
 * re_intr:
 *     Interrupt service routine.  Services all pending interrupt
 *     causes in a loop, then restarts transmission if frames are
 *     queued.  Returns 1 if any interrupt cause was ours, else 0.
 */
int
re_intr(void *arg)
{
	struct rtk_softc	*sc = arg;
	struct ifnet		*ifp;
	u_int16_t		status;
	int			handled = 0;

	ifp = &sc->ethercom.ec_if;

	/* Ignore interrupts while the interface is down. */
	if (!(ifp->if_flags & IFF_UP))
		return 0;

#ifdef DEVICE_POLLING
	if (ifp->if_flags & IFF_POLLING)
		goto done;
	if ((ifp->if_capenable & IFCAP_POLLING) &&
	    ether_poll_register(re_poll, ifp)) { /* ok, disable interrupts */
		CSR_WRITE_2(sc, RTK_IMR, 0x0000);
		re_poll(ifp, 0, 1);
		goto done;
	}
#endif /* DEVICE_POLLING */

	/* Loop until all pending causes have been serviced. */
	for (;;) {

		status = CSR_READ_2(sc, RTK_ISR);
		/* If the card has gone away the read returns 0xffff. */
		if (status == 0xffff)
			break;
		if (status) {
			handled = 1;
			/* Ack the causes we are about to service. */
			CSR_WRITE_2(sc, RTK_ISR, status);
		}

		if ((status & RTK_INTRS_CPLUS) == 0)
			break;

		if (status & RTK_ISR_RX_OK)
			re_rxeof(sc);

		if (status & RTK_ISR_RX_ERR)
			re_rxeof(sc);

		if ((status & RTK_ISR_TIMEOUT_EXPIRED) ||
		    (status & RTK_ISR_TX_ERR) ||
		    (status & RTK_ISR_TX_DESC_UNAVAIL))
			re_txeof(sc);

		/* PCI/system error: reset and reinitialize the chip. */
		if (status & RTK_ISR_SYSTEM_ERR) {
			re_reset(sc);
			re_init(ifp);
		}

		if (status & RTK_ISR_LINKCHG) {
			/* Restart the MII tick from now. */
			callout_stop(&sc->rtk_tick_ch);
			re_tick(sc);
		}
	}

	if (ifp->if_flags & IFF_UP) /* kludge for interrupt during re_init() */
		if (ifp->if_snd.ifq_head != NULL)
			(*ifp->if_start)(ifp);

#ifdef DEVICE_POLLING
done:
#endif

	return handled;
}
1522 
1523 static int
1524 re_encap(struct rtk_softc *sc, struct mbuf *m_head, int *idx)
1525 {
1526 	bus_dmamap_t		map;
1527 	int			error, i, curidx;
1528 	struct m_tag		*mtag;
1529 	struct rtk_desc		*d;
1530 	u_int32_t		cmdstat, rtk_flags;
1531 
1532 	if (sc->rtk_ldata.rtk_tx_free <= 4)
1533 		return EFBIG;
1534 
1535 	/*
1536 	 * Set up checksum offload. Note: checksum offload bits must
1537 	 * appear in all descriptors of a multi-descriptor transmit
1538 	 * attempt. (This is according to testing done with an 8169
1539 	 * chip. I'm not sure if this is a requirement or a bug.)
1540 	 */
1541 
1542 	rtk_flags = 0;
1543 
1544 	if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
1545 		rtk_flags |= RTK_TDESC_CMD_IPCSUM;
1546 	if (m_head->m_pkthdr.csum_flags & M_CSUM_TCPv4)
1547 		rtk_flags |= RTK_TDESC_CMD_TCPCSUM;
1548 	if (m_head->m_pkthdr.csum_flags & M_CSUM_UDPv4)
1549 		rtk_flags |= RTK_TDESC_CMD_UDPCSUM;
1550 
1551 	map = sc->rtk_ldata.rtk_tx_dmamap[*idx];
1552 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map,
1553 	    m_head, BUS_DMA_NOWAIT);
1554 
1555 	if (error) {
1556 		aprint_error("%s: can't map mbuf (error %d)\n",
1557 		    sc->sc_dev.dv_xname, error);
1558 		return ENOBUFS;
1559 	}
1560 
1561 	if (map->dm_nsegs > sc->rtk_ldata.rtk_tx_free - 4)
1562 		return ENOBUFS;
1563 	/*
1564 	 * Map the segment array into descriptors. Note that we set the
1565 	 * start-of-frame and end-of-frame markers for either TX or RX, but
1566 	 * they really only have meaning in the TX case. (In the RX case,
1567 	 * it's the chip that tells us where packets begin and end.)
1568 	 * We also keep track of the end of the ring and set the
1569 	 * end-of-ring bits as needed, and we set the ownership bits
1570 	 * in all except the very first descriptor. (The caller will
1571 	 * set this descriptor later when it start transmission or
1572 	 * reception.)
1573 	 */
1574 	i = 0;
1575 	curidx = *idx;
1576 	while (1) {
1577 		d = &sc->rtk_ldata.rtk_tx_list[curidx];
1578 		if (le32toh(d->rtk_cmdstat) & RTK_RDESC_STAT_OWN)
1579 			return ENOBUFS;
1580 
1581 		cmdstat = map->dm_segs[i].ds_len;
1582 		d->rtk_bufaddr_lo =
1583 		    htole32(RTK_ADDR_LO(map->dm_segs[i].ds_addr));
1584 		d->rtk_bufaddr_hi =
1585 		    htole32(RTK_ADDR_HI(map->dm_segs[i].ds_addr));
1586 		if (i == 0)
1587 			cmdstat |= RTK_TDESC_CMD_SOF;
1588 		else
1589 			cmdstat |= RTK_TDESC_CMD_OWN;
1590 		if (curidx == (RTK_RX_DESC_CNT - 1))
1591 			cmdstat |= RTK_TDESC_CMD_EOR;
1592 		d->rtk_cmdstat = htole32(cmdstat | rtk_flags);
1593 		i++;
1594 		if (i == map->dm_nsegs)
1595 			break;
1596 		RTK_DESC_INC(curidx);
1597 	}
1598 
1599 	d->rtk_cmdstat |= htole32(RTK_TDESC_CMD_EOF);
1600 
1601 	/*
1602 	 * Insure that the map for this transmission
1603 	 * is placed at the array index of the last descriptor
1604 	 * in this chain.
1605 	 */
1606 	sc->rtk_ldata.rtk_tx_dmamap[*idx] =
1607 	    sc->rtk_ldata.rtk_tx_dmamap[curidx];
1608 	sc->rtk_ldata.rtk_tx_dmamap[curidx] = map;
1609 	sc->rtk_ldata.rtk_tx_mbuf[curidx] = m_head;
1610 	sc->rtk_ldata.rtk_tx_free -= map->dm_nsegs;
1611 
1612 	/*
1613 	 * Set up hardware VLAN tagging. Note: vlan tag info must
1614 	 * appear in the first descriptor of a multi-descriptor
1615 	 * transmission attempt.
1616 	 */
1617 
1618 	if (sc->ethercom.ec_nvlans &&
1619 	    (mtag = m_tag_find(m_head, PACKET_TAG_VLAN, NULL)) != NULL)
1620 		sc->rtk_ldata.rtk_tx_list[*idx].rtk_vlanctl =
1621 		    htole32(htons(*(u_int *)(mtag + 1)) |
1622 		    RTK_TDESC_VLANCTL_TAG);
1623 
1624 	/* Transfer ownership of packet to the chip. */
1625 
1626 	sc->rtk_ldata.rtk_tx_list[curidx].rtk_cmdstat |=
1627 	    htole32(RTK_TDESC_CMD_OWN);
1628 	if (*idx != curidx)
1629 		sc->rtk_ldata.rtk_tx_list[*idx].rtk_cmdstat |=
1630 		    htole32(RTK_TDESC_CMD_OWN);
1631 
1632 	RTK_DESC_INC(curidx);
1633 	*idx = curidx;
1634 
1635 	return 0;
1636 }
1637 
1638 /*
1639  * Main transmit routine for C+ and gigE NICs.
1640  */
1641 
1642 static void
1643 re_start(struct ifnet *ifp)
1644 {
1645 	struct rtk_softc	*sc;
1646 	struct mbuf		*m_head = NULL;
1647 	int			idx;
1648 
1649 	sc = ifp->if_softc;
1650 
1651 	idx = sc->rtk_ldata.rtk_tx_prodidx;
1652 	while (sc->rtk_ldata.rtk_tx_mbuf[idx] == NULL) {
1653 		IF_DEQUEUE(&ifp->if_snd, m_head);
1654 		if (m_head == NULL)
1655 			break;
1656 
1657 		if (re_encap(sc, m_head, &idx)) {
1658 			IF_PREPEND(&ifp->if_snd, m_head);
1659 			ifp->if_flags |= IFF_OACTIVE;
1660 			break;
1661 		}
1662 #if NBPFILTER > 0
1663 		/*
1664 		 * If there's a BPF listener, bounce a copy of this frame
1665 		 * to him.
1666 		 */
1667 		if (ifp->if_bpf)
1668 			bpf_mtap(ifp->if_bpf, m_head);
1669 #endif
1670 	}
1671 
1672 	/* Flush the TX descriptors */
1673 
1674 	bus_dmamap_sync(sc->sc_dmat,
1675 	    sc->rtk_ldata.rtk_tx_list_map,
1676 	    0, sc->rtk_ldata.rtk_tx_list_map->dm_mapsize,
1677 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1678 
1679 	sc->rtk_ldata.rtk_tx_prodidx = idx;
1680 
1681 	/*
1682 	 * RealTek put the TX poll request register in a different
1683 	 * location on the 8169 gigE chip. I don't know why.
1684 	 */
1685 
1686 	if (sc->rtk_type == RTK_8169)
1687 		CSR_WRITE_2(sc, RTK_GTXSTART, RTK_TXSTART_START);
1688 	else
1689 		CSR_WRITE_2(sc, RTK_TXSTART, RTK_TXSTART_START);
1690 
1691 	/*
1692 	 * Use the countdown timer for interrupt moderation.
1693 	 * 'TX done' interrupts are disabled. Instead, we reset the
1694 	 * countdown timer, which will begin counting until it hits
1695 	 * the value in the TIMERINT register, and then trigger an
1696 	 * interrupt. Each time we write to the TIMERCNT register,
1697 	 * the timer count is reset to 0.
1698 	 */
1699 	CSR_WRITE_4(sc, RTK_TIMERCNT, 1);
1700 
1701 	/*
1702 	 * Set a timeout in case the chip goes out to lunch.
1703 	 */
1704 	ifp->if_timer = 5;
1705 
1706 	return;
1707 }
1708 
1709 static int
1710 re_init(struct ifnet *ifp)
1711 {
1712 	struct rtk_softc	*sc = ifp->if_softc;
1713 	u_int32_t		rxcfg = 0;
1714 	u_int32_t		reg;
1715 	int error;
1716 
1717 	if ((error = re_enable(sc)) != 0)
1718 		goto out;
1719 
1720 	/*
1721 	 * Cancel pending I/O and free all RX/TX buffers.
1722 	 */
1723 	re_stop(ifp, 0);
1724 
1725 	/*
1726 	 * Enable C+ RX and TX mode, as well as VLAN stripping and
1727 	 * RX checksum offload. We must configure the C+ register
1728 	 * before all others.
1729 	 */
1730 	reg = 0;
1731 
1732 	/*
1733 	 * XXX: Realtek docs say bits 0 and 1 are reserved, for 8169S/8110S.
1734 	 * FreeBSD  drivers set these bits anyway (for 8139C+?).
1735 	 * So far, it works.
1736 	 */
1737 
1738 	/*
1739 	 * XXX: For 8169 and 8196S revs below 2, set bit 14.
1740 	 * For 8169S/8110S rev 2 and above, do not set bit 14.
1741 	 */
1742 	if (sc->rtk_type == RTK_8169 && sc->sc_rev == 1)
1743 		reg |= (0x1 << 14) | RTK_CPLUSCMD_PCI_MRW;;
1744 
1745 	if (1)  {/* not for 8169S ? */
1746 		reg |= RTK_CPLUSCMD_VLANSTRIP |
1747 		    (ifp->if_capenable &
1748 		    (IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4) ?
1749 		    RTK_CPLUSCMD_RXCSUM_ENB : 0);
1750 	}
1751 
1752 	CSR_WRITE_2(sc, RTK_CPLUS_CMD,
1753 	    reg | RTK_CPLUSCMD_RXENB | RTK_CPLUSCMD_TXENB);
1754 
1755 	/* XXX: from Realtek-supplied Linux driver. Wholly undocumented. */
1756 	if (sc->rtk_type == RTK_8169)
1757 		CSR_WRITE_2(sc, RTK_CPLUS_CMD+0x2, 0x0000);
1758 
1759 	DELAY(10000);
1760 
1761 	/*
1762 	 * Init our MAC address.  Even though the chipset
1763 	 * documentation doesn't mention it, we need to enter "Config
1764 	 * register write enable" mode to modify the ID registers.
1765 	 */
1766 	CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_WRITECFG);
1767 	memcpy(&reg, LLADDR(ifp->if_sadl), 4);
1768 	CSR_WRITE_STREAM_4(sc, RTK_IDR0, reg);
1769 	reg = 0;
1770 	memcpy(&reg, LLADDR(ifp->if_sadl) + 4, 4);
1771 	CSR_WRITE_STREAM_4(sc, RTK_IDR4, reg);
1772 	CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_OFF);
1773 
1774 	/*
1775 	 * For C+ mode, initialize the RX descriptors and mbufs.
1776 	 */
1777 	re_rx_list_init(sc);
1778 	re_tx_list_init(sc);
1779 
1780 	/*
1781 	 * Enable transmit and receive.
1782 	 */
1783 	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB);
1784 
1785 	/*
1786 	 * Set the initial TX and RX configuration.
1787 	 */
1788 	if (sc->rtk_testmode) {
1789 		if (sc->rtk_type == RTK_8169)
1790 			CSR_WRITE_4(sc, RTK_TXCFG,
1791 			    RTK_TXCFG_CONFIG | RTK_LOOPTEST_ON);
1792 		else
1793 			CSR_WRITE_4(sc, RTK_TXCFG,
1794 			    RTK_TXCFG_CONFIG | RTK_LOOPTEST_ON_CPLUS);
1795 	} else
1796 		CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG);
1797 	CSR_WRITE_4(sc, RTK_RXCFG, RTK_RXCFG_CONFIG);
1798 
1799 	/* Set the individual bit to receive frames for this host only. */
1800 	rxcfg = CSR_READ_4(sc, RTK_RXCFG);
1801 	rxcfg |= RTK_RXCFG_RX_INDIV;
1802 
1803 	/* If we want promiscuous mode, set the allframes bit. */
1804 	if (ifp->if_flags & IFF_PROMISC) {
1805 		rxcfg |= RTK_RXCFG_RX_ALLPHYS;
1806 		CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1807 	} else {
1808 		rxcfg &= ~RTK_RXCFG_RX_ALLPHYS;
1809 		CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1810 	}
1811 
1812 	/*
1813 	 * Set capture broadcast bit to capture broadcast frames.
1814 	 */
1815 	if (ifp->if_flags & IFF_BROADCAST) {
1816 		rxcfg |= RTK_RXCFG_RX_BROAD;
1817 		CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1818 	} else {
1819 		rxcfg &= ~RTK_RXCFG_RX_BROAD;
1820 		CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1821 	}
1822 
1823 	/*
1824 	 * Program the multicast filter, if necessary.
1825 	 */
1826 	rtk_setmulti(sc);
1827 
1828 #ifdef DEVICE_POLLING
1829 	/*
1830 	 * Disable interrupts if we are polling.
1831 	 */
1832 	if (ifp->if_flags & IFF_POLLING)
1833 		CSR_WRITE_2(sc, RTK_IMR, 0);
1834 	else	/* otherwise ... */
1835 #endif /* DEVICE_POLLING */
1836 	/*
1837 	 * Enable interrupts.
1838 	 */
1839 	if (sc->rtk_testmode)
1840 		CSR_WRITE_2(sc, RTK_IMR, 0);
1841 	else
1842 		CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS_CPLUS);
1843 
1844 	/* Start RX/TX process. */
1845 	CSR_WRITE_4(sc, RTK_MISSEDPKT, 0);
1846 #ifdef notdef
1847 	/* Enable receiver and transmitter. */
1848 	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB);
1849 #endif
1850 	/*
1851 	 * Load the addresses of the RX and TX lists into the chip.
1852 	 */
1853 
1854 	CSR_WRITE_4(sc, RTK_RXLIST_ADDR_HI,
1855 	    RTK_ADDR_HI(sc->rtk_ldata.rtk_rx_listseg.ds_addr));
1856 	CSR_WRITE_4(sc, RTK_RXLIST_ADDR_LO,
1857 	    RTK_ADDR_LO(sc->rtk_ldata.rtk_rx_listseg.ds_addr));
1858 
1859 	CSR_WRITE_4(sc, RTK_TXLIST_ADDR_HI,
1860 	    RTK_ADDR_HI(sc->rtk_ldata.rtk_tx_listseg.ds_addr));
1861 	CSR_WRITE_4(sc, RTK_TXLIST_ADDR_LO,
1862 	    RTK_ADDR_LO(sc->rtk_ldata.rtk_tx_listseg.ds_addr));
1863 
1864 	CSR_WRITE_1(sc, RTK_EARLY_TX_THRESH, 16);
1865 
1866 	/*
1867 	 * Initialize the timer interrupt register so that
1868 	 * a timer interrupt will be generated once the timer
1869 	 * reaches a certain number of ticks. The timer is
1870 	 * reloaded on each transmit. This gives us TX interrupt
1871 	 * moderation, which dramatically improves TX frame rate.
1872 	 */
1873 
1874 	if (sc->rtk_type == RTK_8169)
1875 		CSR_WRITE_4(sc, RTK_TIMERINT_8169, 0x800);
1876 	else
1877 		CSR_WRITE_4(sc, RTK_TIMERINT, 0x400);
1878 
1879 	/*
1880 	 * For 8169 gigE NICs, set the max allowed RX packet
1881 	 * size so we can receive jumbo frames.
1882 	 */
1883 	if (sc->rtk_type == RTK_8169)
1884 		CSR_WRITE_2(sc, RTK_MAXRXPKTLEN, 16383);
1885 
1886 	if (sc->rtk_testmode)
1887 		return 0;
1888 
1889 	mii_mediachg(&sc->mii);
1890 
1891 	CSR_WRITE_1(sc, RTK_CFG1, RTK_CFG1_DRVLOAD | RTK_CFG1_FULLDUPLEX);
1892 
1893 	ifp->if_flags |= IFF_RUNNING;
1894 	ifp->if_flags &= ~IFF_OACTIVE;
1895 
1896 	callout_reset(&sc->rtk_tick_ch, hz, re_tick, sc);
1897 
1898 out:
1899 	if (error) {
1900 		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1901 		ifp->if_timer = 0;
1902 		aprint_error("%s: interface not running\n",
1903 		    sc->sc_dev.dv_xname);
1904 	}
1905 
1906 	return error;
1907 
1908 }
1909 
1910 /*
1911  * Set media options.
1912  */
1913 static int
1914 re_ifmedia_upd(struct ifnet *ifp)
1915 {
1916 	struct rtk_softc	*sc;
1917 
1918 	sc = ifp->if_softc;
1919 
1920 	return mii_mediachg(&sc->mii);
1921 }
1922 
1923 /*
1924  * Report current media status.
1925  */
1926 static void
1927 re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1928 {
1929 	struct rtk_softc	*sc;
1930 
1931 	sc = ifp->if_softc;
1932 
1933 	mii_pollstat(&sc->mii);
1934 	ifmr->ifm_active = sc->mii.mii_media_active;
1935 	ifmr->ifm_status = sc->mii.mii_media_status;
1936 
1937 	return;
1938 }
1939 
1940 static int
1941 re_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1942 {
1943 	struct rtk_softc	*sc = ifp->if_softc;
1944 	struct ifreq		*ifr = (struct ifreq *) data;
1945 	int			s, error = 0;
1946 
1947 	s = splnet();
1948 
1949 	switch (command) {
1950 	case SIOCSIFMTU:
1951 		if (ifr->ifr_mtu > RTK_JUMBO_MTU)
1952 			error = EINVAL;
1953 		ifp->if_mtu = ifr->ifr_mtu;
1954 		break;
1955 	case SIOCGIFMEDIA:
1956 	case SIOCSIFMEDIA:
1957 		error = ifmedia_ioctl(ifp, ifr, &sc->mii.mii_media, command);
1958 		break;
1959 	default:
1960 		error = ether_ioctl(ifp, command, data);
1961 		if (error == ENETRESET) {
1962 			if (ifp->if_flags & IFF_RUNNING)
1963 				rtk_setmulti(sc);
1964 			error = 0;
1965 		}
1966 		break;
1967 	}
1968 
1969 	splx(s);
1970 
1971 	return error;
1972 }
1973 
1974 static void
1975 re_watchdog(struct ifnet *ifp)
1976 {
1977 	struct rtk_softc	*sc;
1978 	int			s;
1979 
1980 	sc = ifp->if_softc;
1981 	s = splnet();
1982 	aprint_error("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1983 	ifp->if_oerrors++;
1984 
1985 	re_txeof(sc);
1986 	re_rxeof(sc);
1987 
1988 	re_init(ifp);
1989 
1990 	splx(s);
1991 }
1992 
1993 /*
1994  * Stop the adapter and free any mbufs allocated to the
1995  * RX and TX lists.
1996  */
1997 static void
1998 re_stop(struct ifnet *ifp, int disable)
1999 {
2000 	register int		i;
2001 	struct rtk_softc *sc = ifp->if_softc;
2002 
2003 	callout_stop(&sc->rtk_tick_ch);
2004 
2005 #ifdef DEVICE_POLLING
2006 	ether_poll_deregister(ifp);
2007 #endif /* DEVICE_POLLING */
2008 
2009 	mii_down(&sc->mii);
2010 
2011 	CSR_WRITE_1(sc, RTK_COMMAND, 0x00);
2012 	CSR_WRITE_2(sc, RTK_IMR, 0x0000);
2013 
2014 	if (sc->rtk_head != NULL) {
2015 		m_freem(sc->rtk_head);
2016 		sc->rtk_head = sc->rtk_tail = NULL;
2017 	}
2018 
2019 	/* Free the TX list buffers. */
2020 	for (i = 0; i < RTK_TX_DESC_CNT; i++) {
2021 		if (sc->rtk_ldata.rtk_tx_mbuf[i] != NULL) {
2022 			bus_dmamap_unload(sc->sc_dmat,
2023 			    sc->rtk_ldata.rtk_tx_dmamap[i]);
2024 			m_freem(sc->rtk_ldata.rtk_tx_mbuf[i]);
2025 			sc->rtk_ldata.rtk_tx_mbuf[i] = NULL;
2026 		}
2027 	}
2028 
2029 	/* Free the RX list buffers. */
2030 	for (i = 0; i < RTK_RX_DESC_CNT; i++) {
2031 		if (sc->rtk_ldata.rtk_rx_mbuf[i] != NULL) {
2032 			bus_dmamap_unload(sc->sc_dmat,
2033 			    sc->rtk_ldata.rtk_rx_dmamap[i]);
2034 			m_freem(sc->rtk_ldata.rtk_rx_mbuf[i]);
2035 			sc->rtk_ldata.rtk_rx_mbuf[i] = NULL;
2036 		}
2037 	}
2038 
2039 	if (disable)
2040 		re_disable(sc);
2041 
2042 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2043 	ifp->if_timer = 0;
2044 
2045 	return;
2046 }
2047