xref: /netbsd-src/sys/dev/ic/rtl8169.c (revision ce2c90c7c172d95d2402a5b3d96d8f8e6d138a21)
1 /*	$NetBSD: rtl8169.c,v 1.29 2006/10/13 11:06:15 yamt Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998-2003
5  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 /* $FreeBSD: /repoman/r/ncvs/src/sys/dev/re/if_re.c,v 1.20 2004/04/11 20:34:08 ru Exp $ */
37 
38 /*
39  * RealTek 8139C+/8169/8169S/8110S PCI NIC driver
40  *
41  * Written by Bill Paul <wpaul@windriver.com>
42  * Senior Networking Software Engineer
43  * Wind River Systems
44  */
45 
46 /*
47  * This driver is designed to support RealTek's next generation of
48  * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
49  * four devices in this family: the RTL8139C+, the RTL8169, the RTL8169S
50  * and the RTL8110S.
51  *
52  * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
53  * with the older 8139 family, however it also supports a special
54  * C+ mode of operation that provides several new performance enhancing
55  * features. These include:
56  *
57  *	o Descriptor based DMA mechanism. Each descriptor represents
58  *	  a single packet fragment. Data buffers may be aligned on
59  *	  any byte boundary.
60  *
61  *	o 64-bit DMA
62  *
63  *	o TCP/IP checksum offload for both RX and TX
64  *
65  *	o High and normal priority transmit DMA rings
66  *
67  *	o VLAN tag insertion and extraction
68  *
69  *	o TCP large send (segmentation offload)
70  *
71  * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
72  * programming API is fairly straightforward. The RX filtering, EEPROM
73  * access and PHY access is the same as it is on the older 8139 series
74  * chips.
75  *
76  * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
77  * same programming API and feature set as the 8139C+ with the following
78  * differences and additions:
79  *
80  *	o 1000Mbps mode
81  *
82  *	o Jumbo frames
83  *
84  * 	o GMII and TBI ports/registers for interfacing with copper
85  *	  or fiber PHYs
86  *
87  *      o RX and TX DMA rings can have up to 1024 descriptors
88  *        (the 8139C+ allows a maximum of 64)
89  *
90  *	o Slight differences in register layout from the 8139C+
91  *
92  * The TX start and timer interrupt registers are at different locations
93  * on the 8169 than they are on the 8139C+. Also, the status word in the
94  * RX descriptor has a slightly different bit layout. The 8169 does not
95  * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
96  * copper gigE PHY.
97  *
98  * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
99  * (the 'S' stands for 'single-chip'). These devices have the same
100  * programming API as the older 8169, but also have some vendor-specific
101  * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
102  * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
103  *
104  * This driver takes advantage of the RX and TX checksum offload and
105  * VLAN tag insertion/extraction features. It also implements TX
106  * interrupt moderation using the timer interrupt registers, which
107  * significantly reduces TX interrupt load. There is also support
108  * for jumbo frames, however the 8169/8169S/8110S can not transmit
109  * jumbo frames larger than 7.5K, so the max MTU possible with this
110  * driver is 7500 bytes.
111  */
112 
113 #include "bpfilter.h"
114 #include "vlan.h"
115 
116 #include <sys/param.h>
117 #include <sys/endian.h>
118 #include <sys/systm.h>
119 #include <sys/sockio.h>
120 #include <sys/mbuf.h>
121 #include <sys/malloc.h>
122 #include <sys/kernel.h>
123 #include <sys/socket.h>
124 #include <sys/device.h>
125 
126 #include <net/if.h>
127 #include <net/if_arp.h>
128 #include <net/if_dl.h>
129 #include <net/if_ether.h>
130 #include <net/if_media.h>
131 #include <net/if_vlanvar.h>
132 
133 #include <netinet/in_systm.h>	/* XXX for IP_MAXPACKET */
134 #include <netinet/in.h>		/* XXX for IP_MAXPACKET */
135 #include <netinet/ip.h>		/* XXX for IP_MAXPACKET */
136 
137 #if NBPFILTER > 0
138 #include <net/bpf.h>
139 #endif
140 
141 #include <machine/bus.h>
142 
143 #include <dev/mii/mii.h>
144 #include <dev/mii/miivar.h>
145 
146 #include <dev/pci/pcireg.h>
147 #include <dev/pci/pcivar.h>
148 #include <dev/pci/pcidevs.h>
149 
150 #include <dev/ic/rtl81x9reg.h>
151 #include <dev/ic/rtl81x9var.h>
152 
153 #include <dev/ic/rtl8169var.h>
154 
155 
156 static int re_encap(struct rtk_softc *, struct mbuf *, int *);
157 
158 static int re_newbuf(struct rtk_softc *, int, struct mbuf *);
159 static int re_rx_list_init(struct rtk_softc *);
160 static int re_tx_list_init(struct rtk_softc *);
161 static void re_rxeof(struct rtk_softc *);
162 static void re_txeof(struct rtk_softc *);
163 static void re_tick(void *);
164 static void re_start(struct ifnet *);
165 static int re_ioctl(struct ifnet *, u_long, caddr_t);
166 static int re_init(struct ifnet *);
167 static void re_stop(struct ifnet *, int);
168 static void re_watchdog(struct ifnet *);
169 
170 static void re_shutdown(void *);
171 static int re_enable(struct rtk_softc *);
172 static void re_disable(struct rtk_softc *);
173 static void re_power(int, void *);
174 
175 static int re_ifmedia_upd(struct ifnet *);
176 static void re_ifmedia_sts(struct ifnet *, struct ifmediareq *);
177 
178 static int re_gmii_readreg(struct device *, int, int);
179 static void re_gmii_writereg(struct device *, int, int, int);
180 
181 static int re_miibus_readreg(struct device *, int, int);
182 static void re_miibus_writereg(struct device *, int, int, int);
183 static void re_miibus_statchg(struct device *);
184 
185 static void re_reset(struct rtk_softc *);
186 
/*
 * Read a PHY register on the 8169 through the GMII register-access
 * window (RTK_PHYAR).  Returns the 16-bit register value, or 0 on
 * timeout or when the PHY address is not the built-in PHY.
 */
static int
re_gmii_readreg(struct device *self, int phy, int reg)
{
	struct rtk_softc	*sc = (void *)self;
	u_int32_t		rval;
	int			i;

	/* Only the built-in gigE PHY at address 7 is serviced here. */
	if (phy != 7)
		return 0;

	/* Let the rgephy driver read the GMEDIASTAT register */

	if (reg == RTK_GMEDIASTAT) {
		rval = CSR_READ_1(sc, RTK_GMEDIASTAT);
		return rval;
	}

	/* Start the read cycle: register number in the upper half. */
	CSR_WRITE_4(sc, RTK_PHYAR, reg << 16);
	DELAY(1000);

	/*
	 * Poll for completion.  For reads, the driver treats
	 * RTK_PHYAR_BUSY becoming set as "data ready" (note this is
	 * the opposite sense from the write path below).
	 */
	for (i = 0; i < RTK_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RTK_PHYAR);
		if (rval & RTK_PHYAR_BUSY)
			break;
		DELAY(100);
	}

	if (i == RTK_TIMEOUT) {
		aprint_error("%s: PHY read failed\n", sc->sc_dev.dv_xname);
		return 0;
	}

	/* Data lives in the low 16 bits of PHYAR. */
	return rval & RTK_PHYAR_PHYDATA;
}
221 
222 static void
223 re_gmii_writereg(struct device *dev, int phy __unused, int reg, int data)
224 {
225 	struct rtk_softc	*sc = (void *)dev;
226 	u_int32_t		rval;
227 	int			i;
228 
229 	CSR_WRITE_4(sc, RTK_PHYAR, (reg << 16) |
230 	    (data & RTK_PHYAR_PHYDATA) | RTK_PHYAR_BUSY);
231 	DELAY(1000);
232 
233 	for (i = 0; i < RTK_TIMEOUT; i++) {
234 		rval = CSR_READ_4(sc, RTK_PHYAR);
235 		if (!(rval & RTK_PHYAR_BUSY))
236 			break;
237 		DELAY(100);
238 	}
239 
240 	if (i == RTK_TIMEOUT) {
241 		aprint_error("%s: PHY write reg %x <- %x failed\n",
242 		    sc->sc_dev.dv_xname, reg, data);
243 		return;
244 	}
245 
246 	return;
247 }
248 
249 static int
250 re_miibus_readreg(struct device *dev, int phy, int reg)
251 {
252 	struct rtk_softc	*sc = (void *)dev;
253 	u_int16_t		rval = 0;
254 	u_int16_t		re8139_reg = 0;
255 	int			s;
256 
257 	s = splnet();
258 
259 	if (sc->rtk_type == RTK_8169) {
260 		rval = re_gmii_readreg(dev, phy, reg);
261 		splx(s);
262 		return rval;
263 	}
264 
265 	/* Pretend the internal PHY is only at address 0 */
266 	if (phy) {
267 		splx(s);
268 		return 0;
269 	}
270 	switch (reg) {
271 	case MII_BMCR:
272 		re8139_reg = RTK_BMCR;
273 		break;
274 	case MII_BMSR:
275 		re8139_reg = RTK_BMSR;
276 		break;
277 	case MII_ANAR:
278 		re8139_reg = RTK_ANAR;
279 		break;
280 	case MII_ANER:
281 		re8139_reg = RTK_ANER;
282 		break;
283 	case MII_ANLPAR:
284 		re8139_reg = RTK_LPAR;
285 		break;
286 	case MII_PHYIDR1:
287 	case MII_PHYIDR2:
288 		splx(s);
289 		return 0;
290 	/*
291 	 * Allow the rlphy driver to read the media status
292 	 * register. If we have a link partner which does not
293 	 * support NWAY, this is the register which will tell
294 	 * us the results of parallel detection.
295 	 */
296 	case RTK_MEDIASTAT:
297 		rval = CSR_READ_1(sc, RTK_MEDIASTAT);
298 		splx(s);
299 		return rval;
300 	default:
301 		aprint_error("%s: bad phy register\n", sc->sc_dev.dv_xname);
302 		splx(s);
303 		return 0;
304 	}
305 	rval = CSR_READ_2(sc, re8139_reg);
306 	splx(s);
307 	return rval;
308 }
309 
310 static void
311 re_miibus_writereg(struct device *dev, int phy, int reg, int data)
312 {
313 	struct rtk_softc	*sc = (void *)dev;
314 	u_int16_t		re8139_reg = 0;
315 	int			s;
316 
317 	s = splnet();
318 
319 	if (sc->rtk_type == RTK_8169) {
320 		re_gmii_writereg(dev, phy, reg, data);
321 		splx(s);
322 		return;
323 	}
324 
325 	/* Pretend the internal PHY is only at address 0 */
326 	if (phy) {
327 		splx(s);
328 		return;
329 	}
330 	switch (reg) {
331 	case MII_BMCR:
332 		re8139_reg = RTK_BMCR;
333 		break;
334 	case MII_BMSR:
335 		re8139_reg = RTK_BMSR;
336 		break;
337 	case MII_ANAR:
338 		re8139_reg = RTK_ANAR;
339 		break;
340 	case MII_ANER:
341 		re8139_reg = RTK_ANER;
342 		break;
343 	case MII_ANLPAR:
344 		re8139_reg = RTK_LPAR;
345 		break;
346 	case MII_PHYIDR1:
347 	case MII_PHYIDR2:
348 		splx(s);
349 		return;
350 		break;
351 	default:
352 		aprint_error("%s: bad phy register\n", sc->sc_dev.dv_xname);
353 		splx(s);
354 		return;
355 	}
356 	CSR_WRITE_2(sc, re8139_reg, data);
357 	splx(s);
358 	return;
359 }
360 
361 static void
362 re_miibus_statchg(struct device *dev __unused)
363 {
364 
365 	return;
366 }
367 
368 static void
369 re_reset(struct rtk_softc *sc)
370 {
371 	register int		i;
372 
373 	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_RESET);
374 
375 	for (i = 0; i < RTK_TIMEOUT; i++) {
376 		DELAY(10);
377 		if (!(CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_RESET))
378 			break;
379 	}
380 	if (i == RTK_TIMEOUT)
381 		aprint_error("%s: reset never completed!\n",
382 		    sc->sc_dev.dv_xname);
383 
384 	/*
385 	 * NB: Realtek-supplied Linux driver does this only for
386 	 * MCFG_METHOD_2, which corresponds to sc->sc_rev == 2.
387 	 */
388 	if (1) /* XXX check softc flag for 8169s version */
389 		CSR_WRITE_1(sc, 0x82, 1);
390 
391 	return;
392 }
393 
394 /*
395  * The following routine is designed to test for a defect on some
396  * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
397  * lines connected to the bus, however for a 32-bit only card, they
398  * should be pulled high. The result of this defect is that the
399  * NIC will not work right if you plug it into a 64-bit slot: DMA
400  * operations will be done with 64-bit transfers, which will fail
401  * because the 64-bit data lines aren't connected.
402  *
 * There's no way to work around this (short of taking a soldering
404  * iron to the board), however we can detect it. The method we use
405  * here is to put the NIC into digital loopback mode, set the receiver
406  * to promiscuous mode, and then try to send a frame. We then compare
407  * the frame data we sent to what was received. If the data matches,
408  * then the NIC is working correctly, otherwise we know the user has
409  * a defective NIC which has been mistakenly plugged into a 64-bit PCI
410  * slot. In the latter case, there's no way the NIC can work correctly,
411  * so we print out a message on the console and abort the device attach.
412  */
413 
/*
 * Run the 64-bit-slot loopback diagnostic described above: put the
 * chip in test mode, transmit one hand-built Ethernet frame through
 * digital loopback, and verify the received copy matches byte for
 * byte.  Returns 0 on success, ENOBUFS/EIO on failure.
 */
int
re_diag(struct rtk_softc *sc)
{
	struct ifnet		*ifp = &sc->ethercom.ec_if;
	struct mbuf		*m0;
	struct ether_header	*eh;
	struct rtk_desc		*cur_rx;
	bus_dmamap_t		dmamap;
	u_int16_t		status;
	u_int32_t		rxstat;
	int			total_len, i, s, error = 0;
	u_int8_t		dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
	u_int8_t		src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };

	/* Allocate a single mbuf */

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return ENOBUFS;

	/*
	 * Initialize the NIC in test mode. This sets the chip up
	 * so that it can send and receive frames, but performs the
	 * following special functions:
	 * - Puts receiver in promiscuous mode
	 * - Enables digital loopback mode
	 * - Leaves interrupts turned off
	 */

	ifp->if_flags |= IFF_PROMISC;
	sc->rtk_testmode = 1;
	re_init(ifp);
	re_stop(ifp, 0);
	DELAY(100000);
	re_init(ifp);

	/* Put some data in the mbuf */

	eh = mtod(m0, struct ether_header *);
	bcopy((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
	bcopy((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
	eh->ether_type = htons(ETHERTYPE_IP);
	m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;

	/*
	 * Queue the packet, start transmission.
	 * (ISR is cleared first so the completion poll below only
	 * sees events caused by this frame.)
	 */

	CSR_WRITE_2(sc, RTK_ISR, 0xFFFF);
	s = splnet();
	IF_ENQUEUE(&ifp->if_snd, m0);
	re_start(ifp);
	splx(s);
	m0 = NULL;	/* ownership passed to the send queue */

	/* Wait for it to propagate through the chip */

	DELAY(100000);
	for (i = 0; i < RTK_TIMEOUT; i++) {
		status = CSR_READ_2(sc, RTK_ISR);
		/* Require both timer-expired and RX-OK to be set. */
		if ((status & (RTK_ISR_TIMEOUT_EXPIRED | RTK_ISR_RX_OK)) ==
		    (RTK_ISR_TIMEOUT_EXPIRED | RTK_ISR_RX_OK))
			break;
		DELAY(10);
	}
	if (i == RTK_TIMEOUT) {
		aprint_error("%s: diagnostic failed, failed to receive packet "
		    "in loopback mode\n", sc->sc_dev.dv_xname);
		error = EIO;
		goto done;
	}

	/*
	 * The packet should have been dumped into the first
	 * entry in the RX DMA ring. Grab it from there.
	 * (Sync the descriptor list and the data buffer before the
	 * CPU reads either.)
	 */

	dmamap = sc->rtk_ldata.rtk_rx_list_map;
	bus_dmamap_sync(sc->sc_dmat,
	    dmamap, 0, dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	dmamap = sc->rtk_ldata.rtk_rx_dmamap[0];
	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat,
	    sc->rtk_ldata.rtk_rx_dmamap[0]);

	/* Take the mbuf out of the ring; we free it ourselves below. */
	m0 = sc->rtk_ldata.rtk_rx_mbuf[0];
	sc->rtk_ldata.rtk_rx_mbuf[0] = NULL;
	eh = mtod(m0, struct ether_header *);

	cur_rx = &sc->rtk_ldata.rtk_rx_list[0];
	total_len = RTK_RXBYTES(cur_rx);
	/* NOTE(review): rxstat is read but never checked below. */
	rxstat = le32toh(cur_rx->rtk_cmdstat);

	if (total_len != ETHER_MIN_LEN) {
		aprint_error("%s: diagnostic failed, received short packet\n",
		    sc->sc_dev.dv_xname);
		error = EIO;
		goto done;
	}

	/* Test that the received packet data matches what we sent. */

	if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
	    bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
	    ntohs(eh->ether_type) != ETHERTYPE_IP) {
		aprint_error("%s: WARNING, DMA FAILURE!\n",
		    sc->sc_dev.dv_xname);
		aprint_error("%s: expected TX data: %s",
		    sc->sc_dev.dv_xname, ether_sprintf(dst));
		aprint_error("/%s/0x%x\n", ether_sprintf(src), ETHERTYPE_IP);
		aprint_error("%s: received RX data: %s",
		    sc->sc_dev.dv_xname,
		    ether_sprintf(eh->ether_dhost));
		aprint_error("/%s/0x%x\n", ether_sprintf(eh->ether_shost),
		    ntohs(eh->ether_type));
		aprint_error("%s: You may have a defective 32-bit NIC plugged "
		    "into a 64-bit PCI slot.\n", sc->sc_dev.dv_xname);
		aprint_error("%s: Please re-install the NIC in a 32-bit slot "
		    "for proper operation.\n", sc->sc_dev.dv_xname);
		aprint_error("%s: Read the re(4) man page for more details.\n",
		    sc->sc_dev.dv_xname);
		error = EIO;
	}

done:
	/* Turn interface off, release resources */

	sc->rtk_testmode = 0;
	ifp->if_flags &= ~IFF_PROMISC;
	re_stop(ifp, 0);
	if (m0 != NULL)
		m_freem(m0);

	return error;
}
550 
551 
552 /*
553  * Attach the interface. Allocate softc structures, do ifmedia
554  * setup and ethernet/BPF attach.
555  */
void
re_attach(struct rtk_softc *sc)
{
	u_char			eaddr[ETHER_ADDR_LEN];
	u_int16_t		val;
	struct ifnet		*ifp;
	int			error = 0, i, addr_len;


	/* XXX JRS: bus-attach-independent code begins approximately here */

	/* Reset the adapter. */
	re_reset(sc);

	if (sc->rtk_type == RTK_8169) {
		uint32_t hwrev;

		/* Revision of 8169/8169S/8110s in bits 30..26, 23 */
		hwrev = CSR_READ_4(sc, RTK_TXCFG) & 0x7c800000;
		if (hwrev == (0x1 << 28)) {
			sc->sc_rev = 4;
		} else if (hwrev == (0x1 << 26)) {
			sc->sc_rev = 3;
		} else if (hwrev == (0x1 << 23)) {
			sc->sc_rev = 2;
		} else
			sc->sc_rev = 1;

		/* Set RX length mask */

		sc->rtk_rxlenmask = RTK_RDESC_STAT_GFRAGLEN;

		/* Force station address autoload from the EEPROM */

		CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_AUTOLOAD);
		for (i = 0; i < RTK_TIMEOUT; i++) {
			if (!(CSR_READ_1(sc, RTK_EECMD) & RTK_EEMODE_AUTOLOAD))
				break;
			DELAY(100);
		}
		if (i == RTK_TIMEOUT)
			aprint_error("%s: eeprom autoload timed out\n",
			    sc->sc_dev.dv_xname);

		/* Station address is read back from the ID registers. */
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			eaddr[i] = CSR_READ_1(sc, RTK_IDR0 + i);

		sc->rtk_ldata.rtk_tx_desc_cnt = RTK_TX_DESC_CNT_8169;
	} else {

		/* Set RX length mask */

		sc->rtk_rxlenmask = RTK_RDESC_STAT_FRAGLEN;

		/* EEPROM addressing width depends on the EEPROM ID. */
		if (rtk_read_eeprom(sc, RTK_EE_ID, RTK_EEADDR_LEN1) == 0x8129)
			addr_len = RTK_EEADDR_LEN1;
		else
			addr_len = RTK_EEADDR_LEN0;

		/*
		 * Get station address from the EEPROM.
		 * (Three 16-bit words, stored little-endian.)
		 */
		for (i = 0; i < 3; i++) {
			val = rtk_read_eeprom(sc, RTK_EE_EADDR0 + i, addr_len);
			eaddr[(i * 2) + 0] = val & 0xff;
			eaddr[(i * 2) + 1] = val >> 8;
		}

		sc->rtk_ldata.rtk_tx_desc_cnt = RTK_TX_DESC_CNT_8139;
	}

	aprint_normal("%s: Ethernet address %s\n",
	    sc->sc_dev.dv_xname, ether_sprintf(eaddr));

	/* Clamp the TX ring so it never exceeds one page of descriptors. */
	if (sc->rtk_ldata.rtk_tx_desc_cnt >
	    PAGE_SIZE / sizeof(struct rtk_desc)) {
		sc->rtk_ldata.rtk_tx_desc_cnt =
		    PAGE_SIZE / sizeof(struct rtk_desc);
	}

	aprint_verbose("%s: using %d tx descriptors\n",
	    sc->sc_dev.dv_xname, sc->rtk_ldata.rtk_tx_desc_cnt);

	/* Allocate DMA'able memory for the TX ring */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, RTK_TX_LIST_SZ(sc),
	    RTK_ETHER_ALIGN, 0, &sc->rtk_ldata.rtk_tx_listseg,
	    1, &sc->rtk_ldata.rtk_tx_listnseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: can't allocate tx listseg, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	/* Load the map for the TX ring. */
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rtk_ldata.rtk_tx_listseg,
	    sc->rtk_ldata.rtk_tx_listnseg, RTK_TX_LIST_SZ(sc),
	    (caddr_t *)&sc->rtk_ldata.rtk_tx_list,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: can't map tx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}
	memset(sc->rtk_ldata.rtk_tx_list, 0, RTK_TX_LIST_SZ(sc));

	if ((error = bus_dmamap_create(sc->sc_dmat, RTK_TX_LIST_SZ(sc), 1,
	    RTK_TX_LIST_SZ(sc), 0, BUS_DMA_ALLOCNOW,
	    &sc->rtk_ldata.rtk_tx_list_map)) != 0) {
		aprint_error("%s: can't create tx list map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_2;
	}


	if ((error = bus_dmamap_load(sc->sc_dmat,
	    sc->rtk_ldata.rtk_tx_list_map, sc->rtk_ldata.rtk_tx_list,
	    RTK_TX_LIST_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: can't load tx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/* Create DMA maps for TX buffers */
	for (i = 0; i < RTK_TX_QLEN; i++) {
		error = bus_dmamap_create(sc->sc_dmat,
		    round_page(IP_MAXPACKET),
		    RTK_TX_DESC_CNT(sc) - 4, RTK_TDESC_CMD_FRAGLEN,
		    0, BUS_DMA_ALLOCNOW,
		    &sc->rtk_ldata.rtk_txq[i].txq_dmamap);
		if (error) {
			aprint_error("%s: can't create DMA map for TX\n",
			    sc->sc_dev.dv_xname);
			goto fail_4;
		}
	}

	/* Allocate DMA'able memory for the RX ring */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, RTK_RX_LIST_SZ,
	    RTK_RING_ALIGN, 0, &sc->rtk_ldata.rtk_rx_listseg, 1,
	    &sc->rtk_ldata.rtk_rx_listnseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: can't allocate rx listseg, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_4;
	}

	/* Load the map for the RX ring. */
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rtk_ldata.rtk_rx_listseg,
	    sc->rtk_ldata.rtk_rx_listnseg, RTK_RX_LIST_SZ,
	    (caddr_t *)&sc->rtk_ldata.rtk_rx_list,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: can't map rx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_5;
	}
	memset(sc->rtk_ldata.rtk_rx_list, 0, RTK_RX_LIST_SZ);

	if ((error = bus_dmamap_create(sc->sc_dmat, RTK_RX_LIST_SZ, 1,
	    RTK_RX_LIST_SZ, 0, BUS_DMA_ALLOCNOW,
	    &sc->rtk_ldata.rtk_rx_list_map)) != 0) {
		aprint_error("%s: can't create rx list map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_6;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat,
	    sc->rtk_ldata.rtk_rx_list_map, sc->rtk_ldata.rtk_rx_list,
	    RTK_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: can't load rx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_7;
	}

	/* Create DMA maps for RX buffers */
	for (i = 0; i < RTK_RX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_ALLOCNOW, &sc->rtk_ldata.rtk_rx_dmamap[i]);
		if (error) {
			aprint_error("%s: can't create DMA map for RX\n",
			    sc->sc_dev.dv_xname);
			goto fail_8;
		}
	}

	/*
	 * Record interface as attached. From here, we should not fail.
	 */
	sc->sc_flags |= RTK_ATTACHED;

	ifp = &sc->ethercom.ec_if;
	ifp->if_softc = sc;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = re_ioctl;
	sc->ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * This is a way to disable hw VLAN tagging by default
	 * (RE_VLAN is undefined), as it is problematic. PR 32643
	 */

#ifdef RE_VLAN
	sc->ethercom.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
#endif
	ifp->if_start = re_start;
	ifp->if_stop = re_stop;

	/*
	 * IFCAP_CSUM_IPv4_Tx seems broken for small packets.
	 */

	ifp->if_capabilities |=
	    /* IFCAP_CSUM_IPv4_Tx | */ IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
	    IFCAP_TSOv4;
	ifp->if_watchdog = re_watchdog;
	ifp->if_init = re_init;
	if (sc->rtk_type == RTK_8169)
		ifp->if_baudrate = 1000000000;
	else
		ifp->if_baudrate = 100000000;
	ifp->if_snd.ifq_maxlen = RTK_IFQ_MAXLEN;
	ifp->if_capenable = ifp->if_capabilities;
	IFQ_SET_READY(&ifp->if_snd);

	callout_init(&sc->rtk_tick_ch);

	/* Do MII setup */
	sc->mii.mii_ifp = ifp;
	sc->mii.mii_readreg = re_miibus_readreg;
	sc->mii.mii_writereg = re_miibus_writereg;
	sc->mii.mii_statchg = re_miibus_statchg;
	ifmedia_init(&sc->mii.mii_media, IFM_IMASK, re_ifmedia_upd,
	    re_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	ifmedia_set(&sc->mii.mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * Call MI attach routine.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, eaddr);


	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(re_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(sc->sc_dev.dv_xname,
	    re_power, sc);
	if (sc->sc_powerhook == NULL)
		aprint_error("%s: WARNING: unable to establish power hook\n",
		    sc->sc_dev.dv_xname);


	return;

	/*
	 * Error cleanup: each label undoes the allocations made before
	 * the corresponding failure point, in reverse order.
	 */
fail_8:
	/* Destroy DMA maps for RX buffers. */
	for (i = 0; i < RTK_RX_DESC_CNT; i++)
		if (sc->rtk_ldata.rtk_rx_dmamap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rtk_ldata.rtk_rx_dmamap[i]);

	/* Free DMA'able memory for the RX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->rtk_ldata.rtk_rx_list_map);
fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->rtk_ldata.rtk_rx_list_map);
fail_6:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rtk_ldata.rtk_rx_list, RTK_RX_LIST_SZ);
fail_5:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rtk_ldata.rtk_rx_listseg, sc->rtk_ldata.rtk_rx_listnseg);

fail_4:
	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < RTK_TX_QLEN; i++)
		if (sc->rtk_ldata.rtk_txq[i].txq_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rtk_ldata.rtk_txq[i].txq_dmamap);

	/* Free DMA'able memory for the TX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->rtk_ldata.rtk_tx_list_map);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->rtk_ldata.rtk_tx_list_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rtk_ldata.rtk_tx_list, RTK_TX_LIST_SZ(sc));
fail_1:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rtk_ldata.rtk_tx_listseg, sc->rtk_ldata.rtk_tx_listnseg);
fail_0:
	return;
}
858 
859 
860 /*
861  * re_activate:
862  *     Handle device activation/deactivation requests.
863  */
864 int
865 re_activate(struct device *self, enum devact act)
866 {
867 	struct rtk_softc *sc = (void *) self;
868 	int s, error = 0;
869 
870 	s = splnet();
871 	switch (act) {
872 	case DVACT_ACTIVATE:
873 		error = EOPNOTSUPP;
874 		break;
875 	case DVACT_DEACTIVATE:
876 		mii_activate(&sc->mii, act, MII_PHY_ANY, MII_OFFSET_ANY);
877 		if_deactivate(&sc->ethercom.ec_if);
878 		break;
879 	}
880 	splx(s);
881 
882 	return error;
883 }
884 
885 /*
886  * re_detach:
887  *     Detach a rtk interface.
888  */
int
re_detach(struct rtk_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int i;

	/*
	 * Succeed now if there isn't any work to do.
	 */
	if ((sc->sc_flags & RTK_ATTACHED) == 0)
		return 0;

	/* Unhook our tick handler. */
	callout_stop(&sc->rtk_tick_ch);

	/* Detach all PHYs. */
	mii_detach(&sc->mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->mii.mii_media, IFM_INST_ANY);

	/* Detach from the network stack before freeing DMA resources. */
	ether_ifdetach(ifp);
	if_detach(ifp);

	/* XXX undo re_allocmem() */

	/* Destroy DMA maps for RX buffers. */
	for (i = 0; i < RTK_RX_DESC_CNT; i++)
		if (sc->rtk_ldata.rtk_rx_dmamap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rtk_ldata.rtk_rx_dmamap[i]);

	/* Free DMA'able memory for the RX ring (unload/destroy/unmap/free,
	 * i.e. the reverse of the allocation order in re_attach()). */
	bus_dmamap_unload(sc->sc_dmat, sc->rtk_ldata.rtk_rx_list_map);
	bus_dmamap_destroy(sc->sc_dmat, sc->rtk_ldata.rtk_rx_list_map);
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rtk_ldata.rtk_rx_list, RTK_RX_LIST_SZ);
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rtk_ldata.rtk_rx_listseg, sc->rtk_ldata.rtk_rx_listnseg);

	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < RTK_TX_QLEN; i++)
		if (sc->rtk_ldata.rtk_txq[i].txq_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rtk_ldata.rtk_txq[i].txq_dmamap);

	/* Free DMA'able memory for the TX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->rtk_ldata.rtk_tx_list_map);
	bus_dmamap_destroy(sc->sc_dmat, sc->rtk_ldata.rtk_tx_list_map);
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rtk_ldata.rtk_tx_list, RTK_TX_LIST_SZ(sc));
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rtk_ldata.rtk_tx_listseg, sc->rtk_ldata.rtk_tx_listnseg);

	/* Remove the reboot/power hooks installed at attach time. */
	shutdownhook_disestablish(sc->sc_sdhook);
	powerhook_disestablish(sc->sc_powerhook);

	return 0;
}
949 
950 /*
951  * re_enable:
952  *     Enable the RTL81X9 chip.
953  */
954 static int
955 re_enable(struct rtk_softc *sc)
956 {
957 	if (RTK_IS_ENABLED(sc) == 0 && sc->sc_enable != NULL) {
958 		if ((*sc->sc_enable)(sc) != 0) {
959 			aprint_error("%s: device enable failed\n",
960 			    sc->sc_dev.dv_xname);
961 			return EIO;
962 		}
963 		sc->sc_flags |= RTK_ENABLED;
964 	}
965 	return 0;
966 }
967 
968 /*
969  * re_disable:
970  *     Disable the RTL81X9 chip.
971  */
972 static void
973 re_disable(struct rtk_softc *sc)
974 {
975 
976 	if (RTK_IS_ENABLED(sc) && sc->sc_disable != NULL) {
977 		(*sc->sc_disable)(sc);
978 		sc->sc_flags &= ~RTK_ENABLED;
979 	}
980 }
981 
982 /*
983  * re_power:
984  *     Power management (suspend/resume) hook.
985  */
986 void
987 re_power(int why, void *arg)
988 {
989 	struct rtk_softc *sc = (void *) arg;
990 	struct ifnet *ifp = &sc->ethercom.ec_if;
991 	int s;
992 
993 	s = splnet();
994 	switch (why) {
995 	case PWR_SUSPEND:
996 	case PWR_STANDBY:
997 		re_stop(ifp, 0);
998 		if (sc->sc_power != NULL)
999 			(*sc->sc_power)(sc, why);
1000 		break;
1001 	case PWR_RESUME:
1002 		if (ifp->if_flags & IFF_UP) {
1003 			if (sc->sc_power != NULL)
1004 				(*sc->sc_power)(sc, why);
1005 			re_init(ifp);
1006 		}
1007 		break;
1008 	case PWR_SOFTSUSPEND:
1009 	case PWR_SOFTSTANDBY:
1010 	case PWR_SOFTRESUME:
1011 		break;
1012 	}
1013 	splx(s);
1014 }
1015 
1016 
/*
 * Attach a fresh (or recycled) receive mbuf cluster to RX descriptor
 * slot 'idx' and hand the descriptor back to the chip.  If 'm' is
 * NULL a new cluster mbuf is allocated; otherwise the caller's mbuf
 * is reused.  Returns 0 on success, ENOBUFS/ENOMEM on failure.
 */
static int
re_newbuf(struct rtk_softc *sc, int idx, struct mbuf *m)
{
	struct mbuf		*n = NULL;
	bus_dmamap_t		map;
	struct rtk_desc		*d;
	u_int32_t		cmdstat;
	int			error;

	if (m == NULL) {
		MGETHDR(n, M_DONTWAIT, MT_DATA);
		if (n == NULL)
			return ENOBUFS;
		m = n;

		MCLGET(m, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_freem(m);
			return ENOBUFS;
		}
	} else
		/* Recycled mbuf: rewind data pointer to cluster start. */
		m->m_data = m->m_ext.ext_buf;

	/*
	 * Initialize mbuf length fields and fixup
	 * alignment so that the frame payload is
	 * longword aligned.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, RTK_ETHER_ALIGN);

	map = sc->rtk_ldata.rtk_rx_dmamap[idx];
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);

	if (error)
		goto out;

	/* If the chip still owns this descriptor, we must not touch it. */
	d = &sc->rtk_ldata.rtk_rx_list[idx];
	if (le32toh(d->rtk_cmdstat) & RTK_RDESC_STAT_OWN)
		goto out;

	/*
	 * Fill in the buffer address and length; the last descriptor
	 * in the ring gets the end-of-ring marker.
	 */
	cmdstat = map->dm_segs[0].ds_len;
	d->rtk_bufaddr_lo = htole32(RTK_ADDR_LO(map->dm_segs[0].ds_addr));
	d->rtk_bufaddr_hi = htole32(RTK_ADDR_HI(map->dm_segs[0].ds_addr));
	if (idx == (RTK_RX_DESC_CNT - 1))
		cmdstat |= RTK_RDESC_CMD_EOR;
	d->rtk_cmdstat = htole32(cmdstat);

	/*
	 * Transfer ownership to the chip only after the rest of the
	 * descriptor has been written.
	 */
	sc->rtk_ldata.rtk_rx_list[idx].rtk_cmdstat |=
	    htole32(RTK_RDESC_CMD_OWN);
	sc->rtk_ldata.rtk_rx_mbuf[idx] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->rtk_ldata.rtk_rx_dmamap[idx], 0,
	    sc->rtk_ldata.rtk_rx_dmamap[idx]->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	return 0;
out:
	/*
	 * Only free the mbuf if we allocated it ourselves; a
	 * caller-supplied mbuf remains the caller's to dispose of.
	 */
	if (n != NULL)
		m_freem(n);
	return ENOMEM;
}
1080 
1081 static int
1082 re_tx_list_init(struct rtk_softc *sc)
1083 {
1084 	int i;
1085 
1086 	memset(sc->rtk_ldata.rtk_tx_list, 0, RTK_TX_LIST_SZ(sc));
1087 	for (i = 0; i < RTK_TX_QLEN; i++) {
1088 		sc->rtk_ldata.rtk_txq[i].txq_mbuf = NULL;
1089 	}
1090 
1091 	bus_dmamap_sync(sc->sc_dmat,
1092 	    sc->rtk_ldata.rtk_tx_list_map, 0,
1093 	    sc->rtk_ldata.rtk_tx_list_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1094 	sc->rtk_ldata.rtk_txq_prodidx = 0;
1095 	sc->rtk_ldata.rtk_txq_considx = 0;
1096 	sc->rtk_ldata.rtk_tx_free = RTK_TX_DESC_CNT(sc);
1097 	sc->rtk_ldata.rtk_tx_nextfree = 0;
1098 
1099 	return 0;
1100 }
1101 
1102 static int
1103 re_rx_list_init(struct rtk_softc *sc)
1104 {
1105 	int			i;
1106 
1107 	memset((char *)sc->rtk_ldata.rtk_rx_list, 0, RTK_RX_LIST_SZ);
1108 	memset((char *)&sc->rtk_ldata.rtk_rx_mbuf, 0,
1109 	    (RTK_RX_DESC_CNT * sizeof(struct mbuf *)));
1110 
1111 	for (i = 0; i < RTK_RX_DESC_CNT; i++) {
1112 		if (re_newbuf(sc, i, NULL) == ENOBUFS)
1113 			return ENOBUFS;
1114 	}
1115 
1116 	/* Flush the RX descriptors */
1117 
1118 	bus_dmamap_sync(sc->sc_dmat,
1119 	    sc->rtk_ldata.rtk_rx_list_map,
1120 	    0, sc->rtk_ldata.rtk_rx_list_map->dm_mapsize,
1121 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1122 
1123 	sc->rtk_ldata.rtk_rx_prodidx = 0;
1124 	sc->rtk_head = sc->rtk_tail = NULL;
1125 
1126 	return 0;
1127 }
1128 
1129 /*
1130  * RX handler for C+ and 8169. For the gigE chips, we support
1131  * the reception of jumbo frames that have been fragmented
1132  * across multiple 2K mbuf cluster buffers.
1133  */
1134 static void
1135 re_rxeof(struct rtk_softc *sc)
1136 {
1137 	struct mbuf		*m;
1138 	struct ifnet		*ifp;
1139 	int			i, total_len;
1140 	struct rtk_desc		*cur_rx;
1141 	u_int32_t		rxstat, rxvlan;
1142 
1143 	ifp = &sc->ethercom.ec_if;
1144 	i = sc->rtk_ldata.rtk_rx_prodidx;
1145 
1146 	/* Invalidate the descriptor memory */
1147 
1148 	bus_dmamap_sync(sc->sc_dmat,
1149 	    sc->rtk_ldata.rtk_rx_list_map,
1150 	    0, sc->rtk_ldata.rtk_rx_list_map->dm_mapsize,
1151 	    BUS_DMASYNC_POSTREAD);
1152 
1153 	while (!RTK_OWN(&sc->rtk_ldata.rtk_rx_list[i])) {
1154 
1155 		cur_rx = &sc->rtk_ldata.rtk_rx_list[i];
1156 		m = sc->rtk_ldata.rtk_rx_mbuf[i];
1157 		total_len = RTK_RXBYTES(cur_rx);
1158 		rxstat = le32toh(cur_rx->rtk_cmdstat);
1159 		rxvlan = le32toh(cur_rx->rtk_vlanctl);
1160 
1161 		/* Invalidate the RX mbuf and unload its map */
1162 
1163 		bus_dmamap_sync(sc->sc_dmat,
1164 		    sc->rtk_ldata.rtk_rx_dmamap[i],
1165 		    0, sc->rtk_ldata.rtk_rx_dmamap[i]->dm_mapsize,
1166 		    BUS_DMASYNC_POSTREAD);
1167 		bus_dmamap_unload(sc->sc_dmat,
1168 		    sc->rtk_ldata.rtk_rx_dmamap[i]);
1169 
1170 		if (!(rxstat & RTK_RDESC_STAT_EOF)) {
1171 			m->m_len = MCLBYTES - RTK_ETHER_ALIGN;
1172 			if (sc->rtk_head == NULL)
1173 				sc->rtk_head = sc->rtk_tail = m;
1174 			else {
1175 				m->m_flags &= ~M_PKTHDR;
1176 				sc->rtk_tail->m_next = m;
1177 				sc->rtk_tail = m;
1178 			}
1179 			re_newbuf(sc, i, NULL);
1180 			RTK_RX_DESC_INC(sc, i);
1181 			continue;
1182 		}
1183 
1184 		/*
1185 		 * NOTE: for the 8139C+, the frame length field
1186 		 * is always 12 bits in size, but for the gigE chips,
1187 		 * it is 13 bits (since the max RX frame length is 16K).
1188 		 * Unfortunately, all 32 bits in the status word
1189 		 * were already used, so to make room for the extra
1190 		 * length bit, RealTek took out the 'frame alignment
1191 		 * error' bit and shifted the other status bits
1192 		 * over one slot. The OWN, EOR, FS and LS bits are
1193 		 * still in the same places. We have already extracted
1194 		 * the frame length and checked the OWN bit, so rather
1195 		 * than using an alternate bit mapping, we shift the
1196 		 * status bits one space to the right so we can evaluate
1197 		 * them using the 8169 status as though it was in the
1198 		 * same format as that of the 8139C+.
1199 		 */
1200 		if (sc->rtk_type == RTK_8169)
1201 			rxstat >>= 1;
1202 
1203 		if (rxstat & RTK_RDESC_STAT_RXERRSUM) {
1204 			ifp->if_ierrors++;
1205 			/*
1206 			 * If this is part of a multi-fragment packet,
1207 			 * discard all the pieces.
1208 			 */
1209 			if (sc->rtk_head != NULL) {
1210 				m_freem(sc->rtk_head);
1211 				sc->rtk_head = sc->rtk_tail = NULL;
1212 			}
1213 			re_newbuf(sc, i, m);
1214 			RTK_RX_DESC_INC(sc, i);
1215 			continue;
1216 		}
1217 
1218 		/*
1219 		 * If allocating a replacement mbuf fails,
1220 		 * reload the current one.
1221 		 */
1222 
1223 		if (re_newbuf(sc, i, NULL)) {
1224 			ifp->if_ierrors++;
1225 			if (sc->rtk_head != NULL) {
1226 				m_freem(sc->rtk_head);
1227 				sc->rtk_head = sc->rtk_tail = NULL;
1228 			}
1229 			re_newbuf(sc, i, m);
1230 			RTK_RX_DESC_INC(sc, i);
1231 			continue;
1232 		}
1233 
1234 		RTK_RX_DESC_INC(sc, i);
1235 
1236 		if (sc->rtk_head != NULL) {
1237 			m->m_len = total_len % (MCLBYTES - RTK_ETHER_ALIGN);
1238 			/*
1239 			 * Special case: if there's 4 bytes or less
1240 			 * in this buffer, the mbuf can be discarded:
1241 			 * the last 4 bytes is the CRC, which we don't
1242 			 * care about anyway.
1243 			 */
1244 			if (m->m_len <= ETHER_CRC_LEN) {
1245 				sc->rtk_tail->m_len -=
1246 				    (ETHER_CRC_LEN - m->m_len);
1247 				m_freem(m);
1248 			} else {
1249 				m->m_len -= ETHER_CRC_LEN;
1250 				m->m_flags &= ~M_PKTHDR;
1251 				sc->rtk_tail->m_next = m;
1252 			}
1253 			m = sc->rtk_head;
1254 			sc->rtk_head = sc->rtk_tail = NULL;
1255 			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1256 		} else
1257 			m->m_pkthdr.len = m->m_len =
1258 			    (total_len - ETHER_CRC_LEN);
1259 
1260 		ifp->if_ipackets++;
1261 		m->m_pkthdr.rcvif = ifp;
1262 
1263 		/* Do RX checksumming if enabled */
1264 
1265 		if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) {
1266 
1267 			/* Check IP header checksum */
1268 			if (rxstat & RTK_RDESC_STAT_PROTOID)
1269 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;;
1270 			if (rxstat & RTK_RDESC_STAT_IPSUMBAD)
1271 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1272 		}
1273 
1274 		/* Check TCP/UDP checksum */
1275 		if (RTK_TCPPKT(rxstat) &&
1276 		    (ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx)) {
1277 			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1278 			if (rxstat & RTK_RDESC_STAT_TCPSUMBAD)
1279 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1280 		}
1281 		if (RTK_UDPPKT(rxstat) &&
1282 		    (ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx)) {
1283 			m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1284 			if (rxstat & RTK_RDESC_STAT_UDPSUMBAD)
1285 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1286 		}
1287 
1288 #ifdef RE_VLAN
1289 		if (rxvlan & RTK_RDESC_VLANCTL_TAG) {
1290 			VLAN_INPUT_TAG(ifp, m,
1291 			     be16toh(rxvlan & RTK_RDESC_VLANCTL_DATA),
1292 			     continue);
1293 		}
1294 #endif
1295 #if NBPFILTER > 0
1296 		if (ifp->if_bpf)
1297 			bpf_mtap(ifp->if_bpf, m);
1298 #endif
1299 		(*ifp->if_input)(ifp, m);
1300 	}
1301 
1302 	/* Flush the RX DMA ring */
1303 
1304 	bus_dmamap_sync(sc->sc_dmat,
1305 	    sc->rtk_ldata.rtk_rx_list_map,
1306 	    0, sc->rtk_ldata.rtk_rx_list_map->dm_mapsize,
1307 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1308 
1309 	sc->rtk_ldata.rtk_rx_prodidx = i;
1310 
1311 	return;
1312 }
1313 
/*
 * re_txeof:
 *     Reap completed transmit jobs: free their mbufs, return their
 *     descriptors to the free pool, and update interface counters.
 */
static void
re_txeof(struct rtk_softc *sc)
{
	struct ifnet		*ifp;
	int			idx;
	boolean_t		done = FALSE;

	ifp = &sc->ethercom.ec_if;
	idx = sc->rtk_ldata.rtk_txq_considx;

	/* Invalidate the TX descriptor list */

	bus_dmamap_sync(sc->sc_dmat,
	    sc->rtk_ldata.rtk_tx_list_map,
	    0, sc->rtk_ldata.rtk_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	while (/* CONSTCOND */ 1) {
		struct rtk_txq *txq = &sc->rtk_ldata.rtk_txq[idx];
		int descidx;
		u_int32_t txstat;

		if (txq->txq_mbuf == NULL) {
			/* Queue drained: consumer caught up with producer. */
			KASSERT(idx == sc->rtk_ldata.rtk_txq_prodidx);
			break;
		}

		/* Completion is judged by the job's last descriptor. */
		descidx = txq->txq_descidx;
		txstat =
		    le32toh(sc->rtk_ldata.rtk_tx_list[descidx].rtk_cmdstat);
		KASSERT((txstat & RTK_TDESC_CMD_EOF) != 0);
		if (txstat & RTK_TDESC_CMD_OWN)
			break;

		/* Return the job's descriptors and free its mbuf chain. */
		sc->rtk_ldata.rtk_tx_free += txq->txq_dmamap->dm_nsegs;
		KASSERT(sc->rtk_ldata.rtk_tx_free <= RTK_TX_DESC_CNT(sc));
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RTK_TDESC_STAT_EXCESSCOL | RTK_TDESC_STAT_COLCNT))
			ifp->if_collisions++;
		if (txstat & RTK_TDESC_STAT_TXERRSUM)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;

		idx = (idx + 1) % RTK_TX_QLEN;
		done = TRUE;
	}

	/* No changes made to the TX ring, so no flush needed */

	if (done) {
		sc->rtk_ldata.rtk_txq_considx = idx;
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_timer = 0;
	}

	/*
	 * If not all descriptors have been reaped yet,
	 * reload the timer so that we will eventually get another
	 * interrupt that will cause us to re-enter this routine.
	 * This is done in case the transmitter has gone idle.
	 */
	if (sc->rtk_ldata.rtk_tx_free != RTK_TX_DESC_CNT(sc))
		CSR_WRITE_4(sc, RTK_TIMERCNT, 1);

	return;
}
1384 
1385 /*
1386  * Stop all chip I/O so that the kernel's probe routines don't
1387  * get confused by errant DMAs when rebooting.
1388  */
1389 static void
1390 re_shutdown(void *vsc)
1391 
1392 {
1393 	struct rtk_softc	*sc = (struct rtk_softc *)vsc;
1394 
1395 	re_stop(&sc->ethercom.ec_if, 0);
1396 }
1397 
1398 
1399 static void
1400 re_tick(void *xsc)
1401 {
1402 	struct rtk_softc	*sc = xsc;
1403 	int s;
1404 
1405 	/*XXX: just return for 8169S/8110S with rev 2 or newer phy */
1406 	s = splnet();
1407 
1408 	mii_tick(&sc->mii);
1409 	splx(s);
1410 
1411 	callout_reset(&sc->rtk_tick_ch, hz, re_tick, sc);
1412 }
1413 
1414 #ifdef DEVICE_POLLING
/*
 * re_poll:
 *     DEVICE_POLLING entry point.  Services RX/TX in place of the
 *     interrupt handler while polling is active, and re-enables
 *     interrupts on the final (deregister) call.
 */
static void
re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct rtk_softc *sc = ifp->if_softc;

	RTK_LOCK(sc);
	if (!(ifp->if_capenable & IFCAP_POLLING)) {
		/* Polling was switched off; fall through to deregister. */
		ether_poll_deregister(ifp);
		cmd = POLL_DEREGISTER;
	}
	if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
		CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS_CPLUS);
		goto done;
	}

	sc->rxcycles = count;
	re_rxeof(sc);
	re_txeof(sc);

	/* Kick the transmitter if work is queued. */
	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		(*ifp->if_start)(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
		u_int16_t       status;

		status = CSR_READ_2(sc, RTK_ISR);
		/* A read of 0xffff means the card has gone away. */
		if (status == 0xffff)
			goto done;
		if (status)
			CSR_WRITE_2(sc, RTK_ISR, status);

		/*
		 * XXX check behaviour on receiver stalls.
		 */

		if (status & RTK_ISR_SYSTEM_ERR) {
			re_reset(sc);
			re_init(sc);
		}
	}
done:
	RTK_UNLOCK(sc);
}
1458 #endif /* DEVICE_POLLING */
1459 
/*
 * re_intr:
 *     Interrupt handler.  Returns nonzero when the interrupt was ours.
 */
int
re_intr(void *arg)
{
	struct rtk_softc	*sc = arg;
	struct ifnet		*ifp;
	u_int16_t		status;
	int			handled = 0;

	ifp = &sc->ethercom.ec_if;

	/* Ignore interrupts while the interface is down. */
	if (!(ifp->if_flags & IFF_UP))
		return 0;

#ifdef DEVICE_POLLING
	if (ifp->if_flags & IFF_POLLING)
		goto done;
	if ((ifp->if_capenable & IFCAP_POLLING) &&
	    ether_poll_register(re_poll, ifp)) { /* ok, disable interrupts */
		CSR_WRITE_2(sc, RTK_IMR, 0x0000);
		re_poll(ifp, 0, 1);
		goto done;
	}
#endif /* DEVICE_POLLING */

	/* Loop until every pending interrupt cause is acknowledged. */
	for (;;) {

		status = CSR_READ_2(sc, RTK_ISR);
		/* If the card has gone away the read returns 0xffff. */
		if (status == 0xffff)
			break;
		if (status) {
			handled = 1;
			/* Ack the causes we are about to service. */
			CSR_WRITE_2(sc, RTK_ISR, status);
		}

		if ((status & RTK_INTRS_CPLUS) == 0)
			break;

		if ((status & RTK_ISR_RX_OK) ||
		    (status & RTK_ISR_RX_ERR))
			re_rxeof(sc);

		if ((status & RTK_ISR_TIMEOUT_EXPIRED) ||
		    (status & RTK_ISR_TX_ERR) ||
		    (status & RTK_ISR_TX_DESC_UNAVAIL))
			re_txeof(sc);

		if (status & RTK_ISR_SYSTEM_ERR) {
			/* Fatal chip error: reset and reinitialize. */
			re_reset(sc);
			re_init(ifp);
		}

		if (status & RTK_ISR_LINKCHG) {
			callout_stop(&sc->rtk_tick_ch);
			re_tick(sc);
		}
	}

	if (ifp->if_flags & IFF_UP) /* kludge for interrupt during re_init() */
		if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
			(*ifp->if_start)(ifp);

#ifdef DEVICE_POLLING
done:
#endif

	return handled;
}
1528 
/*
 * re_encap:
 *     Map mbuf chain 'm' for TX DMA and fill descriptors for it,
 *     using the txq slot at '*idx'.  On success, advances '*idx' and
 *     returns 0; on failure returns an errno with the DMA map unloaded.
 */
static int
re_encap(struct rtk_softc *sc, struct mbuf *m, int *idx)
{
	bus_dmamap_t		map;
	int			error, i, startidx, curidx;
#ifdef RE_VLAN
	struct m_tag		*mtag;
#endif
	struct rtk_desc		*d;
	u_int32_t		cmdstat, rtk_flags;
	struct rtk_txq		*txq;

	/* Keep a few descriptors in reserve; bail early when low. */
	if (sc->rtk_ldata.rtk_tx_free <= 4) {
		return EFBIG;
	}

	/*
	 * Set up checksum offload. Note: checksum offload bits must
	 * appear in all descriptors of a multi-descriptor transmit
	 * attempt. (This is according to testing done with an 8169
	 * chip. I'm not sure if this is a requirement or a bug.)
	 */

	if ((m->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0) {
		u_int32_t segsz = m->m_pkthdr.segsz;

		/* TCP segmentation offload: pass the MSS to the chip. */
		rtk_flags = RTK_TDESC_CMD_LGSEND |
		    (segsz << RTK_TDESC_CMD_MSSVAL_SHIFT);
	} else {

		/*
		 * set RTK_TDESC_CMD_IPCSUM if any checksum offloading
		 * is requested.  otherwise, RTK_TDESC_CMD_TCPCSUM/
		 * RTK_TDESC_CMD_UDPCSUM doesn't make effects.
		 */

		rtk_flags = 0;
		if ((m->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
			rtk_flags |= RTK_TDESC_CMD_IPCSUM;
			if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4) {
				rtk_flags |= RTK_TDESC_CMD_TCPCSUM;
			} else if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4) {
				rtk_flags |= RTK_TDESC_CMD_UDPCSUM;
			}
		}
	}

	txq = &sc->rtk_ldata.rtk_txq[*idx];
	map = txq->txq_dmamap;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);

	if (error) {
		/* XXX try to defrag if EFBIG? */

		aprint_error("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);

		return error;
	}

	/* Re-check the reserve now that the real segment count is known. */
	if (map->dm_nsegs > sc->rtk_ldata.rtk_tx_free - 4) {
		error = EFBIG;
		goto fail_unload;
	}

	/*
	 * Make sure that the caches are synchronized before we
	 * ask the chip to start DMA for the packet data.
	 */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		BUS_DMASYNC_PREWRITE);

	/*
	 * Map the segment array into descriptors. Note that we set the
	 * start-of-frame and end-of-frame markers for either TX or RX, but
	 * they really only have meaning in the TX case. (In the RX case,
	 * it's the chip that tells us where packets begin and end.)
	 * We also keep track of the end of the ring and set the
	 * end-of-ring bits as needed, and we set the ownership bits
	 * in all except the very first descriptor. (The caller will
	 * set this descriptor later when it start transmission or
	 * reception.)
	 */
	i = 0;
	curidx = startidx = sc->rtk_ldata.rtk_tx_nextfree;
	while (1) {
		d = &sc->rtk_ldata.rtk_tx_list[curidx];
		if (le32toh(d->rtk_cmdstat) & RTK_TDESC_STAT_OWN) {
			/* Chip still owns this slot: undo what we wrote. */
			while (i > 0) {
				sc->rtk_ldata.rtk_tx_list[
				    (curidx + RTK_TX_DESC_CNT(sc) - i) %
				    RTK_TX_DESC_CNT(sc)].rtk_cmdstat = 0;
				i--;
			}
			error = ENOBUFS;
			goto fail_unload;
		}

		cmdstat = map->dm_segs[i].ds_len;
		d->rtk_bufaddr_lo =
		    htole32(RTK_ADDR_LO(map->dm_segs[i].ds_addr));
		d->rtk_bufaddr_hi =
		    htole32(RTK_ADDR_HI(map->dm_segs[i].ds_addr));
		if (i == 0)
			cmdstat |= RTK_TDESC_CMD_SOF;
		else
			cmdstat |= RTK_TDESC_CMD_OWN;
		if (curidx == (RTK_TX_DESC_CNT(sc) - 1))
			cmdstat |= RTK_TDESC_CMD_EOR;
		d->rtk_cmdstat = htole32(cmdstat | rtk_flags);
		i++;
		if (i == map->dm_nsegs)
			break;
		RTK_TX_DESC_INC(sc, curidx);
	}

	d->rtk_cmdstat |= htole32(RTK_TDESC_CMD_EOF);

	txq->txq_mbuf = m;
	sc->rtk_ldata.rtk_tx_free -= map->dm_nsegs;

	/*
	 * Set up hardware VLAN tagging. Note: vlan tag info must
	 * appear in the first descriptor of a multi-descriptor
	 * transmission attempt.
	 */

#ifdef RE_VLAN
	if ((mtag = VLAN_OUTPUT_TAG(&sc->ethercom, m)) != NULL) {
		sc->rtk_ldata.rtk_tx_list[startidx].rtk_vlanctl =
		    htole32(htons(VLAN_TAG_VALUE(mtag)) |
		    RTK_TDESC_VLANCTL_TAG);
	}
#endif

	/* Transfer ownership of packet to the chip. */

	sc->rtk_ldata.rtk_tx_list[curidx].rtk_cmdstat |=
	    htole32(RTK_TDESC_CMD_OWN);
	if (startidx != curidx)
		sc->rtk_ldata.rtk_tx_list[startidx].rtk_cmdstat |=
		    htole32(RTK_TDESC_CMD_OWN);

	txq->txq_descidx = curidx;
	RTK_TX_DESC_INC(sc, curidx);
	sc->rtk_ldata.rtk_tx_nextfree = curidx;
	*idx = (*idx + 1) % RTK_TX_QLEN;

	return 0;

fail_unload:
	bus_dmamap_unload(sc->sc_dmat, map);

	return error;
}
1686 
1687 /*
1688  * Main transmit routine for C+ and gigE NICs.
1689  */
1690 
static void
re_start(struct ifnet *ifp)
{
	struct rtk_softc	*sc;
	int			idx;
	boolean_t		done = FALSE;

	sc = ifp->if_softc;

	idx = sc->rtk_ldata.rtk_txq_prodidx;
	while (/* CONSTCOND */ 1) {
		struct mbuf *m;
		int error;

		/* Peek first; only dequeue once encapsulation succeeds. */
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if (sc->rtk_ldata.rtk_txq[idx].txq_mbuf != NULL) {
			/* TX job queue is full: stop accepting packets. */
			KASSERT(idx == sc->rtk_ldata.rtk_txq_considx);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		error = re_encap(sc, m, &idx);
		if (error == EFBIG &&
		    sc->rtk_ldata.rtk_tx_free == RTK_TX_DESC_CNT(sc)) {
			/* Packet can never fit, even in an empty ring. */
			IFQ_DEQUEUE(&ifp->if_snd, m);
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}
		if (error) {
			/* Ring temporarily full: retry after txeof. */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		done = TRUE;
	}

	if (!done) {
		return;
	}
	sc->rtk_ldata.rtk_txq_prodidx = idx;

	/* Flush the TX descriptors */

	bus_dmamap_sync(sc->sc_dmat,
	    sc->rtk_ldata.rtk_tx_list_map,
	    0, sc->rtk_ldata.rtk_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	/*
	 * RealTek put the TX poll request register in a different
	 * location on the 8169 gigE chip. I don't know why.
	 */

	if (sc->rtk_type == RTK_8169)
		CSR_WRITE_2(sc, RTK_GTXSTART, RTK_TXSTART_START);
	else
		CSR_WRITE_2(sc, RTK_TXSTART, RTK_TXSTART_START);

	/*
	 * Use the countdown timer for interrupt moderation.
	 * 'TX done' interrupts are disabled. Instead, we reset the
	 * countdown timer, which will begin counting until it hits
	 * the value in the TIMERINT register, and then trigger an
	 * interrupt. Each time we write to the TIMERCNT register,
	 * the timer count is reset to 0.
	 */
	CSR_WRITE_4(sc, RTK_TIMERCNT, 1);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}
1781 
1782 static int
1783 re_init(struct ifnet *ifp)
1784 {
1785 	struct rtk_softc	*sc = ifp->if_softc;
1786 	u_int32_t		rxcfg = 0;
1787 	u_int32_t		reg;
1788 	int error;
1789 
1790 	if ((error = re_enable(sc)) != 0)
1791 		goto out;
1792 
1793 	/*
1794 	 * Cancel pending I/O and free all RX/TX buffers.
1795 	 */
1796 	re_stop(ifp, 0);
1797 
1798 	/*
1799 	 * Enable C+ RX and TX mode, as well as VLAN stripping and
1800 	 * RX checksum offload. We must configure the C+ register
1801 	 * before all others.
1802 	 */
1803 	reg = 0;
1804 
1805 	/*
1806 	 * XXX: Realtek docs say bits 0 and 1 are reserved, for 8169S/8110S.
1807 	 * FreeBSD  drivers set these bits anyway (for 8139C+?).
1808 	 * So far, it works.
1809 	 */
1810 
1811 	/*
1812 	 * XXX: For 8169 and 8196S revs below 2, set bit 14.
1813 	 * For 8169S/8110S rev 2 and above, do not set bit 14.
1814 	 */
1815 	if (sc->rtk_type == RTK_8169 && sc->sc_rev == 1)
1816 		reg |= (0x1 << 14) | RTK_CPLUSCMD_PCI_MRW;;
1817 
1818 	if (1)  {/* not for 8169S ? */
1819 		reg |=
1820 #ifdef RE_VLAN
1821 		    RTK_CPLUSCMD_VLANSTRIP |
1822 #endif
1823 		    (ifp->if_capenable &
1824 		    (IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_TCPv4_Rx |
1825 		     IFCAP_CSUM_UDPv4_Rx) ?
1826 		    RTK_CPLUSCMD_RXCSUM_ENB : 0);
1827 	}
1828 
1829 	CSR_WRITE_2(sc, RTK_CPLUS_CMD,
1830 	    reg | RTK_CPLUSCMD_RXENB | RTK_CPLUSCMD_TXENB);
1831 
1832 	/* XXX: from Realtek-supplied Linux driver. Wholly undocumented. */
1833 	if (sc->rtk_type == RTK_8169)
1834 		CSR_WRITE_2(sc, RTK_CPLUS_CMD+0x2, 0x0000);
1835 
1836 	DELAY(10000);
1837 
1838 	/*
1839 	 * Init our MAC address.  Even though the chipset
1840 	 * documentation doesn't mention it, we need to enter "Config
1841 	 * register write enable" mode to modify the ID registers.
1842 	 */
1843 	CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_WRITECFG);
1844 	memcpy(&reg, LLADDR(ifp->if_sadl), 4);
1845 	CSR_WRITE_STREAM_4(sc, RTK_IDR0, reg);
1846 	reg = 0;
1847 	memcpy(&reg, LLADDR(ifp->if_sadl) + 4, 4);
1848 	CSR_WRITE_STREAM_4(sc, RTK_IDR4, reg);
1849 	CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_OFF);
1850 
1851 	/*
1852 	 * For C+ mode, initialize the RX descriptors and mbufs.
1853 	 */
1854 	re_rx_list_init(sc);
1855 	re_tx_list_init(sc);
1856 
1857 	/*
1858 	 * Enable transmit and receive.
1859 	 */
1860 	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB);
1861 
1862 	/*
1863 	 * Set the initial TX and RX configuration.
1864 	 */
1865 	if (sc->rtk_testmode) {
1866 		if (sc->rtk_type == RTK_8169)
1867 			CSR_WRITE_4(sc, RTK_TXCFG,
1868 			    RTK_TXCFG_CONFIG | RTK_LOOPTEST_ON);
1869 		else
1870 			CSR_WRITE_4(sc, RTK_TXCFG,
1871 			    RTK_TXCFG_CONFIG | RTK_LOOPTEST_ON_CPLUS);
1872 	} else
1873 		CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG);
1874 	CSR_WRITE_4(sc, RTK_RXCFG, RTK_RXCFG_CONFIG);
1875 
1876 	/* Set the individual bit to receive frames for this host only. */
1877 	rxcfg = CSR_READ_4(sc, RTK_RXCFG);
1878 	rxcfg |= RTK_RXCFG_RX_INDIV;
1879 
1880 	/* If we want promiscuous mode, set the allframes bit. */
1881 	if (ifp->if_flags & IFF_PROMISC)
1882 		rxcfg |= RTK_RXCFG_RX_ALLPHYS;
1883 	else
1884 		rxcfg &= ~RTK_RXCFG_RX_ALLPHYS;
1885 	CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1886 
1887 	/*
1888 	 * Set capture broadcast bit to capture broadcast frames.
1889 	 */
1890 	if (ifp->if_flags & IFF_BROADCAST)
1891 		rxcfg |= RTK_RXCFG_RX_BROAD;
1892 	else
1893 		rxcfg &= ~RTK_RXCFG_RX_BROAD;
1894 	CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1895 
1896 	/*
1897 	 * Program the multicast filter, if necessary.
1898 	 */
1899 	rtk_setmulti(sc);
1900 
1901 #ifdef DEVICE_POLLING
1902 	/*
1903 	 * Disable interrupts if we are polling.
1904 	 */
1905 	if (ifp->if_flags & IFF_POLLING)
1906 		CSR_WRITE_2(sc, RTK_IMR, 0);
1907 	else	/* otherwise ... */
1908 #endif /* DEVICE_POLLING */
1909 	/*
1910 	 * Enable interrupts.
1911 	 */
1912 	if (sc->rtk_testmode)
1913 		CSR_WRITE_2(sc, RTK_IMR, 0);
1914 	else
1915 		CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS_CPLUS);
1916 
1917 	/* Start RX/TX process. */
1918 	CSR_WRITE_4(sc, RTK_MISSEDPKT, 0);
1919 #ifdef notdef
1920 	/* Enable receiver and transmitter. */
1921 	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB);
1922 #endif
1923 	/*
1924 	 * Load the addresses of the RX and TX lists into the chip.
1925 	 */
1926 
1927 	CSR_WRITE_4(sc, RTK_RXLIST_ADDR_HI,
1928 	    RTK_ADDR_HI(sc->rtk_ldata.rtk_rx_list_map->dm_segs[0].ds_addr));
1929 	CSR_WRITE_4(sc, RTK_RXLIST_ADDR_LO,
1930 	    RTK_ADDR_LO(sc->rtk_ldata.rtk_rx_list_map->dm_segs[0].ds_addr));
1931 
1932 	CSR_WRITE_4(sc, RTK_TXLIST_ADDR_HI,
1933 	    RTK_ADDR_HI(sc->rtk_ldata.rtk_tx_list_map->dm_segs[0].ds_addr));
1934 	CSR_WRITE_4(sc, RTK_TXLIST_ADDR_LO,
1935 	    RTK_ADDR_LO(sc->rtk_ldata.rtk_tx_list_map->dm_segs[0].ds_addr));
1936 
1937 	CSR_WRITE_1(sc, RTK_EARLY_TX_THRESH, 16);
1938 
1939 	/*
1940 	 * Initialize the timer interrupt register so that
1941 	 * a timer interrupt will be generated once the timer
1942 	 * reaches a certain number of ticks. The timer is
1943 	 * reloaded on each transmit. This gives us TX interrupt
1944 	 * moderation, which dramatically improves TX frame rate.
1945 	 */
1946 
1947 	if (sc->rtk_type == RTK_8169)
1948 		CSR_WRITE_4(sc, RTK_TIMERINT_8169, 0x800);
1949 	else
1950 		CSR_WRITE_4(sc, RTK_TIMERINT, 0x400);
1951 
1952 	/*
1953 	 * For 8169 gigE NICs, set the max allowed RX packet
1954 	 * size so we can receive jumbo frames.
1955 	 */
1956 	if (sc->rtk_type == RTK_8169)
1957 		CSR_WRITE_2(sc, RTK_MAXRXPKTLEN, 16383);
1958 
1959 	if (sc->rtk_testmode)
1960 		return 0;
1961 
1962 	mii_mediachg(&sc->mii);
1963 
1964 	CSR_WRITE_1(sc, RTK_CFG1, RTK_CFG1_DRVLOAD | RTK_CFG1_FULLDUPLEX);
1965 
1966 	ifp->if_flags |= IFF_RUNNING;
1967 	ifp->if_flags &= ~IFF_OACTIVE;
1968 
1969 	callout_reset(&sc->rtk_tick_ch, hz, re_tick, sc);
1970 
1971 out:
1972 	if (error) {
1973 		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1974 		ifp->if_timer = 0;
1975 		aprint_error("%s: interface not running\n",
1976 		    sc->sc_dev.dv_xname);
1977 	}
1978 
1979 	return error;
1980 
1981 }
1982 
1983 /*
1984  * Set media options.
1985  */
1986 static int
1987 re_ifmedia_upd(struct ifnet *ifp)
1988 {
1989 	struct rtk_softc	*sc;
1990 
1991 	sc = ifp->if_softc;
1992 
1993 	return mii_mediachg(&sc->mii);
1994 }
1995 
1996 /*
1997  * Report current media status.
1998  */
1999 static void
2000 re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2001 {
2002 	struct rtk_softc	*sc;
2003 
2004 	sc = ifp->if_softc;
2005 
2006 	mii_pollstat(&sc->mii);
2007 	ifmr->ifm_active = sc->mii.mii_media_active;
2008 	ifmr->ifm_status = sc->mii.mii_media_status;
2009 
2010 	return;
2011 }
2012 
2013 static int
2014 re_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2015 {
2016 	struct rtk_softc	*sc = ifp->if_softc;
2017 	struct ifreq		*ifr = (struct ifreq *) data;
2018 	int			s, error = 0;
2019 
2020 	s = splnet();
2021 
2022 	switch (command) {
2023 	case SIOCSIFMTU:
2024 		if (ifr->ifr_mtu > RTK_JUMBO_MTU)
2025 			error = EINVAL;
2026 		ifp->if_mtu = ifr->ifr_mtu;
2027 		break;
2028 	case SIOCGIFMEDIA:
2029 	case SIOCSIFMEDIA:
2030 		error = ifmedia_ioctl(ifp, ifr, &sc->mii.mii_media, command);
2031 		break;
2032 	default:
2033 		error = ether_ioctl(ifp, command, data);
2034 		if (error == ENETRESET) {
2035 			if (ifp->if_flags & IFF_RUNNING)
2036 				rtk_setmulti(sc);
2037 			error = 0;
2038 		}
2039 		break;
2040 	}
2041 
2042 	splx(s);
2043 
2044 	return error;
2045 }
2046 
2047 static void
2048 re_watchdog(struct ifnet *ifp)
2049 {
2050 	struct rtk_softc	*sc;
2051 	int			s;
2052 
2053 	sc = ifp->if_softc;
2054 	s = splnet();
2055 	aprint_error("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
2056 	ifp->if_oerrors++;
2057 
2058 	re_txeof(sc);
2059 	re_rxeof(sc);
2060 
2061 	re_init(ifp);
2062 
2063 	splx(s);
2064 }
2065 
2066 /*
2067  * Stop the adapter and free any mbufs allocated to the
2068  * RX and TX lists.
2069  */
2070 static void
2071 re_stop(struct ifnet *ifp, int disable)
2072 {
2073 	register int		i;
2074 	struct rtk_softc *sc = ifp->if_softc;
2075 
2076 	callout_stop(&sc->rtk_tick_ch);
2077 
2078 #ifdef DEVICE_POLLING
2079 	ether_poll_deregister(ifp);
2080 #endif /* DEVICE_POLLING */
2081 
2082 	mii_down(&sc->mii);
2083 
2084 	CSR_WRITE_1(sc, RTK_COMMAND, 0x00);
2085 	CSR_WRITE_2(sc, RTK_IMR, 0x0000);
2086 
2087 	if (sc->rtk_head != NULL) {
2088 		m_freem(sc->rtk_head);
2089 		sc->rtk_head = sc->rtk_tail = NULL;
2090 	}
2091 
2092 	/* Free the TX list buffers. */
2093 	for (i = 0; i < RTK_TX_QLEN; i++) {
2094 		if (sc->rtk_ldata.rtk_txq[i].txq_mbuf != NULL) {
2095 			bus_dmamap_unload(sc->sc_dmat,
2096 			    sc->rtk_ldata.rtk_txq[i].txq_dmamap);
2097 			m_freem(sc->rtk_ldata.rtk_txq[i].txq_mbuf);
2098 			sc->rtk_ldata.rtk_txq[i].txq_mbuf = NULL;
2099 		}
2100 	}
2101 
2102 	/* Free the RX list buffers. */
2103 	for (i = 0; i < RTK_RX_DESC_CNT; i++) {
2104 		if (sc->rtk_ldata.rtk_rx_mbuf[i] != NULL) {
2105 			bus_dmamap_unload(sc->sc_dmat,
2106 			    sc->rtk_ldata.rtk_rx_dmamap[i]);
2107 			m_freem(sc->rtk_ldata.rtk_rx_mbuf[i]);
2108 			sc->rtk_ldata.rtk_rx_mbuf[i] = NULL;
2109 		}
2110 	}
2111 
2112 	if (disable)
2113 		re_disable(sc);
2114 
2115 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2116 	ifp->if_timer = 0;
2117 
2118 	return;
2119 }
2120