xref: /netbsd-src/sys/dev/ic/rtl8169.c (revision 82d56013d7b633d116a93943de88e08335357a7c)
1 /*	$NetBSD: rtl8169.c,v 1.167 2020/09/21 06:57:00 msaitoh Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998-2003
5  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: rtl8169.c,v 1.167 2020/09/21 06:57:00 msaitoh Exp $");
37 /* $FreeBSD: /repoman/r/ncvs/src/sys/dev/re/if_re.c,v 1.20 2004/04/11 20:34:08 ru Exp $ */
38 
39 /*
40  * RealTek 8139C+/8169/8169S/8168/8110S PCI NIC driver
41  *
42  * Written by Bill Paul <wpaul@windriver.com>
43  * Senior Networking Software Engineer
44  * Wind River Systems
45  */
46 
47 /*
48  * This driver is designed to support RealTek's next generation of
49  * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
50  * six devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
51  * RTL8110S, the RTL8168 and the RTL8111.
52  *
53  * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
54  * with the older 8139 family, however it also supports a special
55  * C+ mode of operation that provides several new performance enhancing
56  * features. These include:
57  *
58  *	o Descriptor based DMA mechanism. Each descriptor represents
59  *	  a single packet fragment. Data buffers may be aligned on
60  *	  any byte boundary.
61  *
62  *	o 64-bit DMA
63  *
64  *	o TCP/IP checksum offload for both RX and TX
65  *
66  *	o High and normal priority transmit DMA rings
67  *
68  *	o VLAN tag insertion and extraction
69  *
70  *	o TCP large send (segmentation offload)
71  *
72  * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
73  * programming API is fairly straightforward. The RX filtering, EEPROM
74  * access and PHY access is the same as it is on the older 8139 series
75  * chips.
76  *
77  * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
78  * same programming API and feature set as the 8139C+ with the following
79  * differences and additions:
80  *
81  *	o 1000Mbps mode
82  *
83  *	o Jumbo frames
84  *
85  *	o GMII and TBI ports/registers for interfacing with copper
86  *	  or fiber PHYs
87  *
88  *      o RX and TX DMA rings can have up to 1024 descriptors
89  *        (the 8139C+ allows a maximum of 64)
90  *
91  *	o Slight differences in register layout from the 8139C+
92  *
93  * The TX start and timer interrupt registers are at different locations
94  * on the 8169 than they are on the 8139C+. Also, the status word in the
95  * RX descriptor has a slightly different bit layout. The 8169 does not
96  * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
97  * copper gigE PHY.
98  *
99  * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
100  * (the 'S' stands for 'single-chip'). These devices have the same
101  * programming API as the older 8169, but also have some vendor-specific
102  * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
103  * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
104  *
105  * This driver takes advantage of the RX and TX checksum offload and
106  * VLAN tag insertion/extraction features. It also implements TX
107  * interrupt moderation using the timer interrupt registers, which
108  * significantly reduces TX interrupt load. There is also support
109  * for jumbo frames, however the 8169/8169S/8110S can not transmit
110  * jumbo frames larger than 7.5K, so the max MTU possible with this
111  * driver is 7500 bytes.
112  */
113 
114 
115 #include <sys/param.h>
116 #include <sys/endian.h>
117 #include <sys/systm.h>
118 #include <sys/sockio.h>
119 #include <sys/mbuf.h>
120 #include <sys/malloc.h>
121 #include <sys/kernel.h>
122 #include <sys/socket.h>
123 #include <sys/device.h>
124 
125 #include <net/if.h>
126 #include <net/if_arp.h>
127 #include <net/if_dl.h>
128 #include <net/if_ether.h>
129 #include <net/if_media.h>
130 #include <net/if_vlanvar.h>
131 
132 #include <netinet/in_systm.h>	/* XXX for IP_MAXPACKET */
133 #include <netinet/in.h>		/* XXX for IP_MAXPACKET */
134 #include <netinet/ip.h>		/* XXX for IP_MAXPACKET */
135 
136 #include <net/bpf.h>
137 #include <sys/rndsource.h>
138 
139 #include <sys/bus.h>
140 
141 #include <dev/mii/mii.h>
142 #include <dev/mii/miivar.h>
143 
144 #include <dev/ic/rtl81x9reg.h>
145 #include <dev/ic/rtl81x9var.h>
146 
147 #include <dev/ic/rtl8169var.h>
148 
149 static inline void re_set_bufaddr(struct re_desc *, bus_addr_t);
150 
151 static int re_newbuf(struct rtk_softc *, int, struct mbuf *);
152 static int re_rx_list_init(struct rtk_softc *);
153 static int re_tx_list_init(struct rtk_softc *);
154 static void re_rxeof(struct rtk_softc *);
155 static void re_txeof(struct rtk_softc *);
156 static void re_tick(void *);
157 static void re_start(struct ifnet *);
158 static int re_ioctl(struct ifnet *, u_long, void *);
159 static int re_init(struct ifnet *);
160 static void re_stop(struct ifnet *, int);
161 static void re_watchdog(struct ifnet *);
162 
163 static int re_enable(struct rtk_softc *);
164 static void re_disable(struct rtk_softc *);
165 
166 static int re_gmii_readreg(device_t, int, int, uint16_t *);
167 static int re_gmii_writereg(device_t, int, int, uint16_t);
168 
169 static int re_miibus_readreg(device_t, int, int, uint16_t *);
170 static int re_miibus_writereg(device_t, int, int, uint16_t);
171 static void re_miibus_statchg(struct ifnet *);
172 
173 static void re_reset(struct rtk_softc *);
174 
/*
 * Table mapping hardware revision codes (read from the RTK_TXCFG
 * register) to human-readable chip names; consulted by re_attach()
 * to report the detected ASIC.  Terminated by a { 0, NULL } sentinel.
 */
static const struct re_revision {
	uint32_t		re_chipid;	/* RTK_TXCFG_HWREV value */
	const char		*re_name;	/* marketing name */
} re_revisions[] = {
	{ RTK_HWREV_8100,	"RTL8100" },
	{ RTK_HWREV_8100E,	"RTL8100E" },
	{ RTK_HWREV_8100E_SPIN2, "RTL8100E 2" },
	{ RTK_HWREV_8101,	"RTL8101" },
	{ RTK_HWREV_8101E,	"RTL8101E" },
	{ RTK_HWREV_8102E,	"RTL8102E" },
	{ RTK_HWREV_8106E,	"RTL8106E" },
	{ RTK_HWREV_8401E,	"RTL8401E" },
	{ RTK_HWREV_8402,	"RTL8402" },
	{ RTK_HWREV_8411,	"RTL8411" },
	{ RTK_HWREV_8411B,	"RTL8411B" },
	{ RTK_HWREV_8102EL,	"RTL8102EL" },
	{ RTK_HWREV_8102EL_SPIN1, "RTL8102EL 1" },
	{ RTK_HWREV_8103E,       "RTL8103E" },
	{ RTK_HWREV_8110S,	"RTL8110S" },
	{ RTK_HWREV_8139CPLUS,	"RTL8139C+" },
	{ RTK_HWREV_8168B_SPIN1, "RTL8168 1" },
	{ RTK_HWREV_8168B_SPIN2, "RTL8168 2" },
	{ RTK_HWREV_8168B_SPIN3, "RTL8168 3" },
	{ RTK_HWREV_8168C,	"RTL8168C/8111C" },
	{ RTK_HWREV_8168C_SPIN2, "RTL8168C/8111C" },
	{ RTK_HWREV_8168CP,	"RTL8168CP/8111CP" },
	{ RTK_HWREV_8168F,	"RTL8168F/8111F" },
	{ RTK_HWREV_8168G,	"RTL8168G/8111G" },
	{ RTK_HWREV_8168GU,	"RTL8168GU/8111GU" },
	{ RTK_HWREV_8168H,	"RTL8168H/8111H" },
	{ RTK_HWREV_8105E,	"RTL8105E" },
	{ RTK_HWREV_8105E_SPIN1, "RTL8105E" },
	{ RTK_HWREV_8168D,	"RTL8168D/8111D" },
	{ RTK_HWREV_8168DP,	"RTL8168DP/8111DP" },
	{ RTK_HWREV_8168E,	"RTL8168E/8111E" },
	{ RTK_HWREV_8168E_VL,	"RTL8168E/8111E-VL" },
	{ RTK_HWREV_8168EP,	"RTL8168EP/8111EP" },
	{ RTK_HWREV_8168FP,	"RTL8168FP/8117" },
	{ RTK_HWREV_8169,	"RTL8169" },
	{ RTK_HWREV_8169_8110SB, "RTL8169/8110SB" },
	{ RTK_HWREV_8169_8110SBL, "RTL8169SBL" },
	{ RTK_HWREV_8169_8110SC, "RTL8169/8110SCd" },
	{ RTK_HWREV_8169_8110SCE, "RTL8169/8110SCe" },
	{ RTK_HWREV_8169S,	"RTL8169S" },

	{ 0, NULL }
};
222 
223 static inline void
224 re_set_bufaddr(struct re_desc *d, bus_addr_t addr)
225 {
226 
227 	d->re_bufaddr_lo = htole32(RE_ADDR_LO(addr));
228 	d->re_bufaddr_hi = htole32(RE_ADDR_HI(addr));
229 }
230 
231 static int
232 re_gmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
233 {
234 	struct rtk_softc *sc = device_private(dev);
235 	uint32_t data;
236 	int i;
237 
238 	if (phy != 7)
239 		return -1;
240 
241 	/* Let the rgephy driver read the GMEDIASTAT register */
242 
243 	if (reg == RTK_GMEDIASTAT) {
244 		*val = CSR_READ_1(sc, RTK_GMEDIASTAT);
245 		return 0;
246 	}
247 
248 	CSR_WRITE_4(sc, RTK_PHYAR, reg << 16);
249 	DELAY(1000);
250 
251 	for (i = 0; i < RTK_TIMEOUT; i++) {
252 		data = CSR_READ_4(sc, RTK_PHYAR);
253 		if (data & RTK_PHYAR_BUSY)
254 			break;
255 		DELAY(100);
256 	}
257 
258 	if (i == RTK_TIMEOUT) {
259 		printf("%s: PHY read failed\n", device_xname(sc->sc_dev));
260 		return ETIMEDOUT;
261 	}
262 
263 	*val = data & RTK_PHYAR_PHYDATA;
264 	return 0;
265 }
266 
267 static int
268 re_gmii_writereg(device_t dev, int phy, int reg, uint16_t val)
269 {
270 	struct rtk_softc *sc = device_private(dev);
271 	uint32_t data;
272 	int i;
273 
274 	CSR_WRITE_4(sc, RTK_PHYAR, (reg << 16) |
275 	    (val & RTK_PHYAR_PHYDATA) | RTK_PHYAR_BUSY);
276 	DELAY(1000);
277 
278 	for (i = 0; i < RTK_TIMEOUT; i++) {
279 		data = CSR_READ_4(sc, RTK_PHYAR);
280 		if (!(data & RTK_PHYAR_BUSY))
281 			break;
282 		DELAY(100);
283 	}
284 
285 	if (i == RTK_TIMEOUT) {
286 		printf("%s: PHY write reg %x <- %hx failed\n",
287 		    device_xname(sc->sc_dev), reg, val);
288 		return ETIMEDOUT;
289 	}
290 
291 	return 0;
292 }
293 
294 static int
295 re_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
296 {
297 	struct rtk_softc *sc = device_private(dev);
298 	uint16_t re8139_reg = 0;
299 	int s, rv = 0;
300 
301 	s = splnet();
302 
303 	if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) {
304 		rv = re_gmii_readreg(dev, phy, reg, val);
305 		splx(s);
306 		return rv;
307 	}
308 
309 	/* Pretend the internal PHY is only at address 0 */
310 	if (phy) {
311 		splx(s);
312 		return -1;
313 	}
314 	switch (reg) {
315 	case MII_BMCR:
316 		re8139_reg = RTK_BMCR;
317 		break;
318 	case MII_BMSR:
319 		re8139_reg = RTK_BMSR;
320 		break;
321 	case MII_ANAR:
322 		re8139_reg = RTK_ANAR;
323 		break;
324 	case MII_ANER:
325 		re8139_reg = RTK_ANER;
326 		break;
327 	case MII_ANLPAR:
328 		re8139_reg = RTK_LPAR;
329 		break;
330 	case MII_PHYIDR1:
331 	case MII_PHYIDR2:
332 		*val = 0;
333 		splx(s);
334 		return 0;
335 	/*
336 	 * Allow the rlphy driver to read the media status
337 	 * register. If we have a link partner which does not
338 	 * support NWAY, this is the register which will tell
339 	 * us the results of parallel detection.
340 	 */
341 	case RTK_MEDIASTAT:
342 		*val = CSR_READ_1(sc, RTK_MEDIASTAT);
343 		splx(s);
344 		return 0;
345 	default:
346 		printf("%s: bad phy register\n", device_xname(sc->sc_dev));
347 		splx(s);
348 		return -1;
349 	}
350 	*val = CSR_READ_2(sc, re8139_reg);
351 	if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0 && re8139_reg == RTK_BMCR) {
352 		/* 8139C+ has different bit layout. */
353 		*val &= ~(BMCR_LOOP | BMCR_ISO);
354 	}
355 	splx(s);
356 	return 0;
357 }
358 
359 static int
360 re_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
361 {
362 	struct rtk_softc *sc = device_private(dev);
363 	uint16_t re8139_reg = 0;
364 	int s, rv;
365 
366 	s = splnet();
367 
368 	if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) {
369 		rv = re_gmii_writereg(dev, phy, reg, val);
370 		splx(s);
371 		return rv;
372 	}
373 
374 	/* Pretend the internal PHY is only at address 0 */
375 	if (phy) {
376 		splx(s);
377 		return -1;
378 	}
379 	switch (reg) {
380 	case MII_BMCR:
381 		re8139_reg = RTK_BMCR;
382 		if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0) {
383 			/* 8139C+ has different bit layout. */
384 			val &= ~(BMCR_LOOP | BMCR_ISO);
385 		}
386 		break;
387 	case MII_BMSR:
388 		re8139_reg = RTK_BMSR;
389 		break;
390 	case MII_ANAR:
391 		re8139_reg = RTK_ANAR;
392 		break;
393 	case MII_ANER:
394 		re8139_reg = RTK_ANER;
395 		break;
396 	case MII_ANLPAR:
397 		re8139_reg = RTK_LPAR;
398 		break;
399 	case MII_PHYIDR1:
400 	case MII_PHYIDR2:
401 		splx(s);
402 		return 0;
403 		break;
404 	default:
405 		printf("%s: bad phy register\n", device_xname(sc->sc_dev));
406 		splx(s);
407 		return -1;
408 	}
409 	CSR_WRITE_2(sc, re8139_reg, val);
410 	splx(s);
411 	return 0;
412 }
413 
/*
 * MII status-change callback.  The MAC needs no reprogramming when
 * the link state changes, so this is intentionally a no-op.
 */
static void
re_miibus_statchg(struct ifnet *ifp)
{
}
420 
421 static void
422 re_reset(struct rtk_softc *sc)
423 {
424 	int i;
425 
426 	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_RESET);
427 
428 	for (i = 0; i < RTK_TIMEOUT; i++) {
429 		DELAY(10);
430 		if ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_RESET) == 0)
431 			break;
432 	}
433 	if (i == RTK_TIMEOUT)
434 		printf("%s: reset never completed!\n",
435 		    device_xname(sc->sc_dev));
436 
437 	/*
438 	 * NB: Realtek-supplied FreeBSD driver does this only for MACFG_3,
439 	 *     but also says "Rtl8169s sigle chip detected".
440 	 */
441 	if ((sc->sc_quirk & RTKQ_MACLDPS) != 0)
442 		CSR_WRITE_1(sc, RTK_LDPS, 1);
443 
444 }
445 
446 /*
447  * The following routine is designed to test for a defect on some
448  * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
449  * lines connected to the bus, however for a 32-bit only card, they
450  * should be pulled high. The result of this defect is that the
451  * NIC will not work right if you plug it into a 64-bit slot: DMA
452  * operations will be done with 64-bit transfers, which will fail
453  * because the 64-bit data lines aren't connected.
454  *
455  * There's no way to work around this (short of talking a soldering
456  * iron to the board), however we can detect it. The method we use
457  * here is to put the NIC into digital loopback mode, set the receiver
458  * to promiscuous mode, and then try to send a frame. We then compare
459  * the frame data we sent to what was received. If the data matches,
460  * then the NIC is working correctly, otherwise we know the user has
461  * a defective NIC which has been mistakenly plugged into a 64-bit PCI
462  * slot. In the latter case, there's no way the NIC can work correctly,
463  * so we print out a message on the console and abort the device attach.
464  */
465 
/*
 * Loopback self-test: send one recognizable frame in digital loopback
 * mode and verify it comes back intact (see the block comment above).
 * Returns 0 if the NIC passes, ENOBUFS/EIO on failure.
 */
int
re_diag(struct rtk_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct mbuf *m0;
	struct ether_header *eh;
	struct re_rxsoft *rxs;
	struct re_desc *cur_rx;
	bus_dmamap_t dmamap;
	uint16_t status;
	uint32_t rxstat;
	int total_len, i, s, error = 0;
	/* Easily recognizable source/destination MACs for the test frame. */
	static const uint8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
	static const uint8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };

	/* Allocate a single mbuf */

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return ENOBUFS;

	/*
	 * Initialize the NIC in test mode. This sets the chip up
	 * so that it can send and receive frames, but performs the
	 * following special functions:
	 * - Puts receiver in promiscuous mode
	 * - Enables digital loopback mode
	 * - Leaves interrupts turned off
	 */

	ifp->if_flags |= IFF_PROMISC;
	sc->re_testmode = 1;
	re_init(ifp);
	/* init/stop/init cycle to make sure the chip is in a known state */
	re_stop(ifp, 0);
	DELAY(100000);
	re_init(ifp);

	/* Put some data in the mbuf */

	eh = mtod(m0, struct ether_header *);
	memcpy(eh->ether_dhost, &dst, ETHER_ADDR_LEN);
	memcpy(eh->ether_shost, &src, ETHER_ADDR_LEN);
	eh->ether_type = htons(ETHERTYPE_IP);
	m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;

	/*
	 * Queue the packet, start transmission.
	 */

	CSR_WRITE_2(sc, RTK_ISR, 0xFFFF);	/* ack any stale interrupts */
	s = splnet();
	IF_ENQUEUE(&ifp->if_snd, m0);
	re_start(ifp);
	splx(s);
	m0 = NULL;	/* ownership passed to the TX path */

	/* Wait for it to propagate through the chip */

	DELAY(100000);
	for (i = 0; i < RTK_TIMEOUT; i++) {
		status = CSR_READ_2(sc, RTK_ISR);
		/* Wait for both TX completion (timer) and RX of the frame. */
		if ((status & (RTK_ISR_TIMEOUT_EXPIRED | RTK_ISR_RX_OK)) ==
		    (RTK_ISR_TIMEOUT_EXPIRED | RTK_ISR_RX_OK))
			break;
		DELAY(10);
	}
	if (i == RTK_TIMEOUT) {
		aprint_error_dev(sc->sc_dev,
		    "diagnostic failed, failed to receive packet "
		    "in loopback mode\n");
		error = EIO;
		goto done;
	}

	/*
	 * The packet should have been dumped into the first
	 * entry in the RX DMA ring. Grab it from there.
	 */

	rxs = &sc->re_ldata.re_rxsoft[0];
	dmamap = rxs->rxs_dmamap;
	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, dmamap);

	/* Take ownership of the received mbuf from the RX slot. */
	m0 = rxs->rxs_mbuf;
	rxs->rxs_mbuf = NULL;
	eh = mtod(m0, struct ether_header *);

	RE_RXDESCSYNC(sc, 0, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	cur_rx = &sc->re_ldata.re_rx_list[0];
	rxstat = le32toh(cur_rx->re_cmdstat);
	total_len = rxstat & sc->re_rxlenmask;

	if (total_len != ETHER_MIN_LEN) {
		aprint_error_dev(sc->sc_dev,
		    "diagnostic failed, received short packet\n");
		error = EIO;
		goto done;
	}

	/* Test that the received packet data matches what we sent. */

	if (memcmp(&eh->ether_dhost, &dst, ETHER_ADDR_LEN) ||
	    memcmp(&eh->ether_shost, &src, ETHER_ADDR_LEN) ||
	    ntohs(eh->ether_type) != ETHERTYPE_IP) {
		aprint_error_dev(sc->sc_dev, "WARNING, DMA FAILURE!\n"
		    "expected TX data: %s/%s/0x%x\n"
		    "received RX data: %s/%s/0x%x\n"
		    "You may have a defective 32-bit NIC plugged "
		    "into a 64-bit PCI slot.\n"
		    "Please re-install the NIC in a 32-bit slot "
		    "for proper operation.\n"
		    "Read the re(4) man page for more details.\n" ,
		    ether_sprintf(dst),  ether_sprintf(src), ETHERTYPE_IP,
		    ether_sprintf(eh->ether_dhost),
		    ether_sprintf(eh->ether_shost), ntohs(eh->ether_type));
		error = EIO;
	}

 done:
	/* Turn interface off, release resources */

	sc->re_testmode = 0;
	ifp->if_flags &= ~IFF_PROMISC;
	re_stop(ifp, 0);
	if (m0 != NULL)
		m_freem(m0);

	return error;
}
597 
598 
599 /*
600  * Attach the interface. Allocate softc structures, do ifmedia
601  * setup and ethernet/BPF attach.
602  */
void
re_attach(struct rtk_softc *sc)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	struct ifnet *ifp;
	struct mii_data *mii = &sc->mii;
	int error = 0, i;
	const struct re_revision *rr;
	const char *re_name = NULL;

	/* Identify the chip and derive per-revision quirk flags. */
	if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) {
		/* Revision of 8169/8169S/8110s in bits 30..26, 23 */
		sc->sc_hwrev = CSR_READ_4(sc, RTK_TXCFG) & RTK_TXCFG_HWREV;

		for (rr = re_revisions; rr->re_name != NULL; rr++) {
			if (rr->re_chipid == sc->sc_hwrev)
				re_name = rr->re_name;
		}

		if (re_name == NULL)
			aprint_normal_dev(sc->sc_dev,
			    "unknown ASIC (0x%04x)\n", sc->sc_hwrev >> 16);
		else
			aprint_normal_dev(sc->sc_dev,
			    "%s (0x%04x)\n", re_name, sc->sc_hwrev >> 16);

		switch (sc->sc_hwrev) {
		case RTK_HWREV_8169:
			sc->sc_quirk |= RTKQ_8169NONS;
			break;
		case RTK_HWREV_8169S:
		case RTK_HWREV_8110S:
		case RTK_HWREV_8169_8110SB:
		case RTK_HWREV_8169_8110SBL:
		case RTK_HWREV_8169_8110SC:
			sc->sc_quirk |= RTKQ_MACLDPS;
			break;
		case RTK_HWREV_8168B_SPIN1:
		case RTK_HWREV_8168B_SPIN2:
		case RTK_HWREV_8168B_SPIN3:
			sc->sc_quirk |= RTKQ_MACSTAT;
			break;
		case RTK_HWREV_8168C:
		case RTK_HWREV_8168C_SPIN2:
		case RTK_HWREV_8168CP:
		case RTK_HWREV_8168D:
		case RTK_HWREV_8168DP:
			sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD |
			    RTKQ_MACSTAT | RTKQ_CMDSTOP;
			/*
			 * From FreeBSD driver:
			 *
			 * These (8168/8111) controllers support jumbo frame
			 * but it seems that enabling it requires touching
			 * additional magic registers. Depending on MAC
			 * revisions some controllers need to disable
			 * checksum offload. So disable jumbo frame until
			 * I have better idea what it really requires to
			 * make it support.
			 * RTL8168C/CP : supports up to 6KB jumbo frame.
			 * RTL8111C/CP : supports up to 9KB jumbo frame.
			 */
			sc->sc_quirk |= RTKQ_NOJUMBO;
			break;
		case RTK_HWREV_8168E:
		case RTK_HWREV_8168H_SPIN1:
			sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD |
			    RTKQ_MACSTAT | RTKQ_CMDSTOP | RTKQ_PHYWAKE_PM |
			    RTKQ_NOJUMBO;
			break;
		case RTK_HWREV_8168H:
		case RTK_HWREV_8168FP:
			sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD |
			    RTKQ_MACSTAT | RTKQ_CMDSTOP | RTKQ_PHYWAKE_PM |
			    RTKQ_NOJUMBO | RTKQ_RXDV_GATED | RTKQ_TXRXEN_LATER;
			break;
		case RTK_HWREV_8168E_VL:
		case RTK_HWREV_8168F:
		case RTK_HWREV_8411:
			sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD |
			    RTKQ_MACSTAT | RTKQ_CMDSTOP | RTKQ_NOJUMBO;
			break;
		case RTK_HWREV_8168EP:
		case RTK_HWREV_8168G:
		case RTK_HWREV_8168G_SPIN1:
		case RTK_HWREV_8168G_SPIN2:
		case RTK_HWREV_8411B:
			sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD |
			    RTKQ_MACSTAT | RTKQ_CMDSTOP | RTKQ_NOJUMBO |
			    RTKQ_RXDV_GATED;
			break;
		case RTK_HWREV_8100E:
		case RTK_HWREV_8100E_SPIN2:
		case RTK_HWREV_8101E:
			sc->sc_quirk |= RTKQ_NOJUMBO;
			break;
		case RTK_HWREV_8102E:
		case RTK_HWREV_8102EL:
		case RTK_HWREV_8102EL_SPIN1:
			sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD |
			    RTKQ_MACSTAT | RTKQ_CMDSTOP | RTKQ_NOJUMBO;
			break;
		case RTK_HWREV_8103E:
			sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD |
			    RTKQ_MACSTAT | RTKQ_CMDSTOP;
			break;
		case RTK_HWREV_8401E:
		case RTK_HWREV_8105E:
		case RTK_HWREV_8105E_SPIN1: /* XXX */
		case RTK_HWREV_8106E:
			sc->sc_quirk |= RTKQ_PHYWAKE_PM |
			    RTKQ_DESCV2 | RTKQ_NOEECMD | RTKQ_MACSTAT |
			    RTKQ_CMDSTOP;
			break;
		case RTK_HWREV_8402:
			sc->sc_quirk |= RTKQ_PHYWAKE_PM |
			    RTKQ_DESCV2 | RTKQ_NOEECMD | RTKQ_MACSTAT |
			    RTKQ_CMDSTOP; /* CMDSTOP_WAIT_TXQ */
			break;
		default:
			aprint_normal_dev(sc->sc_dev, "Use default quirks\n");
			/* assume the latest features */
			sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD;
			sc->sc_quirk |= RTKQ_NOJUMBO;
		}

		/* Set RX length mask */
		sc->re_rxlenmask = RE_RDESC_STAT_GFRAGLEN;
		sc->re_ldata.re_tx_desc_cnt = RE_TX_DESC_CNT_8169;
	} else {
		/* 8139C+: no jumbo frames, smaller rings. */
		sc->sc_quirk |= RTKQ_NOJUMBO;

		/* Set RX length mask */
		sc->re_rxlenmask = RE_RDESC_STAT_FRAGLEN;
		sc->re_ldata.re_tx_desc_cnt = RE_TX_DESC_CNT_8139;
	}

	/* Reset the adapter. */
	re_reset(sc);

	/*
	 * RTL81x9 chips automatically read EEPROM to init MAC address,
	 * and some NAS override its MAC address per own configuration,
	 * so no need to explicitly read EEPROM and set ID registers.
	 */
#ifdef RE_USE_EECMD
	if ((sc->sc_quirk & RTKQ_NOEECMD) != 0) {
		/*
		 * Get station address from ID registers.
		 */
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			eaddr[i] = CSR_READ_1(sc, RTK_IDR0 + i);
	} else {
		uint16_t val;
		int addr_len;

		/*
		 * Get station address from the EEPROM.
		 */
		if (rtk_read_eeprom(sc, RTK_EE_ID, RTK_EEADDR_LEN1) == 0x8129)
			addr_len = RTK_EEADDR_LEN1;
		else
			addr_len = RTK_EEADDR_LEN0;

		/*
		 * Get station address from the EEPROM.
		 */
		for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
			val = rtk_read_eeprom(sc, RTK_EE_EADDR0 + i, addr_len);
			eaddr[(i * 2) + 0] = val & 0xff;
			eaddr[(i * 2) + 1] = val >> 8;
		}
	}
#else
	/*
	 * Get station address from ID registers.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, RTK_IDR0 + i);
#endif

	/* Take PHY out of power down mode. */
	if ((sc->sc_quirk & RTKQ_PHYWAKE_PM) != 0)
		CSR_WRITE_1(sc, RTK_PMCH, CSR_READ_1(sc, RTK_PMCH) | 0x80);

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(eaddr));

	/* Clamp the TX ring so the descriptor array fits in one page. */
	if (sc->re_ldata.re_tx_desc_cnt >
	    PAGE_SIZE / sizeof(struct re_desc)) {
		sc->re_ldata.re_tx_desc_cnt =
		    PAGE_SIZE / sizeof(struct re_desc);
	}

	aprint_verbose_dev(sc->sc_dev, "using %d tx descriptors\n",
	    sc->re_ldata.re_tx_desc_cnt);
	KASSERT(RE_NEXT_TX_DESC(sc, RE_TX_DESC_CNT(sc) - 1) == 0);

	/* Allocate DMA'able memory for the TX ring */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, RE_TX_LIST_SZ(sc),
	    RE_RING_ALIGN, 0, &sc->re_ldata.re_tx_listseg, 1,
	    &sc->re_ldata.re_tx_listnseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't allocate tx listseg, error = %d\n", error);
		goto fail_0;
	}

	/* Load the map for the TX ring. */
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->re_ldata.re_tx_listseg,
	    sc->re_ldata.re_tx_listnseg, RE_TX_LIST_SZ(sc),
	    (void **)&sc->re_ldata.re_tx_list,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't map tx list, error = %d\n", error);
		goto fail_1;
	}
	memset(sc->re_ldata.re_tx_list, 0, RE_TX_LIST_SZ(sc));

	if ((error = bus_dmamap_create(sc->sc_dmat, RE_TX_LIST_SZ(sc), 1,
	    RE_TX_LIST_SZ(sc), 0, 0,
	    &sc->re_ldata.re_tx_list_map)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't create tx list map, error = %d\n", error);
		goto fail_2;
	}


	if ((error = bus_dmamap_load(sc->sc_dmat,
	    sc->re_ldata.re_tx_list_map, sc->re_ldata.re_tx_list,
	    RE_TX_LIST_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't load tx list, error = %d\n", error);
		goto fail_3;
	}

	/* Create DMA maps for TX buffers */
	for (i = 0; i < RE_TX_QLEN; i++) {
		error = bus_dmamap_create(sc->sc_dmat,
		    round_page(IP_MAXPACKET),
		    RE_TX_DESC_CNT(sc), RE_TDESC_CMD_FRAGLEN,
		    0, 0, &sc->re_ldata.re_txq[i].txq_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for TX\n");
			goto fail_4;
		}
	}

	/* Allocate DMA'able memory for the RX ring */
	/* XXX see also a comment about RE_RX_DMAMEM_SZ in rtl81x9var.h */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    RE_RX_DMAMEM_SZ, RE_RING_ALIGN, 0, &sc->re_ldata.re_rx_listseg, 1,
	    &sc->re_ldata.re_rx_listnseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't allocate rx listseg, error = %d\n", error);
		goto fail_4;
	}

	/* Load the map for the RX ring. */
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->re_ldata.re_rx_listseg,
	    sc->re_ldata.re_rx_listnseg, RE_RX_DMAMEM_SZ,
	    (void **)&sc->re_ldata.re_rx_list,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't map rx list, error = %d\n", error);
		goto fail_5;
	}
	memset(sc->re_ldata.re_rx_list, 0, RE_RX_DMAMEM_SZ);

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    RE_RX_DMAMEM_SZ, 1, RE_RX_DMAMEM_SZ, 0, 0,
	    &sc->re_ldata.re_rx_list_map)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't create rx list map, error = %d\n", error);
		goto fail_6;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat,
	    sc->re_ldata.re_rx_list_map, sc->re_ldata.re_rx_list,
	    RE_RX_DMAMEM_SZ, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't load rx list, error = %d\n", error);
		goto fail_7;
	}

	/* Create DMA maps for RX buffers */
	for (i = 0; i < RE_RX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, 0, &sc->re_ldata.re_rxsoft[i].rxs_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for RX\n");
			goto fail_8;
		}
	}

	/*
	 * Record interface as attached. From here, we should not fail.
	 */
	sc->sc_flags |= RTK_ATTACHED;

	/* Fill in the ifnet and capability information. */
	ifp = &sc->ethercom.ec_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = re_ioctl;
	sc->ethercom.ec_capabilities |=
	    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
	ifp->if_start = re_start;
	ifp->if_stop = re_stop;

	/*
	 * IFCAP_CSUM_IPv4_Tx on re(4) is broken for small packets,
	 * so we have a workaround to handle the bug by padding
	 * such packets manually.
	 */
	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
	    IFCAP_TSOv4;

	ifp->if_watchdog = re_watchdog;
	ifp->if_init = re_init;
	ifp->if_snd.ifq_maxlen = RE_IFQ_MAXLEN;
	ifp->if_capenable = ifp->if_capabilities;
	IFQ_SET_READY(&ifp->if_snd);

	callout_init(&sc->rtk_tick_ch, 0);
	callout_setfunc(&sc->rtk_tick_ch, re_tick, sc);

	/* Do MII setup */
	mii->mii_ifp = ifp;
	mii->mii_readreg = re_miibus_readreg;
	mii->mii_writereg = re_miibus_writereg;
	mii->mii_statchg = re_miibus_statchg;
	sc->ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * Call MI attach routine.
	 */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, eaddr);

	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	if (pmf_device_register(sc->sc_dev, NULL, NULL))
		pmf_class_network_register(sc->sc_dev, ifp);
	else
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");

	return;

	/*
	 * Error cleanup ladder: each label undoes the allocations made
	 * before the corresponding failure point, in reverse order.
	 */
 fail_8:
	/* Destroy DMA maps for RX buffers. */
	for (i = 0; i < RE_RX_DESC_CNT; i++)
		if (sc->re_ldata.re_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->re_ldata.re_rxsoft[i].rxs_dmamap);

	/* Free DMA'able memory for the RX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->re_ldata.re_rx_list_map);
 fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->re_ldata.re_rx_list_map);
 fail_6:
	bus_dmamem_unmap(sc->sc_dmat,
	    (void *)sc->re_ldata.re_rx_list, RE_RX_DMAMEM_SZ);
 fail_5:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->re_ldata.re_rx_listseg, sc->re_ldata.re_rx_listnseg);

 fail_4:
	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < RE_TX_QLEN; i++)
		if (sc->re_ldata.re_txq[i].txq_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->re_ldata.re_txq[i].txq_dmamap);

	/* Free DMA'able memory for the TX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->re_ldata.re_tx_list_map);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->re_ldata.re_tx_list_map);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat,
	    (void *)sc->re_ldata.re_tx_list, RE_TX_LIST_SZ(sc));
 fail_1:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->re_ldata.re_tx_listseg, sc->re_ldata.re_tx_listnseg);
 fail_0:
	return;
}
1003 
1004 
1005 /*
1006  * re_activate:
1007  *     Handle device activation/deactivation requests.
1008  */
1009 int
1010 re_activate(device_t self, enum devact act)
1011 {
1012 	struct rtk_softc *sc = device_private(self);
1013 
1014 	switch (act) {
1015 	case DVACT_DEACTIVATE:
1016 		if_deactivate(&sc->ethercom.ec_if);
1017 		return 0;
1018 	default:
1019 		return EOPNOTSUPP;
1020 	}
1021 }
1022 
1023 /*
1024  * re_detach:
1025  *     Detach a rtk interface.
1026  */
int
re_detach(struct rtk_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int i;

	/*
	 * Succeed now if there isn't any work to do.
	 */
	if ((sc->sc_flags & RTK_ATTACHED) == 0)
		return 0;

	/* Unhook our tick handler. */
	callout_stop(&sc->rtk_tick_ch);

	/* Detach all PHYs. */
	mii_detach(&sc->mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Detach from the network stack before tearing down DMA state. */
	rnd_detach_source(&sc->rnd_source);
	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Delete all remaining media. */
	ifmedia_fini(&sc->mii.mii_media);

	/*
	 * The DMA teardown below mirrors the fail_* unwind path of
	 * re_attach(): destroy per-buffer maps, then unload, destroy,
	 * unmap and free each descriptor ring.
	 */

	/* Destroy DMA maps for RX buffers. */
	for (i = 0; i < RE_RX_DESC_CNT; i++)
		if (sc->re_ldata.re_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->re_ldata.re_rxsoft[i].rxs_dmamap);

	/* Free DMA'able memory for the RX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->re_ldata.re_rx_list_map);
	bus_dmamap_destroy(sc->sc_dmat, sc->re_ldata.re_rx_list_map);
	bus_dmamem_unmap(sc->sc_dmat,
	    (void *)sc->re_ldata.re_rx_list, RE_RX_DMAMEM_SZ);
	bus_dmamem_free(sc->sc_dmat,
	    &sc->re_ldata.re_rx_listseg, sc->re_ldata.re_rx_listnseg);

	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < RE_TX_QLEN; i++)
		if (sc->re_ldata.re_txq[i].txq_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->re_ldata.re_txq[i].txq_dmamap);

	/* Free DMA'able memory for the TX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->re_ldata.re_tx_list_map);
	bus_dmamap_destroy(sc->sc_dmat, sc->re_ldata.re_tx_list_map);
	bus_dmamem_unmap(sc->sc_dmat,
	    (void *)sc->re_ldata.re_tx_list, RE_TX_LIST_SZ(sc));
	bus_dmamem_free(sc->sc_dmat,
	    &sc->re_ldata.re_tx_listseg, sc->re_ldata.re_tx_listnseg);

	pmf_device_deregister(sc->sc_dev);

	/* we don't want to run again */
	sc->sc_flags &= ~RTK_ATTACHED;

	return 0;
}
1087 
1088 /*
1089  * re_enable:
1090  *     Enable the RTL81X9 chip.
1091  */
1092 static int
1093 re_enable(struct rtk_softc *sc)
1094 {
1095 
1096 	if (RTK_IS_ENABLED(sc) == 0 && sc->sc_enable != NULL) {
1097 		if ((*sc->sc_enable)(sc) != 0) {
1098 			printf("%s: device enable failed\n",
1099 			    device_xname(sc->sc_dev));
1100 			return EIO;
1101 		}
1102 		sc->sc_flags |= RTK_ENABLED;
1103 	}
1104 	return 0;
1105 }
1106 
1107 /*
1108  * re_disable:
1109  *     Disable the RTL81X9 chip.
1110  */
1111 static void
1112 re_disable(struct rtk_softc *sc)
1113 {
1114 
1115 	if (RTK_IS_ENABLED(sc) && sc->sc_disable != NULL) {
1116 		(*sc->sc_disable)(sc);
1117 		sc->sc_flags &= ~RTK_ENABLED;
1118 	}
1119 }
1120 
/*
 * re_newbuf:
 *     Attach an mbuf cluster to RX descriptor "idx" and hand the
 *     descriptor to the chip.  If "m" is NULL a fresh cluster is
 *     allocated; otherwise the caller's mbuf is recycled in place.
 *     Returns 0 on success, ENOBUFS if mbuf/cluster allocation
 *     failed, or ENOMEM if the DMA map could not be loaded.
 */
static int
re_newbuf(struct rtk_softc *sc, int idx, struct mbuf *m)
{
	struct mbuf *n = NULL;
	bus_dmamap_t map;
	struct re_desc *d;
	struct re_rxsoft *rxs;
	uint32_t cmdstat;
	int error;

	if (m == NULL) {
		/* Allocate a new packet header mbuf plus cluster. */
		MGETHDR(n, M_DONTWAIT, MT_DATA);
		if (n == NULL)
			return ENOBUFS;

		MCLAIM(n, &sc->ethercom.ec_rx_mowner);
		MCLGET(n, M_DONTWAIT);
		if ((n->m_flags & M_EXT) == 0) {
			m_freem(n);
			return ENOBUFS;
		}
		m = n;
	} else
		/* Recycled mbuf: rewind the data pointer to the cluster. */
		m->m_data = m->m_ext.ext_buf;

	/*
	 * Initialize mbuf length fields and fixup
	 * alignment so that the frame payload is
	 * longword aligned.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES - RE_ETHER_ALIGN;
	m->m_data += RE_ETHER_ALIGN;

	rxs = &sc->re_ldata.re_rxsoft[idx];
	map = rxs->rxs_dmamap;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);

	if (error)
		goto out;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	d = &sc->re_ldata.re_rx_list[idx];
#ifdef DIAGNOSTIC
	/* The descriptor we are about to overwrite must be ours. */
	RE_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	cmdstat = le32toh(d->re_cmdstat);
	RE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
	if (cmdstat & RE_RDESC_STAT_OWN) {
		panic("%s: tried to map busy RX descriptor",
		    device_xname(sc->sc_dev));
	}
#endif

	rxs->rxs_mbuf = m;

	d->re_vlanctl = 0;
	cmdstat = map->dm_segs[0].ds_len;
	if (idx == (RE_RX_DESC_CNT - 1))
		cmdstat |= RE_RDESC_CMD_EOR;
	re_set_bufaddr(d, map->dm_segs[0].ds_addr);
	/*
	 * Write the descriptor first without OWN and sync it, then set
	 * OWN in a second write/sync, so the chip never sees a
	 * half-initialized descriptor that it already owns.
	 */
	d->re_cmdstat = htole32(cmdstat);
	RE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	cmdstat |= RE_RDESC_CMD_OWN;
	d->re_cmdstat = htole32(cmdstat);
	RE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return 0;
 out:
	/*
	 * DMA load failed: free the mbuf only if we allocated it here;
	 * a recycled mbuf remains owned by the caller.
	 */
	if (n != NULL)
		m_freem(n);
	return ENOMEM;
}
1195 
1196 static int
1197 re_tx_list_init(struct rtk_softc *sc)
1198 {
1199 	int i;
1200 
1201 	memset(sc->re_ldata.re_tx_list, 0, RE_TX_LIST_SZ(sc));
1202 	for (i = 0; i < RE_TX_QLEN; i++) {
1203 		sc->re_ldata.re_txq[i].txq_mbuf = NULL;
1204 	}
1205 
1206 	bus_dmamap_sync(sc->sc_dmat,
1207 	    sc->re_ldata.re_tx_list_map, 0,
1208 	    sc->re_ldata.re_tx_list_map->dm_mapsize,
1209 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1210 	sc->re_ldata.re_txq_prodidx = 0;
1211 	sc->re_ldata.re_txq_considx = 0;
1212 	sc->re_ldata.re_txq_free = RE_TX_QLEN;
1213 	sc->re_ldata.re_tx_free = RE_TX_DESC_CNT(sc);
1214 	sc->re_ldata.re_tx_nextfree = 0;
1215 
1216 	return 0;
1217 }
1218 
1219 static int
1220 re_rx_list_init(struct rtk_softc *sc)
1221 {
1222 	int i;
1223 
1224 	memset(sc->re_ldata.re_rx_list, 0, RE_RX_LIST_SZ);
1225 
1226 	for (i = 0; i < RE_RX_DESC_CNT; i++) {
1227 		if (re_newbuf(sc, i, NULL) == ENOBUFS)
1228 			return ENOBUFS;
1229 	}
1230 
1231 	sc->re_ldata.re_rx_prodidx = 0;
1232 	sc->re_head = sc->re_tail = NULL;
1233 
1234 	return 0;
1235 }
1236 
1237 /*
1238  * RX handler for C+ and 8169. For the gigE chips, we support
1239  * the reception of jumbo frames that have been fragmented
1240  * across multiple 2K mbuf cluster buffers.
1241  */
static void
re_rxeof(struct rtk_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	int i, total_len;
	struct re_desc *cur_rx;
	struct re_rxsoft *rxs;
	uint32_t rxstat, rxvlan;

	ifp = &sc->ethercom.ec_if;

	/* Walk the ring from the last produced slot until the chip
	 * still owns a descriptor. */
	for (i = sc->re_ldata.re_rx_prodidx;; i = RE_NEXT_RX_DESC(sc, i)) {
		cur_rx = &sc->re_ldata.re_rx_list[i];
		RE_RXDESCSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		rxstat = le32toh(cur_rx->re_cmdstat);
		rxvlan = le32toh(cur_rx->re_vlanctl);
		RE_RXDESCSYNC(sc, i, BUS_DMASYNC_PREREAD);
		if ((rxstat & RE_RDESC_STAT_OWN) != 0) {
			/* Chip still owns this descriptor; we're done. */
			break;
		}
		total_len = rxstat & sc->re_rxlenmask;
		rxs = &sc->re_ldata.re_rxsoft[i];
		m = rxs->rxs_mbuf;

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->sc_dmat,
		    rxs->rxs_dmamap, 0, rxs->rxs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

		if ((rxstat & RE_RDESC_STAT_EOF) == 0) {
			/*
			 * Not the last fragment of a multi-buffer frame:
			 * append it to the re_head/re_tail chain and
			 * replenish this slot.
			 */
			m->m_len = MCLBYTES - RE_ETHER_ALIGN;
			if (sc->re_head == NULL)
				sc->re_head = sc->re_tail = m;
			else {
				m_remove_pkthdr(m);
				sc->re_tail->m_next = m;
				sc->re_tail = m;
			}
			re_newbuf(sc, i, NULL);
			continue;
		}

		/*
		 * NOTE: for the 8139C+, the frame length field
		 * is always 12 bits in size, but for the gigE chips,
		 * it is 13 bits (since the max RX frame length is 16K).
		 * Unfortunately, all 32 bits in the status word
		 * were already used, so to make room for the extra
		 * length bit, RealTek took out the 'frame alignment
		 * error' bit and shifted the other status bits
		 * over one slot. The OWN, EOR, FS and LS bits are
		 * still in the same places. We have already extracted
		 * the frame length and checked the OWN bit, so rather
		 * than using an alternate bit mapping, we shift the
		 * status bits one space to the right so we can evaluate
		 * them using the 8169 status as though it was in the
		 * same format as that of the 8139C+.
		 */
		if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0)
			rxstat >>= 1;

		if (__predict_false((rxstat & RE_RDESC_STAT_RXERRSUM) != 0)) {
#ifdef RE_DEBUG
			printf("%s: RX error (rxstat = 0x%08x)",
			    device_xname(sc->sc_dev), rxstat);
			if (rxstat & RE_RDESC_STAT_FRALIGN)
				printf(", frame alignment error");
			if (rxstat & RE_RDESC_STAT_BUFOFLOW)
				printf(", out of buffer space");
			if (rxstat & RE_RDESC_STAT_FIFOOFLOW)
				printf(", FIFO overrun");
			if (rxstat & RE_RDESC_STAT_GIANT)
				printf(", giant packet");
			if (rxstat & RE_RDESC_STAT_RUNT)
				printf(", runt packet");
			if (rxstat & RE_RDESC_STAT_CRCERR)
				printf(", CRC error");
			printf("\n");
#endif
			if_statinc(ifp, if_ierrors);
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->re_head != NULL) {
				m_freem(sc->re_head);
				sc->re_head = sc->re_tail = NULL;
			}
			/* Recycle the current mbuf into this slot. */
			re_newbuf(sc, i, m);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (__predict_false(re_newbuf(sc, i, NULL) != 0)) {
			if_statinc(ifp, if_ierrors);
			if (sc->re_head != NULL) {
				m_freem(sc->re_head);
				sc->re_head = sc->re_tail = NULL;
			}
			re_newbuf(sc, i, m);
			continue;
		}

		if (sc->re_head != NULL) {
			/*
			 * Last fragment of a multi-buffer frame: compute
			 * how much payload lives in this final buffer and
			 * splice the chain together.
			 */
			m->m_len = total_len % (MCLBYTES - RE_ETHER_ALIGN);
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->re_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m_remove_pkthdr(m);
				sc->re_tail->m_next = m;
			}
			m = sc->re_head;
			sc->re_head = sc->re_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			/* Single-buffer frame; strip the trailing CRC. */
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		m_set_rcvif(m, ifp);

		/* Do RX checksumming */
		if ((sc->sc_quirk & RTKQ_DESCV2) == 0) {
			/* Check IP header checksum */
			if ((rxstat & RE_RDESC_STAT_PROTOID) != 0) {
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				if (rxstat & RE_RDESC_STAT_IPSUMBAD)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;

				/* Check TCP/UDP checksum */
				if (RE_TCPPKT(rxstat)) {
					m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
					if (rxstat & RE_RDESC_STAT_TCPSUMBAD)
						m->m_pkthdr.csum_flags |=
						    M_CSUM_TCP_UDP_BAD;
				} else if (RE_UDPPKT(rxstat)) {
					m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
					if (rxstat & RE_RDESC_STAT_UDPSUMBAD) {
						/*
						 * XXX: 8139C+ thinks UDP csum
						 * 0xFFFF is bad, force software
						 * calculation.
						 */
						if (sc->sc_quirk & RTKQ_8139CPLUS)
							m->m_pkthdr.csum_flags
							    &= ~M_CSUM_UDPv4;
						else
							m->m_pkthdr.csum_flags
							    |= M_CSUM_TCP_UDP_BAD;
					}
				}
			}
		} else {
			/* Check IPv4 header checksum */
			if ((rxvlan & RE_RDESC_VLANCTL_IPV4) != 0) {
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				if (rxstat & RE_RDESC_STAT_IPSUMBAD)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;

				/* Check TCPv4/UDPv4 checksum */
				if (RE_TCPPKT(rxstat)) {
					m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
					if (rxstat & RE_RDESC_STAT_TCPSUMBAD)
						m->m_pkthdr.csum_flags |=
						    M_CSUM_TCP_UDP_BAD;
				} else if (RE_UDPPKT(rxstat)) {
					m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
					if (rxstat & RE_RDESC_STAT_UDPSUMBAD)
						m->m_pkthdr.csum_flags |=
						    M_CSUM_TCP_UDP_BAD;
				}
			}
			/* XXX Check TCPv6/UDPv6 checksum? */
		}

		if (rxvlan & RE_RDESC_VLANCTL_TAG) {
			vlan_set_tag(m,
			     bswap16(rxvlan & RE_RDESC_VLANCTL_DATA));
		}
		/* Hand the completed frame to the network stack. */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	/* Remember where to resume on the next interrupt. */
	sc->re_ldata.re_rx_prodidx = i;
}
1444 
/*
 * re_txeof:
 *     Reclaim completed TX queue entries: free their mbufs, unload
 *     the DMA maps, update interface statistics and return the
 *     descriptors to the free pool.
 */
static void
re_txeof(struct rtk_softc *sc)
{
	struct ifnet *ifp;
	struct re_txq *txq;
	uint32_t txstat;
	int idx, descidx;

	ifp = &sc->ethercom.ec_if;

	/* Walk from the consumer index while queue slots are in use. */
	for (idx = sc->re_ldata.re_txq_considx;
	    sc->re_ldata.re_txq_free < RE_TX_QLEN;
	    idx = RE_NEXT_TXQ(sc, idx), sc->re_ldata.re_txq_free++) {
		txq = &sc->re_ldata.re_txq[idx];
		KASSERT(txq->txq_mbuf != NULL);

		/* Check only the last descriptor of the packet. */
		descidx = txq->txq_descidx;
		RE_TXDESCSYNC(sc, descidx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		txstat =
		    le32toh(sc->re_ldata.re_tx_list[descidx].re_cmdstat);
		RE_TXDESCSYNC(sc, descidx, BUS_DMASYNC_PREREAD);
		KASSERT((txstat & RE_TDESC_CMD_EOF) != 0);
		if (txstat & RE_TDESC_CMD_OWN) {
			/* Chip hasn't finished with this packet yet. */
			break;
		}

		sc->re_ldata.re_tx_free += txq->txq_nsegs;
		KASSERT(sc->re_ldata.re_tx_free <= RE_TX_DESC_CNT(sc));
		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap,
		    0, txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
		if (txstat & (RE_TDESC_STAT_EXCESSCOL | RE_TDESC_STAT_COLCNT))
			if_statinc_ref(nsr, if_collisions);
		if (txstat & RE_TDESC_STAT_TXERRSUM)
			if_statinc_ref(nsr, if_oerrors);
		else
			if_statinc_ref(nsr, if_opackets);
		IF_STAT_PUTREF(ifp);
	}

	sc->re_ldata.re_txq_considx = idx;

	if (sc->re_ldata.re_txq_free > RE_NTXDESC_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * If not all descriptors have been reaped yet,
	 * reload the timer so that we will eventually get another
	 * interrupt that will cause us to re-enter this routine.
	 * This is done in case the transmitter has gone idle.
	 */
	if (sc->re_ldata.re_txq_free < RE_TX_QLEN) {
		if ((sc->sc_quirk & RTKQ_IM_HW) == 0)
			CSR_WRITE_4(sc, RTK_TIMERCNT, 1);
		if ((sc->sc_quirk & RTKQ_PCIE) != 0) {
			/*
			 * Some chips will ignore a second TX request
			 * issued while an existing transmission is in
			 * progress. If the transmitter goes idle but
			 * there are still packets waiting to be sent,
			 * we need to restart the channel here to flush
			 * them out. This only seems to be required with
			 * the PCIe devices.
			 */
			CSR_WRITE_1(sc, RTK_GTXSTART, RTK_TXSTART_START);
		}
	} else
		ifp->if_timer = 0;
}
1519 
/*
 * re_tick:
 *     Periodic timer callback: drive the MII state machine and
 *     reschedule ourselves one second (hz ticks) later.
 */
static void
re_tick(void *arg)
{
	struct rtk_softc *sc = arg;
	int s;

	/* XXX: just return for 8169S/8110S with rev 2 or newer phy */
	s = splnet();

	mii_tick(&sc->mii);
	splx(s);

	callout_schedule(&sc->rtk_tick_ch, hz);
}
1534 
/*
 * re_intr:
 *     Interrupt handler.  Acknowledges and dispatches all pending
 *     interrupt sources, then schedules deferred transmit if any
 *     work was done.  Returns nonzero iff the interrupt was ours.
 */
int
re_intr(void *arg)
{
	struct rtk_softc *sc = arg;
	struct ifnet *ifp;
	uint16_t status;
	int handled = 0;

	/* Ignore interrupts while the device is powered down. */
	if (!device_has_power(sc->sc_dev))
		return 0;

	ifp = &sc->ethercom.ec_if;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	/* The set of interesting interrupt bits depends on whether
	 * hardware interrupt moderation is in use. */
	const uint16_t status_mask = (sc->sc_quirk & RTKQ_IM_HW) ?
	    RTK_INTRS_IM_HW : RTK_INTRS_CPLUS;

	for (;;) {

		status = CSR_READ_2(sc, RTK_ISR);
		/* If the card has gone away the read returns 0xffff. */
		if (status == 0xffff)
			break;
		if (status) {
			handled = 1;
			/* Ack everything we saw, including bits we
			 * don't act on. */
			CSR_WRITE_2(sc, RTK_ISR, status);
		}

		if ((status & status_mask) == 0)
			break;

		if (status & (RTK_ISR_RX_OK | RTK_ISR_RX_ERR))
			re_rxeof(sc);

		if (status & (RTK_ISR_TIMEOUT_EXPIRED | RTK_ISR_TX_ERR |
		    RTK_ISR_TX_DESC_UNAVAIL | RTK_ISR_TX_OK))
			re_txeof(sc);

		if (status & RTK_ISR_SYSTEM_ERR) {
			/* Fatal error: reinitialize the whole chip. */
			re_init(ifp);
		}

		if (status & RTK_ISR_LINKCHG) {
			callout_stop(&sc->rtk_tick_ch);
			re_tick(sc);
		}
	}

	if (handled)
		if_schedule_deferred_start(ifp);

	/* Feed the interrupt status into the entropy pool. */
	rnd_add_uint32(&sc->rnd_source, status);

	return handled;
}
1592 
1593 
1594 
1595 /*
1596  * Main transmit routine for C+ and gigE NICs.
1597  */
1598 
static void
re_start(struct ifnet *ifp)
{
	struct rtk_softc *sc;
	struct mbuf *m;
	bus_dmamap_t map;
	struct re_txq *txq;
	struct re_desc *d;
	uint32_t cmdstat, re_flags, vlanctl;
	int ofree, idx, error, nsegs, seg;
	int startdesc, curdesc, lastdesc;
	bool pad;

	sc = ifp->if_softc;
	/* Remember the free count so we can tell if we queued anything. */
	ofree = sc->re_ldata.re_txq_free;

	for (idx = sc->re_ldata.re_txq_prodidx;; idx = RE_NEXT_TXQ(sc, idx)) {

		/* Peek only; dequeue after we know the packet fits. */
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if (sc->re_ldata.re_txq_free == 0 ||
		    sc->re_ldata.re_tx_free == 0) {
			/* no more free slots left */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * Set up checksum offload. Note: checksum offload bits must
		 * appear in all descriptors of a multi-descriptor transmit
		 * attempt. (This is according to testing done with an 8169
		 * chip. I'm not sure if this is a requirement or a bug.)
		 */

		vlanctl = 0;
		if ((m->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0) {
			/* TSO: the MSS field location depends on the
			 * descriptor format generation. */
			uint32_t segsz = m->m_pkthdr.segsz;

			if ((sc->sc_quirk & RTKQ_DESCV2) == 0) {
				re_flags = RE_TDESC_CMD_LGSEND |
				    (segsz << RE_TDESC_CMD_MSSVAL_SHIFT);
			} else {
				re_flags = RE_TDESC_CMD_LGSEND_V4;
				vlanctl |=
				    (segsz << RE_TDESC_VLANCTL_MSSVAL_SHIFT);
			}
		} else {
			/*
			 * set RE_TDESC_CMD_IPCSUM if any checksum offloading
			 * is requested.  otherwise, RE_TDESC_CMD_TCPCSUM/
			 * RE_TDESC_CMD_UDPCSUM doesn't make effects.
			 */
			re_flags = 0;
			if ((m->m_pkthdr.csum_flags &
			    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4))
			    != 0) {
				if ((sc->sc_quirk & RTKQ_DESCV2) == 0) {
					re_flags |= RE_TDESC_CMD_IPCSUM;
					if (m->m_pkthdr.csum_flags &
					    M_CSUM_TCPv4) {
						re_flags |=
						    RE_TDESC_CMD_TCPCSUM;
					} else if (m->m_pkthdr.csum_flags &
					    M_CSUM_UDPv4) {
						re_flags |=
						    RE_TDESC_CMD_UDPCSUM;
					}
				} else {
					vlanctl |= RE_TDESC_VLANCTL_IPCSUM;
					if (m->m_pkthdr.csum_flags &
					    M_CSUM_TCPv4) {
						vlanctl |=
						    RE_TDESC_VLANCTL_TCPCSUM;
					} else if (m->m_pkthdr.csum_flags &
					    M_CSUM_UDPv4) {
						vlanctl |=
						    RE_TDESC_VLANCTL_UDPCSUM;
					}
				}
			}
		}

		txq = &sc->re_ldata.re_txq[idx];
		map = txq->txq_dmamap;
		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);

		if (__predict_false(error)) {
			/* XXX try to defrag if EFBIG? */
			printf("%s: can't map mbuf (error %d)\n",
			    device_xname(sc->sc_dev), error);

			/* Unmappable packet: drop it and keep going. */
			IFQ_DEQUEUE(&ifp->if_snd, m);
			m_freem(m);
			if_statinc(ifp, if_oerrors);
			continue;
		}

		nsegs = map->dm_nsegs;
		pad = false;
		/*
		 * Workaround (see re_attach): IPv4 TX checksumming is
		 * broken for short packets on the V1 descriptor format,
		 * so such packets get an extra pad descriptor below.
		 */
		if (__predict_false(m->m_pkthdr.len <= RE_IP4CSUMTX_PADLEN &&
		    (re_flags & RE_TDESC_CMD_IPCSUM) != 0 &&
		    (sc->sc_quirk & RTKQ_DESCV2) == 0)) {
			pad = true;
			nsegs++;
		}

		if (nsegs > sc->re_ldata.re_tx_free) {
			/*
			 * Not enough free descriptors to transmit this packet.
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, map);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

		/*
		 * Make sure that the caches are synchronized before we
		 * ask the chip to start DMA for the packet data.
		 */
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Set up hardware VLAN tagging. Note: vlan tag info must
		 * appear in all descriptors of a multi-descriptor
		 * transmission attempt.
		 */
		if (vlan_has_tag(m))
			vlanctl |= bswap16(vlan_get_tag(m)) |
			    RE_TDESC_VLANCTL_TAG;

		/*
		 * Map the segment array into descriptors.
		 * Note that we set the start-of-frame and
		 * end-of-frame markers for either TX or RX,
		 * but they really only have meaning in the TX case.
		 * (In the RX case, it's the chip that tells us
		 *  where packets begin and end.)
		 * We also keep track of the end of the ring
		 * and set the end-of-ring bits as needed,
		 * and we set the ownership bits in all except
		 * the very first descriptor. (The caller will
		 * set this descriptor later when it start
		 * transmission or reception.)
		 */
		curdesc = startdesc = sc->re_ldata.re_tx_nextfree;
		lastdesc = -1;
		for (seg = 0; seg < map->dm_nsegs;
		    seg++, curdesc = RE_NEXT_TX_DESC(sc, curdesc)) {
			d = &sc->re_ldata.re_tx_list[curdesc];
#ifdef DIAGNOSTIC
			RE_TXDESCSYNC(sc, curdesc,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			cmdstat = le32toh(d->re_cmdstat);
			RE_TXDESCSYNC(sc, curdesc, BUS_DMASYNC_PREREAD);
			if (cmdstat & RE_TDESC_STAT_OWN) {
				panic("%s: tried to map busy TX descriptor",
				    device_xname(sc->sc_dev));
			}
#endif

			d->re_vlanctl = htole32(vlanctl);
			re_set_bufaddr(d, map->dm_segs[seg].ds_addr);
			cmdstat = re_flags | map->dm_segs[seg].ds_len;
			if (seg == 0)
				cmdstat |= RE_TDESC_CMD_SOF;
			else
				cmdstat |= RE_TDESC_CMD_OWN;
			if (curdesc == (RE_TX_DESC_CNT(sc) - 1))
				cmdstat |= RE_TDESC_CMD_EOR;
			if (seg == nsegs - 1) {
				cmdstat |= RE_TDESC_CMD_EOF;
				lastdesc = curdesc;
			}
			d->re_cmdstat = htole32(cmdstat);
			RE_TXDESCSYNC(sc, curdesc,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
		if (__predict_false(pad)) {
			/* Extra descriptor pointing at the pad buffer to
			 * bring the short frame up to the minimum length. */
			d = &sc->re_ldata.re_tx_list[curdesc];
			d->re_vlanctl = htole32(vlanctl);
			re_set_bufaddr(d, RE_TXPADDADDR(sc));
			cmdstat = re_flags |
			    RE_TDESC_CMD_OWN | RE_TDESC_CMD_EOF |
			    (RE_IP4CSUMTX_PADLEN + 1 - m->m_pkthdr.len);
			if (curdesc == (RE_TX_DESC_CNT(sc) - 1))
				cmdstat |= RE_TDESC_CMD_EOR;
			d->re_cmdstat = htole32(cmdstat);
			RE_TXDESCSYNC(sc, curdesc,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			lastdesc = curdesc;
			curdesc = RE_NEXT_TX_DESC(sc, curdesc);
		}
		KASSERT(lastdesc != -1);

		/* Transfer ownership of packet to the chip. */

		sc->re_ldata.re_tx_list[startdesc].re_cmdstat |=
		    htole32(RE_TDESC_CMD_OWN);
		RE_TXDESCSYNC(sc, startdesc,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* update info of TX queue and descriptors */
		txq->txq_mbuf = m;
		txq->txq_descidx = lastdesc;
		txq->txq_nsegs = nsegs;

		sc->re_ldata.re_txq_free--;
		sc->re_ldata.re_tx_free -= nsegs;
		sc->re_ldata.re_tx_nextfree = curdesc;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		bpf_mtap(ifp, m, BPF_D_OUT);
	}

	if (sc->re_ldata.re_txq_free < ofree) {
		/*
		 * TX packets are enqueued.
		 */
		sc->re_ldata.re_txq_prodidx = idx;

		/*
		 * Start the transmitter to poll.
		 *
		 * RealTek put the TX poll request register in a different
		 * location on the 8169 gigE chip. I don't know why.
		 */
		if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0)
			CSR_WRITE_1(sc, RTK_TXSTART, RTK_TXSTART_START);
		else
			CSR_WRITE_1(sc, RTK_GTXSTART, RTK_TXSTART_START);

		if ((sc->sc_quirk & RTKQ_IM_HW) == 0) {
			/*
			 * Use the countdown timer for interrupt moderation.
			 * 'TX done' interrupts are disabled. Instead, we reset
			 * the countdown timer, which will begin counting until
			 * it hits the value in the TIMERINT register, and then
			 * trigger an interrupt. Each time we write to the
			 * TIMERCNT register, the timer count is reset to 0.
			 */
			CSR_WRITE_4(sc, RTK_TIMERCNT, 1);
		}

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}
}
1857 
1858 static int
1859 re_init(struct ifnet *ifp)
1860 {
1861 	struct rtk_softc *sc = ifp->if_softc;
1862 	uint32_t rxcfg = 0;
1863 	uint16_t cfg;
1864 	int error;
1865 #ifdef RE_USE_EECMD
1866 	const uint8_t *enaddr;
1867 	uint32_t reg;
1868 #endif
1869 
1870 	if ((error = re_enable(sc)) != 0)
1871 		goto out;
1872 
1873 	/*
1874 	 * Cancel pending I/O and free all RX/TX buffers.
1875 	 */
1876 	re_stop(ifp, 0);
1877 
1878 	re_reset(sc);
1879 
1880 	/*
1881 	 * Enable C+ RX and TX mode, as well as VLAN stripping and
1882 	 * RX checksum offload. We must configure the C+ register
1883 	 * before all others.
1884 	 */
1885 	cfg = RE_CPLUSCMD_PCI_MRW;
1886 
1887 	/*
1888 	 * XXX: For old 8169 set bit 14.
1889 	 *      For 8169S/8110S and above, do not set bit 14.
1890 	 */
1891 	if ((sc->sc_quirk & RTKQ_8169NONS) != 0)
1892 		cfg |= (0x1 << 14);
1893 
1894 	if ((sc->ethercom.ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0)
1895 		cfg |= RE_CPLUSCMD_VLANSTRIP;
1896 	if ((ifp->if_capenable & (IFCAP_CSUM_IPv4_Rx |
1897 	     IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) != 0)
1898 		cfg |= RE_CPLUSCMD_RXCSUM_ENB;
1899 	if ((sc->sc_quirk & RTKQ_MACSTAT) != 0) {
1900 		cfg |= RE_CPLUSCMD_MACSTAT_DIS;
1901 		cfg |= RE_CPLUSCMD_TXENB;
1902 	} else
1903 		cfg |= RE_CPLUSCMD_RXENB | RE_CPLUSCMD_TXENB;
1904 
1905 	CSR_WRITE_2(sc, RTK_CPLUS_CMD, cfg);
1906 
1907 	/* XXX: from Realtek-supplied Linux driver. Wholly undocumented. */
1908 	if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) {
1909 		if ((sc->sc_quirk & RTKQ_IM_HW) == 0) {
1910 			CSR_WRITE_2(sc, RTK_IM, 0x0000);
1911 		} else {
1912 			CSR_WRITE_2(sc, RTK_IM, 0x5151);
1913 		}
1914 	}
1915 
1916 	DELAY(10000);
1917 
1918 #ifdef RE_USE_EECMD
1919 	/*
1920 	 * Init our MAC address.  Even though the chipset
1921 	 * documentation doesn't mention it, we need to enter "Config
1922 	 * register write enable" mode to modify the ID registers.
1923 	 */
1924 	CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_WRITECFG);
1925 	enaddr = CLLADDR(ifp->if_sadl);
1926 	reg = enaddr[0] | (enaddr[1] << 8) |
1927 	    (enaddr[2] << 16) | (enaddr[3] << 24);
1928 	CSR_WRITE_4(sc, RTK_IDR0, reg);
1929 	reg = enaddr[4] | (enaddr[5] << 8);
1930 	CSR_WRITE_4(sc, RTK_IDR4, reg);
1931 	CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_OFF);
1932 #endif
1933 
1934 	/*
1935 	 * For C+ mode, initialize the RX descriptors and mbufs.
1936 	 */
1937 	re_rx_list_init(sc);
1938 	re_tx_list_init(sc);
1939 
1940 	/*
1941 	 * Load the addresses of the RX and TX lists into the chip.
1942 	 */
1943 	CSR_WRITE_4(sc, RTK_RXLIST_ADDR_HI,
1944 	    RE_ADDR_HI(sc->re_ldata.re_rx_list_map->dm_segs[0].ds_addr));
1945 	CSR_WRITE_4(sc, RTK_RXLIST_ADDR_LO,
1946 	    RE_ADDR_LO(sc->re_ldata.re_rx_list_map->dm_segs[0].ds_addr));
1947 
1948 	CSR_WRITE_4(sc, RTK_TXLIST_ADDR_HI,
1949 	    RE_ADDR_HI(sc->re_ldata.re_tx_list_map->dm_segs[0].ds_addr));
1950 	CSR_WRITE_4(sc, RTK_TXLIST_ADDR_LO,
1951 	    RE_ADDR_LO(sc->re_ldata.re_tx_list_map->dm_segs[0].ds_addr));
1952 
1953 	if (sc->sc_quirk & RTKQ_RXDV_GATED) {
1954 		CSR_WRITE_4(sc, RTK_MISC,
1955 		    CSR_READ_4(sc, RTK_MISC) & ~RTK_MISC_RXDV_GATED_EN);
1956 	}
1957 
1958 	/*
1959 	 * Enable transmit and receive.
1960 	 */
1961 	if ((sc->sc_quirk & RTKQ_TXRXEN_LATER) == 0)
1962 		CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB);
1963 
1964 	/*
1965 	 * Set the initial TX and RX configuration.
1966 	 */
1967 	if (sc->re_testmode && (sc->sc_quirk & RTKQ_8169NONS) != 0) {
1968 		/* test mode is needed only for old 8169 */
1969 		CSR_WRITE_4(sc, RTK_TXCFG,
1970 		    RE_TXCFG_CONFIG | RTK_LOOPTEST_ON);
1971 	} else
1972 		CSR_WRITE_4(sc, RTK_TXCFG, RE_TXCFG_CONFIG);
1973 
1974 	CSR_WRITE_1(sc, RTK_EARLY_TX_THRESH, 16);
1975 
1976 	CSR_WRITE_4(sc, RTK_RXCFG, RE_RXCFG_CONFIG);
1977 
1978 	/* Set the individual bit to receive frames for this host only. */
1979 	rxcfg = CSR_READ_4(sc, RTK_RXCFG);
1980 	rxcfg |= RTK_RXCFG_RX_INDIV;
1981 
1982 	/* If we want promiscuous mode, set the allframes bit. */
1983 	if (ifp->if_flags & IFF_PROMISC)
1984 		rxcfg |= RTK_RXCFG_RX_ALLPHYS;
1985 	else
1986 		rxcfg &= ~RTK_RXCFG_RX_ALLPHYS;
1987 	CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1988 
1989 	/*
1990 	 * Set capture broadcast bit to capture broadcast frames.
1991 	 */
1992 	if (ifp->if_flags & IFF_BROADCAST)
1993 		rxcfg |= RTK_RXCFG_RX_BROAD;
1994 	else
1995 		rxcfg &= ~RTK_RXCFG_RX_BROAD;
1996 	CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1997 
1998 	/*
1999 	 * Program the multicast filter, if necessary.
2000 	 */
2001 	rtk_setmulti(sc);
2002 
2003 	/*
2004 	 * some chips require to enable TX/RX *AFTER* TX/RX configuration
2005 	 */
2006 	if ((sc->sc_quirk & RTKQ_TXRXEN_LATER) != 0)
2007 		CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB);
2008 
2009 	/*
2010 	 * Enable interrupts.
2011 	 */
2012 	if (sc->re_testmode)
2013 		CSR_WRITE_2(sc, RTK_IMR, 0);
2014 	else if ((sc->sc_quirk & RTKQ_IM_HW) != 0)
2015 		CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS_IM_HW);
2016 	else
2017 		CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS_CPLUS);
2018 
2019 	/* Start RX/TX process. */
2020 	CSR_WRITE_4(sc, RTK_MISSEDPKT, 0);
2021 #ifdef notdef
2022 	/* Enable receiver and transmitter. */
2023 	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB);
2024 #endif
2025 
2026 	/*
2027 	 * Initialize the timer interrupt register so that
2028 	 * a timer interrupt will be generated once the timer
2029 	 * reaches a certain number of ticks. The timer is
2030 	 * reloaded on each transmit. This gives us TX interrupt
2031 	 * moderation, which dramatically improves TX frame rate.
2032 	 */
2033 
2034 	unsigned defer;		/* timer interval / ns */
2035 	unsigned period;	/* busclock period / ns */
2036 
2037 	/*
2038 	 * Maximum frame rate
2039 	 * 1500 byte PDU -> 81274 Hz
2040 	 *   46 byte PDU -> 1488096 Hz
2041 	 *
2042 	 * Deferring interrupts by up to 128us needs descriptors for
2043 	 * 1500 byte PDU -> 10.4 frames
2044 	 *   46 byte PDU -> 190.4 frames
2045 	 *
2046 	 */
2047 	defer = 128000;
2048 
2049 	if ((sc->sc_quirk & RTKQ_IM_HW) != 0) {
2050 		period = 1;
2051 		defer = 0;
2052 	} else if ((sc->sc_quirk & RTKQ_PCIE) != 0) {
2053 		period = 8;
2054 	} else {
2055 		switch (CSR_READ_1(sc, RTK_CFG2_BUSFREQ) & 0x7) {
2056 		case RTK_BUSFREQ_33MHZ:
2057 			period = 30;
2058 			break;
2059 		case RTK_BUSFREQ_66MHZ:
2060 			period = 15;
2061 			break;
2062 		default:
2063 			/* lowest possible clock */
2064 			period = 60;
2065 			break;
2066 		}
2067 	}
2068 
2069 	/* Timer Interrupt register address varies */
2070 	uint16_t re8139_reg;
2071 	if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0)
2072 		re8139_reg = RTK_TIMERINT;
2073 	else
2074 		re8139_reg = RTK_TIMERINT_8169;
2075 	CSR_WRITE_4(sc, re8139_reg, defer / period);
2076 
2077 	if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) {
2078 		/*
2079 		 * For 8169 gigE NICs, set the max allowed RX packet
2080 		 * size so we can receive jumbo frames.
2081 		 */
2082 		CSR_WRITE_2(sc, RTK_MAXRXPKTLEN, 16383);
2083 	}
2084 
2085 	if (sc->re_testmode)
2086 		return 0;
2087 
2088 	CSR_WRITE_1(sc, RTK_CFG1, RTK_CFG1_DRVLOAD);
2089 
2090 	ifp->if_flags |= IFF_RUNNING;
2091 	ifp->if_flags &= ~IFF_OACTIVE;
2092 
2093 	callout_schedule(&sc->rtk_tick_ch, hz);
2094 
2095  out:
2096 	if (error) {
2097 		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2098 		ifp->if_timer = 0;
2099 		printf("%s: interface not running\n",
2100 		    device_xname(sc->sc_dev));
2101 	}
2102 
2103 	return error;
2104 }
2105 
2106 static int
2107 re_ioctl(struct ifnet *ifp, u_long command, void *data)
2108 {
2109 	struct rtk_softc *sc = ifp->if_softc;
2110 	struct ifreq *ifr = data;
2111 	int s, error = 0;
2112 
2113 	s = splnet();
2114 
2115 	switch (command) {
2116 	case SIOCSIFMTU:
2117 		/*
2118 		 * Disable jumbo frames if it's not supported.
2119 		 */
2120 		if ((sc->sc_quirk & RTKQ_NOJUMBO) != 0 &&
2121 		    ifr->ifr_mtu > ETHERMTU) {
2122 			error = EINVAL;
2123 			break;
2124 		}
2125 
2126 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU_JUMBO)
2127 			error = EINVAL;
2128 		else if ((error = ifioctl_common(ifp, command, data)) ==
2129 		    ENETRESET)
2130 			error = 0;
2131 		break;
2132 	default:
2133 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
2134 			break;
2135 
2136 		error = 0;
2137 
2138 		if (command == SIOCSIFCAP)
2139 			error = (*ifp->if_init)(ifp);
2140 		else if (command != SIOCADDMULTI && command != SIOCDELMULTI)
2141 			;
2142 		else if (ifp->if_flags & IFF_RUNNING)
2143 			rtk_setmulti(sc);
2144 		break;
2145 	}
2146 
2147 	splx(s);
2148 
2149 	return error;
2150 }
2151 
2152 static void
2153 re_watchdog(struct ifnet *ifp)
2154 {
2155 	struct rtk_softc *sc;
2156 	int s;
2157 
2158 	sc = ifp->if_softc;
2159 	s = splnet();
2160 	printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
2161 	if_statinc(ifp, if_oerrors);
2162 
2163 	re_txeof(sc);
2164 	re_rxeof(sc);
2165 
2166 	re_init(ifp);
2167 
2168 	splx(s);
2169 }
2170 
2171 /*
2172  * Stop the adapter and free any mbufs allocated to the
2173  * RX and TX lists.
2174  */
2175 static void
2176 re_stop(struct ifnet *ifp, int disable)
2177 {
2178 	int i;
2179 	struct rtk_softc *sc = ifp->if_softc;
2180 
2181 	callout_stop(&sc->rtk_tick_ch);
2182 
2183 	mii_down(&sc->mii);
2184 
2185 	if ((sc->sc_quirk & RTKQ_CMDSTOP) != 0)
2186 		CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_STOPREQ | RTK_CMD_TX_ENB |
2187 		    RTK_CMD_RX_ENB);
2188 	else
2189 		CSR_WRITE_1(sc, RTK_COMMAND, 0x00);
2190 	DELAY(1000);
2191 	CSR_WRITE_2(sc, RTK_IMR, 0x0000);
2192 	CSR_WRITE_2(sc, RTK_ISR, 0xFFFF);
2193 
2194 	if (sc->re_head != NULL) {
2195 		m_freem(sc->re_head);
2196 		sc->re_head = sc->re_tail = NULL;
2197 	}
2198 
2199 	/* Free the TX list buffers. */
2200 	for (i = 0; i < RE_TX_QLEN; i++) {
2201 		if (sc->re_ldata.re_txq[i].txq_mbuf != NULL) {
2202 			bus_dmamap_unload(sc->sc_dmat,
2203 			    sc->re_ldata.re_txq[i].txq_dmamap);
2204 			m_freem(sc->re_ldata.re_txq[i].txq_mbuf);
2205 			sc->re_ldata.re_txq[i].txq_mbuf = NULL;
2206 		}
2207 	}
2208 
2209 	/* Free the RX list buffers. */
2210 	for (i = 0; i < RE_RX_DESC_CNT; i++) {
2211 		if (sc->re_ldata.re_rxsoft[i].rxs_mbuf != NULL) {
2212 			bus_dmamap_unload(sc->sc_dmat,
2213 			    sc->re_ldata.re_rxsoft[i].rxs_dmamap);
2214 			m_freem(sc->re_ldata.re_rxsoft[i].rxs_mbuf);
2215 			sc->re_ldata.re_rxsoft[i].rxs_mbuf = NULL;
2216 		}
2217 	}
2218 
2219 	if (disable)
2220 		re_disable(sc);
2221 
2222 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2223 	ifp->if_timer = 0;
2224 }
2225