/*	$NetBSD: rtl8169.c,v 1.168 2021/09/04 19:27:43 jakllsch Exp $	*/

/*
 * Copyright (c) 1997, 1998-2003
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rtl8169.c,v 1.168 2021/09/04 19:27:43 jakllsch Exp $");
/* $FreeBSD: /repoman/r/ncvs/src/sys/dev/re/if_re.c,v 1.20 2004/04/11 20:34:08 ru Exp $ */

/*
 * RealTek 8139C+/8169/8169S/8168/8110S PCI NIC driver
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * This driver is designed to support RealTek's next generation of
 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
 * six devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
 * RTL8110S, the RTL8168 and the RTL8111.
 *
 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
 * with the older 8139 family, however it also supports a special
 * C+ mode of operation that provides several new performance enhancing
 * features. These include:
 *
 *	o Descriptor based DMA mechanism. Each descriptor represents
 *	  a single packet fragment. Data buffers may be aligned on
 *	  any byte boundary.
 *
 *	o 64-bit DMA
 *
 *	o TCP/IP checksum offload for both RX and TX
 *
 *	o High and normal priority transmit DMA rings
 *
 *	o VLAN tag insertion and extraction
 *
 *	o TCP large send (segmentation offload)
 *
 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
 * programming API is fairly straightforward. The RX filtering, EEPROM
 * access and PHY access are the same as on the older 8139 series
 * chips.
 *
 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
 * same programming API and feature set as the 8139C+ with the following
 * differences and additions:
 *
 *	o 1000Mbps mode
 *
 *	o Jumbo frames
 *
 *	o GMII and TBI ports/registers for interfacing with copper
 *	  or fiber PHYs
 *
 *	o RX and TX DMA rings can have up to 1024 descriptors
 *	  (the 8139C+ allows a maximum of 64)
 *
 *	o Slight differences in register layout from the 8139C+
 *
 * The TX start and timer interrupt registers are at different locations
 * on the 8169 than they are on the 8139C+. Also, the status word in the
 * RX descriptor has a slightly different bit layout. The 8169 does not
 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
 * copper gigE PHY.
 *
 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
 * (the 'S' stands for 'single-chip'). These devices have the same
 * programming API as the older 8169, but also have some vendor-specific
 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
 * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
 *
 * This driver takes advantage of the RX and TX checksum offload and
 * VLAN tag insertion/extraction features. It also implements TX
 * interrupt moderation using the timer interrupt registers, which
 * significantly reduces TX interrupt load. There is also support
 * for jumbo frames, however the 8169/8169S/8110S can not transmit
 * jumbo frames larger than 7.5K, so the max MTU possible with this
 * driver is 7500 bytes.
 */


#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/if_vlanvar.h>

#include <netinet/in_systm.h>	/* XXX for IP_MAXPACKET */
#include <netinet/in.h>		/* XXX for IP_MAXPACKET */
#include <netinet/ip.h>		/* XXX for IP_MAXPACKET */

#include <net/bpf.h>
#include <sys/rndsource.h>

#include <sys/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/ic/rtl81x9reg.h>
#include <dev/ic/rtl81x9var.h>

#include <dev/ic/rtl8169var.h>

static inline void re_set_bufaddr(struct re_desc *, bus_addr_t);

static int re_newbuf(struct rtk_softc *, int, struct mbuf *);
static int re_rx_list_init(struct rtk_softc *);
static int re_tx_list_init(struct rtk_softc *);
static void re_rxeof(struct rtk_softc *);
static void re_txeof(struct rtk_softc *);
static void re_tick(void *);
static void re_start(struct ifnet *);
static int re_ioctl(struct ifnet *, u_long, void *);
static int re_init(struct ifnet *);
static void re_stop(struct ifnet *, int);
static void re_watchdog(struct ifnet *);

static int re_enable(struct rtk_softc *);
static void re_disable(struct rtk_softc *);

static int re_gmii_readreg(device_t, int, int, uint16_t *);
static int re_gmii_writereg(device_t, int, int, uint16_t);

static int re_miibus_readreg(device_t, int, int, uint16_t *);
static int re_miibus_writereg(device_t, int, int, uint16_t);
static void re_miibus_statchg(struct ifnet *);

static void re_reset(struct rtk_softc *);

static const struct re_revision {
	uint32_t		re_chipid;
	const char		*re_name;
} re_revisions[] = {
	{ RTK_HWREV_8100,	"RTL8100" },
	{ RTK_HWREV_8100E,	"RTL8100E" },
	{ RTK_HWREV_8100E_SPIN2, "RTL8100E 2" },
	{ RTK_HWREV_8101,	"RTL8101" },
	{ RTK_HWREV_8101E,	"RTL8101E" },
	{ RTK_HWREV_8102E,	"RTL8102E" },
	{ RTK_HWREV_8106E,	"RTL8106E" },
	{ RTK_HWREV_8401E,	"RTL8401E" },
	{ RTK_HWREV_8402,	"RTL8402" },
	{ RTK_HWREV_8411,	"RTL8411" },
	{ RTK_HWREV_8411B,	"RTL8411B" },
	{ RTK_HWREV_8102EL,	"RTL8102EL" },
	{ RTK_HWREV_8102EL_SPIN1, "RTL8102EL 1" },
	{ RTK_HWREV_8103E,	"RTL8103E" },
	{ RTK_HWREV_8110S,	"RTL8110S" },
	{ RTK_HWREV_8139CPLUS,	"RTL8139C+" },
	{ RTK_HWREV_8168B_SPIN1, "RTL8168 1" },
	{ RTK_HWREV_8168B_SPIN2, "RTL8168 2" },
	{ RTK_HWREV_8168B_SPIN3, "RTL8168 3" },
	{ RTK_HWREV_8168C,	"RTL8168C/8111C" },
	{ RTK_HWREV_8168C_SPIN2, "RTL8168C/8111C" },
	{ RTK_HWREV_8168CP,	"RTL8168CP/8111CP" },
	{ RTK_HWREV_8168F,	"RTL8168F/8111F" },
	{ RTK_HWREV_8168G,	"RTL8168G/8111G" },
	{ RTK_HWREV_8168GU,	"RTL8168GU/8111GU" },
	{ RTK_HWREV_8168H,	"RTL8168H/8111H" },
	{ RTK_HWREV_8105E,	"RTL8105E" },
	{ RTK_HWREV_8105E_SPIN1, "RTL8105E" },
	{ RTK_HWREV_8168D,	"RTL8168D/8111D" },
	{ RTK_HWREV_8168DP,	"RTL8168DP/8111DP" },
	{ RTK_HWREV_8168E,	"RTL8168E/8111E" },
	{ RTK_HWREV_8168E_VL,	"RTL8168E/8111E-VL" },
	{ RTK_HWREV_8168EP,	"RTL8168EP/8111EP" },
	{ RTK_HWREV_8168FP,	"RTL8168FP/8117" },
	{ RTK_HWREV_8169,	"RTL8169" },
	{ RTK_HWREV_8169_8110SB, "RTL8169/8110SB" },
	{ RTK_HWREV_8169_8110SBL, "RTL8169SBL" },
	{ RTK_HWREV_8169_8110SC, "RTL8169/8110SCd" },
	{ RTK_HWREV_8169_8110SCE, "RTL8169/8110SCe" },
	{ RTK_HWREV_8169S,	"RTL8169S" },

	{ 0, NULL }
};

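/*
 * The chip's descriptors carry buffer addresses as two little-endian
 * 32-bit words, so a 64-bit bus_addr_t has to be split with RE_ADDR_LO()
 * and RE_ADDR_HI() before it is written out; on configurations whose bus
 * addresses fit in 32 bits the high word is simply zero.
 */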
static inline void
re_set_bufaddr(struct re_desc *d, bus_addr_t addr)
{

	d->re_bufaddr_lo = htole32(RE_ADDR_LO(addr));
	d->re_bufaddr_hi = htole32(RE_ADDR_HI(addr));
}

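/*
 * Indirect PHY access on the gigE chips goes through the PHYAR register.
 * As the two helpers below use it (the layout here is inferred from this
 * driver's usage, not from chip documentation):
 *
 *	bit  31     RTK_PHYAR_BUSY (write flag / cycle-done indicator)
 *	bits 20:16  5-bit PHY register number
 *	bits 15:0   RTK_PHYAR_PHYDATA (data read or written)
 *
 * A read is started by writing the register number alone and polling
 * until BUSY becomes set (data valid); a write is started by writing
 * register, data and BUSY together, and polling until BUSY clears.
 */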
static int
re_gmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct rtk_softc *sc = device_private(dev);
	uint32_t data;
	int i;

	if (phy != 7)
		return -1;

	/* Let the rgephy driver read the GMEDIASTAT register */

	if (reg == RTK_GMEDIASTAT) {
		*val = CSR_READ_1(sc, RTK_GMEDIASTAT);
		return 0;
	}

	CSR_WRITE_4(sc, RTK_PHYAR, reg << 16);
	DELAY(1000);

	for (i = 0; i < RTK_TIMEOUT; i++) {
		data = CSR_READ_4(sc, RTK_PHYAR);
		if (data & RTK_PHYAR_BUSY)
			break;
		DELAY(100);
	}

	if (i == RTK_TIMEOUT) {
		printf("%s: PHY read failed\n", device_xname(sc->sc_dev));
		return ETIMEDOUT;
	}

	*val = data & RTK_PHYAR_PHYDATA;
	return 0;
}

static int
re_gmii_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct rtk_softc *sc = device_private(dev);
	uint32_t data;
	int i;

	CSR_WRITE_4(sc, RTK_PHYAR, (reg << 16) |
	    (val & RTK_PHYAR_PHYDATA) | RTK_PHYAR_BUSY);
	DELAY(1000);

	for (i = 0; i < RTK_TIMEOUT; i++) {
		data = CSR_READ_4(sc, RTK_PHYAR);
		if (!(data & RTK_PHYAR_BUSY))
			break;
		DELAY(100);
	}

	if (i == RTK_TIMEOUT) {
		printf("%s: PHY write reg %x <- %hx failed\n",
		    device_xname(sc->sc_dev), reg, val);
		return ETIMEDOUT;
	}

	return 0;
}

static int
re_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct rtk_softc *sc = device_private(dev);
	uint16_t re8139_reg = 0;
	int s, rv = 0;

	s = splnet();

	if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) {
		rv = re_gmii_readreg(dev, phy, reg, val);
		splx(s);
		return rv;
	}

	/* Pretend the internal PHY is only at address 0 */
	if (phy) {
		splx(s);
		return -1;
	}
	switch (reg) {
	case MII_BMCR:
		re8139_reg = RTK_BMCR;
		break;
	case MII_BMSR:
		re8139_reg = RTK_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RTK_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RTK_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RTK_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		*val = 0;
		splx(s);
		return 0;
	/*
	 * Allow the rlphy driver to read the media status
	 * register. If we have a link partner which does not
	 * support NWAY, this is the register which will tell
	 * us the results of parallel detection.
	 */
	case RTK_MEDIASTAT:
		*val = CSR_READ_1(sc, RTK_MEDIASTAT);
		splx(s);
		return 0;
	default:
		printf("%s: bad phy register\n", device_xname(sc->sc_dev));
		splx(s);
		return -1;
	}
	*val = CSR_READ_2(sc, re8139_reg);
	if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0 && re8139_reg == RTK_BMCR) {
		/* 8139C+ has different bit layout. */
		*val &= ~(BMCR_LOOP | BMCR_ISO);
	}
	splx(s);
	return 0;
}

static int
re_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct rtk_softc *sc = device_private(dev);
	uint16_t re8139_reg = 0;
	int s, rv;

	s = splnet();

	if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) {
		rv = re_gmii_writereg(dev, phy, reg, val);
		splx(s);
		return rv;
	}

	/* Pretend the internal PHY is only at address 0 */
	if (phy) {
		splx(s);
		return -1;
	}
	switch (reg) {
	case MII_BMCR:
		re8139_reg = RTK_BMCR;
		if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0) {
			/* 8139C+ has different bit layout. */
			val &= ~(BMCR_LOOP | BMCR_ISO);
		}
		break;
	case MII_BMSR:
		re8139_reg = RTK_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RTK_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RTK_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RTK_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		splx(s);
		return 0;
	default:
		printf("%s: bad phy register\n", device_xname(sc->sc_dev));
		splx(s);
		return -1;
	}
	CSR_WRITE_2(sc, re8139_reg, val);
	splx(s);
	return 0;
}

static void
re_miibus_statchg(struct ifnet *ifp)
{

	return;
}

static void
re_reset(struct rtk_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_RESET);

	for (i = 0; i < RTK_TIMEOUT; i++) {
		DELAY(10);
		if ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_RESET) == 0)
			break;
	}
	if (i == RTK_TIMEOUT)
		printf("%s: reset never completed!\n",
		    device_xname(sc->sc_dev));

	/*
	 * NB: Realtek-supplied FreeBSD driver does this only for MACFG_3,
	 *     but also says "Rtl8169s sigle chip detected".
	 */
	if ((sc->sc_quirk & RTKQ_MACLDPS) != 0)
		CSR_WRITE_1(sc, RTK_LDPS, 1);
}

/*
 * The following routine is designed to test for a defect on some
 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
 * lines connected to the bus, however for a 32-bit only card, they
 * should be pulled high. The result of this defect is that the
 * NIC will not work right if you plug it into a 64-bit slot: DMA
 * operations will be done with 64-bit transfers, which will fail
 * because the 64-bit data lines aren't connected.
 *
 * There's no way to work around this (short of taking a soldering
 * iron to the board), however we can detect it. The method we use
 * here is to put the NIC into digital loopback mode, set the receiver
 * to promiscuous mode, and then try to send a frame. We then compare
 * the frame data we sent to what was received. If the data matches,
 * then the NIC is working correctly, otherwise we know the user has
 * a defective NIC which has been mistakenly plugged into a 64-bit PCI
 * slot. In the latter case, there's no way the NIC can work correctly,
 * so we print out a message on the console and abort the device attach.
 */

int
re_diag(struct rtk_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct mbuf *m0;
	struct ether_header *eh;
	struct re_rxsoft *rxs;
	struct re_desc *cur_rx;
	bus_dmamap_t dmamap;
	uint16_t status;
	uint32_t rxstat;
	int total_len, i, s, error = 0;
	static const uint8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
	static const uint8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };

	/* Allocate a single mbuf */

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return ENOBUFS;

	/*
	 * Initialize the NIC in test mode. This sets the chip up
	 * so that it can send and receive frames, but performs the
	 * following special functions:
	 * - Puts receiver in promiscuous mode
	 * - Enables digital loopback mode
	 * - Leaves interrupts turned off
	 */

	ifp->if_flags |= IFF_PROMISC;
	sc->re_testmode = 1;
	re_init(ifp);
	re_stop(ifp, 0);
	DELAY(100000);
	re_init(ifp);

	/* Put some data in the mbuf */

	eh = mtod(m0, struct ether_header *);
	memcpy(eh->ether_dhost, &dst, ETHER_ADDR_LEN);
	memcpy(eh->ether_shost, &src, ETHER_ADDR_LEN);
	eh->ether_type = htons(ETHERTYPE_IP);
	m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;

	/*
	 * Queue the packet, start transmission.
	 */

	CSR_WRITE_2(sc, RTK_ISR, 0xFFFF);
	s = splnet();
	IF_ENQUEUE(&ifp->if_snd, m0);
	re_start(ifp);
	splx(s);
	m0 = NULL;

	/* Wait for it to propagate through the chip */

	DELAY(100000);
	for (i = 0; i < RTK_TIMEOUT; i++) {
		status = CSR_READ_2(sc, RTK_ISR);
		if ((status & (RTK_ISR_TIMEOUT_EXPIRED | RTK_ISR_RX_OK)) ==
		    (RTK_ISR_TIMEOUT_EXPIRED | RTK_ISR_RX_OK))
			break;
		DELAY(10);
	}
	if (i == RTK_TIMEOUT) {
		aprint_error_dev(sc->sc_dev,
		    "diagnostic failed, failed to receive packet "
		    "in loopback mode\n");
		error = EIO;
		goto done;
	}

	/*
	 * The packet should have been dumped into the first
	 * entry in the RX DMA ring. Grab it from there.
	 */

	rxs = &sc->re_ldata.re_rxsoft[0];
	dmamap = rxs->rxs_dmamap;
	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, dmamap);

	m0 = rxs->rxs_mbuf;
	rxs->rxs_mbuf = NULL;
	eh = mtod(m0, struct ether_header *);

	RE_RXDESCSYNC(sc, 0, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	cur_rx = &sc->re_ldata.re_rx_list[0];
	rxstat = le32toh(cur_rx->re_cmdstat);
	total_len = rxstat & sc->re_rxlenmask;

	if (total_len != ETHER_MIN_LEN) {
		aprint_error_dev(sc->sc_dev,
		    "diagnostic failed, received short packet\n");
		error = EIO;
		goto done;
	}

	/* Test that the received packet data matches what we sent. */

	if (memcmp(&eh->ether_dhost, &dst, ETHER_ADDR_LEN) ||
	    memcmp(&eh->ether_shost, &src, ETHER_ADDR_LEN) ||
	    ntohs(eh->ether_type) != ETHERTYPE_IP) {
		aprint_error_dev(sc->sc_dev, "WARNING, DMA FAILURE!\n"
		    "expected TX data: %s/%s/0x%x\n"
		    "received RX data: %s/%s/0x%x\n"
		    "You may have a defective 32-bit NIC plugged "
		    "into a 64-bit PCI slot.\n"
		    "Please re-install the NIC in a 32-bit slot "
		    "for proper operation.\n"
		    "Read the re(4) man page for more details.\n",
		    ether_sprintf(dst), ether_sprintf(src), ETHERTYPE_IP,
		    ether_sprintf(eh->ether_dhost),
		    ether_sprintf(eh->ether_shost), ntohs(eh->ether_type));
		error = EIO;
	}

 done:
	/* Turn interface off, release resources */

	sc->re_testmode = 0;
	ifp->if_flags &= ~IFF_PROMISC;
	re_stop(ifp, 0);
	if (m0 != NULL)
		m_freem(m0);

	return error;
}


/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void
re_attach(struct rtk_softc *sc)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	struct ifnet *ifp;
	struct mii_data *mii = &sc->mii;
	int error = 0, i;
	const struct re_revision *rr;
	const char *re_name = NULL;

	if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) {
		/* Revision of 8169/8169S/8110S in bits 30..26, 23 */
		sc->sc_hwrev = CSR_READ_4(sc, RTK_TXCFG) & RTK_TXCFG_HWREV;

		for (rr = re_revisions; rr->re_name != NULL; rr++) {
			if (rr->re_chipid == sc->sc_hwrev)
				re_name = rr->re_name;
		}

		if (re_name == NULL)
			aprint_normal_dev(sc->sc_dev,
			    "unknown ASIC (0x%04x)\n", sc->sc_hwrev >> 16);
		else
			aprint_normal_dev(sc->sc_dev,
			    "%s (0x%04x)\n", re_name, sc->sc_hwrev >> 16);

		switch (sc->sc_hwrev) {
		case RTK_HWREV_8169:
			sc->sc_quirk |= RTKQ_8169NONS;
			break;
		case RTK_HWREV_8169S:
		case RTK_HWREV_8110S:
		case RTK_HWREV_8169_8110SB:
		case RTK_HWREV_8169_8110SBL:
		case RTK_HWREV_8169_8110SC:
			sc->sc_quirk |= RTKQ_MACLDPS;
			break;
		case RTK_HWREV_8168B_SPIN1:
		case RTK_HWREV_8168B_SPIN2:
		case RTK_HWREV_8168B_SPIN3:
			sc->sc_quirk |= RTKQ_MACSTAT;
			break;
		case RTK_HWREV_8168C:
		case RTK_HWREV_8168C_SPIN2:
		case RTK_HWREV_8168CP:
		case RTK_HWREV_8168D:
		case RTK_HWREV_8168DP:
			sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD |
			    RTKQ_MACSTAT | RTKQ_CMDSTOP;
			/*
			 * From FreeBSD driver:
			 *
			 * These (8168/8111) controllers support jumbo frame
			 * but it seems that enabling it requires touching
			 * additional magic registers. Depending on MAC
			 * revisions some controllers need to disable
			 * checksum offload. So disable jumbo frame until
			 * I have better idea what it really requires to
			 * make it support.
			 * RTL8168C/CP : supports up to 6KB jumbo frame.
			 * RTL8111C/CP : supports up to 9KB jumbo frame.
			 */
			sc->sc_quirk |= RTKQ_NOJUMBO;
			break;
		case RTK_HWREV_8168E:
			sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD |
			    RTKQ_MACSTAT | RTKQ_CMDSTOP | RTKQ_PHYWAKE_PM |
			    RTKQ_NOJUMBO;
			break;
		case RTK_HWREV_8168E_VL:
		case RTK_HWREV_8168F:
		case RTK_HWREV_8411:
			sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD |
			    RTKQ_MACSTAT | RTKQ_CMDSTOP | RTKQ_NOJUMBO;
			break;
		case RTK_HWREV_8168EP:
		case RTK_HWREV_8168FP:
		case RTK_HWREV_8168G:
		case RTK_HWREV_8168GU:
		case RTK_HWREV_8168H:
		case RTK_HWREV_8411B:
			sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD |
			    RTKQ_MACSTAT | RTKQ_CMDSTOP | RTKQ_NOJUMBO |
			    RTKQ_RXDV_GATED | RTKQ_TXRXEN_LATER;
			break;
		case RTK_HWREV_8100E:
		case RTK_HWREV_8100E_SPIN2:
		case RTK_HWREV_8101E:
			sc->sc_quirk |= RTKQ_NOJUMBO;
			break;
		case RTK_HWREV_8102E:
		case RTK_HWREV_8102EL:
		case RTK_HWREV_8102EL_SPIN1:
			sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD |
			    RTKQ_MACSTAT | RTKQ_CMDSTOP | RTKQ_NOJUMBO;
			break;
		case RTK_HWREV_8103E:
			sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD |
			    RTKQ_MACSTAT | RTKQ_CMDSTOP;
			break;
		case RTK_HWREV_8401E:
		case RTK_HWREV_8105E:
		case RTK_HWREV_8105E_SPIN1: /* XXX */
		case RTK_HWREV_8106E:
			sc->sc_quirk |= RTKQ_PHYWAKE_PM |
			    RTKQ_DESCV2 | RTKQ_NOEECMD | RTKQ_MACSTAT |
			    RTKQ_CMDSTOP;
			break;
		case RTK_HWREV_8402:
			sc->sc_quirk |= RTKQ_PHYWAKE_PM |
			    RTKQ_DESCV2 | RTKQ_NOEECMD | RTKQ_MACSTAT |
			    RTKQ_CMDSTOP; /* CMDSTOP_WAIT_TXQ */
			break;
		default:
			aprint_normal_dev(sc->sc_dev, "Use default quirks\n");
			/* assume the latest features */
			sc->sc_quirk |= RTKQ_DESCV2 | RTKQ_NOEECMD;
			sc->sc_quirk |= RTKQ_NOJUMBO;
		}

		/* Set RX length mask */
		sc->re_rxlenmask = RE_RDESC_STAT_GFRAGLEN;
		sc->re_ldata.re_tx_desc_cnt = RE_TX_DESC_CNT_8169;
	} else {
		sc->sc_quirk |= RTKQ_NOJUMBO;

		/* Set RX length mask */
		sc->re_rxlenmask = RE_RDESC_STAT_FRAGLEN;
		sc->re_ldata.re_tx_desc_cnt = RE_TX_DESC_CNT_8139;
	}

	/* Reset the adapter. */
	re_reset(sc);

	/*
	 * RTL81x9 chips automatically read the EEPROM to initialize the
	 * MAC address, and some NAS boxes override that MAC address with
	 * their own configuration, so there is no need to explicitly read
	 * the EEPROM and set the ID registers.
	 */
#ifdef RE_USE_EECMD
	if ((sc->sc_quirk & RTKQ_NOEECMD) != 0) {
		/*
		 * Get station address from ID registers.
		 */
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			eaddr[i] = CSR_READ_1(sc, RTK_IDR0 + i);
	} else {
		uint16_t val;
		int addr_len;

		/*
		 * Probe the EEPROM ID word to determine the EEPROM
		 * address width.
		 */
		if (rtk_read_eeprom(sc, RTK_EE_ID, RTK_EEADDR_LEN1) == 0x8129)
			addr_len = RTK_EEADDR_LEN1;
		else
			addr_len = RTK_EEADDR_LEN0;

		/*
		 * Get station address from the EEPROM.
		 */
		for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
			val = rtk_read_eeprom(sc, RTK_EE_EADDR0 + i, addr_len);
			eaddr[(i * 2) + 0] = val & 0xff;
			eaddr[(i * 2) + 1] = val >> 8;
		}
	}
#else
	/*
	 * Get station address from ID registers.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, RTK_IDR0 + i);
#endif

	/* Take PHY out of power down mode. */
	if ((sc->sc_quirk & RTKQ_PHYWAKE_PM) != 0)
		CSR_WRITE_1(sc, RTK_PMCH, CSR_READ_1(sc, RTK_PMCH) | 0x80);

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(eaddr));

	if (sc->re_ldata.re_tx_desc_cnt >
	    PAGE_SIZE / sizeof(struct re_desc)) {
		sc->re_ldata.re_tx_desc_cnt =
		    PAGE_SIZE / sizeof(struct re_desc);
	}

	aprint_verbose_dev(sc->sc_dev, "using %d tx descriptors\n",
	    sc->re_ldata.re_tx_desc_cnt);
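	/*
	 * The ring-advance macro is assumed to wrap by masking, which
	 * only works when the descriptor count is a power of two, so
	 * assert that stepping past the last descriptor wraps to slot 0.
	 */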
	KASSERT(RE_NEXT_TX_DESC(sc, RE_TX_DESC_CNT(sc) - 1) == 0);

	/* Allocate DMA'able memory for the TX ring */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, RE_TX_LIST_SZ(sc),
	    RE_RING_ALIGN, 0, &sc->re_ldata.re_tx_listseg, 1,
	    &sc->re_ldata.re_tx_listnseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't allocate tx listseg, error = %d\n", error);
		goto fail_0;
	}

	/* Load the map for the TX ring. */
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->re_ldata.re_tx_listseg,
	    sc->re_ldata.re_tx_listnseg, RE_TX_LIST_SZ(sc),
	    (void **)&sc->re_ldata.re_tx_list,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't map tx list, error = %d\n", error);
		goto fail_1;
	}
	memset(sc->re_ldata.re_tx_list, 0, RE_TX_LIST_SZ(sc));

	if ((error = bus_dmamap_create(sc->sc_dmat, RE_TX_LIST_SZ(sc), 1,
	    RE_TX_LIST_SZ(sc), 0, 0,
	    &sc->re_ldata.re_tx_list_map)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't create tx list map, error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat,
	    sc->re_ldata.re_tx_list_map, sc->re_ldata.re_tx_list,
	    RE_TX_LIST_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't load tx list, error = %d\n", error);
		goto fail_3;
	}

	/* Create DMA maps for TX buffers */
	for (i = 0; i < RE_TX_QLEN; i++) {
		error = bus_dmamap_create(sc->sc_dmat,
		    round_page(IP_MAXPACKET),
		    RE_TX_DESC_CNT(sc), RE_TDESC_CMD_FRAGLEN,
		    0, 0, &sc->re_ldata.re_txq[i].txq_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for TX\n");
			goto fail_4;
		}
	}

	/* Allocate DMA'able memory for the RX ring */
	/* XXX see also a comment about RE_RX_DMAMEM_SZ in rtl81x9var.h */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    RE_RX_DMAMEM_SZ, RE_RING_ALIGN, 0, &sc->re_ldata.re_rx_listseg, 1,
	    &sc->re_ldata.re_rx_listnseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't allocate rx listseg, error = %d\n", error);
		goto fail_4;
	}

	/* Load the map for the RX ring. */
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->re_ldata.re_rx_listseg,
	    sc->re_ldata.re_rx_listnseg, RE_RX_DMAMEM_SZ,
	    (void **)&sc->re_ldata.re_rx_list,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't map rx list, error = %d\n", error);
		goto fail_5;
	}
	memset(sc->re_ldata.re_rx_list, 0, RE_RX_DMAMEM_SZ);

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    RE_RX_DMAMEM_SZ, 1, RE_RX_DMAMEM_SZ, 0, 0,
	    &sc->re_ldata.re_rx_list_map)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't create rx list map, error = %d\n", error);
		goto fail_6;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat,
	    sc->re_ldata.re_rx_list_map, sc->re_ldata.re_rx_list,
	    RE_RX_DMAMEM_SZ, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't load rx list, error = %d\n", error);
		goto fail_7;
	}

	/* Create DMA maps for RX buffers */
	for (i = 0; i < RE_RX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, 0, &sc->re_ldata.re_rxsoft[i].rxs_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for RX\n");
			goto fail_8;
		}
	}

	/*
	 * Record interface as attached. From here, we should not fail.
	 */
	sc->sc_flags |= RTK_ATTACHED;

	ifp = &sc->ethercom.ec_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = re_ioctl;
	sc->ethercom.ec_capabilities |=
	    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
	ifp->if_start = re_start;
	ifp->if_stop = re_stop;

	/*
	 * IFCAP_CSUM_IPv4_Tx on re(4) is broken for small packets,
	 * so we have a workaround to handle the bug by padding
	 * such packets manually.
	 */
	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
	    IFCAP_TSOv4;

	ifp->if_watchdog = re_watchdog;
	ifp->if_init = re_init;
	ifp->if_snd.ifq_maxlen = RE_IFQ_MAXLEN;
	ifp->if_capenable = ifp->if_capabilities;
	IFQ_SET_READY(&ifp->if_snd);

	callout_init(&sc->rtk_tick_ch, 0);
	callout_setfunc(&sc->rtk_tick_ch, re_tick, sc);

	/* Do MII setup */
	mii->mii_ifp = ifp;
	mii->mii_readreg = re_miibus_readreg;
	mii->mii_writereg = re_miibus_writereg;
	mii->mii_statchg = re_miibus_statchg;
	sc->ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * Call MI attach routine.
	 */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, eaddr);

	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	if (pmf_device_register(sc->sc_dev, NULL, NULL))
		pmf_class_network_register(sc->sc_dev, ifp);
	else
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");

	return;

 fail_8:
	/* Destroy DMA maps for RX buffers. */
	for (i = 0; i < RE_RX_DESC_CNT; i++)
		if (sc->re_ldata.re_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->re_ldata.re_rxsoft[i].rxs_dmamap);

	/* Free DMA'able memory for the RX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->re_ldata.re_rx_list_map);
 fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->re_ldata.re_rx_list_map);
 fail_6:
	bus_dmamem_unmap(sc->sc_dmat,
	    (void *)sc->re_ldata.re_rx_list, RE_RX_DMAMEM_SZ);
 fail_5:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->re_ldata.re_rx_listseg, sc->re_ldata.re_rx_listnseg);

 fail_4:
	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < RE_TX_QLEN; i++)
		if (sc->re_ldata.re_txq[i].txq_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->re_ldata.re_txq[i].txq_dmamap);

	/* Free DMA'able memory for the TX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->re_ldata.re_tx_list_map);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->re_ldata.re_tx_list_map);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat,
	    (void *)sc->re_ldata.re_tx_list, RE_TX_LIST_SZ(sc));
 fail_1:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->re_ldata.re_tx_listseg, sc->re_ldata.re_tx_listnseg);
 fail_0:
	return;
}


/*
 * re_activate:
 *     Handle device activation/deactivation requests.
 */
int
re_activate(device_t self, enum devact act)
{
	struct rtk_softc *sc = device_private(self);

	switch (act) {
	case DVACT_DEACTIVATE:
		if_deactivate(&sc->ethercom.ec_if);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}

/*
 * re_detach:
 *     Detach a rtk interface.
 */
int
re_detach(struct rtk_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int i;

	/*
	 * Succeed now if there isn't any work to do.
	 */
	if ((sc->sc_flags & RTK_ATTACHED) == 0)
		return 0;

	/* Unhook our tick handler. */
	callout_stop(&sc->rtk_tick_ch);

	/* Detach all PHYs. */
	mii_detach(&sc->mii, MII_PHY_ANY, MII_OFFSET_ANY);

	rnd_detach_source(&sc->rnd_source);
	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Delete all remaining media. */
	ifmedia_fini(&sc->mii.mii_media);

	/* Destroy DMA maps for RX buffers. */
	for (i = 0; i < RE_RX_DESC_CNT; i++)
		if (sc->re_ldata.re_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->re_ldata.re_rxsoft[i].rxs_dmamap);

	/* Free DMA'able memory for the RX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->re_ldata.re_rx_list_map);
	bus_dmamap_destroy(sc->sc_dmat, sc->re_ldata.re_rx_list_map);
	bus_dmamem_unmap(sc->sc_dmat,
	    (void *)sc->re_ldata.re_rx_list, RE_RX_DMAMEM_SZ);
	bus_dmamem_free(sc->sc_dmat,
	    &sc->re_ldata.re_rx_listseg, sc->re_ldata.re_rx_listnseg);

	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < RE_TX_QLEN; i++)
		if (sc->re_ldata.re_txq[i].txq_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->re_ldata.re_txq[i].txq_dmamap);

	/* Free DMA'able memory for the TX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->re_ldata.re_tx_list_map);
	bus_dmamap_destroy(sc->sc_dmat, sc->re_ldata.re_tx_list_map);
	bus_dmamem_unmap(sc->sc_dmat,
	    (void *)sc->re_ldata.re_tx_list, RE_TX_LIST_SZ(sc));
	bus_dmamem_free(sc->sc_dmat,
	    &sc->re_ldata.re_tx_listseg, sc->re_ldata.re_tx_listnseg);

	pmf_device_deregister(sc->sc_dev);

	/* we don't want to run again */
	sc->sc_flags &= ~RTK_ATTACHED;

	return 0;
}

/*
 * re_enable:
 *     Enable the RTL81X9 chip.
 */
static int
re_enable(struct rtk_softc *sc)
{

	if (RTK_IS_ENABLED(sc) == 0 && sc->sc_enable != NULL) {
		if ((*sc->sc_enable)(sc) != 0) {
			printf("%s: device enable failed\n",
			    device_xname(sc->sc_dev));
			return EIO;
		}
		sc->sc_flags |= RTK_ENABLED;
	}
	return 0;
}

/*
 * re_disable:
 *     Disable the RTL81X9 chip.
 */
static void
re_disable(struct rtk_softc *sc)
{

	if (RTK_IS_ENABLED(sc) && sc->sc_disable != NULL) {
		(*sc->sc_disable)(sc);
		sc->sc_flags &= ~RTK_ENABLED;
	}
}

static int
re_newbuf(struct rtk_softc *sc, int idx, struct mbuf *m)
{
	struct mbuf *n = NULL;
	bus_dmamap_t map;
	struct re_desc *d;
	struct re_rxsoft *rxs;
	uint32_t cmdstat;
	int error;

	if (m == NULL) {
		MGETHDR(n, M_DONTWAIT, MT_DATA);
		if (n == NULL)
			return ENOBUFS;

		MCLAIM(n, &sc->ethercom.ec_rx_mowner);
		MCLGET(n, M_DONTWAIT);
		if ((n->m_flags & M_EXT) == 0) {
			m_freem(n);
			return ENOBUFS;
		}
		m = n;
	} else
		m->m_data = m->m_ext.ext_buf;

	/*
	 * Initialize the mbuf length fields and fix up the alignment
	 * so that the frame payload is longword aligned.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES - RE_ETHER_ALIGN;
	m->m_data += RE_ETHER_ALIGN;

	rxs = &sc->re_ldata.re_rxsoft[idx];
	map = rxs->rxs_dmamap;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);

	if (error)
		goto out;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	d = &sc->re_ldata.re_rx_list[idx];
#ifdef DIAGNOSTIC
	RE_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	cmdstat = le32toh(d->re_cmdstat);
	RE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
	if (cmdstat & RE_RDESC_STAT_OWN) {
		panic("%s: tried to map busy RX descriptor",
		    device_xname(sc->sc_dev));
	}
#endif

	rxs->rxs_mbuf = m;

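	/*
	 * Fill in the descriptor and hand it to the chip in two steps:
	 * everything except the OWN bit is written and synced first, and
	 * only then is OWN set and synced again, so the chip can never
	 * see a descriptor that is marked owned but only half filled in.
	 */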
	d->re_vlanctl = 0;
	cmdstat = map->dm_segs[0].ds_len;
	if (idx == (RE_RX_DESC_CNT - 1))
		cmdstat |= RE_RDESC_CMD_EOR;
	re_set_bufaddr(d, map->dm_segs[0].ds_addr);
	d->re_cmdstat = htole32(cmdstat);
	RE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	cmdstat |= RE_RDESC_CMD_OWN;
	d->re_cmdstat = htole32(cmdstat);
	RE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return 0;
 out:
	if (n != NULL)
		m_freem(n);
	return ENOMEM;
}

static int
re_tx_list_init(struct rtk_softc *sc)
{
	int i;

	memset(sc->re_ldata.re_tx_list, 0, RE_TX_LIST_SZ(sc));
	for (i = 0; i < RE_TX_QLEN; i++) {
		sc->re_ldata.re_txq[i].txq_mbuf = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat,
	    sc->re_ldata.re_tx_list_map, 0,
	    sc->re_ldata.re_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->re_ldata.re_txq_prodidx = 0;
	sc->re_ldata.re_txq_considx = 0;
	sc->re_ldata.re_txq_free = RE_TX_QLEN;
	sc->re_ldata.re_tx_free = RE_TX_DESC_CNT(sc);
	sc->re_ldata.re_tx_nextfree = 0;

	return 0;
}

static int
re_rx_list_init(struct rtk_softc *sc)
{
	int i;

	memset(sc->re_ldata.re_rx_list, 0, RE_RX_LIST_SZ);

	for (i = 0; i < RE_RX_DESC_CNT; i++) {
		if (re_newbuf(sc, i, NULL) == ENOBUFS)
			return ENOBUFS;
	}

	sc->re_ldata.re_rx_prodidx = 0;
	sc->re_head = sc->re_tail = NULL;

	return 0;
}

/*
 * RX handler for C+ and 8169. For the gigE chips, we support
 * the reception of jumbo frames that have been fragmented
 * across multiple 2K mbuf cluster buffers.
 */
static void
re_rxeof(struct rtk_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	int i, total_len;
	struct re_desc *cur_rx;
	struct re_rxsoft *rxs;
	uint32_t rxstat, rxvlan;

	ifp = &sc->ethercom.ec_if;

	for (i = sc->re_ldata.re_rx_prodidx;; i = RE_NEXT_RX_DESC(sc, i)) {
		cur_rx = &sc->re_ldata.re_rx_list[i];
		RE_RXDESCSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		rxstat = le32toh(cur_rx->re_cmdstat);
		rxvlan = le32toh(cur_rx->re_vlanctl);
		RE_RXDESCSYNC(sc, i, BUS_DMASYNC_PREREAD);
		if ((rxstat & RE_RDESC_STAT_OWN) != 0) {
			break;
		}
		total_len = rxstat & sc->re_rxlenmask;
		rxs = &sc->re_ldata.re_rxsoft[i];
		m = rxs->rxs_mbuf;

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->sc_dmat,
		    rxs->rxs_dmamap, 0, rxs->rxs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

		if ((rxstat & RE_RDESC_STAT_EOF) == 0) {
			m->m_len = MCLBYTES - RE_ETHER_ALIGN;
			if (sc->re_head == NULL)
				sc->re_head = sc->re_tail = m;
			else {
				m_remove_pkthdr(m);
				sc->re_tail->m_next = m;
				sc->re_tail = m;
			}
			re_newbuf(sc, i, NULL);
			continue;
		}

		/*
		 * NOTE: for the 8139C+, the frame length field
		 * is always 12 bits in size, but for the gigE chips,
		 * it is 13 bits (since the max RX frame length is 16K).
		 * Unfortunately, all 32 bits in the status word
		 * were already used, so to make room for the extra
		 * length bit, RealTek took out the 'frame alignment
		 * error' bit and shifted the other status bits
		 * over one slot. The OWN, EOR, FS and LS bits are
		 * still in the same places. We have already extracted
		 * the frame length and checked the OWN bit, so rather
		 * than using an alternate bit mapping, we shift the
		 * status bits one space to the right so we can evaluate
		 * them using the 8169 status as though it was in the
		 * same format as that of the 8139C+.
		 */
		if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0)
			rxstat >>= 1;

		if (__predict_false((rxstat & RE_RDESC_STAT_RXERRSUM) != 0)) {
#ifdef RE_DEBUG
			printf("%s: RX error (rxstat = 0x%08x)",
			    device_xname(sc->sc_dev), rxstat);
			if (rxstat & RE_RDESC_STAT_FRALIGN)
				printf(", frame alignment error");
			if (rxstat & RE_RDESC_STAT_BUFOFLOW)
				printf(", out of buffer space");
			if (rxstat & RE_RDESC_STAT_FIFOOFLOW)
				printf(", FIFO overrun");
			if (rxstat & RE_RDESC_STAT_GIANT)
				printf(", giant packet");
			if (rxstat & RE_RDESC_STAT_RUNT)
				printf(", runt packet");
			if (rxstat & RE_RDESC_STAT_CRCERR)
				printf(", CRC error");
			printf("\n");
#endif
			if_statinc(ifp, if_ierrors);
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->re_head != NULL) {
				m_freem(sc->re_head);
				sc->re_head = sc->re_tail = NULL;
			}
			re_newbuf(sc, i, m);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (__predict_false(re_newbuf(sc, i, NULL) != 0)) {
			if_statinc(ifp, if_ierrors);
			if (sc->re_head != NULL) {
				m_freem(sc->re_head);
				sc->re_head = sc->re_tail = NULL;
			}
			re_newbuf(sc, i, m);
			continue;
		}

		if (sc->re_head != NULL) {
			m->m_len = total_len % (MCLBYTES - RE_ETHER_ALIGN);
			/*
			 * Special case: if there are 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->re_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m_remove_pkthdr(m);
				sc->re_tail->m_next = m;
			}
			m = sc->re_head;
			sc->re_head = sc->re_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		m_set_rcvif(m, ifp);

		/* Do RX checksumming */
		if ((sc->sc_quirk & RTKQ_DESCV2) == 0) {
			/* Check IP header checksum */
			if ((rxstat & RE_RDESC_STAT_PROTOID) != 0) {
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				if (rxstat & RE_RDESC_STAT_IPSUMBAD)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;

				/* Check TCP/UDP checksum */
				if (RE_TCPPKT(rxstat)) {
					m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
					if (rxstat & RE_RDESC_STAT_TCPSUMBAD)
						m->m_pkthdr.csum_flags |=
						    M_CSUM_TCP_UDP_BAD;
				} else if (RE_UDPPKT(rxstat)) {
					m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
					if (rxstat & RE_RDESC_STAT_UDPSUMBAD) {
						/*
						 * XXX: 8139C+ thinks UDP csum
						 * 0xFFFF is bad, force software
						 * calculation.
						 */
						if (sc->sc_quirk & RTKQ_8139CPLUS)
							m->m_pkthdr.csum_flags
							    &= ~M_CSUM_UDPv4;
						else
							m->m_pkthdr.csum_flags
							    |= M_CSUM_TCP_UDP_BAD;
					}
				}
			}
		} else {
			/* Check IPv4 header checksum */
			if ((rxvlan & RE_RDESC_VLANCTL_IPV4) != 0) {
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				if (rxstat & RE_RDESC_STAT_IPSUMBAD)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;

				/* Check TCPv4/UDPv4 checksum */
				if (RE_TCPPKT(rxstat)) {
					m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
					if (rxstat & RE_RDESC_STAT_TCPSUMBAD)
						m->m_pkthdr.csum_flags |=
						    M_CSUM_TCP_UDP_BAD;
				} else if (RE_UDPPKT(rxstat)) {
					m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
					if (rxstat & RE_RDESC_STAT_UDPSUMBAD)
						m->m_pkthdr.csum_flags |=
						    M_CSUM_TCP_UDP_BAD;
				}
			}
			/* XXX Check TCPv6/UDPv6 checksum? */
		}

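		/*
		 * The 16-bit VLAN tag in the descriptor appears to be
		 * kept in network byte order, hence the bswap16() below
		 * (and the matching swap on the transmit side).
		 */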
		if (rxvlan & RE_RDESC_VLANCTL_TAG) {
			vlan_set_tag(m,
			     bswap16(rxvlan & RE_RDESC_VLANCTL_DATA));
		}
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	sc->re_ldata.re_rx_prodidx = i;
}

static void
re_txeof(struct rtk_softc *sc)
{
	struct ifnet *ifp;
	struct re_txq *txq;
	uint32_t txstat;
	int idx, descidx;

	ifp = &sc->ethercom.ec_if;

	for (idx = sc->re_ldata.re_txq_considx;
	    sc->re_ldata.re_txq_free < RE_TX_QLEN;
	    idx = RE_NEXT_TXQ(sc, idx), sc->re_ldata.re_txq_free++) {
		txq = &sc->re_ldata.re_txq[idx];
		KASSERT(txq->txq_mbuf != NULL);

		descidx = txq->txq_descidx;
		RE_TXDESCSYNC(sc, descidx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		txstat =
		    le32toh(sc->re_ldata.re_tx_list[descidx].re_cmdstat);
		RE_TXDESCSYNC(sc, descidx, BUS_DMASYNC_PREREAD);
		KASSERT((txstat & RE_TDESC_CMD_EOF) != 0);
		if (txstat & RE_TDESC_CMD_OWN) {
			break;
		}

		sc->re_ldata.re_tx_free += txq->txq_nsegs;
		KASSERT(sc->re_ldata.re_tx_free <= RE_TX_DESC_CNT(sc));
		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap,
		    0, txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
		if (txstat & (RE_TDESC_STAT_EXCESSCOL | RE_TDESC_STAT_COLCNT))
			if_statinc_ref(nsr, if_collisions);
		if (txstat & RE_TDESC_STAT_TXERRSUM)
			if_statinc_ref(nsr, if_oerrors);
		else
			if_statinc_ref(nsr, if_opackets);
		IF_STAT_PUTREF(ifp);
	}

	sc->re_ldata.re_txq_considx = idx;

	if (sc->re_ldata.re_txq_free > RE_NTXDESC_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * If not all descriptors have been reaped yet, reload the timer
	 * so that we will eventually get another interrupt that will
	 * cause us to re-enter this routine. This is done in case the
	 * transmitter has gone idle.
	 */
	if (sc->re_ldata.re_txq_free < RE_TX_QLEN) {
		if ((sc->sc_quirk & RTKQ_IM_HW) == 0)
			CSR_WRITE_4(sc, RTK_TIMERCNT, 1);
		if ((sc->sc_quirk & RTKQ_PCIE) != 0) {
			/*
			 * Some chips will ignore a second TX request
			 * issued while an existing transmission is in
			 * progress. If the transmitter goes idle but
			 * there are still packets waiting to be sent,
			 * we need to restart the channel here to flush
			 * them out. This only seems to be required with
			 * the PCIe devices.
			 */
			CSR_WRITE_1(sc, RTK_GTXSTART, RTK_TXSTART_START);
		}
	} else
		ifp->if_timer = 0;
}

static void
re_tick(void *arg)
{
	struct rtk_softc *sc = arg;
	int s;

	/* XXX: just return for 8169S/8110S with rev 2 or newer phy */
	s = splnet();

	mii_tick(&sc->mii);
	splx(s);

	callout_schedule(&sc->rtk_tick_ch, hz);
}

int
re_intr(void *arg)
{
	struct rtk_softc *sc = arg;
	struct ifnet *ifp;
	uint16_t status;
	int handled = 0;

	if (!device_has_power(sc->sc_dev))
		return 0;

	ifp = &sc->ethercom.ec_if;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	const uint16_t status_mask = (sc->sc_quirk & RTKQ_IM_HW) ?
	    RTK_INTRS_IM_HW : RTK_INTRS_CPLUS;

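	/*
	 * Service loop: read ISR and acknowledge whatever is pending by
	 * writing the same bits back (the ISR bits are write-one-to-clear),
	 * then dispatch to the RX/TX handlers until no interesting status
	 * bits remain set.
	 */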
	for (;;) {

		status = CSR_READ_2(sc, RTK_ISR);
		/* If the card has gone away the read returns 0xffff. */
		if (status == 0xffff)
			break;
		if (status) {
			handled = 1;
			CSR_WRITE_2(sc, RTK_ISR, status);
		}

		if ((status & status_mask) == 0)
			break;

		if (status & (RTK_ISR_RX_OK | RTK_ISR_RX_ERR))
			re_rxeof(sc);

		if (status & (RTK_ISR_TIMEOUT_EXPIRED | RTK_ISR_TX_ERR |
		    RTK_ISR_TX_DESC_UNAVAIL | RTK_ISR_TX_OK))
			re_txeof(sc);

		if (status & RTK_ISR_SYSTEM_ERR) {
			re_init(ifp);
		}

		if (status & RTK_ISR_LINKCHG) {
			callout_stop(&sc->rtk_tick_ch);
			re_tick(sc);
		}
	}

	if (handled)
		if_schedule_deferred_start(ifp);

	rnd_add_uint32(&sc->rnd_source, status);

	return handled;
}


/*
 * Main transmit routine for C+ and gigE NICs.
 */

static void
re_start(struct ifnet *ifp)
{
	struct rtk_softc *sc;
	struct mbuf *m;
	bus_dmamap_t map;
	struct re_txq *txq;
	struct re_desc *d;
	uint32_t cmdstat, re_flags, vlanctl;
	int ofree, idx, error, nsegs, seg;
	int startdesc, curdesc, lastdesc;
	bool pad;

	sc = ifp->if_softc;
	ofree = sc->re_ldata.re_txq_free;

	for (idx = sc->re_ldata.re_txq_prodidx;; idx = RE_NEXT_TXQ(sc, idx)) {

		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if (sc->re_ldata.re_txq_free == 0 ||
		    sc->re_ldata.re_tx_free == 0) {
			/* no more free slots left */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * Set up checksum offload. Note: checksum offload bits must
		 * appear in all descriptors of a multi-descriptor transmit
		 * attempt. (This is according to testing done with an 8169
		 * chip. I'm not sure if this is a requirement or a bug.)
		 */

		vlanctl = 0;
		if ((m->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0) {
			uint32_t segsz = m->m_pkthdr.segsz;

			if ((sc->sc_quirk & RTKQ_DESCV2) == 0) {
				re_flags = RE_TDESC_CMD_LGSEND |
				    (segsz << RE_TDESC_CMD_MSSVAL_SHIFT);
			} else {
				re_flags = RE_TDESC_CMD_LGSEND_V4;
				vlanctl |=
				    (segsz << RE_TDESC_VLANCTL_MSSVAL_SHIFT);
			}
		} else {
			/*
			 * set RE_TDESC_CMD_IPCSUM if any checksum offloading
			 * is requested.  otherwise, RE_TDESC_CMD_TCPCSUM/
			 * RE_TDESC_CMD_UDPCSUM have no effect.
			 */
			re_flags = 0;
			if ((m->m_pkthdr.csum_flags &
			    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4))
			    != 0) {
				if ((sc->sc_quirk & RTKQ_DESCV2) == 0) {
					re_flags |= RE_TDESC_CMD_IPCSUM;
					if (m->m_pkthdr.csum_flags &
					    M_CSUM_TCPv4) {
						re_flags |=
						    RE_TDESC_CMD_TCPCSUM;
					} else if (m->m_pkthdr.csum_flags &
					    M_CSUM_UDPv4) {
						re_flags |=
						    RE_TDESC_CMD_UDPCSUM;
					}
				} else {
					vlanctl |= RE_TDESC_VLANCTL_IPCSUM;
					if (m->m_pkthdr.csum_flags &
					    M_CSUM_TCPv4) {
						vlanctl |=
						    RE_TDESC_VLANCTL_TCPCSUM;
					} else if (m->m_pkthdr.csum_flags &
					    M_CSUM_UDPv4) {
						vlanctl |=
						    RE_TDESC_VLANCTL_UDPCSUM;
					}
				}
			}
		}

		txq = &sc->re_ldata.re_txq[idx];
		map = txq->txq_dmamap;
		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);

		if (__predict_false(error)) {
			/* XXX try to defrag if EFBIG? */
			printf("%s: can't map mbuf (error %d)\n",
			    device_xname(sc->sc_dev), error);

			IFQ_DEQUEUE(&ifp->if_snd, m);
			m_freem(m);
			if_statinc(ifp, if_oerrors);
			continue;
		}

		nsegs = map->dm_nsegs;
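		/*
		 * Frames short enough to trip the hardware IPv4-checksum
		 * bug get one extra descriptor pointing at a zero pad
		 * buffer (see below), so account for that extra segment
		 * before checking for ring space.
		 */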
		pad = false;
		if (__predict_false(m->m_pkthdr.len <= RE_IP4CSUMTX_PADLEN &&
		    (re_flags & RE_TDESC_CMD_IPCSUM) != 0 &&
		    (sc->sc_quirk & RTKQ_DESCV2) == 0)) {
			pad = true;
			nsegs++;
		}

		if (nsegs > sc->re_ldata.re_tx_free) {
			/*
			 * Not enough free descriptors to transmit this packet.
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, map);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

		/*
		 * Make sure that the caches are synchronized before we
		 * ask the chip to start DMA for the packet data.
		 */
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Set up hardware VLAN tagging. Note: vlan tag info must
		 * appear in all descriptors of a multi-descriptor
		 * transmission attempt.
		 */
		if (vlan_has_tag(m))
			vlanctl |= bswap16(vlan_get_tag(m)) |
			    RE_TDESC_VLANCTL_TAG;

		/*
		 * Map the segment array into descriptors.
		 * Note that we set the start-of-frame and
		 * end-of-frame markers for either TX or RX,
		 * but they really only have meaning in the TX case.
		 * (In the RX case, it's the chip that tells us
		 *  where packets begin and end.)
		 * We also keep track of the end of the ring
		 * and set the end-of-ring bits as needed,
		 * and we set the ownership bits in all except
		 * the very first descriptor. (Its OWN bit is set
		 * only after the whole chain has been built; see
		 * "Transfer ownership of packet to the chip" below.)
		 */
1743 		curdesc = startdesc = sc->re_ldata.re_tx_nextfree;
1744 		lastdesc = -1;
1745 		for (seg = 0; seg < map->dm_nsegs;
1746 		    seg++, curdesc = RE_NEXT_TX_DESC(sc, curdesc)) {
1747 			d = &sc->re_ldata.re_tx_list[curdesc];
1748 #ifdef DIAGNOSTIC
1749 			RE_TXDESCSYNC(sc, curdesc,
1750 			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1751 			cmdstat = le32toh(d->re_cmdstat);
1752 			RE_TXDESCSYNC(sc, curdesc, BUS_DMASYNC_PREREAD);
1753 			if (cmdstat & RE_TDESC_STAT_OWN) {
1754 				panic("%s: tried to map busy TX descriptor",
1755 				    device_xname(sc->sc_dev));
1756 			}
1757 #endif
1758 
1759 			d->re_vlanctl = htole32(vlanctl);
1760 			re_set_bufaddr(d, map->dm_segs[seg].ds_addr);
1761 			cmdstat = re_flags | map->dm_segs[seg].ds_len;
1762 			if (seg == 0)
1763 				cmdstat |= RE_TDESC_CMD_SOF;
1764 			else
1765 				cmdstat |= RE_TDESC_CMD_OWN;
1766 			if (curdesc == (RE_TX_DESC_CNT(sc) - 1))
1767 				cmdstat |= RE_TDESC_CMD_EOR;
1768 			if (seg == nsegs - 1) {
1769 				cmdstat |= RE_TDESC_CMD_EOF;
1770 				lastdesc = curdesc;
1771 			}
1772 			d->re_cmdstat = htole32(cmdstat);
1773 			RE_TXDESCSYNC(sc, curdesc,
1774 			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1775 		}
1776 		if (__predict_false(pad)) {
1777 			d = &sc->re_ldata.re_tx_list[curdesc];
1778 			d->re_vlanctl = htole32(vlanctl);
1779 			re_set_bufaddr(d, RE_TXPADDADDR(sc));
1780 			cmdstat = re_flags |
1781 			    RE_TDESC_CMD_OWN | RE_TDESC_CMD_EOF |
1782 			    (RE_IP4CSUMTX_PADLEN + 1 - m->m_pkthdr.len);
1783 			if (curdesc == (RE_TX_DESC_CNT(sc) - 1))
1784 				cmdstat |= RE_TDESC_CMD_EOR;
1785 			d->re_cmdstat = htole32(cmdstat);
1786 			RE_TXDESCSYNC(sc, curdesc,
1787 			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1788 			lastdesc = curdesc;
1789 			curdesc = RE_NEXT_TX_DESC(sc, curdesc);
1790 		}
1791 		KASSERT(lastdesc != -1);
1792 
1793 		/* Transfer ownership of packet to the chip. */
1794 
1795 		sc->re_ldata.re_tx_list[startdesc].re_cmdstat |=
1796 		    htole32(RE_TDESC_CMD_OWN);
1797 		RE_TXDESCSYNC(sc, startdesc,
1798 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1799 
1800 		/* update info of TX queue and descriptors */
1801 		txq->txq_mbuf = m;
1802 		txq->txq_descidx = lastdesc;
1803 		txq->txq_nsegs = nsegs;
1804 
1805 		sc->re_ldata.re_txq_free--;
1806 		sc->re_ldata.re_tx_free -= nsegs;
1807 		sc->re_ldata.re_tx_nextfree = curdesc;
1808 
1809 		/*
1810 		 * If there's a BPF listener, bounce a copy of this frame
1811 		 * to it.
1812 		 */
1813 		bpf_mtap(ifp, m, BPF_D_OUT);
1814 	}
1815 
1816 	if (sc->re_ldata.re_txq_free < ofree) {
1817 		/*
1818 		 * At least one packet was enqueued; update the producer index.
1819 		 */
1820 		sc->re_ldata.re_txq_prodidx = idx;
1821 
1822 		/*
1823 		 * Start the transmitter to poll.
1824 		 *
1825 		 * RealTek put the TX poll request register in a different
1826 		 * location on the 8169 gigE chip. I don't know why.
1827 		 */
1828 		if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0)
1829 			CSR_WRITE_1(sc, RTK_TXSTART, RTK_TXSTART_START);
1830 		else
1831 			CSR_WRITE_1(sc, RTK_GTXSTART, RTK_TXSTART_START);
1832 
1833 		if ((sc->sc_quirk & RTKQ_IM_HW) == 0) {
1834 			/*
1835 			 * Use the countdown timer for interrupt moderation.
1836 			 * 'TX done' interrupts are disabled. Instead, we reset
1837 			 * the countdown timer, which counts up until it reaches
1838 			 * the value in the TIMERINT register and then triggers
1839 			 * an interrupt. Each write to the TIMERCNT register
1840 			 * resets the timer count to 0.
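			 * (The TIMERINT interval itself is programmed in
			 * re_init() from the 'defer'/'period' calculation
			 * there.)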
1841 			 */
1842 			CSR_WRITE_4(sc, RTK_TIMERCNT, 1);
1843 		}
1844 
1845 		/*
1846 		 * Set a timeout in case the chip goes out to lunch.
1847 		 */
1848 		ifp->if_timer = 5;
1849 	}
1850 }
1851 
1852 static int
1853 re_init(struct ifnet *ifp)
1854 {
1855 	struct rtk_softc *sc = ifp->if_softc;
1856 	uint32_t rxcfg = 0;
1857 	uint16_t cfg;
1858 	int error;
1859 #ifdef RE_USE_EECMD
1860 	const uint8_t *enaddr;
1861 	uint32_t reg;
1862 #endif
1863 
1864 	if ((error = re_enable(sc)) != 0)
1865 		goto out;
1866 
1867 	/*
1868 	 * Cancel pending I/O and free all RX/TX buffers.
1869 	 */
1870 	re_stop(ifp, 0);
1871 
1872 	re_reset(sc);
1873 
1874 	/*
1875 	 * Enable C+ RX and TX mode, as well as VLAN stripping and
1876 	 * RX checksum offload. We must configure the C+ register
1877 	 * before all others.
1878 	 */
1879 	cfg = RE_CPLUSCMD_PCI_MRW;
1880 
1881 	/*
1882 	 * XXX: For old 8169 set bit 14.
1883 	 * XXX: For the old 8169, set bit 14.
1884 	 *      For 8169S/8110S and newer, do not set bit 14.
1885 	if ((sc->sc_quirk & RTKQ_8169NONS) != 0)
1886 		cfg |= (0x1 << 14);
1887 
1888 	if ((sc->ethercom.ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0)
1889 		cfg |= RE_CPLUSCMD_VLANSTRIP;
1890 	if ((ifp->if_capenable & (IFCAP_CSUM_IPv4_Rx |
1891 	     IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) != 0)
1892 		cfg |= RE_CPLUSCMD_RXCSUM_ENB;
1893 	if ((sc->sc_quirk & RTKQ_MACSTAT) != 0) {
1894 		cfg |= RE_CPLUSCMD_MACSTAT_DIS;
1895 		cfg |= RE_CPLUSCMD_TXENB;
1896 	} else
1897 		cfg |= RE_CPLUSCMD_RXENB | RE_CPLUSCMD_TXENB;
1898 
1899 	CSR_WRITE_2(sc, RTK_CPLUS_CMD, cfg);
1900 
1901 	/* XXX: from Realtek-supplied Linux driver. Wholly undocumented. */
1902 	if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) {
1903 		if ((sc->sc_quirk & RTKQ_IM_HW) == 0) {
1904 			CSR_WRITE_2(sc, RTK_IM, 0x0000);
1905 		} else {
1906 			CSR_WRITE_2(sc, RTK_IM, 0x5151);
1907 		}
1908 	}
1909 
1910 	DELAY(10000);
1911 
1912 #ifdef RE_USE_EECMD
1913 	/*
1914 	 * Init our MAC address.  Even though the chipset
1915 	 * documentation doesn't mention it, we need to enter "Config
1916 	 * register write enable" mode to modify the ID registers.
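	 * The ID registers are accessed 32 bits at a time, so the 6-byte
	 * address is written as one full doubleword (IDR0) plus one
	 * half-filled doubleword (IDR4), as below.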
1917 	 */
1918 	CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_WRITECFG);
1919 	enaddr = CLLADDR(ifp->if_sadl);
1920 	reg = enaddr[0] | (enaddr[1] << 8) |
1921 	    (enaddr[2] << 16) | (enaddr[3] << 24);
1922 	CSR_WRITE_4(sc, RTK_IDR0, reg);
1923 	reg = enaddr[4] | (enaddr[5] << 8);
1924 	CSR_WRITE_4(sc, RTK_IDR4, reg);
1925 	CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_OFF);
1926 #endif
1927 
1928 	/*
1929 	 * For C+ mode, initialize the RX descriptors and mbufs.
1930 	 */
1931 	re_rx_list_init(sc);
1932 	re_tx_list_init(sc);
1933 
1934 	/*
1935 	 * Load the addresses of the RX and TX lists into the chip.
1936 	 */
1937 	CSR_WRITE_4(sc, RTK_RXLIST_ADDR_HI,
1938 	    RE_ADDR_HI(sc->re_ldata.re_rx_list_map->dm_segs[0].ds_addr));
1939 	CSR_WRITE_4(sc, RTK_RXLIST_ADDR_LO,
1940 	    RE_ADDR_LO(sc->re_ldata.re_rx_list_map->dm_segs[0].ds_addr));
1941 
1942 	CSR_WRITE_4(sc, RTK_TXLIST_ADDR_HI,
1943 	    RE_ADDR_HI(sc->re_ldata.re_tx_list_map->dm_segs[0].ds_addr));
1944 	CSR_WRITE_4(sc, RTK_TXLIST_ADDR_LO,
1945 	    RE_ADDR_LO(sc->re_ldata.re_tx_list_map->dm_segs[0].ds_addr));
1946 
1947 	if (sc->sc_quirk & RTKQ_RXDV_GATED) {
1948 		CSR_WRITE_4(sc, RTK_MISC,
1949 		    CSR_READ_4(sc, RTK_MISC) & ~RTK_MISC_RXDV_GATED_EN);
1950 	}
1951 
1952 	/*
1953 	 * Enable transmit and receive.
1954 	 */
1955 	if ((sc->sc_quirk & RTKQ_TXRXEN_LATER) == 0)
1956 		CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB);
1957 
1958 	/*
1959 	 * Set the initial TX and RX configuration.
1960 	 */
1961 	if (sc->re_testmode && (sc->sc_quirk & RTKQ_8169NONS) != 0) {
1962 		/* test mode is needed only for old 8169 */
1963 		CSR_WRITE_4(sc, RTK_TXCFG,
1964 		    RE_TXCFG_CONFIG | RTK_LOOPTEST_ON);
1965 	} else
1966 		CSR_WRITE_4(sc, RTK_TXCFG, RE_TXCFG_CONFIG);
1967 
1968 	CSR_WRITE_1(sc, RTK_EARLY_TX_THRESH, 16);
1969 
1970 	CSR_WRITE_4(sc, RTK_RXCFG, RE_RXCFG_CONFIG);
1971 
1972 	/* Set the individual bit to receive frames for this host only. */
1973 	rxcfg = CSR_READ_4(sc, RTK_RXCFG);
1974 	rxcfg |= RTK_RXCFG_RX_INDIV;
1975 
1976 	/* If we want promiscuous mode, set the allframes bit. */
1977 	if (ifp->if_flags & IFF_PROMISC)
1978 		rxcfg |= RTK_RXCFG_RX_ALLPHYS;
1979 	else
1980 		rxcfg &= ~RTK_RXCFG_RX_ALLPHYS;
1981 	CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1982 
1983 	/*
1984 	 * Set capture broadcast bit to capture broadcast frames.
1985 	 */
1986 	if (ifp->if_flags & IFF_BROADCAST)
1987 		rxcfg |= RTK_RXCFG_RX_BROAD;
1988 	else
1989 		rxcfg &= ~RTK_RXCFG_RX_BROAD;
1990 	CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1991 
1992 	/*
1993 	 * Program the multicast filter, if necessary.
1994 	 */
1995 	rtk_setmulti(sc);
1996 
1997 	/*
1998 	 * Some chips require TX/RX to be enabled *after* TX/RX configuration.
1999 	 */
2000 	if ((sc->sc_quirk & RTKQ_TXRXEN_LATER) != 0)
2001 		CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB);
2002 
2003 	/*
2004 	 * Enable interrupts.
2005 	 */
2006 	if (sc->re_testmode)
2007 		CSR_WRITE_2(sc, RTK_IMR, 0);
2008 	else if ((sc->sc_quirk & RTKQ_IM_HW) != 0)
2009 		CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS_IM_HW);
2010 	else
2011 		CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS_CPLUS);
2012 
2013 	/* Start RX/TX process. */
2014 	CSR_WRITE_4(sc, RTK_MISSEDPKT, 0);
2015 #ifdef notdef
2016 	/* Enable receiver and transmitter. */
2017 	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB);
2018 #endif
2019 
2020 	/*
2021 	 * Initialize the timer interrupt register so that
2022 	 * a timer interrupt will be generated once the timer
2023 	 * reaches a certain number of ticks. The timer is
2024 	 * reloaded on each transmit. This gives us TX interrupt
2025 	 * moderation, which dramatically improves TX frame rate.
2026 	 */
2027 
2028 	unsigned defer;		/* timer interval / ns */
2029 	unsigned period;	/* busclock period / ns */
2030 
2031 	/*
2032 	 * Maximum frame rate
2033 	 * 1500 byte PDU -> 81274 Hz
2034 	 *   46 byte PDU -> 1488096 Hz
2035 	 *
2036 	 * Deferring interrupts by up to 128us needs descriptors for
2037 	 * 1500 byte PDU -> 10.4 frames
2038 	 *   46 byte PDU -> 190.4 frames
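	 * (Derivation: at 1 Gb/s each frame occupies PDU + 38 bytes of
	 * wire overhead - 8 preamble + 14 header + 4 FCS + 12 IFG - so
	 * 10^9 / (8 * (1500 + 38)) = 81274 frames/s, and
	 * 81274 * 128us = 10.4 frames per deferral interval.)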
2039 	 *
2040 	 */
2041 	defer = 128000;
2042 
2043 	if ((sc->sc_quirk & RTKQ_IM_HW) != 0) {
2044 		period = 1;
2045 		defer = 0;
2046 	} else if ((sc->sc_quirk & RTKQ_PCIE) != 0) {
2047 		period = 8;
2048 	} else {
2049 		switch (CSR_READ_1(sc, RTK_CFG2_BUSFREQ) & 0x7) {
2050 		case RTK_BUSFREQ_33MHZ:
2051 			period = 30;
2052 			break;
2053 		case RTK_BUSFREQ_66MHZ:
2054 			period = 15;
2055 			break;
2056 		default:
2057 			/* lowest possible clock */
2058 			period = 60;
2059 			break;
2060 		}
2061 	}
2062 
2063 	/* Timer Interrupt register address varies */
2064 	uint16_t re8139_reg;
2065 	if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0)
2066 		re8139_reg = RTK_TIMERINT;
2067 	else
2068 		re8139_reg = RTK_TIMERINT_8169;
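	/*
	 * E.g. a 33 MHz PCI bus programs 128000 / 30 = 4266 timer ticks
	 * here; PCIe, 128000 / 8 = 16000. With hardware interrupt
	 * moderation (RTKQ_IM_HW) the register is simply set to 0.
	 */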
2069 	CSR_WRITE_4(sc, re8139_reg, defer / period);
2070 
2071 	if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) {
2072 		/*
2073 		 * For 8169 gigE NICs, set the max allowed RX packet
2074 		 * size so we can receive jumbo frames.
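		 * (16383 = 0x3fff, presumably the maximum the register's
		 * 14-bit length field can hold.)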
2075 		 */
2076 		CSR_WRITE_2(sc, RTK_MAXRXPKTLEN, 16383);
2077 	}
2078 
2079 	if (sc->re_testmode)
2080 		return 0;
2081 
2082 	CSR_WRITE_1(sc, RTK_CFG1, RTK_CFG1_DRVLOAD);
2083 
2084 	ifp->if_flags |= IFF_RUNNING;
2085 	ifp->if_flags &= ~IFF_OACTIVE;
2086 
2087 	callout_schedule(&sc->rtk_tick_ch, hz);
2088 
2089  out:
2090 	if (error) {
2091 		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2092 		ifp->if_timer = 0;
2093 		printf("%s: interface not running\n",
2094 		    device_xname(sc->sc_dev));
2095 	}
2096 
2097 	return error;
2098 }
2099 
2100 static int
2101 re_ioctl(struct ifnet *ifp, u_long command, void *data)
2102 {
2103 	struct rtk_softc *sc = ifp->if_softc;
2104 	struct ifreq *ifr = data;
2105 	int s, error = 0;
2106 
2107 	s = splnet();
2108 
2109 	switch (command) {
2110 	case SIOCSIFMTU:
2111 		/*
2112 		 * Reject jumbo-frame MTUs if the chip doesn't support them.
2113 		 */
2114 		if ((sc->sc_quirk & RTKQ_NOJUMBO) != 0 &&
2115 		    ifr->ifr_mtu > ETHERMTU) {
2116 			error = EINVAL;
2117 			break;
2118 		}
2119 
2120 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU_JUMBO)
2121 			error = EINVAL;
2122 		else if ((error = ifioctl_common(ifp, command, data)) ==
2123 		    ENETRESET)
2124 			error = 0;
2125 		break;
2126 	default:
2127 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
2128 			break;
2129 
2130 		error = 0;
2131 
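		/*
		 * Capability changes require a full reinit; multicast
		 * filter changes take effect via rtk_setmulti(), but
		 * only matter while the interface is running. Other
		 * ENETRESET cases need no action.
		 */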
2132 		if (command == SIOCSIFCAP)
2133 			error = (*ifp->if_init)(ifp);
2134 		else if (command != SIOCADDMULTI && command != SIOCDELMULTI)
2135 			;
2136 		else if (ifp->if_flags & IFF_RUNNING)
2137 			rtk_setmulti(sc);
2138 		break;
2139 	}
2140 
2141 	splx(s);
2142 
2143 	return error;
2144 }
2145 
2146 static void
2147 re_watchdog(struct ifnet *ifp)
2148 {
2149 	struct rtk_softc *sc;
2150 	int s;
2151 
2152 	sc = ifp->if_softc;
2153 	s = splnet();
2154 	printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
2155 	if_statinc(ifp, if_oerrors);
2156 
2157 	re_txeof(sc);
2158 	re_rxeof(sc);
2159 
2160 	re_init(ifp);
2161 
2162 	splx(s);
2163 }
2164 
2165 /*
2166  * Stop the adapter and free any mbufs allocated to the
2167  * RX and TX lists.
2168  */
2169 static void
2170 re_stop(struct ifnet *ifp, int disable)
2171 {
2172 	int i;
2173 	struct rtk_softc *sc = ifp->if_softc;
2174 
2175 	callout_stop(&sc->rtk_tick_ch);
2176 
2177 	mii_down(&sc->mii);
2178 
2179 	if ((sc->sc_quirk & RTKQ_CMDSTOP) != 0)
2180 		CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_STOPREQ | RTK_CMD_TX_ENB |
2181 		    RTK_CMD_RX_ENB);
2182 	else
2183 		CSR_WRITE_1(sc, RTK_COMMAND, 0x00);
2184 	DELAY(1000);
2185 	CSR_WRITE_2(sc, RTK_IMR, 0x0000);
2186 	CSR_WRITE_2(sc, RTK_ISR, 0xFFFF);
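	/* ISR bits are write-one-to-clear; this acks all pending interrupts. */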
2187 
2188 	if (sc->re_head != NULL) {
2189 		m_freem(sc->re_head);
2190 		sc->re_head = sc->re_tail = NULL;
2191 	}
2192 
2193 	/* Free the TX list buffers. */
2194 	for (i = 0; i < RE_TX_QLEN; i++) {
2195 		if (sc->re_ldata.re_txq[i].txq_mbuf != NULL) {
2196 			bus_dmamap_unload(sc->sc_dmat,
2197 			    sc->re_ldata.re_txq[i].txq_dmamap);
2198 			m_freem(sc->re_ldata.re_txq[i].txq_mbuf);
2199 			sc->re_ldata.re_txq[i].txq_mbuf = NULL;
2200 		}
2201 	}
2202 
2203 	/* Free the RX list buffers. */
2204 	for (i = 0; i < RE_RX_DESC_CNT; i++) {
2205 		if (sc->re_ldata.re_rxsoft[i].rxs_mbuf != NULL) {
2206 			bus_dmamap_unload(sc->sc_dmat,
2207 			    sc->re_ldata.re_rxsoft[i].rxs_dmamap);
2208 			m_freem(sc->re_ldata.re_rxsoft[i].rxs_mbuf);
2209 			sc->re_ldata.re_rxsoft[i].rxs_mbuf = NULL;
2210 		}
2211 	}
2212 
2213 	if (disable)
2214 		re_disable(sc);
2215 
2216 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2217 	ifp->if_timer = 0;
2218 }
2219