/*	$OpenBSD: re.c,v 1.211 2021/05/17 11:59:53 visa Exp $	*/
/*	$FreeBSD: if_re.c,v 1.31 2004/09/04 07:54:05 ru Exp $	*/
/*
 * Copyright (c) 1997, 1998-2003
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Realtek 8139C+/8169/8169S/8110S PCI NIC driver
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * This driver is designed to support Realtek's next generation of
 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
 * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E.
 *
 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
 * with the older 8139 family; however, it also supports a special
 * C+ mode of operation that provides several new performance-enhancing
 * features. These include:
 *
 *	o Descriptor based DMA mechanism. Each descriptor represents
 *	  a single packet fragment. Data buffers may be aligned on
 *	  any byte boundary.
 *
 *	o 64-bit DMA
 *
 *	o TCP/IP checksum offload for both RX and TX
 *
 *	o High and normal priority transmit DMA rings
 *
 *	o VLAN tag insertion and extraction
 *
 *	o TCP large send (segmentation offload)
 *
 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
 * programming API is fairly straightforward. The RX filtering, EEPROM
 * access and PHY access are the same as on the older 8139 series
 * chips.
 *
 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
 * same programming API and feature set as the 8139C+ with the following
 * differences and additions:
 *
 *	o 1000Mbps mode
 *
 *	o Jumbo frames
 *
 *	o GMII and TBI ports/registers for interfacing with copper
 *	  or fiber PHYs
 *
 *	o RX and TX DMA rings can have up to 1024 descriptors
 *	  (the 8139C+ allows a maximum of 64)
 *
 *	o Slight differences in register layout from the 8139C+
 *
 * The TX start and timer interrupt registers are at different locations
 * on the 8169 than they are on the 8139C+. Also, the status word in the
 * RX descriptor has a slightly different bit layout. The 8169 does not
 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
 * copper gigE PHY.
 *
 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
 * (the 'S' stands for 'single-chip'). These devices have the same
 * programming API as the older 8169, but also have some vendor-specific
 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
 * part designed to be pin-compatible with the Realtek 8100 10/100 chip.
 *
 * This driver takes advantage of the RX and TX checksum offload and
 * VLAN tag insertion/extraction features. It also implements TX
 * interrupt moderation using the timer interrupt registers, which
 * significantly reduces TX interrupt load. There is also support
 * for jumbo frames; however, the 8169/8169S/8110S cannot transmit
 * jumbo frames larger than 7440 bytes, so the maximum MTU possible
 * with this driver is 7422 bytes.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>
#include <sys/atomic.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcidevs.h>

#include <dev/ic/rtl81x9reg.h>
#include <dev/ic/revar.h>

#ifdef RE_DEBUG
int redebug = 0;
#define DPRINTF(x)	do { if (redebug) printf x; } while (0)
#else
#define DPRINTF(x)
#endif

static inline void re_set_bufaddr(struct rl_desc *, bus_addr_t);

int	re_encap(struct rl_softc *, unsigned int, struct mbuf *);

int	re_newbuf(struct rl_softc *);
int	re_rx_list_init(struct rl_softc *);
void	re_rx_list_fill(struct rl_softc *);
int	re_tx_list_init(struct rl_softc *);
int	re_rxeof(struct rl_softc *);
int	re_txeof(struct rl_softc *);
void	re_tick(void *);
void	re_start(struct ifqueue *);
void	re_txstart(void *);
int	re_ioctl(struct ifnet *, u_long, caddr_t);
void	re_watchdog(struct ifnet *);
int	re_ifmedia_upd(struct ifnet *);
void	re_ifmedia_sts(struct ifnet *, struct ifmediareq *);

void	re_set_jumbo(struct rl_softc *);

void	re_eeprom_putbyte(struct rl_softc *, int);
void	re_eeprom_getword(struct rl_softc *, int, u_int16_t *);
void	re_read_eeprom(struct rl_softc *, caddr_t, int, int);

int	re_gmii_readreg(struct device *, int, int);
void	re_gmii_writereg(struct device *, int, int, int);

int	re_miibus_readreg(struct device *, int, int);
void	re_miibus_writereg(struct device *, int, int, int);
void	re_miibus_statchg(struct device *);

void	re_iff(struct rl_softc *);

void	re_setup_hw_im(struct rl_softc *);
void	re_setup_sim_im(struct rl_softc *);
void	re_disable_hw_im(struct rl_softc *);
void	re_disable_sim_im(struct rl_softc *);
void	re_config_imtype(struct rl_softc *, int);
void	re_setup_intr(struct rl_softc *, int, int);
#ifndef SMALL_KERNEL
int	re_wol(struct ifnet *, int);
#endif

void	in_delayed_cksum(struct mbuf *);

struct cfdriver re_cd = {
	NULL, "re", DV_IFNET
};

#define EE_SET(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) | x)

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) & ~x)

#define RL_FRAMELEN(mtu)				\
	(mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +		\
		ETHER_VLAN_ENCAP_LEN)

static const struct re_revision {
	u_int32_t		re_chipid;
	const char		*re_name;
} re_revisions[] = {
	{ RL_HWREV_8100,	"RTL8100" },
	{ RL_HWREV_8100E,	"RTL8100E" },
	{ RL_HWREV_8100E_SPIN2,	"RTL8100E 2" },
	{ RL_HWREV_8101,	"RTL8101" },
	{ RL_HWREV_8101E,	"RTL8101E" },
	{ RL_HWREV_8102E,	"RTL8102E" },
	{ RL_HWREV_8106E,	"RTL8106E" },
	{ RL_HWREV_8401E,	"RTL8401E" },
	{ RL_HWREV_8402,	"RTL8402" },
	{ RL_HWREV_8411,	"RTL8411" },
	{ RL_HWREV_8411B,	"RTL8411B" },
	{ RL_HWREV_8102EL,	"RTL8102EL" },
	{ RL_HWREV_8102EL_SPIN1, "RTL8102EL 1" },
	{ RL_HWREV_8103E,	"RTL8103E" },
	{ RL_HWREV_8110S,	"RTL8110S" },
	{ RL_HWREV_8139CPLUS,	"RTL8139C+" },
	{ RL_HWREV_8168B_SPIN1,	"RTL8168 1" },
	{ RL_HWREV_8168B_SPIN2,	"RTL8168 2" },
	{ RL_HWREV_8168B_SPIN3,	"RTL8168 3" },
	{ RL_HWREV_8168C,	"RTL8168C/8111C" },
	{ RL_HWREV_8168C_SPIN2,	"RTL8168C/8111C" },
	{ RL_HWREV_8168CP,	"RTL8168CP/8111CP" },
	{ RL_HWREV_8168F,	"RTL8168F/8111F" },
	{ RL_HWREV_8168G,	"RTL8168G/8111G" },
	{ RL_HWREV_8168GU,	"RTL8168GU/8111GU" },
	{ RL_HWREV_8168H,	"RTL8168H/8111H" },
	{ RL_HWREV_8105E,	"RTL8105E" },
	{ RL_HWREV_8105E_SPIN1,	"RTL8105E" },
	{ RL_HWREV_8168D,	"RTL8168D/8111D" },
	{ RL_HWREV_8168DP,	"RTL8168DP/8111DP" },
	{ RL_HWREV_8168E,	"RTL8168E/8111E" },
	{ RL_HWREV_8168E_VL,	"RTL8168E/8111E-VL" },
	{ RL_HWREV_8168EP,	"RTL8168EP/8111EP" },
	{ RL_HWREV_8168FP,	"RTL8168FP/8111FP" },
	{ RL_HWREV_8169,	"RTL8169" },
	{ RL_HWREV_8169_8110SB,	"RTL8169/8110SB" },
	{ RL_HWREV_8169_8110SBL, "RTL8169SBL" },
	{ RL_HWREV_8169_8110SCd, "RTL8169/8110SCd" },
	{ RL_HWREV_8169_8110SCe, "RTL8169/8110SCe" },
	{ RL_HWREV_8169S,	"RTL8169S" },

	{ 0, NULL }
};

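/*
 * Store a bus address in a descriptor: the low 32 bits always, and
 * the high 32 bits only when bus_addr_t is 64 bits wide.
 */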
static inline void
re_set_bufaddr(struct rl_desc *d, bus_addr_t addr)
{
	d->rl_bufaddr_lo = htole32((uint32_t)addr);
	if (sizeof(bus_addr_t) == sizeof(uint64_t))
		d->rl_bufaddr_hi = htole32((uint64_t)addr >> 32);
	else
		d->rl_bufaddr_hi = 0;
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
void
re_eeprom_putbyte(struct rl_softc *sc, int addr)
{
	int	d, i;

	d = addr | (RL_9346_READ << sc->rl_eewidth);

	/*
	 * Feed in each bit and strobe the clock.
	 */

	for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) {
		if (d & i)
			EE_SET(RL_EE_DATAIN);
		else
			EE_CLR(RL_EE_DATAIN);
		DELAY(100);
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
void
re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
{
	int		i;
	u_int16_t	word = 0;

	/*
	 * Send address of word we want to read.
	 */
	re_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= i;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 */
void
re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt)
{
	int		i;
	u_int16_t	word = 0, *ptr;

	CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);

	DELAY(100);

	for (i = 0; i < cnt; i++) {
		CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL);
		re_eeprom_getword(sc, off + i, &word);
		CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL);
		ptr = (u_int16_t *)(dest + (i * 2));
		*ptr = word;
	}

	CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
}

int
re_gmii_readreg(struct device *self, int phy, int reg)
{
	struct rl_softc	*sc = (struct rl_softc *)self;
	u_int32_t	rval;
	int		i;

	if (phy != 7)
		return (0);

	/* Let the rgephy driver read the GMEDIASTAT register */

	if (reg == RL_GMEDIASTAT) {
		rval = CSR_READ_1(sc, RL_GMEDIASTAT);
		return (rval);
	}

	CSR_WRITE_4(sc, RL_PHYAR, reg << 16);

	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (rval & RL_PHYAR_BUSY)
			break;
		DELAY(25);
	}

	if (i == RL_PHY_TIMEOUT) {
		printf("%s: PHY read failed\n", sc->sc_dev.dv_xname);
		return (0);
	}

	DELAY(20);

	return (rval & RL_PHYAR_PHYDATA);
}

void
re_gmii_writereg(struct device *dev, int phy, int reg, int data)
{
	struct rl_softc	*sc = (struct rl_softc *)dev;
	u_int32_t	rval;
	int		i;

	CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
	    (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);

	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (!(rval & RL_PHYAR_BUSY))
			break;
		DELAY(25);
	}

	if (i == RL_PHY_TIMEOUT)
		printf("%s: PHY write failed\n", sc->sc_dev.dv_xname);

	DELAY(20);
}

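/*
 * Read a PHY register. The 8139C+ exposes its internal PHY through
 * 8139-style registers; everything else goes through the GMII access
 * routines above.
 */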
int
re_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct rl_softc	*sc = (struct rl_softc *)dev;
	u_int16_t	rval = 0;
	u_int16_t	re8139_reg = 0;
	int		s;

	s = splnet();

	if (sc->sc_hwrev != RL_HWREV_8139CPLUS) {
		rval = re_gmii_readreg(dev, phy, reg);
		splx(s);
		return (rval);
	}

	/* Pretend the internal PHY is only at address 0 */
	if (phy) {
		splx(s);
		return (0);
	}
	switch (reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		splx(s);
		return (0);
	/*
	 * Allow the rlphy driver to read the media status
	 * register. If we have a link partner which does not
	 * support NWAY, this is the register which will tell
	 * us the results of parallel detection.
	 */
	case RL_MEDIASTAT:
		rval = CSR_READ_1(sc, RL_MEDIASTAT);
		splx(s);
		return (rval);
	default:
		printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg);
		splx(s);
		return (0);
	}
	rval = CSR_READ_2(sc, re8139_reg);
	if (re8139_reg == RL_BMCR) {
		/* 8139C+ has different bit layout. */
		rval &= ~(BMCR_LOOP | BMCR_ISO);
	}
	splx(s);
	return (rval);
}

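/*
 * Write a PHY register, with the same 8139C+ register translation
 * as the read path above.
 */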
void
re_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct rl_softc	*sc = (struct rl_softc *)dev;
	u_int16_t	re8139_reg = 0;
	int		s;

	s = splnet();

	if (sc->sc_hwrev != RL_HWREV_8139CPLUS) {
		re_gmii_writereg(dev, phy, reg, data);
		splx(s);
		return;
	}

	/* Pretend the internal PHY is only at address 0 */
	if (phy) {
		splx(s);
		return;
	}
	switch (reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		/* 8139C+ has different bit layout. */
		data &= ~(BMCR_LOOP | BMCR_ISO);
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		splx(s);
		return;
	default:
		printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg);
		splx(s);
		return;
	}
	CSR_WRITE_2(sc, re8139_reg, data);
	splx(s);
}

void
re_miibus_statchg(struct device *dev)
{
	struct rl_softc		*sc = (struct rl_softc *)dev;
	struct ifnet		*ifp = &sc->sc_arpcom.ac_if;
	struct mii_data		*mii = &sc->sc_mii;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	sc->rl_flags &= ~RL_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->rl_flags |= RL_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
				break;
			sc->rl_flags |= RL_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Realtek controllers do not provide an interface to
	 * Tx/Rx MACs for resolved speed, duplex and flow-control
	 * parameters.
	 */
}

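/*
 * Program the RX filter. Unicast and broadcast frames are always
 * accepted; the 64-bit multicast hash is built from the top 6 bits
 * of the big-endian CRC32 of each group address.
 */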
void
re_iff(struct rl_softc *sc)
{
	struct ifnet		*ifp = &sc->sc_arpcom.ac_if;
	int			h = 0;
	u_int32_t		hashes[2];
	u_int32_t		rxfilt;
	struct arpcom		*ac = &sc->sc_arpcom;
	struct ether_multi	*enm;
	struct ether_multistep	step;

	rxfilt = CSR_READ_4(sc, RL_RXCFG);
	rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD |
	    RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RL_RXCFG_RX_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RL_RXCFG_RX_ALLPHYS;
		hashes[0] = hashes[1] = 0xFFFFFFFF;
	} else {
		rxfilt |= RL_RXCFG_RX_MULTI;
		/* Program new filter. */
		bzero(hashes, sizeof(hashes));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/*
	 * For some unfathomable reason, Realtek decided to reverse
	 * the order of the multicast hash registers in the PCI Express
	 * parts. This means we have to write the hash pattern in reverse
	 * order for those devices.
	 */
	if (sc->rl_flags & RL_FLAG_PCIE) {
		CSR_WRITE_4(sc, RL_MAR0, swap32(hashes[1]));
		CSR_WRITE_4(sc, RL_MAR4, swap32(hashes[0]));
	} else {
		CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
		CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
	}

	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
}

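/*
 * Issue a software reset and spin until the chip deasserts the reset
 * bit; chips flagged RL_FLAG_MACRESET also need an LDPS write here.
 */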
void
re_reset(struct rl_softc *sc)
{
	int	i;

	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);

	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
			break;
	}
	if (i == RL_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);

	if (sc->rl_flags & RL_FLAG_MACRESET)
		CSR_WRITE_1(sc, RL_LDPS, 1);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
int
re_attach(struct rl_softc *sc, const char *intrstr)
{
	u_char		eaddr[ETHER_ADDR_LEN];
	u_int16_t	as[ETHER_ADDR_LEN / 2];
	struct ifnet	*ifp;
	u_int16_t	re_did = 0;
	int		error = 0, i;
	const struct re_revision *rr;
	const char	*re_name = NULL;

	sc->sc_hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;

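	/*
	 * Set chip-specific quirk flags and the maximum supported MTU
	 * based on the hardware revision read from TXCFG.
	 */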
	switch (sc->sc_hwrev) {
	case RL_HWREV_8139CPLUS:
		sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD;
		sc->rl_max_mtu = RL_MTU;
		break;
	case RL_HWREV_8100E:
	case RL_HWREV_8100E_SPIN2:
	case RL_HWREV_8101E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER;
		sc->rl_max_mtu = RL_MTU;
		break;
	case RL_HWREV_8103E:
		sc->rl_flags |= RL_FLAG_MACSLEEP;
		/* FALLTHROUGH */
	case RL_HWREV_8102E:
	case RL_HWREV_8102EL:
	case RL_HWREV_8102EL_SPIN1:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_FASTETHER |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
		sc->rl_max_mtu = RL_MTU;
		break;
	case RL_HWREV_8401E:
	case RL_HWREV_8105E:
	case RL_HWREV_8105E_SPIN1:
	case RL_HWREV_8106E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
		sc->rl_max_mtu = RL_MTU;
		break;
	case RL_HWREV_8402:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD |
		    RL_FLAG_CMDSTOP_WAIT_TXQ;
		sc->rl_max_mtu = RL_MTU;
		break;
	case RL_HWREV_8168B_SPIN1:
	case RL_HWREV_8168B_SPIN2:
		sc->rl_flags |= RL_FLAG_WOLRXENB;
		/* FALLTHROUGH */
	case RL_HWREV_8168B_SPIN3:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT;
		sc->rl_max_mtu = RL_MTU;
		break;
	case RL_HWREV_8168C_SPIN2:
		sc->rl_flags |= RL_FLAG_MACSLEEP;
		/* FALLTHROUGH */
	case RL_HWREV_8168C:
	case RL_HWREV_8168CP:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
		sc->rl_max_mtu = RL_JUMBO_MTU_6K;
		break;
	case RL_HWREV_8168D:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_WOL_MANLINK;
		sc->rl_max_mtu = RL_JUMBO_MTU_9K;
		break;
	case RL_HWREV_8168DP:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD |
		    RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK;
		sc->rl_max_mtu = RL_JUMBO_MTU_9K;
		break;
	case RL_HWREV_8168E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_WOL_MANLINK;
		sc->rl_max_mtu = RL_JUMBO_MTU_9K;
		break;
	case RL_HWREV_8168E_VL:
		sc->rl_flags |= RL_FLAG_EARLYOFF | RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_CMDSTOP_WAIT_TXQ |
		    RL_FLAG_WOL_MANLINK;
		sc->rl_max_mtu = RL_JUMBO_MTU_6K;
		break;
	case RL_HWREV_8168F:
		sc->rl_flags |= RL_FLAG_EARLYOFF;
		/* FALLTHROUGH */
	case RL_HWREV_8411:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_CMDSTOP_WAIT_TXQ |
		    RL_FLAG_WOL_MANLINK;
		sc->rl_max_mtu = RL_JUMBO_MTU_9K;
		break;
	case RL_HWREV_8168EP:
	case RL_HWREV_8168FP:
	case RL_HWREV_8168G:
	case RL_HWREV_8168GU:
	case RL_HWREV_8168H:
	case RL_HWREV_8411B:
		if (sc->sc_product == PCI_PRODUCT_REALTEK_RT8101E) {
			/* RTL8106EUS */
			sc->rl_flags |= RL_FLAG_FASTETHER;
			sc->rl_max_mtu = RL_MTU;
		} else {
			sc->rl_flags |= RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
			sc->rl_max_mtu = RL_JUMBO_MTU_9K;
		}

		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_CMDSTOP_WAIT_TXQ |
		    RL_FLAG_EARLYOFFV2 | RL_FLAG_RXDV_GATED;
		break;
	case RL_HWREV_8169_8110SB:
	case RL_HWREV_8169_8110SBL:
	case RL_HWREV_8169_8110SCd:
	case RL_HWREV_8169_8110SCe:
		sc->rl_flags |= RL_FLAG_PHYWAKE;
		/* FALLTHROUGH */
	case RL_HWREV_8169:
	case RL_HWREV_8169S:
	case RL_HWREV_8110S:
		sc->rl_flags |= RL_FLAG_MACRESET;
		sc->rl_max_mtu = RL_JUMBO_MTU_7K;
		break;
	default:
		break;
	}

	if (sc->sc_hwrev == RL_HWREV_8139CPLUS) {
		sc->rl_cfg0 = RL_8139_CFG0;
		sc->rl_cfg1 = RL_8139_CFG1;
		sc->rl_cfg2 = 0;
		sc->rl_cfg3 = RL_8139_CFG3;
		sc->rl_cfg4 = RL_8139_CFG4;
		sc->rl_cfg5 = RL_8139_CFG5;
	} else {
		sc->rl_cfg0 = RL_CFG0;
		sc->rl_cfg1 = RL_CFG1;
		sc->rl_cfg2 = RL_CFG2;
		sc->rl_cfg3 = RL_CFG3;
		sc->rl_cfg4 = RL_CFG4;
		sc->rl_cfg5 = RL_CFG5;
	}

	/* Reset the adapter. */
	re_reset(sc);

	sc->rl_tx_time = 5;		/* 125us */
	sc->rl_rx_time = 2;		/* 50us */
	if (sc->rl_flags & RL_FLAG_PCIE)
		sc->rl_sim_time = 75;	/* 75us */
	else
		sc->rl_sim_time = 125;	/* 125us */
	sc->rl_imtype = RL_IMTYPE_SIM;	/* simulated interrupt moderation */

	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
		sc->rl_bus_speed = 33; /* XXX */
	else if (sc->rl_flags & RL_FLAG_PCIE)
		sc->rl_bus_speed = 125;
	else {
		u_int8_t cfg2;

		cfg2 = CSR_READ_1(sc, sc->rl_cfg2);
		switch (cfg2 & RL_CFG2_PCI_MASK) {
		case RL_CFG2_PCI_33MHZ:
			sc->rl_bus_speed = 33;
			break;
		case RL_CFG2_PCI_66MHZ:
			sc->rl_bus_speed = 66;
			break;
		default:
			printf("%s: unknown bus speed, assume 33MHz\n",
			    sc->sc_dev.dv_xname);
			sc->rl_bus_speed = 33;
			break;
		}

		if (cfg2 & RL_CFG2_PCI_64BIT)
			sc->rl_flags |= RL_FLAG_PCI64;
	}

	re_config_imtype(sc, sc->rl_imtype);

	if (sc->rl_flags & RL_FLAG_PAR) {
		/*
		 * XXX Should have a better way to extract station
		 * address from EEPROM.
		 */
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
	} else {
		sc->rl_eewidth = RL_9356_ADDR_LEN;
		re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
		if (re_did != 0x8129)
			sc->rl_eewidth = RL_9346_ADDR_LEN;

		/*
		 * Get station address from the EEPROM.
		 */
		re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
		for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
			as[i] = letoh16(as[i]);
		bcopy(as, eaddr, ETHER_ADDR_LEN);
	}

	/*
	 * Set RX length mask, TX poll request register
	 * and descriptor count.
	 */
	if (sc->sc_hwrev == RL_HWREV_8139CPLUS) {
		sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
		sc->rl_txstart = RL_TXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT;
		sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT;
		sc->rl_ldata.rl_tx_ndescs = RL_8139_NTXSEGS;
	} else {
		sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
		sc->rl_txstart = RL_GTXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT;
		sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT;
		sc->rl_ldata.rl_tx_ndescs = RL_8169_NTXSEGS;
	}

	bcopy(eaddr, (char *)&sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);

	for (rr = re_revisions; rr->re_name != NULL; rr++) {
		if (rr->re_chipid == sc->sc_hwrev)
			re_name = rr->re_name;
	}

	if (re_name == NULL)
		printf(": unknown ASIC (0x%04x)", sc->sc_hwrev >> 16);
	else
		printf(": %s (0x%04x)", re_name, sc->sc_hwrev >> 16);

	printf(", %s, address %s\n", intrstr,
	    ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Allocate DMA'able memory for the TX ring */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_TX_LIST_SZ(sc),
		    RL_RING_ALIGN, 0, &sc->rl_ldata.rl_tx_listseg, 1,
		    &sc->rl_ldata.rl_tx_listnseg, BUS_DMA_NOWAIT |
		    BUS_DMA_ZERO)) != 0) {
		printf("%s: can't allocate tx listseg, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	/* Load the map for the TX ring. */
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_tx_listseg,
		    sc->rl_ldata.rl_tx_listnseg, RL_TX_LIST_SZ(sc),
		    (caddr_t *)&sc->rl_ldata.rl_tx_list,
		    BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't map tx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, RL_TX_LIST_SZ(sc), 1,
		    RL_TX_LIST_SZ(sc), 0, 0,
		    &sc->rl_ldata.rl_tx_list_map)) != 0) {
		printf("%s: can't create tx list map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat,
		    sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
		    RL_TX_LIST_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load tx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/* Create DMA maps for TX buffers */
	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
		error = bus_dmamap_create(sc->sc_dmat,
		    RL_JUMBO_FRAMELEN, sc->rl_ldata.rl_tx_ndescs,
		    RL_JUMBO_FRAMELEN, 0, 0,
		    &sc->rl_ldata.rl_txq[i].txq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for TX\n",
			    sc->sc_dev.dv_xname);
			goto fail_4;
		}
	}

	/* Allocate DMA'able memory for the RX ring */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_RX_DMAMEM_SZ(sc),
		    RL_RING_ALIGN, 0, &sc->rl_ldata.rl_rx_listseg, 1,
		    &sc->rl_ldata.rl_rx_listnseg, BUS_DMA_NOWAIT |
		    BUS_DMA_ZERO)) != 0) {
		printf("%s: can't allocate rx listnseg, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_4;
	}

	/* Load the map for the RX ring. */
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_rx_listseg,
		    sc->rl_ldata.rl_rx_listnseg, RL_RX_DMAMEM_SZ(sc),
		    (caddr_t *)&sc->rl_ldata.rl_rx_list,
		    BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't map rx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_5;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, RL_RX_DMAMEM_SZ(sc), 1,
		    RL_RX_DMAMEM_SZ(sc), 0, 0,
		    &sc->rl_ldata.rl_rx_list_map)) != 0) {
		printf("%s: can't create rx list map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_6;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat,
		    sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
		    RL_RX_DMAMEM_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_7;
	}

	/* Create DMA maps for RX buffers */
	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		error = bus_dmamap_create(sc->sc_dmat,
		    RL_FRAMELEN(sc->rl_max_mtu), 1,
		    RL_FRAMELEN(sc->rl_max_mtu), 0, 0,
		    &sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
		if (error) {
			printf("%s: can't create DMA map for RX\n",
			    sc->sc_dev.dv_xname);
			goto fail_8;
		}
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = re_ioctl;
	ifp->if_qstart = re_start;
	ifp->if_watchdog = re_watchdog;
	ifp->if_hardmtu = sc->rl_max_mtu;
	ifq_set_maxlen(&ifp->if_snd, sc->rl_ldata.rl_tx_desc_cnt);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;

	/*
	 * RTL8168/8111C generates wrong IP checksummed frame if the
	 * packet has IP options so disable TX IP checksum offloading.
	 */
	switch (sc->sc_hwrev) {
	case RL_HWREV_8168C:
	case RL_HWREV_8168C_SPIN2:
	case RL_HWREV_8168CP:
		break;
	default:
		ifp->if_capabilities |= IFCAP_CSUM_IPv4;
	}

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

#ifndef SMALL_KERNEL
	ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_wol = re_wol;
	re_wol(ifp, 0);
#endif
	timeout_set(&sc->timer_handle, re_tick, sc);
	task_set(&sc->rl_start, re_txstart, sc);

	/* Take PHY out of power down mode. */
	if (sc->rl_flags & RL_FLAG_PHYWAKE_PM) {
		CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80);
		if (sc->sc_hwrev == RL_HWREV_8401E)
			CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08);
	}
	if (sc->rl_flags & RL_FLAG_PHYWAKE) {
		re_gmii_writereg((struct device *)sc, 1, 0x1f, 0);
		re_gmii_writereg((struct device *)sc, 1, 0x0e, 0);
	}

	/* Do MII setup */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = re_miibus_readreg;
	sc->sc_mii.mii_writereg = re_miibus_writereg;
	sc->sc_mii.mii_statchg = re_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, re_ifmedia_upd,
	    re_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routine.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	return (0);

fail_8:
	/* Destroy DMA maps for RX buffers. */
	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		if (sc->rl_ldata.rl_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
	}

	/* Free DMA'able memory for the RX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
fail_6:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rl_ldata.rl_rx_list, RL_RX_DMAMEM_SZ(sc));
fail_5:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rl_ldata.rl_rx_listseg, sc->rl_ldata.rl_rx_listnseg);

fail_4:
	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
		if (sc->rl_ldata.rl_txq[i].txq_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->rl_ldata.rl_txq[i].txq_dmamap);
	}

	/* Free DMA'able memory for the TX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ(sc));
fail_1:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->rl_ldata.rl_tx_listseg, sc->rl_ldata.rl_tx_listnseg);
fail_0:
	return (1);
}

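/*
 * Allocate and map a fresh RX mbuf cluster and attach it to the RX
 * descriptor at the current producer index. The OWN bit is set in a
 * second store/sync step so the chip never sees a half-built
 * descriptor.
 */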
int
re_newbuf(struct rl_softc *sc)
{
	struct mbuf	*m;
	bus_dmamap_t	map;
	struct rl_desc	*d;
	struct rl_rxsoft *rxs;
	u_int32_t	cmdstat;
	int		error, idx;

	m = MCLGETL(NULL, M_DONTWAIT, RL_FRAMELEN(sc->rl_max_mtu));
	if (!m)
		return (ENOBUFS);

	/*
	 * Initialize mbuf length fields and fixup
	 * alignment so that the frame payload is
	 * longword aligned on strict alignment archs.
	 */
	m->m_len = m->m_pkthdr.len = RL_FRAMELEN(sc->rl_max_mtu);
	m->m_data += RE_ETHER_ALIGN;

	idx = sc->rl_ldata.rl_rx_prodidx;
	rxs = &sc->rl_ldata.rl_rxsoft[idx];
	map = rxs->rxs_dmamap;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	d = &sc->rl_ldata.rl_rx_list[idx];
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	cmdstat = letoh32(d->rl_cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
	if (cmdstat & RL_RDESC_STAT_OWN) {
		printf("%s: tried to map busy RX descriptor\n",
		    sc->sc_dev.dv_xname);
		m_freem(m);
		return (ENOBUFS);
	}

	rxs->rxs_mbuf = m;

	d->rl_vlanctl = 0;
	cmdstat = map->dm_segs[0].ds_len;
	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
		cmdstat |= RL_RDESC_CMD_EOR;
	re_set_bufaddr(d, map->dm_segs[0].ds_addr);
	d->rl_cmdstat = htole32(cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	cmdstat |= RL_RDESC_CMD_OWN;
	d->rl_cmdstat = htole32(cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->rl_ldata.rl_rx_prodidx = RL_NEXT_RX_DESC(sc, idx);

	return (0);
}

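/*
 * Zero the TX descriptor ring, clear the per-slot mbuf pointers and
 * reset the producer/consumer indexes.
 */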
int
re_tx_list_init(struct rl_softc *sc)
{
	int i;

	memset(sc->rl_ldata.rl_tx_list, 0, RL_TX_LIST_SZ(sc));
	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
		sc->rl_ldata.rl_txq[i].txq_mbuf = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat,
	    sc->rl_ldata.rl_tx_list_map, 0,
	    sc->rl_ldata.rl_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->rl_ldata.rl_txq_prodidx = 0;
	sc->rl_ldata.rl_txq_considx = 0;
	sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt;
	sc->rl_ldata.rl_tx_nextfree = 0;

	return (0);
}

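/*
 * Zero the RX descriptor ring, reset the ring indexes and prefill
 * the ring with mbufs.
 */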
int
re_rx_list_init(struct rl_softc *sc)
{
	bzero(sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ(sc));

	sc->rl_ldata.rl_rx_prodidx = 0;
	sc->rl_ldata.rl_rx_considx = 0;
	sc->rl_head = sc->rl_tail = NULL;

	if_rxr_init(&sc->rl_ldata.rl_rx_ring, 2,
	    sc->rl_ldata.rl_rx_desc_cnt - 1);
	re_rx_list_fill(sc);

	return (0);
}

void
re_rx_list_fill(struct rl_softc *sc)
{
	u_int slots;

	for (slots = if_rxr_get(&sc->rl_ldata.rl_rx_ring,
	    sc->rl_ldata.rl_rx_desc_cnt);
	    slots > 0; slots--) {
		if (re_newbuf(sc) == ENOBUFS)
			break;
	}
	if_rxr_put(&sc->rl_ldata.rl_rx_ring, slots);
}

/*
 * RX handler for C+ and 8169. For the gigE chips, we support
 * the reception of jumbo frames that have been fragmented
 * across multiple 2K mbuf cluster buffers.
 */
int
re_rxeof(struct rl_softc *sc)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf	*m;
	struct ifnet	*ifp;
	int		i, total_len, rx = 0;
	struct rl_desc	*cur_rx;
	struct rl_rxsoft *rxs;
	u_int32_t	rxstat, rxvlan;

	ifp = &sc->sc_arpcom.ac_if;

	for (i = sc->rl_ldata.rl_rx_considx;
	    if_rxr_inuse(&sc->rl_ldata.rl_rx_ring) > 0;
	    i = RL_NEXT_RX_DESC(sc, i)) {
		cur_rx = &sc->rl_ldata.rl_rx_list[i];
		RL_RXDESCSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		rxstat = letoh32(cur_rx->rl_cmdstat);
		rxvlan = letoh32(cur_rx->rl_vlanctl);
		RL_RXDESCSYNC(sc, i, BUS_DMASYNC_PREREAD);
		if ((rxstat & RL_RDESC_STAT_OWN) != 0)
			break;
		total_len = rxstat & sc->rl_rxlenmask;
		rxs = &sc->rl_ldata.rl_rxsoft[i];
		m = rxs->rxs_mbuf;
		rxs->rxs_mbuf = NULL;
		if_rxr_put(&sc->rl_ldata.rl_rx_ring, 1);
		rx = 1;

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->sc_dmat,
		    rxs->rxs_dmamap, 0, rxs->rxs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
		    (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) !=
		    (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) {
			ifp->if_ierrors++;
			m_freem(m);
			continue;
		} else if (!(rxstat & RL_RDESC_STAT_EOF)) {
			m->m_len = RL_FRAMELEN(sc->rl_max_mtu);
			if (sc->rl_head == NULL)
				sc->rl_head = sc->rl_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
				sc->rl_tail = m;
			}
			continue;
		}

		/*
		 * NOTE: for the 8139C+, the frame length field
		 * is always 12 bits in size, but for the gigE chips,
		 * it is 13 bits (since the max RX frame length is 16K).
		 * Unfortunately, all 32 bits in the status word
		 * were already used, so to make room for the extra
		 * length bit, Realtek took out the 'frame alignment
		 * error' bit and shifted the other status bits
		 * over one slot. The OWN, EOR, FS and LS bits are
		 * still in the same places. We have already extracted
		 * the frame length and checked the OWN bit, so rather
		 * than using an alternate bit mapping, we shift the
		 * status bits one space to the right so we can evaluate
		 * them using the 8169 status as though it was in the
		 * same format as that of the 8139C+.
		 */
		if (sc->sc_hwrev != RL_HWREV_8139CPLUS)
			rxstat >>= 1;

		/*
		 * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be
		 * set, but if CRC is clear, it will still be a valid frame.
		 */
		if ((rxstat & RL_RDESC_STAT_RXERRSUM) != 0 &&
		    !(total_len > 8191 &&
		    (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rl_head != NULL) {
				m_freem(sc->rl_head);
				sc->rl_head = sc->rl_tail = NULL;
			}
			m_freem(m);
			continue;
		}

		if (sc->rl_head != NULL) {
			m->m_len = total_len % RL_FRAMELEN(sc->rl_max_mtu);
			if (m->m_len == 0)
				m->m_len = RL_FRAMELEN(sc->rl_max_mtu);
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rl_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
			}
			m = sc->rl_head;
			sc->rl_head = sc->rl_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		/* Do RX checksumming */

		if (sc->rl_flags & RL_FLAG_DESCV2) {
			/* Check IP header checksum */
			if ((rxvlan & RL_RDESC_IPV4) &&
			    !(rxstat & RL_RDESC_STAT_IPSUMBAD))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

			/* Check TCP/UDP checksum */
			if ((rxvlan & (RL_RDESC_IPV4|RL_RDESC_IPV6)) &&
			    (((rxstat & RL_RDESC_STAT_TCP) &&
			    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
			    ((rxstat & RL_RDESC_STAT_UDP) &&
			    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
				    M_UDP_CSUM_IN_OK;
		} else {
			/* Check IP header checksum */
			if ((rxstat & RL_RDESC_STAT_PROTOID) &&
			    !(rxstat & RL_RDESC_STAT_IPSUMBAD))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

			/* Check TCP/UDP checksum */
			if ((RL_TCPPKT(rxstat) &&
			    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
			    (RL_UDPPKT(rxstat) &&
			    !(rxstat & RL_RDESC_STAT_UDPSUMBAD)))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
				    M_UDP_CSUM_IN_OK;
		}
#if NVLAN > 0
		if (rxvlan & RL_RDESC_VLANCTL_TAG) {
			m->m_pkthdr.ether_vtag =
			    ntohs((rxvlan & RL_RDESC_VLANCTL_DATA));
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);
	}

	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&sc->rl_ldata.rl_rx_ring);

	sc->rl_ldata.rl_rx_considx = i;
	re_rx_list_fill(sc);

	return (rx);
}

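/*
 * Reclaim descriptors for frames the chip has finished transmitting:
 * unload and free each completed mbuf, account for errors and
 * collisions, and restart the queue if it stalled with work pending.
 */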
int
re_txeof(struct rl_softc *sc)
{
	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
	struct rl_txq	*txq;
	uint32_t	txstat;
	unsigned int	prod, cons;
	unsigned int	idx;
	int		free = 0;

	prod = sc->rl_ldata.rl_txq_prodidx;
	cons = sc->rl_ldata.rl_txq_considx;

	while (prod != cons) {
		txq = &sc->rl_ldata.rl_txq[cons];

		idx = txq->txq_descidx;
		RL_TXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD);
		txstat = letoh32(sc->rl_ldata.rl_tx_list[idx].rl_cmdstat);
		RL_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
		if (ISSET(txstat, RL_TDESC_CMD_OWN)) {
			free = 2;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap,
		    0, txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RL_TDESC_STAT_EXCESSCOL | RL_TDESC_STAT_COLCNT))
			ifp->if_collisions++;
		if (txstat & RL_TDESC_STAT_TXERRSUM)
			ifp->if_oerrors++;

		cons = RL_NEXT_TX_DESC(sc, idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	sc->rl_ldata.rl_txq_considx = cons;

	/*
	 * Some chips will ignore a second TX request issued while an
	 * existing transmission is in progress. If the transmitter goes
	 * idle but there are still packets waiting to be sent, we need
	 * to restart the channel here to flush them out. This only
	 * seems to be required with the PCIe devices.
	 */
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
	else if (free == 2)
		ifq_serialize(&ifp->if_snd, &sc->rl_start);
	else
		ifp->if_timer = 0;

	return (1);
}

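/*
 * One-second timer: drive the MII state machine and, while no link is
 * up, re-run the status change handler to notice a new link partner.
 */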
void
re_tick(void *xsc)
{
	struct rl_softc	*sc = xsc;
	struct mii_data	*mii;
	int s;

	mii = &sc->sc_mii;

	s = splnet();

	mii_tick(mii);

	if ((sc->rl_flags & RL_FLAG_LINK) == 0)
		re_miibus_statchg(&sc->sc_dev);

	splx(s);

	timeout_add_sec(&sc->timer_handle, 1);
}

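/*
 * Interrupt handler. When simulated interrupt moderation is in use,
 * switch between plain TX/RX interrupts and the hardware timer
 * interrupt depending on whether the previous pass found any work.
 */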
int
re_intr(void *arg)
{
	struct rl_softc	*sc = arg;
	struct ifnet	*ifp;
	u_int16_t	status;
	int		claimed = 0, rx, tx;

	ifp = &sc->sc_arpcom.ac_if;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_2(sc, RL_IMR, 0);

	rx = tx = 0;
	status = CSR_READ_2(sc, RL_ISR);
	/* If the card has gone away the read returns 0xffff. */
	if (status == 0xffff)
		return (0);
	if (status)
		CSR_WRITE_2(sc, RL_ISR, status);

	if (status & RL_ISR_TIMEOUT_EXPIRED)
		claimed = 1;

	if (status & RL_INTRS_CPLUS) {
		if (status &
		    (sc->rl_rx_ack | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW)) {
			rx |= re_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rl_tx_ack | RL_ISR_TX_ERR)) {
			tx |= re_txeof(sc);
			claimed = 1;
		}

		if (status & RL_ISR_SYSTEM_ERR) {
			KERNEL_LOCK();
			re_init(ifp);
			KERNEL_UNLOCK();
			claimed = 1;
		}
	}

	if (sc->rl_imtype == RL_IMTYPE_SIM) {
		if (sc->rl_timerintr) {
			if ((tx | rx) == 0) {
				/*
				 * Nothing needs to be processed; fall back
				 * to plain TX/RX interrupts.
				 */
				re_setup_intr(sc, 1, RL_IMTYPE_NONE);

				/*
				 * Recollect, mainly to avoid the possible
				 * race introduced by changing interrupt
				 * masks.
				 */
				re_rxeof(sc);
				re_txeof(sc);
			} else
				CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */
		} else if (tx | rx) {
			/*
			 * Assume that using simulated interrupt moderation
			 * (hardware timer based) could reduce the interrupt
			 * rate.
			 */
			re_setup_intr(sc, 1, RL_IMTYPE_SIM);
		}
	}

	CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs);

	return (claimed);
}

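/*
 * Map an outgoing mbuf chain into TX descriptors starting at slot idx.
 * Returns the number of descriptors consumed, or 0 if the mbuf could
 * not be loaded, in which case the caller drops the packet.
 */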
int
re_encap(struct rl_softc *sc, unsigned int idx, struct mbuf *m)
{
	struct rl_txq	*txq;
	bus_dmamap_t	map;
	int		error, seg, nsegs, curidx, lastidx, pad;
	int		off;
	struct ip	*ip;
	struct rl_desc	*d;
	u_int32_t	cmdstat, vlanctl = 0, csum_flags = 0;

	/*
	 * Set up checksum offload. Note: checksum offload bits must
	 * appear in all descriptors of a multi-descriptor transmit
	 * attempt. This is according to testing done with an 8169
	 * chip. This is a requirement.
	 */

	/*
	 * Set RL_TDESC_CMD_IPCSUM if any checksum offloading
	 * is requested.  Otherwise, RL_TDESC_CMD_TCPCSUM/
	 * RL_TDESC_CMD_UDPCSUM does not take effect.
	 */

	if ((sc->rl_flags & RL_FLAG_JUMBOV2) &&
	    m->m_pkthdr.len > RL_MTU &&
	    (m->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) != 0) {
		struct mbuf mh, *mp;

		mp = m_getptr(m, ETHER_HDR_LEN, &off);
		mh.m_flags = 0;
		mh.m_data = mtod(mp, caddr_t) + off;
		mh.m_next = mp->m_next;
		mh.m_pkthdr.len = mp->m_pkthdr.len - ETHER_HDR_LEN;
		mh.m_len = mp->m_len - off;
		ip = (struct ip *)mh.m_data;

		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			ip->ip_sum = in_cksum(&mh, sizeof(struct ip));
		if (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT|M_UDP_CSUM_OUT))
			in_delayed_cksum(&mh);

		m->m_pkthdr.csum_flags &=
		    ~(M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT);
	}

	if ((m->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) != 0) {
		if (sc->rl_flags & RL_FLAG_DESCV2) {
			vlanctl |= RL_TDESC_CMD_IPCSUMV2;
			if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
				vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
			if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
				vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
		} else {
			csum_flags |= RL_TDESC_CMD_IPCSUM;
			if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
				csum_flags |= RL_TDESC_CMD_TCPCSUM;
			if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
				csum_flags |= RL_TDESC_CMD_UDPCSUM;
		}
	}

	txq = &sc->rl_ldata.rl_txq[idx];
	map = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;

	case EFBIG:
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = map->dm_nsegs;
	pad = 0;

	/*
	 * With some of the Realtek chips, using the checksum offload
	 * support in conjunction with the autopadding feature results
	 * in the transmission of corrupt frames. For example, if we
	 * need to send a really small IP fragment that's less than 60
	 * bytes in size, and IP header checksumming is enabled, the
	 * resulting ethernet frame that appears on the wire will
	 * have garbled payload. To work around this, if TX IP checksum
	 * offload is enabled, we always manually pad short frames out
	 * to the minimum ethernet frame size.
	 */
	if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 &&
	    m->m_pkthdr.len < RL_IP4CSUMTX_PADLEN &&
	    (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) != 0) {
		pad = 1;
		nsegs++;
	}

	/*
	 * Set up hardware VLAN tagging. Note: vlan tag info must
	 * appear in all descriptors of a multi-descriptor
	 * transmission attempt.
	 */
#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		vlanctl |= swap16(m->m_pkthdr.ether_vtag) |
		    RL_TDESC_VLANCTL_TAG;
#endif

	/*
	 * Map the segment array into descriptors. Note that we set the
	 * start-of-frame and end-of-frame markers for either TX or RX, but
	 * they really only have meaning in the TX case. (In the RX case,
	 * it's the chip that tells us where packets begin and end.)
	 * We also keep track of the end of the ring and set the
	 * end-of-ring bits as needed, and we set the ownership bits
	 * in all except the very first descriptor. (The caller will
	 * set this descriptor later when it starts transmission or
	 * reception.)
	 */
	curidx = idx;
	cmdstat = RL_TDESC_CMD_SOF;

	for (seg = 0; seg < map->dm_nsegs; seg++) {
		d = &sc->rl_ldata.rl_tx_list[curidx];

		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_POSTWRITE);

		d->rl_vlanctl = htole32(vlanctl);
		re_set_bufaddr(d, map->dm_segs[seg].ds_addr);
		cmdstat |= csum_flags | map->dm_segs[seg].ds_len;

		if (curidx == sc->rl_ldata.rl_tx_desc_cnt - 1)
			cmdstat |= RL_TDESC_CMD_EOR;

		d->rl_cmdstat = htole32(cmdstat);

		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREWRITE);

		lastidx = curidx;
		cmdstat = RL_TDESC_CMD_OWN;
		curidx = RL_NEXT_TX_DESC(sc, curidx);
	}

	if (pad) {
		d = &sc->rl_ldata.rl_tx_list[curidx];

		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_POSTWRITE);

		d->rl_vlanctl = htole32(vlanctl);
		re_set_bufaddr(d, RL_TXPADDADDR(sc));
		cmdstat = csum_flags |
		    RL_TDESC_CMD_OWN | RL_TDESC_CMD_EOF |
		    (RL_IP4CSUMTX_PADLEN + 1 - m->m_pkthdr.len);

		if (curidx == sc->rl_ldata.rl_tx_desc_cnt - 1)
			cmdstat |= RL_TDESC_CMD_EOR;

		d->rl_cmdstat = htole32(cmdstat);

		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREWRITE);

		lastidx = curidx;
	}

	/* d is already pointing at the last descriptor */
	d->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);

	/* Transfer ownership of packet to the chip. */
	d = &sc->rl_ldata.rl_tx_list[idx];

	RL_TXDESCSYNC(sc, idx, BUS_DMASYNC_POSTWRITE);
	d->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN);
	RL_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREWRITE);

1762 	/* update info of TX queue and descriptors */
1763 	txq->txq_mbuf = m;
1764 	txq->txq_descidx = lastidx;
1765 
1766 	return (nsegs);
1767 }
1768 
1769 void
1770 re_txstart(void *xsc)
1771 {
1772 	struct rl_softc *sc = xsc;
1773 
1774 	CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
1775 }
1776 
1777 /*
1778  * Main transmit routine for C+ and gigE NICs.
1779  */
1780 
1781 void
1782 re_start(struct ifqueue *ifq)
1783 {
1784 	struct ifnet	*ifp = ifq->ifq_if;
1785 	struct rl_softc	*sc = ifp->if_softc;
1786 	struct mbuf	*m;
1787 	unsigned int	idx;
1788 	unsigned int	free, used;
1789 	int		post = 0;
1790 
1791 	if (!ISSET(sc->rl_flags, RL_FLAG_LINK)) {
1792 		ifq_purge(ifq);
1793 		return;
1794 	}
1795 
1796 	free = sc->rl_ldata.rl_txq_considx;
1797 	idx = sc->rl_ldata.rl_txq_prodidx;
1798 	if (free <= idx)
1799 		free += sc->rl_ldata.rl_tx_desc_cnt;
1800 	free -= idx;
1801 
1802 	for (;;) {
1803 		if (sc->rl_ldata.rl_tx_ndescs >= free + 2) {
1804 			ifq_set_oactive(ifq);
1805 			break;
1806 		}
1807 
1808 		m = ifq_dequeue(ifq);
1809 		if (m == NULL)
1810 			break;
1811 
1812 		used = re_encap(sc, idx, m);
1813 		if (used == 0) {
1814 			m_freem(m);
1815 			continue;
1816 		}
1817 
1818 #if NBPFILTER > 0
1819 		if (ifp->if_bpf)
1820 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1821 #endif
1822 
1823 		KASSERT(used <= free);
1824 		free -= used;
1825 
1826 		idx += used;
1827 		if (idx >= sc->rl_ldata.rl_tx_desc_cnt)
1828 			idx -= sc->rl_ldata.rl_tx_desc_cnt;
1829 
1830 		post = 1;
1831 	}
1832 
1833 	if (post == 0)
1834 		return;
1835 
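	/*
	 * Arm the 5 second TX watchdog; re_watchdog() below reinitializes
	 * the chip if transmit completions stall.
	 */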
1836 	ifp->if_timer = 5;
1837 	sc->rl_ldata.rl_txq_prodidx = idx;
1838 	ifq_serialize(ifq, &sc->rl_start);
1839 }
1840 
1841 int
1842 re_init(struct ifnet *ifp)
1843 {
1844 	struct rl_softc *sc = ifp->if_softc;
1845 	u_int16_t	cfg;
1846 	uint32_t	rxcfg;
1847 	int		s;
1848 	union {
1849 		u_int32_t align_dummy;
1850 		u_char eaddr[ETHER_ADDR_LEN];
1851 	} eaddr;
1852 
1853 	s = splnet();
1854 
1855 	/*
1856 	 * Cancel pending I/O and free all RX/TX buffers.
1857 	 */
1858 	re_stop(ifp);
1859 
1860 	/* Put controller into known state. */
1861 	re_reset(sc);
1862 
1863 	/*
1864 	 * Enable C+ RX and TX mode, as well as VLAN stripping and
1865 	 * RX checksum offload. We must configure the C+ register
1866 	 * before all others.
1867 	 */
1868 	cfg = RL_CPLUSCMD_TXENB | RL_CPLUSCMD_PCI_MRW |
1869 	    RL_CPLUSCMD_RXCSUM_ENB;
1870 
1871 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1872 		cfg |= RL_CPLUSCMD_VLANSTRIP;
1873 
1874 	if (sc->rl_flags & RL_FLAG_MACSTAT)
1875 		cfg |= RL_CPLUSCMD_MACSTAT_DIS;
1876 	else
1877 		cfg |= RL_CPLUSCMD_RXENB;
1878 
1879 	CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg);
1880 
1881 	/*
1882 	 * Init our MAC address.  Even though the chipset
1883 	 * documentation doesn't mention it, we need to enter "Config
1884 	 * register write enable" mode to modify the ID registers.
1885 	 */
1886 	bcopy(sc->sc_arpcom.ac_enaddr, eaddr.eaddr, ETHER_ADDR_LEN);
1887 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
1888 	CSR_WRITE_4(sc, RL_IDR4,
1889 	    htole32(*(u_int32_t *)(&eaddr.eaddr[4])));
1890 	CSR_WRITE_4(sc, RL_IDR0,
1891 	    htole32(*(u_int32_t *)(&eaddr.eaddr[0])));
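	/*
	 * The ID registers are written four bytes at a time; the
	 * align_dummy member of the eaddr union guarantees the byte
	 * array is 32-bit aligned, so the u_int32_t casts above are
	 * safe on strict-alignment platforms.
	 */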
1892 	/*
1893 	 * Default on PC Engines APU1 is to have all LEDs off unless
1894 	 * there is network activity. Override to provide a link status
1895 	 * LED.
1896 	 */
1897 	if (sc->sc_hwrev == RL_HWREV_8168E &&
1898 	    hw_vendor != NULL && hw_prod != NULL &&
1899 	    strcmp(hw_vendor, "PC Engines") == 0 &&
1900 	    strcmp(hw_prod, "APU") == 0) {
1901 		CSR_SETBIT_1(sc, RL_CFG4, RL_CFG4_CUSTOM_LED);
1902 		CSR_WRITE_1(sc, RL_LEDSEL, RL_LED_LINK | RL_LED_ACT << 4);
1903 	}
1904 	/*
1905 	 * Protect the config registers again.
1906 	 */
1907 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1908 
1909 	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
1910 		re_set_jumbo(sc);
1911 
1912 	/*
1913 	 * For C+ mode, initialize the RX and TX descriptors and mbufs.
1914 	 */
1915 	re_rx_list_init(sc);
1916 	re_tx_list_init(sc);
1917 
1918 	/*
1919 	 * Load the addresses of the RX and TX lists into the chip.
1920 	 */
1921 	CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
1922 	    RL_ADDR_HI(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr));
1923 	CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
1924 	    RL_ADDR_LO(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr));
1925 
1926 	CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
1927 	    RL_ADDR_HI(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr));
1928 	CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
1929 	    RL_ADDR_LO(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr));
1930 
1931 	if (sc->rl_flags & RL_FLAG_RXDV_GATED)
1932 		CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) &
1933 		    ~0x00080000);
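	/*
	 * The write above clears bit 19 (0x00080000) in RL_MISC which,
	 * per the RL_FLAG_RXDV_GATED name, gates RXDV; clearing it lets
	 * the RX MAC see incoming data.
	 */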
1934 
1935 	/*
1936 	 * Set the initial TX and RX configuration.
1937 	 */
1938 	CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
1939 
1940 	CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16);
1941 
1942 	rxcfg = RL_RXCFG_CONFIG;
1943 	if (sc->rl_flags & RL_FLAG_EARLYOFF)
1944 		rxcfg |= RL_RXCFG_EARLYOFF;
1945 	else if (sc->rl_flags & RL_FLAG_EARLYOFFV2)
1946 		rxcfg |= RL_RXCFG_EARLYOFFV2;
1947 	CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
1948 
1949 	/*
1950 	 * Enable transmit and receive.
1951 	 */
1952 	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB);
1953 
1954 	/* Program promiscuous mode and multicast filters. */
1955 	re_iff(sc);
1956 
1957 	/*
1958 	 * Enable interrupts.
1959 	 */
1960 	re_setup_intr(sc, 1, sc->rl_imtype);
1961 	CSR_WRITE_2(sc, RL_ISR, sc->rl_intrs);
1962 
1963 	/* Start RX/TX process. */
1964 	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
1965 
1966 	/*
1967 	 * For 8169 gigE NICs, set the max allowed RX packet
1968 	 * size so we can receive jumbo frames.
1969 	 */
1970 	if (sc->sc_hwrev != RL_HWREV_8139CPLUS) {
1971 		if (sc->rl_flags & RL_FLAG_PCIE &&
1972 		    (sc->rl_flags & RL_FLAG_JUMBOV2) == 0)
1973 			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN);
1974 		else
1975 			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383);
1976 	}
1977 
1978 	CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) |
1979 	    RL_CFG1_DRVLOAD);
1980 
1981 	ifp->if_flags |= IFF_RUNNING;
1982 	ifq_clr_oactive(&ifp->if_snd);
1983 
1984 	splx(s);
1985 
1986 	sc->rl_flags &= ~RL_FLAG_LINK;
1987 	mii_mediachg(&sc->sc_mii);
1988 
1989 	timeout_add_sec(&sc->timer_handle, 1);
1990 
1991 	return (0);
1992 }
1993 
1994 /*
1995  * Set media options.
1996  */
1997 int
1998 re_ifmedia_upd(struct ifnet *ifp)
1999 {
2000 	struct rl_softc	*sc;
2001 
2002 	sc = ifp->if_softc;
2003 
2004 	return (mii_mediachg(&sc->sc_mii));
2005 }
2006 
2007 /*
2008  * Report current media status.
2009  */
2010 void
2011 re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2012 {
2013 	struct rl_softc	*sc;
2014 
2015 	sc = ifp->if_softc;
2016 
2017 	mii_pollstat(&sc->sc_mii);
2018 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
2019 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
2020 }
2021 
2022 int
2023 re_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2024 {
2025 	struct rl_softc	*sc = ifp->if_softc;
2026 	struct ifreq	*ifr = (struct ifreq *) data;
2027 	int		s, error = 0;
2028 
2029 	s = splnet();
2030 
2031 	switch(command) {
2032 	case SIOCSIFADDR:
2033 		ifp->if_flags |= IFF_UP;
2034 		if (!(ifp->if_flags & IFF_RUNNING))
2035 			re_init(ifp);
2036 		break;
2037 	case SIOCSIFFLAGS:
2038 		if (ifp->if_flags & IFF_UP) {
2039 			if (ifp->if_flags & IFF_RUNNING)
2040 				error = ENETRESET;
2041 			else
2042 				re_init(ifp);
2043 		} else {
2044 			if (ifp->if_flags & IFF_RUNNING)
2045 				re_stop(ifp);
2046 		}
2047 		break;
2048 	case SIOCGIFMEDIA:
2049 	case SIOCSIFMEDIA:
2050 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
2051 		break;
2052 	case SIOCGIFRXR:
2053 		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
2054 		    NULL, RL_FRAMELEN(sc->rl_max_mtu), &sc->rl_ldata.rl_rx_ring);
2055 		break;
2056 	default:
2057 		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
2058 	}
2059 
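	/*
	 * ENETRESET means the RX filter (promiscuous/multicast state)
	 * needs to be reprogrammed; while the interface is running this
	 * only requires re_iff(), not a full re_init().
	 */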
2060 	if (error == ENETRESET) {
2061 		if (ifp->if_flags & IFF_RUNNING)
2062 			re_iff(sc);
2063 		error = 0;
2064 	}
2065 
2066 	splx(s);
2067 	return (error);
2068 }
2069 
2070 void
2071 re_watchdog(struct ifnet *ifp)
2072 {
2073 	struct rl_softc	*sc;
2074 	int	s;
2075 
2076 	sc = ifp->if_softc;
2077 	s = splnet();
2078 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
2079 
2080 	re_init(ifp);
2081 
2082 	splx(s);
2083 }
2084 
2085 /*
2086  * Stop the adapter and free any mbufs allocated to the
2087  * RX and TX lists.
2088  */
2089 void
2090 re_stop(struct ifnet *ifp)
2091 {
2092 	struct rl_softc *sc;
2093 	int	i;
2094 
2095 	sc = ifp->if_softc;
2096 
2097 	ifp->if_timer = 0;
2098 	sc->rl_flags &= ~RL_FLAG_LINK;
2099 	sc->rl_timerintr = 0;
2100 
2101 	timeout_del(&sc->timer_handle);
2102 	ifp->if_flags &= ~IFF_RUNNING;
2103 
2104 	/*
2105 	 * Disable accepting frames to put the RX MAC into an idle state.
2106 	 * Otherwise frames may still arrive while the stop command is
2107 	 * executing, and the controller could DMA them into RX buffers
2108 	 * that have already been freed.
2109 	 */
2110 	CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) &
2111 	    ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD | RL_RXCFG_RX_INDIV |
2112 	    RL_RXCFG_RX_MULTI));
2113 
2114 	if (sc->rl_flags & RL_FLAG_WAIT_TXPOLL) {
2115 		for (i = RL_TIMEOUT; i > 0; i--) {
2116 			if ((CSR_READ_1(sc, sc->rl_txstart) &
2117 			    RL_TXSTART_START) == 0)
2118 				break;
2119 			DELAY(20);
2120 		}
2121 		if (i == 0)
2122 			printf("%s: stopping TX poll timed out!\n",
2123 			    sc->sc_dev.dv_xname);
2124 		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
2125 	} else if (sc->rl_flags & RL_FLAG_CMDSTOP) {
2126 		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB |
2127 		    RL_CMD_RX_ENB);
2128 		if (sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) {
2129 			for (i = RL_TIMEOUT; i > 0; i--) {
2130 				if ((CSR_READ_4(sc, RL_TXCFG) &
2131 				    RL_TXCFG_QUEUE_EMPTY) != 0)
2132 					break;
2133 				DELAY(100);
2134 			}
2135 			if (i == 0)
2136 				printf("%s: stopping TXQ timed out!\n",
2137 				    sc->sc_dev.dv_xname);
2138 		}
2139 	} else
2140 		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
2141 	DELAY(1000);
2142 	CSR_WRITE_2(sc, RL_IMR, 0x0000);
2143 	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
2144 
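	/*
	 * Wait for the interrupt handler and any pending transmit work
	 * to drain before the TX/RX buffers are freed below.
	 */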
2145 	intr_barrier(sc->sc_ih);
2146 	ifq_barrier(&ifp->if_snd);
2147 
2148 	ifq_clr_oactive(&ifp->if_snd);
2149 	mii_down(&sc->sc_mii);
2150 
2151 	if (sc->rl_head != NULL) {
2152 		m_freem(sc->rl_head);
2153 		sc->rl_head = sc->rl_tail = NULL;
2154 	}
2155 
2156 	/* Free the TX list buffers. */
2157 	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
2158 		if (sc->rl_ldata.rl_txq[i].txq_mbuf != NULL) {
2159 			bus_dmamap_unload(sc->sc_dmat,
2160 			    sc->rl_ldata.rl_txq[i].txq_dmamap);
2161 			m_freem(sc->rl_ldata.rl_txq[i].txq_mbuf);
2162 			sc->rl_ldata.rl_txq[i].txq_mbuf = NULL;
2163 		}
2164 	}
2165 
2166 	/* Free the RX list buffers. */
2167 	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
2168 		if (sc->rl_ldata.rl_rxsoft[i].rxs_mbuf != NULL) {
2169 			bus_dmamap_unload(sc->sc_dmat,
2170 			    sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
2171 			m_freem(sc->rl_ldata.rl_rxsoft[i].rxs_mbuf);
2172 			sc->rl_ldata.rl_rxsoft[i].rxs_mbuf = NULL;
2173 		}
2174 	}
2175 }
2176 
2177 void
2178 re_setup_hw_im(struct rl_softc *sc)
2179 {
2180 	KASSERT(sc->rl_flags & RL_FLAG_HWIM);
2181 
2182 	/*
2183 	 * Interrupt moderation
2184 	 *
2185 	 * 0xABCD
2186 	 * A - unknown (maybe TX related)
2187 	 * B - TX timer (unit: 25us)
2188 	 * C - unknown (maybe RX related)
2189 	 * D - RX timer (unit: 25us)
2190 	 *
2191 	 *
2192 	 * re(4)'s interrupt moderation is actually controlled by
2193 	 * two variables, like most other NICs (bge, bnx etc.)
2194 	 * o  timer
2195 	 * o  number of packets [P]
2196 	 *
2197 	 * The logical relationship between these two variables is
2198 	 * the same as on other NICs:
2199 	 * if (timer expire || packets > [P])
2200 	 *     Interrupt is delivered
2201 	 *
2202 	 * Currently we only know how to set 'timer', but not
2203 	 * 'number of packets'; as far as I tested (sink ~900Kpps,
2204 	 * interrupt rate is 30KHz), 'number of packets' seems to be ~30.
2205 	 */
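	/*
	 * Illustrative example: rl_rx_time = rl_tx_time = 4 programs
	 * both the B and D nibbles above to 4 * 25us = 100us.
	 */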
2206 	CSR_WRITE_2(sc, RL_IM,
2207 		    RL_IM_RXTIME(sc->rl_rx_time) |
2208 		    RL_IM_TXTIME(sc->rl_tx_time) |
2209 		    RL_IM_MAGIC);
2210 }
2211 
2212 void
2213 re_disable_hw_im(struct rl_softc *sc)
2214 {
2215 	if (sc->rl_flags & RL_FLAG_HWIM)
2216 		CSR_WRITE_2(sc, RL_IM, 0);
2217 }
2218 
2219 void
2220 re_setup_sim_im(struct rl_softc *sc)
2221 {
2222 	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
2223 		CSR_WRITE_4(sc, RL_TIMERINT, 0x400); /* XXX */
2224 	else {
2225 		u_int32_t nticks;
2226 
2227 		/*
2228 		 * The datasheet says the tick counter decrements at
2229 		 * the bus speed, but the clock seems to run a little
2230 		 * faster, so we compensate for that here.
2231 		 */
2232 		nticks = (sc->rl_sim_time * sc->rl_bus_speed * 8) / 5;
2233 		CSR_WRITE_4(sc, RL_TIMERINT_8169, nticks);
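		/*
		 * Illustrative example, assuming rl_bus_speed is in MHz
		 * and rl_sim_time in microseconds: rl_sim_time = 125 on
		 * a 33MHz bus gives nticks = 125 * 33 * 8 / 5 = 6600.
		 */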
2234 	}
2235 	CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */
2236 	sc->rl_timerintr = 1;
2237 }
2238 
2239 void
2240 re_disable_sim_im(struct rl_softc *sc)
2241 {
2242 	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
2243 		CSR_WRITE_4(sc, RL_TIMERINT, 0);
2244 	else
2245 		CSR_WRITE_4(sc, RL_TIMERINT_8169, 0);
2246 	sc->rl_timerintr = 0;
2247 }
2248 
2249 void
2250 re_config_imtype(struct rl_softc *sc, int imtype)
2251 {
2252 	switch (imtype) {
2253 	case RL_IMTYPE_HW:
2254 		KASSERT(sc->rl_flags & RL_FLAG_HWIM);
2255 		/* FALLTHROUGH */
2256 	case RL_IMTYPE_NONE:
2257 		sc->rl_intrs = RL_INTRS_CPLUS;
2258 		sc->rl_rx_ack = RL_ISR_RX_OK | RL_ISR_FIFO_OFLOW |
2259 				RL_ISR_RX_OVERRUN;
2260 		sc->rl_tx_ack = RL_ISR_TX_OK;
2261 		break;
2262 
2263 	case RL_IMTYPE_SIM:
2264 		sc->rl_intrs = RL_INTRS_TIMER;
2265 		sc->rl_rx_ack = RL_ISR_TIMEOUT_EXPIRED;
2266 		sc->rl_tx_ack = RL_ISR_TIMEOUT_EXPIRED;
2267 		break;
2268 
2269 	default:
2270 		panic("%s: unknown imtype %d",
2271 		      sc->sc_dev.dv_xname, imtype);
2272 	}
2273 }
2274 
2275 void
2276 re_set_jumbo(struct rl_softc *sc)
2277 {
2278 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
2279 	CSR_WRITE_1(sc, RL_CFG3, CSR_READ_1(sc, RL_CFG3) |
2280 	    RL_CFG3_JUMBO_EN0);
2281 
2282 	switch (sc->sc_hwrev) {
2283 	case RL_HWREV_8168DP:
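		/*
		 * The 8168DP apparently needs no extra CFG4 bit; the
		 * RL_CFG3_JUMBO_EN0 write above suffices.
		 */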
2284 		break;
2285 	case RL_HWREV_8168E:
2286 		CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) |
2287 		    RL_CFG4_8168E_JUMBO_EN1);
2288 		break;
2289 	default:
2290 		CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) |
2291 		    RL_CFG4_JUMBO_EN1);
2292 		break;
2293 	}
2294 
2295 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
2296 }
2297 
2298 void
2299 re_setup_intr(struct rl_softc *sc, int enable_intrs, int imtype)
2300 {
2301 	re_config_imtype(sc, imtype);
2302 
2303 	if (enable_intrs)
2304 		CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs);
2305 	else
2306 		CSR_WRITE_2(sc, RL_IMR, 0);
2307 
2308 	switch (imtype) {
2309 	case RL_IMTYPE_NONE:
2310 		re_disable_sim_im(sc);
2311 		re_disable_hw_im(sc);
2312 		break;
2313 
2314 	case RL_IMTYPE_HW:
2315 		KASSERT(sc->rl_flags & RL_FLAG_HWIM);
2316 		re_disable_sim_im(sc);
2317 		re_setup_hw_im(sc);
2318 		break;
2319 
2320 	case RL_IMTYPE_SIM:
2321 		re_disable_hw_im(sc);
2322 		re_setup_sim_im(sc);
2323 		break;
2324 
2325 	default:
2326 		panic("%s: unknown imtype %d",
2327 		      sc->sc_dev.dv_xname, imtype);
2328 	}
2329 }
2330 
2331 #ifndef SMALL_KERNEL
2332 int
2333 re_wol(struct ifnet *ifp, int enable)
2334 {
2335 	struct rl_softc *sc = ifp->if_softc;
2336 	u_int8_t val;
2337 
2338 	if (enable) {
2339 		if ((CSR_READ_1(sc, sc->rl_cfg1) & RL_CFG1_PME) == 0) {
2340 			printf("%s: power management is disabled, "
2341 			    "cannot do WOL\n", sc->sc_dev.dv_xname);
2342 			return (ENOTSUP);
2343 		}
2344 		if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_AUXPWR) == 0)
2345 			printf("%s: no auxiliary power, cannot do WOL from D3 "
2346 			    "(power-off) state\n", sc->sc_dev.dv_xname);
2347 	}
2348 
2349 	re_iff(sc);
2350 
2351 	/* Temporarily enable write to configuration registers. */
2352 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
2353 
2354 	/* When enabling WOL, allow only magic packet wakeups; otherwise disable all wake events. */
2355 	if (enable) {
2356 		val = CSR_READ_1(sc, sc->rl_cfg5);
2357 		val &= ~(RL_CFG5_WOL_UCAST | RL_CFG5_WOL_MCAST |
2358 		    RL_CFG5_WOL_BCAST);
2359 		CSR_WRITE_1(sc, sc->rl_cfg5, val);
2360 
2361 		val = CSR_READ_1(sc, sc->rl_cfg3);
2362 		val |= RL_CFG3_WOL_MAGIC;
2363 		val &= ~RL_CFG3_WOL_LINK;
2364 		CSR_WRITE_1(sc, sc->rl_cfg3, val);
2365 	} else {
2366 		val = CSR_READ_1(sc, sc->rl_cfg5);
2367 		val &= ~(RL_CFG5_WOL_UCAST | RL_CFG5_WOL_MCAST |
2368 		    RL_CFG5_WOL_BCAST);
2369 		CSR_WRITE_1(sc, sc->rl_cfg5, val);
2370 
2371 		val = CSR_READ_1(sc, sc->rl_cfg3);
2372 		val &= ~(RL_CFG3_WOL_MAGIC | RL_CFG3_WOL_LINK);
2373 		CSR_WRITE_1(sc, sc->rl_cfg3, val);
2374 	}
2375 
2376 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
2377 
2378 	return (0);
2379 }
2380 #endif
2381