xref: /openbsd-src/sys/dev/ic/re.c (revision 5054e3e78af0749a9bb00ba9a024b3ee2d90290f)
1 /*	$OpenBSD: re.c,v 1.115 2009/11/13 23:50:30 sthen Exp $	*/
2 /*	$FreeBSD: if_re.c,v 1.31 2004/09/04 07:54:05 ru Exp $	*/
3 /*
4  * Copyright (c) 1997, 1998-2003
5  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 /*
36  * RealTek 8139C+/8169/8169S/8110S PCI NIC driver
37  *
38  * Written by Bill Paul <wpaul@windriver.com>
39  * Senior Networking Software Engineer
40  * Wind River Systems
41  */
42 
43 /*
44  * This driver is designed to support RealTek's next generation of
45  * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
46  * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
47  * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E.
48  *
49  * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
50  * with the older 8139 family, however it also supports a special
51  * C+ mode of operation that provides several new performance enhancing
52  * features. These include:
53  *
54  *	o Descriptor based DMA mechanism. Each descriptor represents
55  *	  a single packet fragment. Data buffers may be aligned on
56  *	  any byte boundary.
57  *
58  *	o 64-bit DMA
59  *
60  *	o TCP/IP checksum offload for both RX and TX
61  *
62  *	o High and normal priority transmit DMA rings
63  *
64  *	o VLAN tag insertion and extraction
65  *
66  *	o TCP large send (segmentation offload)
67  *
68  * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
69  * programming API is fairly straightforward. The RX filtering, EEPROM
70  * access and PHY access is the same as it is on the older 8139 series
71  * chips.
72  *
73  * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
74  * same programming API and feature set as the 8139C+ with the following
75  * differences and additions:
76  *
77  *	o 1000Mbps mode
78  *
79  *	o Jumbo frames
80  *
81  * 	o GMII and TBI ports/registers for interfacing with copper
82  *	  or fiber PHYs
83  *
84  *      o RX and TX DMA rings can have up to 1024 descriptors
85  *        (the 8139C+ allows a maximum of 64)
86  *
87  *	o Slight differences in register layout from the 8139C+
88  *
89  * The TX start and timer interrupt registers are at different locations
90  * on the 8169 than they are on the 8139C+. Also, the status word in the
91  * RX descriptor has a slightly different bit layout. The 8169 does not
92  * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
93  * copper gigE PHY.
94  *
95  * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
96  * (the 'S' stands for 'single-chip'). These devices have the same
97  * programming API as the older 8169, but also have some vendor-specific
98  * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
99  * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
100  *
101  * This driver takes advantage of the RX and TX checksum offload and
102  * VLAN tag insertion/extraction features. It also implements TX
103  * interrupt moderation using the timer interrupt registers, which
104  * significantly reduces TX interrupt load. There is also support
105  * for jumbo frames, however the 8169/8169S/8110S can not transmit
106  * jumbo frames larger than 7440, so the max MTU possible with this
107  * driver is 7422 bytes.
108  */
109 
110 #include "bpfilter.h"
111 #include "vlan.h"
112 
113 #include <sys/param.h>
114 #include <sys/endian.h>
115 #include <sys/systm.h>
116 #include <sys/sockio.h>
117 #include <sys/mbuf.h>
118 #include <sys/malloc.h>
119 #include <sys/kernel.h>
120 #include <sys/device.h>
121 #include <sys/timeout.h>
122 #include <sys/socket.h>
123 
124 #include <net/if.h>
125 #include <net/if_dl.h>
126 #include <net/if_media.h>
127 
128 #ifdef INET
129 #include <netinet/in.h>
130 #include <netinet/in_systm.h>
131 #include <netinet/in_var.h>
132 #include <netinet/ip.h>
133 #include <netinet/if_ether.h>
134 #endif
135 
136 #if NVLAN > 0
137 #include <net/if_types.h>
138 #include <net/if_vlan_var.h>
139 #endif
140 
141 #if NBPFILTER > 0
142 #include <net/bpf.h>
143 #endif
144 
145 #include <dev/mii/mii.h>
146 #include <dev/mii/miivar.h>
147 
148 #include <dev/pci/pcireg.h>
149 #include <dev/pci/pcivar.h>
150 
151 #include <dev/ic/rtl81x9reg.h>
152 #include <dev/ic/revar.h>
153 
#ifdef RE_DEBUG
int redebug = 0;	/* set nonzero (e.g. from ddb) to enable DPRINTF output */
/* Debug printf: argument is a parenthesized printf() argument list. */
#define DPRINTF(x)	do { if (redebug) printf x; } while (0)
#else
/* Compiled out when RE_DEBUG is not defined. */
#define DPRINTF(x)
#endif
160 
static inline void re_set_bufaddr(struct rl_desc *, bus_addr_t);

/* Transmit encapsulation. */
int	re_encap(struct rl_softc *, struct mbuf *, int *);

/* Descriptor ring setup, RX/TX completion, and ifnet entry points. */
int	re_newbuf(struct rl_softc *);
int	re_rx_list_init(struct rl_softc *);
void	re_rx_list_fill(struct rl_softc *);
int	re_tx_list_init(struct rl_softc *);
int	re_rxeof(struct rl_softc *);
int	re_txeof(struct rl_softc *);
void	re_tick(void *);
void	re_start(struct ifnet *);
int	re_ioctl(struct ifnet *, u_long, caddr_t);
void	re_watchdog(struct ifnet *);
int	re_ifmedia_upd(struct ifnet *);
void	re_ifmedia_sts(struct ifnet *, struct ifmediareq *);

/* EEPROM bit-bang access. */
void	re_eeprom_putbyte(struct rl_softc *, int);
void	re_eeprom_getword(struct rl_softc *, int, u_int16_t *);
void	re_read_eeprom(struct rl_softc *, caddr_t, int, int);

/* PHY access for the gigE parts (via RL_PHYAR). */
int	re_gmii_readreg(struct device *, int, int);
void	re_gmii_writereg(struct device *, int, int, int);

/* mii(4) bus callbacks (dispatch to GMII or 8139C+ registers). */
int	re_miibus_readreg(struct device *, int, int);
void	re_miibus_writereg(struct device *, int, int, int);
void	re_miibus_statchg(struct device *);

/* RX filter programming and chip reset. */
void	re_iff(struct rl_softc *);
void	re_reset(struct rl_softc *);

/* Interrupt moderation (hardware and simulated). */
void	re_setup_hw_im(struct rl_softc *);
void	re_setup_sim_im(struct rl_softc *);
void	re_disable_hw_im(struct rl_softc *);
void	re_disable_sim_im(struct rl_softc *);
void	re_config_imtype(struct rl_softc *, int);
void	re_setup_intr(struct rl_softc *, int, int);

#ifdef RE_DIAG
int	re_diag(struct rl_softc *);
#endif
202 
/* Autoconf glue: driver class definition for re(4) network interfaces. */
struct cfdriver re_cd = {
	0, "re", DV_IFNET
};
206 
/*
 * Read-modify-write helpers for the EEPROM command register:
 * set or clear bit(s) `x' in RL_EECMD.  A local `sc' must be in scope.
 */
#define EE_SET(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) | x)

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) & ~x)
214 
/*
 * Table mapping hardware revision IDs (from RL_TXCFG) to printable
 * chip names, terminated by a { 0, NULL } sentinel.  Consulted at
 * attach time to identify the chip.
 */
static const struct re_revision {
	u_int32_t		re_chipid;	/* RL_HWREV_* identifier */
	const char		*re_name;	/* printable chip name */
} re_revisions[] = {
	{ RL_HWREV_8100,	"RTL8100" },
	{ RL_HWREV_8100E_SPIN1,	"RTL8100E 1" },
	{ RL_HWREV_8100E_SPIN2, "RTL8100E 2" },
	{ RL_HWREV_8101,	"RTL8101" },
	{ RL_HWREV_8101E,	"RTL8101E" },
	{ RL_HWREV_8102E,	"RTL8102E" },
	{ RL_HWREV_8102EL,	"RTL8102EL" },
	{ RL_HWREV_8103E,       "RTL8103E" },
	{ RL_HWREV_8110S,	"RTL8110S" },
	{ RL_HWREV_8139CPLUS,	"RTL8139C+" },
	{ RL_HWREV_8168_SPIN1,	"RTL8168 1" },
	{ RL_HWREV_8168_SPIN2,	"RTL8168 2" },
	{ RL_HWREV_8168_SPIN3,	"RTL8168 3" },
	{ RL_HWREV_8168C,	"RTL8168C/8111C" },
	{ RL_HWREV_8168C_SPIN2,	"RTL8168C/8111C" },
	{ RL_HWREV_8168CP,	"RTL8168CP/8111CP" },
	{ RL_HWREV_8168D,	"RTL8168D/8111D" },
	{ RL_HWREV_8168DP,      "RTL8168DP/8111DP" },
	{ RL_HWREV_8169,	"RTL8169" },
	{ RL_HWREV_8169_8110SB,	"RTL8169/8110SB" },
	{ RL_HWREV_8169_8110SBL, "RTL8169SBL" },
	{ RL_HWREV_8169_8110SCd, "RTL8169/8110SCd" },
	{ RL_HWREV_8169_8110SCe, "RTL8169/8110SCe" },
	{ RL_HWREV_8169S,	"RTL8169S" },

	{ 0, NULL }
};
246 
247 
248 static inline void
249 re_set_bufaddr(struct rl_desc *d, bus_addr_t addr)
250 {
251 	d->rl_bufaddr_lo = htole32((uint32_t)addr);
252 	if (sizeof(bus_addr_t) == sizeof(uint64_t))
253 		d->rl_bufaddr_hi = htole32((uint64_t)addr >> 32);
254 	else
255 		d->rl_bufaddr_hi = 0;
256 }
257 
258 /*
259  * Send a read command and address to the EEPROM, check for ACK.
260  */
261 void
262 re_eeprom_putbyte(struct rl_softc *sc, int addr)
263 {
264 	int	d, i;
265 
266 	d = addr | (RL_9346_READ << sc->rl_eewidth);
267 
268 	/*
269 	 * Feed in each bit and strobe the clock.
270 	 */
271 
272 	for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) {
273 		if (d & i)
274 			EE_SET(RL_EE_DATAIN);
275 		else
276 			EE_CLR(RL_EE_DATAIN);
277 		DELAY(100);
278 		EE_SET(RL_EE_CLK);
279 		DELAY(150);
280 		EE_CLR(RL_EE_CLK);
281 		DELAY(100);
282 	}
283 }
284 
285 /*
286  * Read a word of data stored in the EEPROM at address 'addr.'
287  */
288 void
289 re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
290 {
291 	int		i;
292 	u_int16_t	word = 0;
293 
294 	/*
295 	 * Send address of word we want to read.
296 	 */
297 	re_eeprom_putbyte(sc, addr);
298 
299 	/*
300 	 * Start reading bits from EEPROM.
301 	 */
302 	for (i = 0x8000; i; i >>= 1) {
303 		EE_SET(RL_EE_CLK);
304 		DELAY(100);
305 		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
306 			word |= i;
307 		EE_CLR(RL_EE_CLK);
308 		DELAY(100);
309 	}
310 
311 	*dest = word;
312 }
313 
314 /*
315  * Read a sequence of words from the EEPROM.
316  */
317 void
318 re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt)
319 {
320 	int		i;
321 	u_int16_t	word = 0, *ptr;
322 
323 	CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
324 
325 	DELAY(100);
326 
327 	for (i = 0; i < cnt; i++) {
328 		CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL);
329 		re_eeprom_getword(sc, off + i, &word);
330 		CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL);
331 		ptr = (u_int16_t *)(dest + (i * 2));
332 		*ptr = word;
333 	}
334 
335 	CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
336 }
337 
338 int
339 re_gmii_readreg(struct device *self, int phy, int reg)
340 {
341 	struct rl_softc	*sc = (struct rl_softc *)self;
342 	u_int32_t	rval;
343 	int		i;
344 
345 	if (phy != 7)
346 		return (0);
347 
348 	/* Let the rgephy driver read the GMEDIASTAT register */
349 
350 	if (reg == RL_GMEDIASTAT) {
351 		rval = CSR_READ_1(sc, RL_GMEDIASTAT);
352 		return (rval);
353 	}
354 
355 	CSR_WRITE_4(sc, RL_PHYAR, reg << 16);
356 	DELAY(1000);
357 
358 	for (i = 0; i < RL_TIMEOUT; i++) {
359 		rval = CSR_READ_4(sc, RL_PHYAR);
360 		if (rval & RL_PHYAR_BUSY)
361 			break;
362 		DELAY(100);
363 	}
364 
365 	if (i == RL_TIMEOUT) {
366 		printf ("%s: PHY read failed\n", sc->sc_dev.dv_xname);
367 		return (0);
368 	}
369 
370 	return (rval & RL_PHYAR_PHYDATA);
371 }
372 
373 void
374 re_gmii_writereg(struct device *dev, int phy, int reg, int data)
375 {
376 	struct rl_softc	*sc = (struct rl_softc *)dev;
377 	u_int32_t	rval;
378 	int		i;
379 
380 	CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
381 	    (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);
382 	DELAY(1000);
383 
384 	for (i = 0; i < RL_TIMEOUT; i++) {
385 		rval = CSR_READ_4(sc, RL_PHYAR);
386 		if (!(rval & RL_PHYAR_BUSY))
387 			break;
388 		DELAY(100);
389 	}
390 
391 	if (i == RL_TIMEOUT)
392 		printf ("%s: PHY write failed\n", sc->sc_dev.dv_xname);
393 }
394 
/*
 * mii(4) register read callback.  GigE parts go through the GMII
 * access path; the 8139C+ maps standard MII register numbers onto
 * its own register layout and is read directly.
 */
int
re_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct rl_softc	*sc = (struct rl_softc *)dev;
	u_int16_t	rval = 0;
	u_int16_t	re8139_reg = 0;
	int		s;

	s = splnet();

	/* Everything except the 8139C+ uses the GMII access registers. */
	if (sc->sc_hwrev != RL_HWREV_8139CPLUS) {
		rval = re_gmii_readreg(dev, phy, reg);
		splx(s);
		return (rval);
	}

	/* Pretend the internal PHY is only at address 0 */
	if (phy) {
		splx(s);
		return (0);
	}
	/* Translate the MII register number to its 8139C+ equivalent. */
	switch(reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		/* No PHY ID registers on the internal PHY; report zero. */
		splx(s);
		return (0);
	/*
	 * Allow the rlphy driver to read the media status
	 * register. If we have a link partner which does not
	 * support NWAY, this is the register which will tell
	 * us the results of parallel detection.
	 */
	case RL_MEDIASTAT:
		rval = CSR_READ_1(sc, RL_MEDIASTAT);
		splx(s);
		return (rval);
	default:
		printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg);
		splx(s);
		return (0);
	}
	rval = CSR_READ_2(sc, re8139_reg);
	if (re8139_reg == RL_BMCR) {
		/* 8139C+ has different bit layout. */
		rval &= ~(BMCR_LOOP | BMCR_ISO);
	}
	splx(s);
	return (rval);
}
459 
460 void
461 re_miibus_writereg(struct device *dev, int phy, int reg, int data)
462 {
463 	struct rl_softc	*sc = (struct rl_softc *)dev;
464 	u_int16_t	re8139_reg = 0;
465 	int		s;
466 
467 	s = splnet();
468 
469 	if (sc->sc_hwrev != RL_HWREV_8139CPLUS) {
470 		re_gmii_writereg(dev, phy, reg, data);
471 		splx(s);
472 		return;
473 	}
474 
475 	/* Pretend the internal PHY is only at address 0 */
476 	if (phy) {
477 		splx(s);
478 		return;
479 	}
480 	switch(reg) {
481 	case MII_BMCR:
482 		re8139_reg = RL_BMCR;
483 		/* 8139C+ has different bit layout. */
484 		data &= ~(BMCR_LOOP | BMCR_ISO);
485 		break;
486 	case MII_BMSR:
487 		re8139_reg = RL_BMSR;
488 		break;
489 	case MII_ANAR:
490 		re8139_reg = RL_ANAR;
491 		break;
492 	case MII_ANER:
493 		re8139_reg = RL_ANER;
494 		break;
495 	case MII_ANLPAR:
496 		re8139_reg = RL_LPAR;
497 		break;
498 	case MII_PHYIDR1:
499 	case MII_PHYIDR2:
500 		splx(s);
501 		return;
502 		break;
503 	default:
504 		printf("%s: bad phy register %x\n", sc->sc_dev.dv_xname, reg);
505 		splx(s);
506 		return;
507 	}
508 	CSR_WRITE_2(sc, re8139_reg, data);
509 	splx(s);
510 }
511 
/*
 * mii(4) media status change callback.  Intentionally empty: this
 * driver performs no register work on link state changes here.
 */
void
re_miibus_statchg(struct device *dev)
{
}
516 
/*
 * Program the receive filter: our station address, broadcast, and
 * the 64-bit multicast hash, honouring IFF_PROMISC and falling back
 * to IFF_ALLMULTI when the hash cannot express the membership.
 */
void
re_iff(struct rl_softc *sc)
{
	struct ifnet		*ifp = &sc->sc_arpcom.ac_if;
	int			h = 0;
	u_int32_t		hashes[2];
	u_int32_t		rxfilt;
	struct arpcom		*ac = &sc->sc_arpcom;
	struct ether_multi	*enm;
	struct ether_multistep	step;

	/* Start from the current RX config with all accept bits cleared. */
	rxfilt = CSR_READ_4(sc, RL_RXCFG);
	rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD |
	    RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD;

	/* Multicast ranges can't be expressed in the hash; go allmulti. */
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RL_RXCFG_RX_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RL_RXCFG_RX_ALLPHYS;
		hashes[0] = hashes[1] = 0xFFFFFFFF;
	} else {
		rxfilt |= RL_RXCFG_RX_MULTI;
		/* Program new filter. */
		bzero(hashes, sizeof(hashes));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* Top 6 bits of the big-endian CRC select the hash bit. */
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/*
	 * For some unfathomable reason, RealTek decided to reverse
	 * the order of the multicast hash registers in the PCI Express
	 * parts. This means we have to write the hash pattern in reverse
	 * order for those devices.
	 */
	if (sc->rl_flags & RL_FLAG_INVMAR) {
		CSR_WRITE_4(sc, RL_MAR0, swap32(hashes[1]));
		CSR_WRITE_4(sc, RL_MAR4, swap32(hashes[0]));
	} else {
		CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
		CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
	}

	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
}
580 
581 void
582 re_reset(struct rl_softc *sc)
583 {
584 	int	i;
585 
586 	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);
587 
588 	for (i = 0; i < RL_TIMEOUT; i++) {
589 		DELAY(10);
590 		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
591 			break;
592 	}
593 	if (i == RL_TIMEOUT)
594 		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
595 
596 	if (sc->rl_flags & RL_FLAG_MACLDPS)
597 		CSR_WRITE_1(sc, RL_LDPS, 1);
598 }
599 
600 #ifdef RE_DIAG
601 
602 /*
603  * The following routine is designed to test for a defect on some
604  * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
605  * lines connected to the bus, however for a 32-bit only card, they
606  * should be pulled high. The result of this defect is that the
607  * NIC will not work right if you plug it into a 64-bit slot: DMA
608  * operations will be done with 64-bit transfers, which will fail
609  * because the 64-bit data lines aren't connected.
610  *
611  * There's no way to work around this (short of talking a soldering
612  * iron to the board), however we can detect it. The method we use
613  * here is to put the NIC into digital loopback mode, set the receiver
614  * to promiscuous mode, and then try to send a frame. We then compare
615  * the frame data we sent to what was received. If the data matches,
616  * then the NIC is working correctly, otherwise we know the user has
617  * a defective NIC which has been mistakenly plugged into a 64-bit PCI
618  * slot. In the latter case, there's no way the NIC can work correctly,
619  * so we print out a message on the console and abort the device attach.
620  */
621 
/*
 * Loopback diagnostic: send one frame through the chip in digital
 * loopback and verify the received copy matches.  Detects defective
 * 32-bit 8169 boards plugged into 64-bit PCI slots (see the comment
 * block above).  Returns 0 on success, ENOBUFS/EIO on failure.
 */
int
re_diag(struct rl_softc *sc)
{
	struct ifnet		*ifp = &sc->sc_arpcom.ac_if;
	struct mbuf		*m0;
	struct ether_header	*eh;
	struct rl_rxsoft	*rxs;
	struct rl_desc		*cur_rx;
	bus_dmamap_t		dmamap;
	u_int16_t		status;
	u_int32_t		rxstat;
	int			total_len, i, s, error = 0, phyaddr;
	u_int8_t		dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
	u_int8_t		src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };

	DPRINTF(("inside re_diag\n"));
	/* Allocate a single mbuf */

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return (ENOBUFS);

	/*
	 * Initialize the NIC in test mode. This sets the chip up
	 * so that it can send and receive frames, but performs the
	 * following special functions:
	 * - Puts receiver in promiscuous mode
	 * - Enables digital loopback mode
	 * - Leaves interrupts turned off
	 */

	ifp->if_flags |= IFF_PROMISC;
	sc->rl_testmode = 1;
	re_reset(sc);
	re_init(ifp);
	sc->rl_flags |= RL_FLAG_LINK;
	/* The 8139C+ internal PHY is at address 0; gigE parts use 1. */
	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
		phyaddr = 0;
	else
		phyaddr = 1;

	/* Reset the PHY and wait for BMCR_RESET to self-clear. */
	re_miibus_writereg((struct device *)sc, phyaddr, MII_BMCR,
	    BMCR_RESET);
	for (i = 0; i < RL_TIMEOUT; i++) {
		status = re_miibus_readreg((struct device *)sc,
		    phyaddr, MII_BMCR);
		if (!(status & BMCR_RESET))
			break;
	}

	/* Put the PHY in loopback, then ack any pending interrupts. */
	re_miibus_writereg((struct device *)sc, phyaddr, MII_BMCR,
	    BMCR_LOOP);
	CSR_WRITE_2(sc, RL_ISR, RL_INTRS);

	DELAY(100000);

	/* Put some data in the mbuf */

	eh = mtod(m0, struct ether_header *);
	bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
	bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
	eh->ether_type = htons(ETHERTYPE_IP);
	m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;

	/*
	 * Queue the packet, start transmission.
	 */

	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
	s = splnet();
	IFQ_ENQUEUE(&ifp->if_snd, m0, NULL, error);
	re_start(ifp);
	splx(s);
	m0 = NULL;	/* ownership passed to the send queue */

	DPRINTF(("re_diag: transmission started\n"));

	/* Wait for it to propagate through the chip */

	DELAY(100000);
	for (i = 0; i < RL_TIMEOUT; i++) {
		status = CSR_READ_2(sc, RL_ISR);
		CSR_WRITE_2(sc, RL_ISR, status);
		/* Need both timeout-expired and RX-OK before checking the ring. */
		if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) ==
		    (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK))
			break;
		DELAY(10);
	}
	if (i == RL_TIMEOUT) {
		printf("%s: diagnostic failed, failed to receive packet "
		    "in loopback mode\n", sc->sc_dev.dv_xname);
		error = EIO;
		goto done;
	}

	/*
	 * The packet should have been dumped into the first
	 * entry in the RX DMA ring. Grab it from there.
	 */

	rxs = &sc->rl_ldata.rl_rxsoft[0];
	dmamap = rxs->rxs_dmamap;
	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, dmamap);

	m0 = rxs->rxs_mbuf;
	rxs->rxs_mbuf = NULL;
	eh = mtod(m0, struct ether_header *);

	/* Sync the descriptor before reading its status word. */
	RL_RXDESCSYNC(sc, 0, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	cur_rx = &sc->rl_ldata.rl_rx_list[0];
	rxstat = letoh32(cur_rx->rl_cmdstat);
	total_len = rxstat & sc->rl_rxlenmask;

	if (total_len != ETHER_MIN_LEN) {
		printf("%s: diagnostic failed, received short packet\n",
		    sc->sc_dev.dv_xname);
		error = EIO;
		goto done;
	}

	DPRINTF(("re_diag: packet received\n"));

	/* Test that the received packet data matches what we sent. */

	if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
	    bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
	    ntohs(eh->ether_type) != ETHERTYPE_IP) {
		printf("%s: WARNING, DMA FAILURE!\n", sc->sc_dev.dv_xname);
		printf("%s: expected TX data: %s",
		    sc->sc_dev.dv_xname, ether_sprintf(dst));
		printf("/%s/0x%x\n", ether_sprintf(src), ETHERTYPE_IP);
		printf("%s: received RX data: %s",
		    sc->sc_dev.dv_xname,
		    ether_sprintf(eh->ether_dhost));
		printf("/%s/0x%x\n", ether_sprintf(eh->ether_shost),
		    ntohs(eh->ether_type));
		printf("%s: You may have a defective 32-bit NIC plugged "
		    "into a 64-bit PCI slot.\n", sc->sc_dev.dv_xname);
		printf("%s: Please re-install the NIC in a 32-bit slot "
		    "for proper operation.\n", sc->sc_dev.dv_xname);
		printf("%s: Read the re(4) man page for more details.\n",
		    sc->sc_dev.dv_xname);
		error = EIO;
	}

done:
	/* Turn interface off, release resources */
	sc->rl_testmode = 0;
	sc->rl_flags &= ~RL_FLAG_LINK;
	ifp->if_flags &= ~IFF_PROMISC;
	re_stop(ifp, 1);
	if (m0 != NULL)
		m_freem(m0);
	DPRINTF(("leaving re_diag\n"));

	return (error);
}
781 
782 #endif
783 
#ifdef __armish__
/*
 * Thecus N2100 doesn't store the full mac address in eeprom
 * so we read the old mac address from the device before the reset
 * in hopes that the proper mac address is already there.
 */
union {
	u_int32_t eaddr_word[2];	/* two 32-bit halves as read from IDR0/IDR4 */
	u_char eaddr[ETHER_ADDR_LEN];	/* same bytes viewed as an Ethernet address */
} boot_eaddr;
int boot_eaddr_valid;			/* nonzero once boot_eaddr has been captured */
#endif /* __armish__ */
796 /*
797  * Attach the interface. Allocate softc structures, do ifmedia
798  * setup and ethernet/BPF attach.
799  */
800 int
801 re_attach(struct rl_softc *sc, const char *intrstr)
802 {
803 	u_char		eaddr[ETHER_ADDR_LEN];
804 	u_int16_t	as[ETHER_ADDR_LEN / 2];
805 	struct ifnet	*ifp;
806 	u_int16_t	re_did = 0;
807 	int		error = 0, i;
808 	const struct re_revision *rr;
809 	const char	*re_name = NULL;
810 
811 	sc->sc_hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
812 
813 	switch (sc->sc_hwrev) {
814 	case RL_HWREV_8139CPLUS:
815 		sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_AUTOPAD;
816 		break;
817 	case RL_HWREV_8100E_SPIN1:
818 	case RL_HWREV_8100E_SPIN2:
819 	case RL_HWREV_8101E:
820 		sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_INVMAR |
821 		    RL_FLAG_PHYWAKE;
822 		break;
823 	case RL_HWREV_8102E:
824 	case RL_HWREV_8102EL:
825 	case RL_HWREV_8103E:
826 		sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_INVMAR |
827 		    RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
828 		    RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
829 		break;
830 	case RL_HWREV_8168_SPIN1:
831 	case RL_HWREV_8168_SPIN2:
832 	case RL_HWREV_8168_SPIN3:
833 		sc->rl_flags |= RL_FLAG_INVMAR | RL_FLAG_PHYWAKE |
834 		    RL_FLAG_MACSTAT | RL_FLAG_HWIM;
835 		break;
836 	case RL_HWREV_8168C_SPIN2:
837 		sc->rl_flags |= RL_FLAG_MACSLEEP;
838 		/* FALLTHROUGH */
839 	case RL_HWREV_8168C:
840 	case RL_HWREV_8168CP:
841 	case RL_HWREV_8168D:
842 	case RL_HWREV_8168DP:
843 		sc->rl_flags |= RL_FLAG_INVMAR | RL_FLAG_PHYWAKE |
844 		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
845 		    RL_FLAG_HWIM | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
846 		/*
847 		 * These controllers support jumbo frame but it seems
848 		 * that enabling it requires touching additional magic
849 		 * registers. Depending on MAC revisions some
850 		 * controllers need to disable checksum offload. So
851 		 * disable jumbo frame until I have better idea what
852 		 * it really requires to make it support.
853 		 * RTL8168C/CP : supports up to 6KB jumbo frame.
854 		 * RTL8111C/CP : supports up to 9KB jumbo frame.
855 		 */
856 		sc->rl_flags |= RL_FLAG_NOJUMBO;
857 		break;
858 	case RL_HWREV_8169_8110SB:
859 	case RL_HWREV_8169_8110SBL:
860 	case RL_HWREV_8169_8110SCd:
861 	case RL_HWREV_8169_8110SCe:
862 		sc->rl_flags |= RL_FLAG_PHYWAKE;
863 		/* FALLTHROUGH */
864 	case RL_HWREV_8169:
865 	case RL_HWREV_8169S:
866 	case RL_HWREV_8110S:
867 		sc->rl_flags |= RL_FLAG_MACLDPS;
868 		break;
869 	default:
870 		break;
871 	}
872 
873 	/* Reset the adapter. */
874 	re_reset(sc);
875 
876 	sc->rl_tx_time = 5;		/* 125us */
877 	sc->rl_rx_time = 2;		/* 50us */
878 	if (sc->rl_flags & RL_FLAG_PCIE)
879 		sc->rl_sim_time = 75;	/* 75us */
880 	else
881 		sc->rl_sim_time = 125;	/* 125us */
882 	sc->rl_imtype = RL_IMTYPE_SIM;	/* simulated interrupt moderation */
883 
884 	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
885 		sc->rl_bus_speed = 33; /* XXX */
886 	else if (sc->rl_flags & RL_FLAG_PCIE)
887 		sc->rl_bus_speed = 125;
888 	else {
889 		u_int8_t cfg2;
890 
891 		cfg2 = CSR_READ_1(sc, RL_CFG2);
892 		switch (cfg2 & RL_CFG2_PCI_MASK) {
893 		case RL_CFG2_PCI_33MHZ:
894  			sc->rl_bus_speed = 33;
895 			break;
896 		case RL_CFG2_PCI_66MHZ:
897 			sc->rl_bus_speed = 66;
898 			break;
899 		default:
900 			printf("%s: unknown bus speed, assume 33MHz\n",
901 			    sc->sc_dev.dv_xname);
902 			sc->rl_bus_speed = 33;
903 			break;
904 		}
905 
906 		if (cfg2 & RL_CFG2_PCI_64BIT)
907 			sc->rl_flags |= RL_FLAG_PCI64;
908 	}
909 
910 	re_config_imtype(sc, sc->rl_imtype);
911 
912 	if (sc->rl_flags & RL_FLAG_PAR) {
913 		/*
914 		 * XXX Should have a better way to extract station
915 		 * address from EEPROM.
916 		 */
917 		for (i = 0; i < ETHER_ADDR_LEN; i++)
918 			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
919 	} else {
920 		sc->rl_eewidth = RL_9356_ADDR_LEN;
921 		re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
922 		if (re_did != 0x8129)
923 			sc->rl_eewidth = RL_9346_ADDR_LEN;
924 
925 		/*
926 		 * Get station address from the EEPROM.
927 		 */
928 		re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
929 		for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
930 			as[i] = letoh16(as[i]);
931 		bcopy(as, eaddr, sizeof(eaddr));
932 
933 #ifdef __armish__
934 		/*
935 		 * On the Thecus N2100, the MAC address in the EEPROM is
936 		 * always 00:14:fd:10:00:00.  The proper MAC address is
937 		 * stored in flash.  Fortunately RedBoot configures the
938 		 * proper MAC address (for the first onboard interface)
939 		 * which we can read from the IDR.
940 		 */
941 		if (eaddr[0] == 0x00 && eaddr[1] == 0x14 &&
942 		    eaddr[2] == 0xfd && eaddr[3] == 0x10 &&
943 		    eaddr[4] == 0x00 && eaddr[5] == 0x00) {
944 			if (boot_eaddr_valid == 0) {
945 				boot_eaddr.eaddr_word[1] =
946 				    letoh32(CSR_READ_4(sc, RL_IDR4));
947 				boot_eaddr.eaddr_word[0] =
948 				    letoh32(CSR_READ_4(sc, RL_IDR0));
949 				boot_eaddr_valid = 1;
950 			}
951 
952 			bcopy(boot_eaddr.eaddr, eaddr, sizeof(eaddr));
953 			eaddr[5] += sc->sc_dev.dv_unit;
954 		}
955 #endif
956 	}
957 
958 	/*
959 	 * Set RX length mask, TX poll request register
960 	 * and TX descriptor count.
961 	 */
962 	if (sc->sc_hwrev == RL_HWREV_8139CPLUS) {
963 		sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
964 		sc->rl_txstart = RL_TXSTART;
965 		sc->rl_ldata.rl_tx_desc_cnt = RL_TX_DESC_CNT_8139;
966 	} else {
967 		sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
968 		sc->rl_txstart = RL_GTXSTART;
969 		sc->rl_ldata.rl_tx_desc_cnt = RL_TX_DESC_CNT_8169;
970 	}
971 
972 	bcopy(eaddr, (char *)&sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
973 
974 	for (rr = re_revisions; rr->re_name != NULL; rr++) {
975 		if (rr->re_chipid == sc->sc_hwrev)
976 			re_name = rr->re_name;
977 	}
978 
979 	if (re_name == NULL)
980 		printf(": unknown ASIC (0x%04x)", sc->sc_hwrev >> 16);
981 	else
982 		printf(": %s (0x%04x)", re_name, sc->sc_hwrev >> 16);
983 
984 	printf(", %s, address %s\n", intrstr,
985 	    ether_sprintf(sc->sc_arpcom.ac_enaddr));
986 
987 	if (sc->rl_ldata.rl_tx_desc_cnt >
988 	    PAGE_SIZE / sizeof(struct rl_desc)) {
989 		sc->rl_ldata.rl_tx_desc_cnt =
990 		    PAGE_SIZE / sizeof(struct rl_desc);
991 	}
992 
993 	/* Allocate DMA'able memory for the TX ring */
994 	if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_TX_LIST_SZ(sc),
995 		    RL_RING_ALIGN, 0, &sc->rl_ldata.rl_tx_listseg, 1,
996 		    &sc->rl_ldata.rl_tx_listnseg, BUS_DMA_NOWAIT)) != 0) {
997 		printf("%s: can't allocate tx listseg, error = %d\n",
998 		    sc->sc_dev.dv_xname, error);
999 		goto fail_0;
1000 	}
1001 
1002 	/* Load the map for the TX ring. */
1003 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_tx_listseg,
1004 		    sc->rl_ldata.rl_tx_listnseg, RL_TX_LIST_SZ(sc),
1005 		    (caddr_t *)&sc->rl_ldata.rl_tx_list,
1006 		    BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
1007 		printf("%s: can't map tx list, error = %d\n",
1008 		    sc->sc_dev.dv_xname, error);
1009 		goto fail_1;
1010 	}
1011 	memset(sc->rl_ldata.rl_tx_list, 0, RL_TX_LIST_SZ(sc));
1012 
1013 	if ((error = bus_dmamap_create(sc->sc_dmat, RL_TX_LIST_SZ(sc), 1,
1014 		    RL_TX_LIST_SZ(sc), 0, 0,
1015 		    &sc->rl_ldata.rl_tx_list_map)) != 0) {
1016 		printf("%s: can't create tx list map, error = %d\n",
1017 		    sc->sc_dev.dv_xname, error);
1018 		goto fail_2;
1019 	}
1020 
1021 	if ((error = bus_dmamap_load(sc->sc_dmat,
1022 		    sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
1023 		    RL_TX_LIST_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) {
1024 		printf("%s: can't load tx list, error = %d\n",
1025 		    sc->sc_dev.dv_xname, error);
1026 		goto fail_3;
1027 	}
1028 
1029 	/* Create DMA maps for TX buffers */
1030 	for (i = 0; i < RL_TX_QLEN; i++) {
1031 		error = bus_dmamap_create(sc->sc_dmat,
1032 		    RL_JUMBO_FRAMELEN,
1033 		    RL_TX_DESC_CNT(sc) - RL_NTXDESC_RSVD, RL_TDESC_CMD_FRAGLEN,
1034 		    0, 0, &sc->rl_ldata.rl_txq[i].txq_dmamap);
1035 		if (error) {
1036 			printf("%s: can't create DMA map for TX\n",
1037 			    sc->sc_dev.dv_xname);
1038 			goto fail_4;
1039 		}
1040 	}
1041 
1042         /* Allocate DMA'able memory for the RX ring */
1043 	if ((error = bus_dmamem_alloc(sc->sc_dmat, RL_RX_DMAMEM_SZ,
1044 		    RL_RING_ALIGN, 0, &sc->rl_ldata.rl_rx_listseg, 1,
1045 		    &sc->rl_ldata.rl_rx_listnseg, BUS_DMA_NOWAIT)) != 0) {
1046 		printf("%s: can't allocate rx listnseg, error = %d\n",
1047 		    sc->sc_dev.dv_xname, error);
1048 		goto fail_4;
1049 	}
1050 
1051         /* Load the map for the RX ring. */
1052 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->rl_ldata.rl_rx_listseg,
1053 		    sc->rl_ldata.rl_rx_listnseg, RL_RX_DMAMEM_SZ,
1054 		    (caddr_t *)&sc->rl_ldata.rl_rx_list,
1055 		    BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
1056 		printf("%s: can't map rx list, error = %d\n",
1057 		    sc->sc_dev.dv_xname, error);
1058 		goto fail_5;
1059 
1060 	}
1061 	memset(sc->rl_ldata.rl_rx_list, 0, RL_RX_DMAMEM_SZ);
1062 
1063 	if ((error = bus_dmamap_create(sc->sc_dmat, RL_RX_DMAMEM_SZ, 1,
1064 		    RL_RX_DMAMEM_SZ, 0, 0,
1065 		    &sc->rl_ldata.rl_rx_list_map)) != 0) {
1066 		printf("%s: can't create rx list map, error = %d\n",
1067 		    sc->sc_dev.dv_xname, error);
1068 		goto fail_6;
1069 	}
1070 
1071 	if ((error = bus_dmamap_load(sc->sc_dmat,
1072 		    sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
1073 		    RL_RX_DMAMEM_SZ, NULL, BUS_DMA_NOWAIT)) != 0) {
1074 		printf("%s: can't load rx list, error = %d\n",
1075 		    sc->sc_dev.dv_xname, error);
1076 		goto fail_7;
1077 	}
1078 
1079 	/* Create DMA maps for RX buffers */
1080 	for (i = 0; i < RL_RX_DESC_CNT; i++) {
1081 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
1082 		    0, 0, &sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
1083 		if (error) {
1084 			printf("%s: can't create DMA map for RX\n",
1085 			    sc->sc_dev.dv_xname);
1086 			goto fail_8;
1087 		}
1088 	}
1089 
1090 	ifp = &sc->sc_arpcom.ac_if;
1091 	ifp->if_softc = sc;
1092 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
1093 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1094 	ifp->if_ioctl = re_ioctl;
1095 	ifp->if_start = re_start;
1096 	ifp->if_watchdog = re_watchdog;
1097 	ifp->if_init = re_init;
1098 	if ((sc->rl_flags & RL_FLAG_NOJUMBO) == 0)
1099 		ifp->if_hardmtu = RL_JUMBO_MTU;
1100 	IFQ_SET_MAXLEN(&ifp->if_snd, RL_TX_QLEN);
1101 	IFQ_SET_READY(&ifp->if_snd);
1102 
1103 	m_clsetwms(ifp, MCLBYTES, 2, RL_RX_DESC_CNT);
1104 
1105 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
1106 			       IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
1107 
1108 #if NVLAN > 0
1109 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1110 #endif
1111 
1112 	timeout_set(&sc->timer_handle, re_tick, sc);
1113 
1114 	/* Take PHY out of power down mode. */
1115 	if (sc->rl_flags & RL_FLAG_PHYWAKE) {
1116 		re_gmii_writereg((struct device *)sc, 1, 0x1f, 0);
1117 		re_gmii_writereg((struct device *)sc, 1, 0x0e, 0);
1118 	}
1119 
1120 	/* Do MII setup */
1121 	sc->sc_mii.mii_ifp = ifp;
1122 	sc->sc_mii.mii_readreg = re_miibus_readreg;
1123 	sc->sc_mii.mii_writereg = re_miibus_writereg;
1124 	sc->sc_mii.mii_statchg = re_miibus_statchg;
1125 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, re_ifmedia_upd,
1126 	    re_ifmedia_sts);
1127 	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
1128 	    MII_OFFSET_ANY, MIIF_DOPAUSE);
1129 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
1130 		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
1131 		ifmedia_add(&sc->sc_mii.mii_media,
1132 		    IFM_ETHER|IFM_NONE, 0, NULL);
1133 		ifmedia_set(&sc->sc_mii.mii_media,
1134 		    IFM_ETHER|IFM_NONE);
1135 	} else
1136 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1137 
1138 	/*
1139 	 * Call MI attach routine.
1140 	 */
1141 	re_reset(sc);
1142 	if_attach(ifp);
1143 	ether_ifattach(ifp);
1144 
1145 #ifdef RE_DIAG
1146 	/*
1147 	 * Perform hardware diagnostic on the original RTL8169.
1148 	 * Some 32-bit cards were incorrectly wired and would
1149 	 * malfunction if plugged into a 64-bit slot.
1150 	 */
1151 	if (sc->sc_hwrev == RL_HWREV_8169) {
1152 		error = re_diag(sc);
1153 		if (error) {
1154 			printf("%s: attach aborted due to hardware diag failure\n",
1155 			    sc->sc_dev.dv_xname);
1156 			ether_ifdetach(ifp);
1157 			goto fail_8;
1158 		}
1159 	}
1160 #endif
1161 
1162 	return (0);
1163 
1164 fail_8:
1165 	/* Destroy DMA maps for RX buffers. */
1166 	for (i = 0; i < RL_RX_DESC_CNT; i++) {
1167 		if (sc->rl_ldata.rl_rxsoft[i].rxs_dmamap != NULL)
1168 			bus_dmamap_destroy(sc->sc_dmat,
1169 			    sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
1170 	}
1171 
1172 	/* Free DMA'able memory for the RX ring. */
1173 	bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
1174 fail_7:
1175 	bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_rx_list_map);
1176 fail_6:
1177 	bus_dmamem_unmap(sc->sc_dmat,
1178 	    (caddr_t)sc->rl_ldata.rl_rx_list, RL_RX_DMAMEM_SZ);
1179 fail_5:
1180 	bus_dmamem_free(sc->sc_dmat,
1181 	    &sc->rl_ldata.rl_rx_listseg, sc->rl_ldata.rl_rx_listnseg);
1182 
1183 fail_4:
1184 	/* Destroy DMA maps for TX buffers. */
1185 	for (i = 0; i < RL_TX_QLEN; i++) {
1186 		if (sc->rl_ldata.rl_txq[i].txq_dmamap != NULL)
1187 			bus_dmamap_destroy(sc->sc_dmat,
1188 			    sc->rl_ldata.rl_txq[i].txq_dmamap);
1189 	}
1190 
1191 	/* Free DMA'able memory for the TX ring. */
1192 	bus_dmamap_unload(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
1193 fail_3:
1194 	bus_dmamap_destroy(sc->sc_dmat, sc->rl_ldata.rl_tx_list_map);
1195 fail_2:
1196 	bus_dmamem_unmap(sc->sc_dmat,
1197 	    (caddr_t)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ(sc));
1198 fail_1:
1199 	bus_dmamem_free(sc->sc_dmat,
1200 	    &sc->rl_ldata.rl_tx_listseg, sc->rl_ldata.rl_tx_listnseg);
1201 fail_0:
1202  	return (1);
1203 }
1204 
1205 
int
re_newbuf(struct rl_softc *sc)
{
	struct mbuf	*m;
	bus_dmamap_t	map;
	struct rl_desc	*d;
	struct rl_rxsoft *rxs;
	u_int32_t	cmdstat;
	int		error, idx;

	/*
	 * Allocate a cluster mbuf, load it for DMA and attach it to the
	 * RX descriptor at the current producer index.  Returns 0 on
	 * success, or ENOBUFS if no cluster is available, the DMA load
	 * fails, or the target descriptor is still owned by the chip.
	 */
	m = MCLGETI(NULL, M_DONTWAIT, &sc->sc_arpcom.ac_if, MCLBYTES);
	if (!m)
		return (ENOBUFS);

	/*
	 * Initialize mbuf length fields and fixup
	 * alignment so that the frame payload is
	 * longword aligned on strict alignment archs.
	 */
	m->m_len = m->m_pkthdr.len = RE_RX_DESC_BUFLEN;
	m->m_data += RE_ETHER_ALIGN;

	idx = sc->rl_ldata.rl_rx_prodidx;
	rxs = &sc->rl_ldata.rl_rxsoft[idx];
	map = rxs->rxs_dmamap;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/*
	 * Re-read the descriptor's status word to make sure the chip has
	 * really given it back to us before we overwrite it.
	 */
	d = &sc->rl_ldata.rl_rx_list[idx];
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	cmdstat = letoh32(d->rl_cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
	if (cmdstat & RL_RDESC_STAT_OWN) {
		printf("%s: tried to map busy RX descriptor\n",
		    sc->sc_dev.dv_xname);
		m_freem(m);
		return (ENOBUFS);
	}

	rxs->rxs_mbuf = m;

	d->rl_vlanctl = 0;
	cmdstat = map->dm_segs[0].ds_len;
	if (idx == (RL_RX_DESC_CNT - 1))
		cmdstat |= RL_RDESC_CMD_EOR;
	re_set_bufaddr(d, map->dm_segs[0].ds_addr);
	/*
	 * Write the descriptor first WITHOUT the OWN bit and flush it to
	 * memory, then set OWN and flush again.  This two-step handoff
	 * ensures the chip can never observe a descriptor that is marked
	 * owned but only partially initialized.
	 */
	d->rl_cmdstat = htole32(cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	cmdstat |= RL_RDESC_CMD_OWN;
	d->rl_cmdstat = htole32(cmdstat);
	RL_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->rl_ldata.rl_rx_prodidx = RL_NEXT_RX_DESC(sc, idx);
	sc->rl_ldata.rl_rx_cnt++;

	return (0);
}
1270 
1271 
1272 int
1273 re_tx_list_init(struct rl_softc *sc)
1274 {
1275 	int i;
1276 
1277 	memset(sc->rl_ldata.rl_tx_list, 0, RL_TX_LIST_SZ(sc));
1278 	for (i = 0; i < RL_TX_QLEN; i++) {
1279 		sc->rl_ldata.rl_txq[i].txq_mbuf = NULL;
1280 	}
1281 
1282 	bus_dmamap_sync(sc->sc_dmat,
1283 	    sc->rl_ldata.rl_tx_list_map, 0,
1284 	    sc->rl_ldata.rl_tx_list_map->dm_mapsize,
1285 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1286 	sc->rl_ldata.rl_txq_prodidx = 0;
1287 	sc->rl_ldata.rl_txq_considx = 0;
1288 	sc->rl_ldata.rl_tx_free = RL_TX_DESC_CNT(sc);
1289 	sc->rl_ldata.rl_tx_nextfree = 0;
1290 
1291 	return (0);
1292 }
1293 
1294 int
1295 re_rx_list_init(struct rl_softc *sc)
1296 {
1297 	bzero(sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ);
1298 
1299 	sc->rl_ldata.rl_rx_prodidx = 0;
1300 	sc->rl_ldata.rl_rx_considx = 0;
1301 	sc->rl_ldata.rl_rx_cnt = 0;
1302 	sc->rl_head = sc->rl_tail = NULL;
1303 
1304 	re_rx_list_fill(sc);
1305 
1306 	return (0);
1307 }
1308 
1309 void
1310 re_rx_list_fill(struct rl_softc *sc)
1311 {
1312 	while (sc->rl_ldata.rl_rx_cnt < RL_RX_DESC_CNT) {
1313 		if (re_newbuf(sc) == ENOBUFS)
1314 			break;
1315 	}
1316 }
1317 
1318 /*
1319  * RX handler for C+ and 8169. For the gigE chips, we support
1320  * the reception of jumbo frames that have been fragmented
1321  * across multiple 2K mbuf cluster buffers.
1322  */
int
re_rxeof(struct rl_softc *sc)
{
	struct mbuf	*m;
	struct ifnet	*ifp;
	int		i, total_len, rx = 0;
	struct rl_desc	*cur_rx;
	struct rl_rxsoft *rxs;
	u_int32_t	rxstat, rxvlan;

	ifp = &sc->sc_arpcom.ac_if;

	/*
	 * Walk the ring from the consumer index while filled descriptors
	 * remain; stop at the first one the chip still owns.
	 */
	for (i = sc->rl_ldata.rl_rx_considx; sc->rl_ldata.rl_rx_cnt > 0;
	     i = RL_NEXT_RX_DESC(sc, i)) {
		cur_rx = &sc->rl_ldata.rl_rx_list[i];
		RL_RXDESCSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		rxstat = letoh32(cur_rx->rl_cmdstat);
		rxvlan = letoh32(cur_rx->rl_vlanctl);
		RL_RXDESCSYNC(sc, i, BUS_DMASYNC_PREREAD);
		if ((rxstat & RL_RDESC_STAT_OWN) != 0)
			break;
		total_len = rxstat & sc->rl_rxlenmask;
		rxs = &sc->rl_ldata.rl_rxsoft[i];
		m = rxs->rxs_mbuf;
		rxs->rxs_mbuf = NULL;
		sc->rl_ldata.rl_rx_cnt--;
		rx = 1;

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->sc_dmat,
		    rxs->rxs_dmamap, 0, rxs->rxs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

		/*
		 * No EOF: this buffer holds a non-final fragment of a
		 * jumbo frame.  Append it to the rl_head/rl_tail chain
		 * and go look for the next descriptor.
		 */
		if (!(rxstat & RL_RDESC_STAT_EOF)) {
			m->m_len = RE_RX_DESC_BUFLEN;
			if (sc->rl_head == NULL)
				sc->rl_head = sc->rl_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
				sc->rl_tail = m;
			}
			continue;
		}

		/*
		 * NOTE: for the 8139C+, the frame length field
		 * is always 12 bits in size, but for the gigE chips,
		 * it is 13 bits (since the max RX frame length is 16K).
		 * Unfortunately, all 32 bits in the status word
		 * were already used, so to make room for the extra
		 * length bit, RealTek took out the 'frame alignment
		 * error' bit and shifted the other status bits
		 * over one slot. The OWN, EOR, FS and LS bits are
		 * still in the same places. We have already extracted
		 * the frame length and checked the OWN bit, so rather
		 * than using an alternate bit mapping, we shift the
		 * status bits one space to the right so we can evaluate
		 * them using the 8169 status as though it was in the
		 * same format as that of the 8139C+.
		 */
		if (sc->sc_hwrev != RL_HWREV_8139CPLUS)
			rxstat >>= 1;

		/*
		 * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be
		 * set, but if CRC is clear, it will still be a valid frame.
		 */
		if (rxstat & RL_RDESC_STAT_RXERRSUM && !(total_len > 8191 &&
		    (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rl_head != NULL) {
				m_freem(sc->rl_head);
				sc->rl_head = sc->rl_tail = NULL;
			}
			continue;
		}

		/*
		 * Final fragment of a multi-buffer frame: compute this
		 * buffer's share of the total length, trim the trailing
		 * CRC off the chain, and hand the whole chain up.
		 */
		if (sc->rl_head != NULL) {
			m->m_len = total_len % RE_RX_DESC_BUFLEN;
			if (m->m_len == 0)
				m->m_len = RE_RX_DESC_BUFLEN;
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rl_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
			}
			m = sc->rl_head;
			sc->rl_head = sc->rl_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming */

		if (sc->rl_flags & RL_FLAG_DESCV2) {
			/* Check IP header checksum */
			if ((rxvlan & RL_RDESC_IPV4) &&
			    !(rxstat & RL_RDESC_STAT_IPSUMBAD))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

			/* Check TCP/UDP checksum */
			if ((rxvlan & (RL_RDESC_IPV4|RL_RDESC_IPV6)) &&
			    (((rxstat & RL_RDESC_STAT_TCP) &&
			    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
			    ((rxstat & RL_RDESC_STAT_UDP) &&
			    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
				    M_UDP_CSUM_IN_OK;
		} else {
			/* Check IP header checksum */
			if ((rxstat & RL_RDESC_STAT_PROTOID) &&
			    !(rxstat & RL_RDESC_STAT_IPSUMBAD))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

			/* Check TCP/UDP checksum */
			if ((RL_TCPPKT(rxstat) &&
			    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
			    (RL_UDPPKT(rxstat) &&
			    !(rxstat & RL_RDESC_STAT_UDPSUMBAD)))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
				    M_UDP_CSUM_IN_OK;
		}
#if NVLAN > 0
		/* Propagate a hardware-stripped VLAN tag, if any. */
		if (rxvlan & RL_RDESC_VLANCTL_TAG) {
			m->m_pkthdr.ether_vtag =
			    ntohs((rxvlan & RL_RDESC_VLANCTL_DATA));
			m->m_flags |= M_VLANTAG;
		}
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ether_input_mbuf(ifp, m);
	}

	/* Remember where we stopped and replenish the consumed buffers. */
	sc->rl_ldata.rl_rx_considx = i;
	re_rx_list_fill(sc);

	return (rx);
}
1487 
/*
 * Reclaim completed transmissions: walk the TX queue from the consumer
 * index, free the mbufs and unload the DMA maps of packets the chip
 * has finished with, and update interface statistics.  Returns nonzero
 * if anything was reclaimed.
 */
int
re_txeof(struct rl_softc *sc)
{
	struct ifnet	*ifp;
	struct rl_txq	*txq;
	uint32_t	txstat;
	int		idx, descidx, tx = 0;

	ifp = &sc->sc_arpcom.ac_if;

	for (idx = sc->rl_ldata.rl_txq_considx;; idx = RL_NEXT_TXQ(sc, idx)) {
		txq = &sc->rl_ldata.rl_txq[idx];

		/* An empty slot means we have caught up with the producer. */
		if (txq->txq_mbuf == NULL) {
			KASSERT(idx == sc->rl_ldata.rl_txq_prodidx);
			break;
		}

		/*
		 * Check the status of the packet's LAST descriptor; if the
		 * chip still owns it, the packet is still being sent.
		 */
		descidx = txq->txq_descidx;
		RL_TXDESCSYNC(sc, descidx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		txstat =
		    letoh32(sc->rl_ldata.rl_tx_list[descidx].rl_cmdstat);
		RL_TXDESCSYNC(sc, descidx, BUS_DMASYNC_PREREAD);
		KASSERT((txstat & RL_TDESC_CMD_EOF) != 0);
		if (txstat & RL_TDESC_CMD_OWN)
			break;

		tx = 1;
		sc->rl_ldata.rl_tx_free += txq->txq_nsegs;
		KASSERT(sc->rl_ldata.rl_tx_free <= RL_TX_DESC_CNT(sc));
		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap,
		    0, txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RL_TDESC_STAT_EXCESSCOL | RL_TDESC_STAT_COLCNT))
			ifp->if_collisions++;
		if (txstat & RL_TDESC_STAT_TXERRSUM)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;
	}

	sc->rl_ldata.rl_txq_considx = idx;

	/* Enough descriptors free again: allow new transmit attempts. */
	if (sc->rl_ldata.rl_tx_free > RL_NTXDESC_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Some chips will ignore a second TX request issued while an
	 * existing transmission is in progress. If the transmitter goes
	 * idle but there are still packets waiting to be sent, we need
	 * to restart the channel here to flush them out. This only
	 * seems to be required with the PCIe devices.
	 */
	if (sc->rl_ldata.rl_tx_free < RL_TX_DESC_CNT(sc))
		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
	else
		ifp->if_timer = 0;

	return (tx);
}
1552 
1553 void
1554 re_tick(void *xsc)
1555 {
1556 	struct rl_softc	*sc = xsc;
1557 	struct mii_data	*mii;
1558 	struct ifnet	*ifp;
1559 	int s;
1560 
1561 	ifp = &sc->sc_arpcom.ac_if;
1562 	mii = &sc->sc_mii;
1563 
1564 	s = splnet();
1565 
1566 	mii_tick(mii);
1567 	if (sc->rl_flags & RL_FLAG_LINK) {
1568 		if (!(mii->mii_media_status & IFM_ACTIVE))
1569 			sc->rl_flags &= ~RL_FLAG_LINK;
1570 	} else {
1571 		if (mii->mii_media_status & IFM_ACTIVE &&
1572 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1573 			sc->rl_flags |= RL_FLAG_LINK;
1574 			if (!IFQ_IS_EMPTY(&ifp->if_snd))
1575 				re_start(ifp);
1576 		}
1577 	}
1578 	splx(s);
1579 
1580 	timeout_add_sec(&sc->timer_handle, 1);
1581 }
1582 
/*
 * Interrupt handler.  Reads and acknowledges the interrupt status
 * register in a loop, dispatching RX, TX, system-error and link-change
 * work, then adjusts the interrupt moderation mode if simulated
 * (timer-based) moderation is in use.  Returns nonzero if the
 * interrupt was ours.
 */
int
re_intr(void *arg)
{
	struct rl_softc	*sc = arg;
	struct ifnet	*ifp;
	u_int16_t	status;
	int		claimed = 0, rx, tx;

	ifp = &sc->sc_arpcom.ac_if;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	rx = tx = 0;
	for (;;) {

		status = CSR_READ_2(sc, RL_ISR);
		/* If the card has gone away the read returns 0xffff. */
		if (status == 0xffff)
			break;
		/* Ack the bits we saw before processing them. */
		if (status)
			CSR_WRITE_2(sc, RL_ISR, status);

		if ((status & RL_INTRS_CPLUS) == 0)
			break;

		if (status & (sc->rl_rx_ack | RL_ISR_RX_ERR)) {
			rx |= re_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rl_tx_ack | RL_ISR_TX_ERR)) {
			tx |= re_txeof(sc);
			claimed = 1;
		}

		/* Fatal PCI/system error: reset and reinitialize the chip. */
		if (status & RL_ISR_SYSTEM_ERR) {
			re_reset(sc);
			re_init(ifp);
			claimed = 1;
		}

		/* Link change: run the tick handler now instead of waiting. */
		if (status & RL_ISR_LINKCHG) {
			timeout_del(&sc->timer_handle);
			re_tick(sc);
			claimed = 1;
		}
	}

	if (sc->rl_imtype == RL_IMTYPE_SIM) {
		if ((sc->rl_flags & RL_FLAG_TIMERINTR)) {
			if ((tx | rx) == 0) {
				/*
				 * Nothing needs to be processed, fallback
				 * to use TX/RX interrupts.
				 */
				re_setup_intr(sc, 1, RL_IMTYPE_NONE);

				/*
				 * Recollect, mainly to avoid the possible
				 * race introduced by changing interrupt
				 * masks.
				 */
				re_rxeof(sc);
				tx = re_txeof(sc);
			} else
				CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */
		} else if (tx | rx) {
			/*
			 * Assume that using simulated interrupt moderation
			 * (hardware timer based) could reduce the interrupt
			 * rate.
			 */
			re_setup_intr(sc, 1, RL_IMTYPE_SIM);
		}
	}

	/* TX descriptors were freed: try to send more queued packets. */
	if (tx && !IFQ_IS_EMPTY(&ifp->if_snd))
		re_start(ifp);

	return (claimed);
}
1665 
/*
 * Map a single outgoing mbuf chain onto TX descriptors starting at
 * rl_tx_nextfree and hand ownership of all but the first descriptor to
 * the chip (the first descriptor's OWN bit is set last, after the whole
 * chain is written, so the chip never sees a half-built frame).
 * On success *idx is advanced to the next TX queue slot.  Returns 0,
 * EFBIG when too few descriptors are free, or another errno from the
 * DMA load.
 */
int
re_encap(struct rl_softc *sc, struct mbuf *m, int *idx)
{
	bus_dmamap_t	map;
	int		error, seg, nsegs, uidx, startidx, curidx, lastidx, pad;
	struct rl_desc	*d;
	u_int32_t	cmdstat, vlanctl = 0, csum_flags = 0;
	struct rl_txq	*txq;

	if (sc->rl_ldata.rl_tx_free <= RL_NTXDESC_RSVD)
		return (EFBIG);

	/*
	 * Set up checksum offload. Note: checksum offload bits must
	 * appear in all descriptors of a multi-descriptor transmit
	 * attempt. This is according to testing done with an 8169
	 * chip. This is a requirement.
	 */

	/*
	 * Set RL_TDESC_CMD_IPCSUM if any checksum offloading
	 * is requested.  Otherwise, RL_TDESC_CMD_TCPCSUM/
	 * RL_TDESC_CMD_UDPCSUM does not take affect.
	 */

	if ((m->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT|M_TCPV4_CSUM_OUT|M_UDPV4_CSUM_OUT)) != 0) {
		if (sc->rl_flags & RL_FLAG_DESCV2) {
			/* V2 descriptors carry csum bits in the vlanctl word. */
			vlanctl |= RL_TDESC_CMD_IPCSUMV2;
			if (m->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT)
				vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
			if (m->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT)
				vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
		} else {
			csum_flags |= RL_TDESC_CMD_IPCSUM;
			if (m->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT)
				csum_flags |= RL_TDESC_CMD_TCPCSUM;
			if (m->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT)
				csum_flags |= RL_TDESC_CMD_UDPCSUM;
		}
	}

	txq = &sc->rl_ldata.rl_txq[*idx];
	map = txq->txq_dmamap;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error) {
		/* XXX try to defrag if EFBIG? */
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * Short frames with IP checksum offload need an extra pad
	 * descriptor (hardware workaround on pre-V2 descriptors).
	 */
	nsegs = map->dm_nsegs;
	pad = 0;
	if ((sc->rl_flags & RL_FLAG_DESCV2) == 0 &&
	    m->m_pkthdr.len <= RL_IP4CSUMTX_PADLEN &&
	    (csum_flags & RL_TDESC_CMD_IPCSUM) != 0) {
		pad = 1;
		nsegs++;
	}

	if (nsegs > sc->rl_ldata.rl_tx_free - RL_NTXDESC_RSVD) {
		error = EFBIG;
		goto fail_unload;
	}

	/*
	 * Make sure that the caches are synchronized before we
	 * ask the chip to start DMA for the packet data.
	 */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		BUS_DMASYNC_PREWRITE);

	/*
	 * Set up hardware VLAN tagging. Note: vlan tag info must
	 * appear in all descriptors of a multi-descriptor
	 * transmission attempt.
	 */
#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		vlanctl |= swap16(m->m_pkthdr.ether_vtag) |
		    RL_TDESC_VLANCTL_TAG;
#endif

	/*
	 * Map the segment array into descriptors. Note that we set the
	 * start-of-frame and end-of-frame markers for either TX or RX, but
	 * they really only have meaning in the TX case. (In the RX case,
	 * it's the chip that tells us where packets begin and end.)
	 * We also keep track of the end of the ring and set the
	 * end-of-ring bits as needed, and we set the ownership bits
	 * in all except the very first descriptor. (The caller will
	 * set this descriptor later when it start transmission or
	 * reception.)
	 */
	curidx = startidx = sc->rl_ldata.rl_tx_nextfree;
	lastidx = -1;
	for (seg = 0; seg < map->dm_nsegs;
	    seg++, curidx = RL_NEXT_TX_DESC(sc, curidx)) {
		d = &sc->rl_ldata.rl_tx_list[curidx];
		RL_TXDESCSYNC(sc, curidx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		cmdstat = letoh32(d->rl_cmdstat);
		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREREAD);
		if (cmdstat & RL_TDESC_STAT_OWN) {
			/* Roll back every descriptor written so far. */
			printf("%s: tried to map busy TX descriptor\n",
			    sc->sc_dev.dv_xname);
			for (; seg > 0; seg --) {
				uidx = (curidx + RL_TX_DESC_CNT(sc) - seg) %
				    RL_TX_DESC_CNT(sc);
				sc->rl_ldata.rl_tx_list[uidx].rl_cmdstat = 0;
				RL_TXDESCSYNC(sc, uidx,
				    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			}
			error = ENOBUFS;
			goto fail_unload;
		}

		d->rl_vlanctl = htole32(vlanctl);
		re_set_bufaddr(d, map->dm_segs[seg].ds_addr);
		cmdstat = csum_flags | map->dm_segs[seg].ds_len;
		if (seg == 0)
			cmdstat |= RL_TDESC_CMD_SOF;
		else
			cmdstat |= RL_TDESC_CMD_OWN;
		if (curidx == (RL_TX_DESC_CNT(sc) - 1))
			cmdstat |= RL_TDESC_CMD_EOR;
		if (seg == nsegs - 1) {
			cmdstat |= RL_TDESC_CMD_EOF;
			lastidx = curidx;
		}
		d->rl_cmdstat = htole32(cmdstat);
		RL_TXDESCSYNC(sc, curidx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	if (pad) {
		/* Emit the extra pad descriptor for the short-frame case. */
		d = &sc->rl_ldata.rl_tx_list[curidx];
		d->rl_vlanctl = htole32(vlanctl);
		re_set_bufaddr(d, RL_TXPADDADDR(sc));
		cmdstat = csum_flags |
		    RL_TDESC_CMD_OWN | RL_TDESC_CMD_EOF |
		    (RL_IP4CSUMTX_PADLEN + 1 - m->m_pkthdr.len);
		if (curidx == (RL_TX_DESC_CNT(sc) - 1))
			cmdstat |= RL_TDESC_CMD_EOR;
		d->rl_cmdstat = htole32(cmdstat);
		RL_TXDESCSYNC(sc, curidx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		lastidx = curidx;
		curidx = RL_NEXT_TX_DESC(sc, curidx);
	}
	KASSERT(lastidx != -1);

	/* Transfer ownership of packet to the chip. */

	sc->rl_ldata.rl_tx_list[startidx].rl_cmdstat |=
	    htole32(RL_TDESC_CMD_OWN);
	RL_TXDESCSYNC(sc, startidx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* update info of TX queue and descriptors */
	txq->txq_mbuf = m;
	txq->txq_descidx = lastidx;
	txq->txq_nsegs = nsegs;

	sc->rl_ldata.rl_tx_free -= nsegs;
	sc->rl_ldata.rl_tx_nextfree = curidx;

	*idx = RL_NEXT_TXQ(sc, *idx);

	return (0);

fail_unload:
	bus_dmamap_unload(sc->sc_dmat, map);

	return (error);
}
1842 
1843 /*
1844  * Main transmit routine for C+ and gigE NICs.
1845  */
1846 
void
re_start(struct ifnet *ifp)
{
	struct rl_softc	*sc;
	int		idx, queued = 0;

	sc = ifp->if_softc;

	/* Bail out if we are already busy or have no link. */
	if (ifp->if_flags & IFF_OACTIVE)
		return;
	if ((sc->rl_flags & RL_FLAG_LINK) == 0)
		return;

	idx = sc->rl_ldata.rl_txq_prodidx;
	for (;;) {
		struct mbuf *m;
		int error;

		/*
		 * Peek at the packet first; it is only dequeued once
		 * re_encap() has accepted it, so a transient failure
		 * leaves it on the queue for the next attempt.
		 */
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/* Next queue slot still in flight: the TX ring is full. */
		if (sc->rl_ldata.rl_txq[idx].txq_mbuf != NULL) {
			KASSERT(idx == sc->rl_ldata.rl_txq_considx);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		error = re_encap(sc, m, &idx);
		/*
		 * EFBIG with a completely empty ring means this packet
		 * can never fit; drop it rather than stalling the queue.
		 */
		if (error == EFBIG &&
		    sc->rl_ldata.rl_tx_free == RL_TX_DESC_CNT(sc)) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}
		if (error) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		queued++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (queued == 0)
		return;

	sc->rl_ldata.rl_txq_prodidx = idx;

	/* Kick the transmitter. */
	CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
1913 
1914 int
1915 re_init(struct ifnet *ifp)
1916 {
1917 	struct rl_softc *sc = ifp->if_softc;
1918 	u_int16_t	cfg;
1919 	int		s;
1920 	union {
1921 		u_int32_t align_dummy;
1922 		u_char eaddr[ETHER_ADDR_LEN];
1923 	} eaddr;
1924 
1925 	s = splnet();
1926 
1927 	/*
1928 	 * Cancel pending I/O and free all RX/TX buffers.
1929 	 */
1930 	re_stop(ifp, 0);
1931 
1932 	/*
1933 	 * Enable C+ RX and TX mode, as well as RX checksum offload.
1934 	 * We must configure the C+ register before all others.
1935 	 */
1936 	cfg = RL_CPLUSCMD_TXENB | RL_CPLUSCMD_PCI_MRW;
1937 
1938 	if (ifp->if_capabilities & IFCAP_CSUM_IPv4)
1939 		cfg |= RL_CPLUSCMD_RXCSUM_ENB;
1940 
1941 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1942 		cfg |= RL_CPLUSCMD_VLANSTRIP;
1943 
1944 	if (sc->rl_flags & RL_FLAG_MACSTAT)
1945 		cfg |= RL_CPLUSCMD_MACSTAT_DIS;
1946 	else
1947 		cfg |= RL_CPLUSCMD_RXENB;
1948 
1949 	CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg);
1950 
1951 	/*
1952 	 * Init our MAC address.  Even though the chipset
1953 	 * documentation doesn't mention it, we need to enter "Config
1954 	 * register write enable" mode to modify the ID registers.
1955 	 */
1956 	bcopy(sc->sc_arpcom.ac_enaddr, eaddr.eaddr, ETHER_ADDR_LEN);
1957 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
1958 	CSR_WRITE_4(sc, RL_IDR4,
1959 	    htole32(*(u_int32_t *)(&eaddr.eaddr[4])));
1960 	CSR_WRITE_4(sc, RL_IDR0,
1961 	    htole32(*(u_int32_t *)(&eaddr.eaddr[0])));
1962 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1963 
1964 	/*
1965 	 * For C+ mode, initialize the RX descriptors and mbufs.
1966 	 */
1967 	re_rx_list_init(sc);
1968 	re_tx_list_init(sc);
1969 
1970 	/*
1971 	 * Load the addresses of the RX and TX lists into the chip.
1972 	 */
1973 	CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
1974 	    RL_ADDR_HI(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr));
1975 	CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
1976 	    RL_ADDR_LO(sc->rl_ldata.rl_rx_list_map->dm_segs[0].ds_addr));
1977 
1978 	CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
1979 	    RL_ADDR_HI(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr));
1980 	CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
1981 	    RL_ADDR_LO(sc->rl_ldata.rl_tx_list_map->dm_segs[0].ds_addr));
1982 
1983 	/*
1984 	 * Enable transmit and receive.
1985 	 */
1986 	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
1987 
1988 	/*
1989 	 * Set the initial TX and RX configuration.
1990 	 */
1991 	if (sc->rl_testmode) {
1992 		if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
1993 			CSR_WRITE_4(sc, RL_TXCFG,
1994 			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS);
1995 		else
1996 			CSR_WRITE_4(sc, RL_TXCFG,
1997 			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON);
1998 	} else
1999 		CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
2000 
2001 	CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16);
2002 
2003 	CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);
2004 
2005 	/* Program promiscuous mode and multicast filters. */
2006 	re_iff(sc);
2007 
2008 	/*
2009 	 * Enable interrupts.
2010 	 */
2011 	if (sc->rl_testmode)
2012 		CSR_WRITE_2(sc, RL_IMR, 0);
2013 	else
2014 		re_setup_intr(sc, 1, sc->rl_imtype);
2015 	CSR_WRITE_2(sc, RL_ISR, sc->rl_imtype);
2016 
2017 	/* Start RX/TX process. */
2018 	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
2019 #ifdef notdef
2020 	/* Enable receiver and transmitter. */
2021 	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
2022 #endif
2023 
2024 	/*
2025 	 * For 8169 gigE NICs, set the max allowed RX packet
2026 	 * size so we can receive jumbo frames.
2027 	 */
2028 	if (sc->sc_hwrev != RL_HWREV_8139CPLUS)
2029 		CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383);
2030 
2031 	if (sc->rl_testmode)
2032 		return (0);
2033 
2034 	mii_mediachg(&sc->sc_mii);
2035 
2036 	CSR_WRITE_1(sc, RL_CFG1, CSR_READ_1(sc, RL_CFG1) | RL_CFG1_DRVLOAD);
2037 
2038 	ifp->if_flags |= IFF_RUNNING;
2039 	ifp->if_flags &= ~IFF_OACTIVE;
2040 
2041 	splx(s);
2042 
2043 	sc->rl_flags &= ~RL_FLAG_LINK;
2044 
2045 	timeout_add_sec(&sc->timer_handle, 1);
2046 
2047 	return (0);
2048 }
2049 
2050 /*
2051  * Set media options.
2052  */
2053 int
2054 re_ifmedia_upd(struct ifnet *ifp)
2055 {
2056 	struct rl_softc	*sc;
2057 
2058 	sc = ifp->if_softc;
2059 
2060 	return (mii_mediachg(&sc->sc_mii));
2061 }
2062 
2063 /*
2064  * Report current media status.
2065  */
2066 void
2067 re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2068 {
2069 	struct rl_softc	*sc;
2070 
2071 	sc = ifp->if_softc;
2072 
2073 	mii_pollstat(&sc->sc_mii);
2074 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
2075 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
2076 }
2077 
/*
 * Handle socket ioctls for the interface.  Runs at splnet() for the
 * whole body; ENETRESET from any case (including ether_ioctl()) is
 * translated at the bottom into a multicast/promiscuous filter reload
 * via re_iff() when the interface is running.
 */
int
re_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct rl_softc	*sc = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int		s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		/* Assigning an address implies bringing the interface up. */
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			re_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif /* INET */
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * Already running: only the receive filter may
			 * need updating (e.g. PROMISC toggled), which the
			 * ENETRESET path below handles without a full
			 * reinit.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				re_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				re_stop(ifp, 1);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
	}

	/* Reprogram the RX filter instead of a full reset where possible. */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			re_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
2126 
2127 void
2128 re_watchdog(struct ifnet *ifp)
2129 {
2130 	struct rl_softc	*sc;
2131 	int	s;
2132 
2133 	sc = ifp->if_softc;
2134 	s = splnet();
2135 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
2136 	ifp->if_oerrors++;
2137 
2138 	re_txeof(sc);
2139 	re_rxeof(sc);
2140 
2141 	re_init(ifp);
2142 
2143 	splx(s);
2144 }
2145 
2146 /*
2147  * Stop the adapter and free any mbufs allocated to the
2148  * RX and TX lists.
2149  */
2150 void
2151 re_stop(struct ifnet *ifp, int disable)
2152 {
2153 	struct rl_softc *sc;
2154 	int	i;
2155 
2156 	sc = ifp->if_softc;
2157 
2158 	ifp->if_timer = 0;
2159 	sc->rl_flags &= ~(RL_FLAG_LINK|RL_FLAG_TIMERINTR);
2160 
2161 	timeout_del(&sc->timer_handle);
2162 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2163 
2164 	mii_down(&sc->sc_mii);
2165 
2166 	CSR_WRITE_1(sc, RL_COMMAND, 0x00);
2167 	CSR_WRITE_2(sc, RL_IMR, 0x0000);
2168 	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
2169 
2170 	if (sc->rl_head != NULL) {
2171 		m_freem(sc->rl_head);
2172 		sc->rl_head = sc->rl_tail = NULL;
2173 	}
2174 
2175 	/* Free the TX list buffers. */
2176 	for (i = 0; i < RL_TX_QLEN; i++) {
2177 		if (sc->rl_ldata.rl_txq[i].txq_mbuf != NULL) {
2178 			bus_dmamap_unload(sc->sc_dmat,
2179 			    sc->rl_ldata.rl_txq[i].txq_dmamap);
2180 			m_freem(sc->rl_ldata.rl_txq[i].txq_mbuf);
2181 			sc->rl_ldata.rl_txq[i].txq_mbuf = NULL;
2182 		}
2183 	}
2184 
2185 	/* Free the RX list buffers. */
2186 	for (i = 0; i < RL_RX_DESC_CNT; i++) {
2187 		if (sc->rl_ldata.rl_rxsoft[i].rxs_mbuf != NULL) {
2188 			bus_dmamap_unload(sc->sc_dmat,
2189 			    sc->rl_ldata.rl_rxsoft[i].rxs_dmamap);
2190 			m_freem(sc->rl_ldata.rl_rxsoft[i].rxs_mbuf);
2191 			sc->rl_ldata.rl_rxsoft[i].rxs_mbuf = NULL;
2192 		}
2193 	}
2194 }
2195 
2196 void
2197 re_setup_hw_im(struct rl_softc *sc)
2198 {
2199 	KASSERT(sc->rl_flags & RL_FLAG_HWIM);
2200 
2201 	/*
2202 	 * Interrupt moderation
2203 	 *
2204 	 * 0xABCD
2205 	 * A - unknown (maybe TX related)
2206 	 * B - TX timer (unit: 25us)
2207 	 * C - unknown (maybe RX related)
2208 	 * D - RX timer (unit: 25us)
2209 	 *
2210 	 *
2211 	 * re(4)'s interrupt moderation is actually controlled by
2212 	 * two variables, like most other NICs (bge, bnx etc.)
2213 	 * o  timer
2214 	 * o  number of packets [P]
2215 	 *
2216 	 * The logic relationship between these two variables is
2217 	 * similar to other NICs too:
2218 	 * if (timer expire || packets > [P])
2219 	 *     Interrupt is delivered
2220 	 *
2221 	 * Currently we only know how to set 'timer', but not
2222 	 * 'number of packets', which should be ~30, as far as I
2223 	 * tested (sink ~900Kpps, interrupt rate is 30KHz)
2224 	 */
2225 	CSR_WRITE_2(sc, RL_IM,
2226 		    RL_IM_RXTIME(sc->rl_rx_time) |
2227 		    RL_IM_TXTIME(sc->rl_tx_time) |
2228 		    RL_IM_MAGIC);
2229 }
2230 
2231 void
2232 re_disable_hw_im(struct rl_softc *sc)
2233 {
2234 	if (sc->rl_flags & RL_FLAG_HWIM)
2235 		CSR_WRITE_2(sc, RL_IM, 0);
2236 }
2237 
2238 void
2239 re_setup_sim_im(struct rl_softc *sc)
2240 {
2241 	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
2242 		CSR_WRITE_4(sc, RL_TIMERINT, 0x400); /* XXX */
2243 	else {
2244 		u_int32_t ticks;
2245 
2246 		/*
2247 		 * Datasheet says tick decreases at bus speed,
2248 		 * but it seems the clock runs a little bit
2249 		 * faster, so we do some compensation here.
2250 		 */
2251 		ticks = (sc->rl_sim_time * sc->rl_bus_speed * 8) / 5;
2252 		CSR_WRITE_4(sc, RL_TIMERINT_8169, ticks);
2253 	}
2254 	CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* reload */
2255 	sc->rl_flags |= RL_FLAG_TIMERINTR;
2256 }
2257 
2258 void
2259 re_disable_sim_im(struct rl_softc *sc)
2260 {
2261 	if (sc->sc_hwrev == RL_HWREV_8139CPLUS)
2262 		CSR_WRITE_4(sc, RL_TIMERINT, 0);
2263 	else
2264 		CSR_WRITE_4(sc, RL_TIMERINT_8169, 0);
2265 	sc->rl_flags &= ~RL_FLAG_TIMERINTR;
2266 }
2267 
2268 void
2269 re_config_imtype(struct rl_softc *sc, int imtype)
2270 {
2271 	switch (imtype) {
2272 	case RL_IMTYPE_HW:
2273 		KASSERT(sc->rl_flags & RL_FLAG_HWIM);
2274 		/* FALLTHROUGH */
2275 	case RL_IMTYPE_NONE:
2276 		sc->rl_intrs = RL_INTRS_CPLUS;
2277 		sc->rl_rx_ack = RL_ISR_RX_OK | RL_ISR_FIFO_OFLOW |
2278 				RL_ISR_RX_OVERRUN;
2279 		sc->rl_tx_ack = RL_ISR_TX_OK;
2280 		break;
2281 
2282 	case RL_IMTYPE_SIM:
2283 		sc->rl_intrs = RL_INTRS_TIMER;
2284 		sc->rl_rx_ack = RL_ISR_TIMEOUT_EXPIRED;
2285 		sc->rl_tx_ack = RL_ISR_TIMEOUT_EXPIRED;
2286 		break;
2287 
2288 	default:
2289 		panic("%s: unknown imtype %d\n",
2290 		      sc->sc_dev.dv_xname, imtype);
2291 	}
2292 }
2293 
2294 void
2295 re_setup_intr(struct rl_softc *sc, int enable_intrs, int imtype)
2296 {
2297 	re_config_imtype(sc, imtype);
2298 
2299 	if (enable_intrs)
2300 		CSR_WRITE_2(sc, RL_IMR, sc->rl_intrs);
2301 	else
2302 		CSR_WRITE_2(sc, RL_IMR, 0);
2303 
2304 	switch (imtype) {
2305 	case RL_IMTYPE_NONE:
2306 		re_disable_sim_im(sc);
2307 		re_disable_hw_im(sc);
2308 		break;
2309 
2310 	case RL_IMTYPE_HW:
2311 		KASSERT(sc->rl_flags & RL_FLAG_HWIM);
2312 		re_disable_sim_im(sc);
2313 		re_setup_hw_im(sc);
2314 		break;
2315 
2316 	case RL_IMTYPE_SIM:
2317 		re_disable_hw_im(sc);
2318 		re_setup_sim_im(sc);
2319 		break;
2320 
2321 	default:
2322 		panic("%s: unknown imtype %d\n",
2323 		      sc->sc_dev.dv_xname, imtype);
2324 	}
2325 }
2326