xref: /openbsd-src/sys/dev/pci/if_nge.c (revision f2da64fbbbf1b03f09f390ab01267c93dfd77c4c)
1 /*	$OpenBSD: if_nge.c,v 1.91 2016/04/13 10:34:32 mpi Exp $	*/
2 /*
3  * Copyright (c) 2001 Wind River Systems
4  * Copyright (c) 1997, 1998, 1999, 2000, 2001
5  *	Bill Paul <wpaul@bsdi.com>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: if_nge.c,v 1.35 2002/08/08 18:33:28 ambrisko Exp $
35  */
36 
37 /*
38  * National Semiconductor DP83820/DP83821 gigabit ethernet driver
39  * for FreeBSD. Datasheets are available from:
40  *
41  * http://www.national.com/ds/DP/DP83820.pdf
42  * http://www.national.com/ds/DP/DP83821.pdf
43  *
44  * These chips are used on several low cost gigabit ethernet NICs
45  * sold by D-Link, Addtron, SMC and Asante. Both parts are
46  * virtually the same, except the 83820 is a 64-bit/32-bit part,
47  * while the 83821 is 32-bit only.
48  *
49  * Many cards also use National gigE transceivers, such as the
50  * DP83891, DP83861 and DP83862 gigPHYTER parts. The DP83861 datasheet
51  * contains a full register description that applies to all of these
52  * components:
53  *
54  * http://www.national.com/ds/DP/DP83861.pdf
55  *
56  * Written by Bill Paul <wpaul@bsdi.com>
57  * BSDi Open Source Solutions
58  */
59 
60 /*
61  * The NatSemi DP83820 and 83821 controllers are enhanced versions
62  * of the NatSemi MacPHYTER 10/100 devices. They support 10, 100
63  * and 1000Mbps speeds with 1000baseX (ten bit interface), MII and GMII
64  * ports. Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP
65  * hardware checksum offload (IPv4 only), VLAN tagging and filtering,
66  * priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern
67  * matching buffers, one perfect address filter buffer and interrupt
68  * moderation. The 83820 supports both 64-bit and 32-bit addressing
69  * and data transfers: the 64-bit support can be toggled on or off
70  * via software. This affects the size of certain fields in the DMA
71  * descriptors.
72  *
73  * There are two bugs/misfeatures in the 83820/83821 that I have
74  * discovered so far:
75  *
76  * - Receive buffers must be aligned on 64-bit boundaries, which means
77  *   you must resort to copying data in order to fix up the payload
78  *   alignment.
79  *
80  * - In order to transmit jumbo frames larger than 8170 bytes, you have
81  *   to turn off transmit checksum offloading, because the chip can't
82  *   compute the checksum on an outgoing frame unless it fits entirely
83  *   within the TX FIFO, which is only 8192 bytes in size. If you have
84  *   TX checksum offload enabled and you attempt to transmit a
85  *   frame larger than 8170 bytes, the transmitter will wedge.
86  *
87  * To work around the latter problem, TX checksum offload is disabled
88  * if the user selects an MTU larger than 8152 (8170 - 18: header + CRC).
89  */
90 
91 #include "bpfilter.h"
92 #include "vlan.h"
93 
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/sockio.h>
97 #include <sys/mbuf.h>
98 #include <sys/malloc.h>
99 #include <sys/kernel.h>
100 #include <sys/device.h>
101 #include <sys/socket.h>
102 
103 #include <net/if.h>
104 #include <net/if_media.h>
105 
106 #include <netinet/in.h>
107 #include <netinet/if_ether.h>
108 
109 #if NBPFILTER > 0
110 #include <net/bpf.h>
111 #endif
112 
113 #include <uvm/uvm_extern.h>              /* for vtophys */
114 #define	VTOPHYS(v)	vtophys((vaddr_t)(v))
115 
116 #include <dev/pci/pcireg.h>
117 #include <dev/pci/pcivar.h>
118 #include <dev/pci/pcidevs.h>
119 
120 #include <dev/mii/mii.h>
121 #include <dev/mii/miivar.h>
122 
123 #define NGE_USEIOSPACE
124 
125 #include <dev/pci/if_ngereg.h>
126 
127 int nge_probe(struct device *, void *, void *);
128 void nge_attach(struct device *, struct device *, void *);
129 
130 int nge_newbuf(struct nge_softc *, struct nge_desc *,
131 			     struct mbuf *);
132 int nge_encap(struct nge_softc *, struct mbuf *, u_int32_t *);
133 void nge_rxeof(struct nge_softc *);
134 void nge_txeof(struct nge_softc *);
135 int nge_intr(void *);
136 void nge_tick(void *);
137 void nge_start(struct ifnet *);
138 int nge_ioctl(struct ifnet *, u_long, caddr_t);
139 void nge_init(void *);
140 void nge_stop(struct nge_softc *);
141 void nge_watchdog(struct ifnet *);
142 int nge_ifmedia_mii_upd(struct ifnet *);
143 void nge_ifmedia_mii_sts(struct ifnet *, struct ifmediareq *);
144 int nge_ifmedia_tbi_upd(struct ifnet *);
145 void nge_ifmedia_tbi_sts(struct ifnet *, struct ifmediareq *);
146 
147 void nge_delay(struct nge_softc *);
148 void nge_eeprom_idle(struct nge_softc *);
149 void nge_eeprom_putbyte(struct nge_softc *, int);
150 void nge_eeprom_getword(struct nge_softc *, int, u_int16_t *);
151 void nge_read_eeprom(struct nge_softc *, caddr_t, int, int, int);
152 
153 void nge_mii_sync(struct nge_softc *);
154 void nge_mii_send(struct nge_softc *, u_int32_t, int);
155 int nge_mii_readreg(struct nge_softc *, struct nge_mii_frame *);
156 int nge_mii_writereg(struct nge_softc *, struct nge_mii_frame *);
157 
158 int nge_miibus_readreg(struct device *, int, int);
159 void nge_miibus_writereg(struct device *, int, int, int);
160 void nge_miibus_statchg(struct device *);
161 
162 void nge_setmulti(struct nge_softc *);
163 void nge_reset(struct nge_softc *);
164 int nge_list_rx_init(struct nge_softc *);
165 int nge_list_tx_init(struct nge_softc *);
166 
167 #ifdef NGE_USEIOSPACE
168 #define NGE_RES			SYS_RES_IOPORT
169 #define NGE_RID			NGE_PCI_LOIO
170 #else
171 #define NGE_RES			SYS_RES_MEMORY
172 #define NGE_RID			NGE_PCI_LOMEM
173 #endif
174 
175 #ifdef NGE_DEBUG
176 #define DPRINTF(x)	if (ngedebug) printf x
177 #define DPRINTFN(n,x)	if (ngedebug >= (n)) printf x
178 int	ngedebug = 0;
179 #else
180 #define DPRINTF(x)
181 #define DPRINTFN(n,x)
182 #endif
183 
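/*
 * Register access helpers: NGE_SETBIT/NGE_CLRBIT do read-modify-write
 * updates of a CSR, and SIO_SET/SIO_CLR toggle bits in the MEAR register
 * for the EEPROM and MII bit-bang routines below (SIO_SET/SIO_CLR expect
 * a local variable named 'sc' to be in scope).
 */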
184 #define NGE_SETBIT(sc, reg, x)				\
185 	CSR_WRITE_4(sc, reg,				\
186 		CSR_READ_4(sc, reg) | (x))
187 
188 #define NGE_CLRBIT(sc, reg, x)				\
189 	CSR_WRITE_4(sc, reg,				\
190 		CSR_READ_4(sc, reg) & ~(x))
191 
192 #define SIO_SET(x)					\
193 	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) | (x))
194 
195 #define SIO_CLR(x)					\
196 	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) & ~(x))
197 
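/*
 * Short busy-wait used by the EEPROM bit-bang code: (300 / 33) + 1 reads
 * of the CSR register, each of which must complete across the PCI bus
 * before the next one is issued.
 */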
198 void
199 nge_delay(struct nge_softc *sc)
200 {
201 	int			idx;
202 
203 	for (idx = (300 / 33) + 1; idx > 0; idx--)
204 		CSR_READ_4(sc, NGE_CSR);
205 }
206 
207 void
208 nge_eeprom_idle(struct nge_softc *sc)
209 {
210 	int		i;
211 
212 	SIO_SET(NGE_MEAR_EE_CSEL);
213 	nge_delay(sc);
214 	SIO_SET(NGE_MEAR_EE_CLK);
215 	nge_delay(sc);
216 
217 	for (i = 0; i < 25; i++) {
218 		SIO_CLR(NGE_MEAR_EE_CLK);
219 		nge_delay(sc);
220 		SIO_SET(NGE_MEAR_EE_CLK);
221 		nge_delay(sc);
222 	}
223 
224 	SIO_CLR(NGE_MEAR_EE_CLK);
225 	nge_delay(sc);
226 	SIO_CLR(NGE_MEAR_EE_CSEL);
227 	nge_delay(sc);
228 	CSR_WRITE_4(sc, NGE_MEAR, 0x00000000);
229 }
230 
231 /*
232  * Send a read command and address to the EEPROM, check for ACK.
233  */
234 void
235 nge_eeprom_putbyte(struct nge_softc *sc, int addr)
236 {
237 	int			d, i;
238 
239 	d = addr | NGE_EECMD_READ;
240 
241 	/*
242 	 * Feed in each bit and strobe the clock.
243 	 */
244 	for (i = 0x400; i; i >>= 1) {
245 		if (d & i) {
246 			SIO_SET(NGE_MEAR_EE_DIN);
247 		} else {
248 			SIO_CLR(NGE_MEAR_EE_DIN);
249 		}
250 		nge_delay(sc);
251 		SIO_SET(NGE_MEAR_EE_CLK);
252 		nge_delay(sc);
253 		SIO_CLR(NGE_MEAR_EE_CLK);
254 		nge_delay(sc);
255 	}
256 }
257 
258 /*
259  * Read a word of data stored in the EEPROM at address 'addr.'
260  */
261 void
262 nge_eeprom_getword(struct nge_softc *sc, int addr, u_int16_t *dest)
263 {
264 	int			i;
265 	u_int16_t		word = 0;
266 
267 	/* Force EEPROM to idle state. */
268 	nge_eeprom_idle(sc);
269 
270 	/* Enter EEPROM access mode. */
271 	nge_delay(sc);
272 	SIO_CLR(NGE_MEAR_EE_CLK);
273 	nge_delay(sc);
274 	SIO_SET(NGE_MEAR_EE_CSEL);
275 	nge_delay(sc);
276 
277 	/*
278 	 * Send address of word we want to read.
279 	 */
280 	nge_eeprom_putbyte(sc, addr);
281 
282 	/*
283 	 * Start reading bits from EEPROM.
284 	 */
285 	for (i = 0x8000; i; i >>= 1) {
286 		SIO_SET(NGE_MEAR_EE_CLK);
287 		nge_delay(sc);
288 		if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_EE_DOUT)
289 			word |= i;
290 		nge_delay(sc);
291 		SIO_CLR(NGE_MEAR_EE_CLK);
292 		nge_delay(sc);
293 	}
294 
295 	/* Turn off EEPROM access mode. */
296 	nge_eeprom_idle(sc);
297 
298 	*dest = word;
299 }
300 
301 /*
302  * Read a sequence of words from the EEPROM.
303  */
304 void
305 nge_read_eeprom(struct nge_softc *sc, caddr_t dest, int off, int cnt, int swap)
306 {
307 	int			i;
308 	u_int16_t		word = 0, *ptr;
309 
310 	for (i = 0; i < cnt; i++) {
311 		nge_eeprom_getword(sc, off + i, &word);
312 		ptr = (u_int16_t *)(dest + (i * 2));
313 		if (swap)
314 			*ptr = ntohs(word);
315 		else
316 			*ptr = word;
317 	}
318 }
319 
320 /*
321  * Sync the PHYs by setting the data bit and strobing the clock 32 times.
322  */
323 void
324 nge_mii_sync(struct nge_softc *sc)
325 {
326 	int			i;
327 
328 	SIO_SET(NGE_MEAR_MII_DIR|NGE_MEAR_MII_DATA);
329 
330 	for (i = 0; i < 32; i++) {
331 		SIO_SET(NGE_MEAR_MII_CLK);
332 		DELAY(1);
333 		SIO_CLR(NGE_MEAR_MII_CLK);
334 		DELAY(1);
335 	}
336 }
337 
338 /*
339  * Clock a series of bits through the MII.
340  */
341 void
342 nge_mii_send(struct nge_softc *sc, u_int32_t bits, int cnt)
343 {
344 	int			i;
345 
346 	SIO_CLR(NGE_MEAR_MII_CLK);
347 
348 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
349 		if (bits & i) {
350 			SIO_SET(NGE_MEAR_MII_DATA);
351 		} else {
352 			SIO_CLR(NGE_MEAR_MII_DATA);
353 		}
354 		DELAY(1);
355 		SIO_CLR(NGE_MEAR_MII_CLK);
356 		DELAY(1);
357 		SIO_SET(NGE_MEAR_MII_CLK);
358 	}
359 }
360 
361 /*
362  * Read an PHY register through the MII.
363  */
364 int
365 nge_mii_readreg(struct nge_softc *sc, struct nge_mii_frame *frame)
366 {
367 	int			i, ack, s;
368 
369 	s = splnet();
370 
371 	/*
372 	 * Set up frame for RX.
373 	 */
374 	frame->mii_stdelim = NGE_MII_STARTDELIM;
375 	frame->mii_opcode = NGE_MII_READOP;
376 	frame->mii_turnaround = 0;
377 	frame->mii_data = 0;
378 
379 	CSR_WRITE_4(sc, NGE_MEAR, 0);
380 
381 	/*
382 	 * Turn on data xmit.
383 	 */
384 	SIO_SET(NGE_MEAR_MII_DIR);
385 
386 	nge_mii_sync(sc);
387 
388 	/*
389 	 * Send command/address info.
390 	 */
391 	nge_mii_send(sc, frame->mii_stdelim, 2);
392 	nge_mii_send(sc, frame->mii_opcode, 2);
393 	nge_mii_send(sc, frame->mii_phyaddr, 5);
394 	nge_mii_send(sc, frame->mii_regaddr, 5);
395 
396 	/* Idle bit */
397 	SIO_CLR((NGE_MEAR_MII_CLK|NGE_MEAR_MII_DATA));
398 	DELAY(1);
399 	SIO_SET(NGE_MEAR_MII_CLK);
400 	DELAY(1);
401 
402 	/* Turn off xmit. */
403 	SIO_CLR(NGE_MEAR_MII_DIR);
404 	/* Check for ack */
405 	SIO_CLR(NGE_MEAR_MII_CLK);
406 	DELAY(1);
407 	ack = CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA;
408 	SIO_SET(NGE_MEAR_MII_CLK);
409 	DELAY(1);
410 
411 	/*
412 	 * Now try reading data bits. If the ack failed, we still
413 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
414 	 */
415 	if (ack) {
416 		for (i = 0; i < 16; i++) {
417 			SIO_CLR(NGE_MEAR_MII_CLK);
418 			DELAY(1);
419 			SIO_SET(NGE_MEAR_MII_CLK);
420 			DELAY(1);
421 		}
422 		goto fail;
423 	}
424 
425 	for (i = 0x8000; i; i >>= 1) {
426 		SIO_CLR(NGE_MEAR_MII_CLK);
427 		DELAY(1);
428 		if (!ack) {
429 			if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA)
430 				frame->mii_data |= i;
431 			DELAY(1);
432 		}
433 		SIO_SET(NGE_MEAR_MII_CLK);
434 		DELAY(1);
435 	}
436 
437 fail:
438 
439 	SIO_CLR(NGE_MEAR_MII_CLK);
440 	DELAY(1);
441 	SIO_SET(NGE_MEAR_MII_CLK);
442 	DELAY(1);
443 
444 	splx(s);
445 
446 	if (ack)
447 		return(1);
448 	return(0);
449 }
450 
451 /*
452  * Write to a PHY register through the MII.
453  */
454 int
455 nge_mii_writereg(struct nge_softc *sc, struct nge_mii_frame *frame)
456 {
457 	int			s;
458 
459 	s = splnet();
460 	/*
461 	 * Set up frame for TX.
462 	 */
463 
464 	frame->mii_stdelim = NGE_MII_STARTDELIM;
465 	frame->mii_opcode = NGE_MII_WRITEOP;
466 	frame->mii_turnaround = NGE_MII_TURNAROUND;
467 
468 	/*
469 	 * Turn on data output.
470 	 */
471 	SIO_SET(NGE_MEAR_MII_DIR);
472 
473 	nge_mii_sync(sc);
474 
475 	nge_mii_send(sc, frame->mii_stdelim, 2);
476 	nge_mii_send(sc, frame->mii_opcode, 2);
477 	nge_mii_send(sc, frame->mii_phyaddr, 5);
478 	nge_mii_send(sc, frame->mii_regaddr, 5);
479 	nge_mii_send(sc, frame->mii_turnaround, 2);
480 	nge_mii_send(sc, frame->mii_data, 16);
481 
482 	/* Idle bit. */
483 	SIO_SET(NGE_MEAR_MII_CLK);
484 	DELAY(1);
485 	SIO_CLR(NGE_MEAR_MII_CLK);
486 	DELAY(1);
487 
488 	/*
489 	 * Turn off xmit.
490 	 */
491 	SIO_CLR(NGE_MEAR_MII_DIR);
492 
493 	splx(s);
494 
495 	return(0);
496 }
497 
498 int
499 nge_miibus_readreg(struct device *dev, int phy, int reg)
500 {
501 	struct nge_softc	*sc = (struct nge_softc *)dev;
502 	struct nge_mii_frame	frame;
503 
504 	DPRINTFN(9, ("%s: nge_miibus_readreg\n", sc->sc_dv.dv_xname));
505 
506 	bzero(&frame, sizeof(frame));
507 
508 	frame.mii_phyaddr = phy;
509 	frame.mii_regaddr = reg;
510 	nge_mii_readreg(sc, &frame);
511 
512 	return(frame.mii_data);
513 }
514 
515 void
516 nge_miibus_writereg(struct device *dev, int phy, int reg, int data)
517 {
518 	struct nge_softc	*sc = (struct nge_softc *)dev;
519 	struct nge_mii_frame	frame;
520 
521 
522 	DPRINTFN(9, ("%s: nge_miibus_writereg\n", sc->sc_dv.dv_xname));
523 
524 	bzero(&frame, sizeof(frame));
525 
526 	frame.mii_phyaddr = phy;
527 	frame.mii_regaddr = reg;
528 	frame.mii_data = data;
529 	nge_mii_writereg(sc, &frame);
530 }
531 
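/*
 * MII statchg callback: when the negotiated media changes, update the
 * TX/RX duplex settings and the gigabit mode bit to match.
 */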
532 void
533 nge_miibus_statchg(struct device *dev)
534 {
535 	struct nge_softc	*sc = (struct nge_softc *)dev;
536 	struct mii_data		*mii = &sc->nge_mii;
537 	u_int32_t		txcfg, rxcfg;
538 
539 	txcfg = CSR_READ_4(sc, NGE_TX_CFG);
540 	rxcfg = CSR_READ_4(sc, NGE_RX_CFG);
541 
542 	DPRINTFN(4, ("%s: nge_miibus_statchg txcfg=%#x, rxcfg=%#x\n",
543 		     sc->sc_dv.dv_xname, txcfg, rxcfg));
544 
545 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
546 		txcfg |= (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
547 		rxcfg |= (NGE_RXCFG_RX_FDX);
548 	} else {
549 		txcfg &= ~(NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
550 		rxcfg &= ~(NGE_RXCFG_RX_FDX);
551 	}
552 
553 	txcfg |= NGE_TXCFG_AUTOPAD;
554 
555 	CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
556 	CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);
557 
558 	/* If we have a 1000Mbps link, set the mode_1000 bit. */
559 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
560 		NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
561 	else
562 		NGE_CLRBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
563 }
564 
565 void
566 nge_setmulti(struct nge_softc *sc)
567 {
568 	struct arpcom		*ac = &sc->arpcom;
569 	struct ifnet		*ifp = &ac->ac_if;
570 	struct ether_multi      *enm;
571 	struct ether_multistep  step;
572 	u_int32_t		h = 0, i, filtsave;
573 	int			bit, index;
574 
575 	if (ac->ac_multirangecnt > 0)
576 		ifp->if_flags |= IFF_ALLMULTI;
577 
578 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
579 		NGE_CLRBIT(sc, NGE_RXFILT_CTL,
580 		    NGE_RXFILTCTL_MCHASH|NGE_RXFILTCTL_UCHASH);
581 		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLMULTI);
582 		return;
583 	}
584 
585 	/*
586 	 * We have to explicitly enable the multicast hash table
587 	 * on the NatSemi chip if we want to use it, which we do.
588 	 * We also have to tell it that we don't want to use the
589 	 * hash table for matching unicast addresses.
590 	 */
591 	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_MCHASH);
592 	NGE_CLRBIT(sc, NGE_RXFILT_CTL,
593 	    NGE_RXFILTCTL_ALLMULTI|NGE_RXFILTCTL_UCHASH);
594 
595 	filtsave = CSR_READ_4(sc, NGE_RXFILT_CTL);
596 
597 	/* first, zot all the existing hash bits */
598 	for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) {
599 		CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i);
600 		CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0);
601 	}
602 
603 	/*
604 	 * From the 11 bits returned by the crc routine, the top 7
605 	 * bits represent the 16-bit word in the mcast hash table
606 	 * that needs to be updated, and the lower 4 bits represent
607  * which bit within that word needs to be set.
608 	 */
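	/*
	 * For example, h = 0x2A5 selects filter word 0x2A (written via
	 * NGE_FILTADDR_MCAST_LO + 0x54) and sets bit 5 within that word.
	 */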
609 	ETHER_FIRST_MULTI(step, ac, enm);
610 	while (enm != NULL) {
611 		h = (ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 21) &
612 		    0x00000FFF;
613 		index = (h >> 4) & 0x7F;
614 		bit = h & 0xF;
615 		CSR_WRITE_4(sc, NGE_RXFILT_CTL,
616 		    NGE_FILTADDR_MCAST_LO + (index * 2));
617 		NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit));
618 		ETHER_NEXT_MULTI(step, enm);
619 	}
620 
621 	CSR_WRITE_4(sc, NGE_RXFILT_CTL, filtsave);
622 }
623 
624 void
625 nge_reset(struct nge_softc *sc)
626 {
627 	int			i;
628 
629 	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET);
630 
631 	for (i = 0; i < NGE_TIMEOUT; i++) {
632 		if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET))
633 			break;
634 	}
635 
636 	if (i == NGE_TIMEOUT)
637 		printf("%s: reset never completed\n", sc->sc_dv.dv_xname);
638 
639 	/* Wait a little while for the chip to get its brains in order. */
640 	DELAY(1000);
641 
642 	/*
643  * If this is a NatSemi chip, make sure to clear
644 	 * PME mode.
645 	 */
646 	CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS);
647 	CSR_WRITE_4(sc, NGE_CLKRUN, 0);
648 }
649 
650 /*
651  * Probe for a NatSemi chip. Check the PCI vendor and device
652  * IDs against our list and return a device name if we find a match.
653  */
654 int
655 nge_probe(struct device *parent, void *match, void *aux)
656 {
657 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
658 
659 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NS &&
660 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NS_DP83820)
661 		return (1);
662 
663 	return (0);
664 }
665 
666 /*
667  * Attach the interface. Allocate softc structures, do ifmedia
668  * setup and ethernet/BPF attach.
669  */
670 void
671 nge_attach(struct device *parent, struct device *self, void *aux)
672 {
673 	struct nge_softc	*sc = (struct nge_softc *)self;
674 	struct pci_attach_args	*pa = aux;
675 	pci_chipset_tag_t	pc = pa->pa_pc;
676 	pci_intr_handle_t	ih;
677 	const char		*intrstr = NULL;
678 	bus_size_t		size;
679 	bus_dma_segment_t	seg;
680 	bus_dmamap_t		dmamap;
681 	int			rseg;
682 	u_char			eaddr[ETHER_ADDR_LEN];
683 #ifndef NGE_USEIOSPACE
684 	pcireg_t		memtype;
685 #endif
686 	struct ifnet		*ifp;
687 	caddr_t			kva;
688 
689 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
690 
691 	/*
692 	 * Map control/status registers.
693 	 */
694 	DPRINTFN(5, ("%s: map control/status regs\n", sc->sc_dv.dv_xname));
695 
696 #ifdef NGE_USEIOSPACE
697 	DPRINTFN(5, ("%s: pci_mapreg_map\n", sc->sc_dv.dv_xname));
698 	if (pci_mapreg_map(pa, NGE_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
699 	    &sc->nge_btag, &sc->nge_bhandle, NULL, &size, 0)) {
700 		printf(": can't map i/o space\n");
701 		return;
702 	}
703 #else
704 	DPRINTFN(5, ("%s: pci_mapreg_map\n", sc->sc_dv.dv_xname));
705 	memtype = pci_mapreg_type(pc, pa->pa_tag, NGE_PCI_LOMEM);
706 	if (pci_mapreg_map(pa, NGE_PCI_LOMEM, memtype, 0, &sc->nge_btag,
707 	    &sc->nge_bhandle, NULL, &size, 0)) {
708 		printf(": can't map mem space\n");
709 		return;
710 	}
711 #endif
712 
713 	/* Disable all interrupts */
714 	CSR_WRITE_4(sc, NGE_IER, 0);
715 
716 	DPRINTFN(5, ("%s: pci_intr_map\n", sc->sc_dv.dv_xname));
717 	if (pci_intr_map(pa, &ih)) {
718 		printf(": couldn't map interrupt\n");
719 		goto fail_1;
720 	}
721 
722 	DPRINTFN(5, ("%s: pci_intr_string\n", sc->sc_dv.dv_xname));
723 	intrstr = pci_intr_string(pc, ih);
724 	DPRINTFN(5, ("%s: pci_intr_establish\n", sc->sc_dv.dv_xname));
725 	sc->nge_intrhand = pci_intr_establish(pc, ih, IPL_NET, nge_intr, sc,
726 					      sc->sc_dv.dv_xname);
727 	if (sc->nge_intrhand == NULL) {
728 		printf(": couldn't establish interrupt");
729 		if (intrstr != NULL)
730 			printf(" at %s", intrstr);
731 		printf("\n");
732 		goto fail_1;
733 	}
734 	printf(": %s", intrstr);
735 
736 	/* Reset the adapter. */
737 	DPRINTFN(5, ("%s: nge_reset\n", sc->sc_dv.dv_xname));
738 	nge_reset(sc);
739 
740 	/*
741 	 * Get station address from the EEPROM.
742 	 */
743 	DPRINTFN(5, ("%s: nge_read_eeprom\n", sc->sc_dv.dv_xname));
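	/*
	 * The three station-address words are stored back to front: the
	 * word at NGE_EE_NODEADDR supplies the last two bytes of the
	 * address, so fill eaddr[] from the end toward the start.
	 */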
744 	nge_read_eeprom(sc, (caddr_t)&eaddr[4], NGE_EE_NODEADDR, 1, 0);
745 	nge_read_eeprom(sc, (caddr_t)&eaddr[2], NGE_EE_NODEADDR + 1, 1, 0);
746 	nge_read_eeprom(sc, (caddr_t)&eaddr[0], NGE_EE_NODEADDR + 2, 1, 0);
747 
748 	/*
749 	 * A NatSemi chip was detected. Inform the world.
750 	 */
751 	printf(", address %s\n", ether_sprintf(eaddr));
752 
753 	bcopy(eaddr, &sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
754 
755 	sc->sc_dmatag = pa->pa_dmat;
756 	DPRINTFN(5, ("%s: bus_dmamem_alloc\n", sc->sc_dv.dv_xname));
757 	if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct nge_list_data),
758 			     PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT |
759 			     BUS_DMA_ZERO)) {
760 		printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname);
761 		goto fail_2;
762 	}
763 	DPRINTFN(5, ("%s: bus_dmamem_map\n", sc->sc_dv.dv_xname));
764 	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
765 			   sizeof(struct nge_list_data), &kva,
766 			   BUS_DMA_NOWAIT)) {
767 		printf("%s: can't map dma buffers (%zd bytes)\n",
768 		       sc->sc_dv.dv_xname, sizeof(struct nge_list_data));
769 		goto fail_3;
770 	}
771 	DPRINTFN(5, ("%s: bus_dmamap_create\n", sc->sc_dv.dv_xname));
772 	if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct nge_list_data), 1,
773 			      sizeof(struct nge_list_data), 0,
774 			      BUS_DMA_NOWAIT, &dmamap)) {
775 		printf("%s: can't create dma map\n", sc->sc_dv.dv_xname);
776 		goto fail_4;
777 	}
778 	DPRINTFN(5, ("%s: bus_dmamap_load\n", sc->sc_dv.dv_xname));
779 	if (bus_dmamap_load(sc->sc_dmatag, dmamap, kva,
780 			    sizeof(struct nge_list_data), NULL,
781 			    BUS_DMA_NOWAIT)) {
782 		goto fail_5;
783 	}
784 
785 	DPRINTFN(5, ("%s: bzero\n", sc->sc_dv.dv_xname));
786 	sc->nge_ldata = (struct nge_list_data *)kva;
787 
788 	ifp = &sc->arpcom.ac_if;
789 	ifp->if_softc = sc;
790 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
791 	ifp->if_ioctl = nge_ioctl;
792 	ifp->if_start = nge_start;
793 	ifp->if_watchdog = nge_watchdog;
794 	ifp->if_hardmtu = NGE_JUMBO_MTU;
795 	IFQ_SET_MAXLEN(&ifp->if_snd, NGE_TX_LIST_CNT - 1);
796 	DPRINTFN(5, ("%s: bcopy\n", sc->sc_dv.dv_xname));
797 	bcopy(sc->sc_dv.dv_xname, ifp->if_xname, IFNAMSIZ);
798 
799 	ifp->if_capabilities = IFCAP_VLAN_MTU;
800 
801 #if NVLAN > 0
802 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
803 #endif
804 
805 	/*
806 	 * Do MII setup.
807 	 */
808 	DPRINTFN(5, ("%s: mii setup\n", sc->sc_dv.dv_xname));
809 	if (CSR_READ_4(sc, NGE_CFG) & NGE_CFG_TBI_EN) {
810 		DPRINTFN(5, ("%s: TBI mode\n", sc->sc_dv.dv_xname));
811 		sc->nge_tbi = 1;
812 
813 		ifmedia_init(&sc->nge_ifmedia, 0, nge_ifmedia_tbi_upd,
814 			     nge_ifmedia_tbi_sts);
815 
816 		ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_NONE, 0, NULL);
817 		ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
818 		ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
819 			    0, NULL);
820 		ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
821 
822 		ifmedia_set(&sc->nge_ifmedia, IFM_ETHER|IFM_AUTO);
823 
824 		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
825 			    | NGE_GPIO_GP4_OUT
826 			    | NGE_GPIO_GP1_OUTENB | NGE_GPIO_GP2_OUTENB
827 			    | NGE_GPIO_GP3_OUTENB | NGE_GPIO_GP4_OUTENB
828 			    | NGE_GPIO_GP5_OUTENB);
829 
830 		NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
831 	} else {
832 		sc->nge_mii.mii_ifp = ifp;
833 		sc->nge_mii.mii_readreg = nge_miibus_readreg;
834 		sc->nge_mii.mii_writereg = nge_miibus_writereg;
835 		sc->nge_mii.mii_statchg = nge_miibus_statchg;
836 
837 		ifmedia_init(&sc->nge_mii.mii_media, 0, nge_ifmedia_mii_upd,
838 			     nge_ifmedia_mii_sts);
839 		mii_attach(&sc->sc_dv, &sc->nge_mii, 0xffffffff, MII_PHY_ANY,
840 			   MII_OFFSET_ANY, 0);
841 
842 		if (LIST_FIRST(&sc->nge_mii.mii_phys) == NULL) {
843 
844 			printf("%s: no PHY found!\n", sc->sc_dv.dv_xname);
845 			ifmedia_add(&sc->nge_mii.mii_media,
846 				    IFM_ETHER|IFM_MANUAL, 0, NULL);
847 			ifmedia_set(&sc->nge_mii.mii_media,
848 				    IFM_ETHER|IFM_MANUAL);
849 		}
850 		else
851 			ifmedia_set(&sc->nge_mii.mii_media,
852 				    IFM_ETHER|IFM_AUTO);
853 	}
854 
855 	/*
856 	 * Call MI attach routine.
857 	 */
858 	DPRINTFN(5, ("%s: if_attach\n", sc->sc_dv.dv_xname));
859 	if_attach(ifp);
860 	DPRINTFN(5, ("%s: ether_ifattach\n", sc->sc_dv.dv_xname));
861 	ether_ifattach(ifp);
862 	DPRINTFN(5, ("%s: timeout_set\n", sc->sc_dv.dv_xname));
863 	timeout_set(&sc->nge_timeout, nge_tick, sc);
864 	timeout_add_sec(&sc->nge_timeout, 1);
865 	return;
866 
867 fail_5:
868 	bus_dmamap_destroy(sc->sc_dmatag, dmamap);
869 
870 fail_4:
871 	bus_dmamem_unmap(sc->sc_dmatag, kva,
872 	    sizeof(struct nge_list_data));
873 
874 fail_3:
875 	bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
876 
877 fail_2:
878 	pci_intr_disestablish(pc, sc->nge_intrhand);
879 
880 fail_1:
881 	bus_space_unmap(sc->nge_btag, sc->nge_bhandle, size);
882 }
883 
884 /*
885  * Initialize the transmit descriptors.
886  */
887 int
888 nge_list_tx_init(struct nge_softc *sc)
889 {
890 	struct nge_list_data	*ld;
891 	struct nge_ring_data	*cd;
892 	int			i;
893 
894 	cd = &sc->nge_cdata;
895 	ld = sc->nge_ldata;
896 
897 	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
898 		if (i == (NGE_TX_LIST_CNT - 1)) {
899 			ld->nge_tx_list[i].nge_nextdesc =
900 			    &ld->nge_tx_list[0];
901 			ld->nge_tx_list[i].nge_next =
902 			    VTOPHYS(&ld->nge_tx_list[0]);
903 		} else {
904 			ld->nge_tx_list[i].nge_nextdesc =
905 			    &ld->nge_tx_list[i + 1];
906 			ld->nge_tx_list[i].nge_next =
907 			    VTOPHYS(&ld->nge_tx_list[i + 1]);
908 		}
909 		ld->nge_tx_list[i].nge_mbuf = NULL;
910 		ld->nge_tx_list[i].nge_ptr = 0;
911 		ld->nge_tx_list[i].nge_ctl = 0;
912 	}
913 
914 	cd->nge_tx_prod = cd->nge_tx_cons = cd->nge_tx_cnt = 0;
915 
916 	return(0);
917 }
918 
919 
920 /*
921  * Initialize the RX descriptors and allocate mbufs for them. Note that
922  * we arrange the descriptors in a closed ring, so that the last descriptor
923  * points back to the first.
924  */
925 int
926 nge_list_rx_init(struct nge_softc *sc)
927 {
928 	struct nge_list_data	*ld;
929 	struct nge_ring_data	*cd;
930 	int			i;
931 
932 	ld = sc->nge_ldata;
933 	cd = &sc->nge_cdata;
934 
935 	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
936 		if (nge_newbuf(sc, &ld->nge_rx_list[i], NULL) == ENOBUFS)
937 			return(ENOBUFS);
938 		if (i == (NGE_RX_LIST_CNT - 1)) {
939 			ld->nge_rx_list[i].nge_nextdesc =
940 			    &ld->nge_rx_list[0];
941 			ld->nge_rx_list[i].nge_next =
942 			    VTOPHYS(&ld->nge_rx_list[0]);
943 		} else {
944 			ld->nge_rx_list[i].nge_nextdesc =
945 			    &ld->nge_rx_list[i + 1];
946 			ld->nge_rx_list[i].nge_next =
947 			    VTOPHYS(&ld->nge_rx_list[i + 1]);
948 		}
949 	}
950 
951 	cd->nge_rx_prod = 0;
952 
953 	return(0);
954 }
955 
956 /*
957  * Initialize an RX descriptor and attach an MBUF cluster.
958  */
959 int
960 nge_newbuf(struct nge_softc *sc, struct nge_desc *c, struct mbuf *m)
961 {
962 	struct mbuf		*m_new = NULL;
963 
964 	if (m == NULL) {
965 		m_new = MCLGETI(NULL, NGE_MCLBYTES, NULL, M_DONTWAIT);
966 		if (m_new == NULL)
967 			return (ENOBUFS);
968 	} else {
969 		/*
970 		 * We're re-using a previously allocated mbuf;
971 		 * be sure to re-init pointers and lengths to
972 		 * default values.
973 		 */
974 		m_new = m;
975 		m_new->m_data = m_new->m_ext.ext_buf;
976 	}
977 
978 	m_new->m_len = m_new->m_pkthdr.len = NGE_MCLBYTES;
979 	m_adj(m_new, sizeof(u_int64_t));
980 
981 	c->nge_mbuf = m_new;
982 	c->nge_ptr = VTOPHYS(mtod(m_new, caddr_t));
983 	DPRINTFN(7,("%s: c->nge_ptr=%#x\n", sc->sc_dv.dv_xname,
984 		    c->nge_ptr));
985 	c->nge_ctl = m_new->m_len;
986 	c->nge_extsts = 0;
987 
988 	return(0);
989 }
990 
991 /*
992  * A frame has been uploaded: pass the resulting mbuf chain up to
993  * the higher level protocols.
994  */
995 void
996 nge_rxeof(struct nge_softc *sc)
997 {
998 	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
999 	struct mbuf		*m;
1000 	struct ifnet		*ifp;
1001 	struct nge_desc		*cur_rx;
1002 	int			i, total_len = 0;
1003 	u_int32_t		rxstat;
1004 
1005 	ifp = &sc->arpcom.ac_if;
1006 	i = sc->nge_cdata.nge_rx_prod;
1007 
1008 	while (NGE_OWNDESC(&sc->nge_ldata->nge_rx_list[i])) {
1009 		struct mbuf		*m0 = NULL;
1010 		u_int32_t		extsts;
1011 
1012 		cur_rx = &sc->nge_ldata->nge_rx_list[i];
1013 		rxstat = cur_rx->nge_rxstat;
1014 		extsts = cur_rx->nge_extsts;
1015 		m = cur_rx->nge_mbuf;
1016 		cur_rx->nge_mbuf = NULL;
1017 		total_len = NGE_RXBYTES(cur_rx);
1018 		NGE_INC(i, NGE_RX_LIST_CNT);
1019 
1020 		/*
1021 		 * If an error occurs, update stats, clear the
1022 		 * status word and leave the mbuf cluster in place:
1023 		 * it should simply get re-used next time this descriptor
1024 		 * comes up in the ring.
1025 		 */
1026 		if (!(rxstat & NGE_CMDSTS_PKT_OK)) {
1027 #if NVLAN > 0
1028 			if ((rxstat & NGE_RXSTAT_RUNT) &&
1029 			    total_len >= (ETHER_MIN_LEN - ETHER_CRC_LEN -
1030 			    ETHER_VLAN_ENCAP_LEN)) {
1031 				/*
1032 				 * Work around a hardware bug: accept runt
1033 				 * frames whose length is at least 56
1034 				 * bytes.
1035 				 */
1036 			} else {
1037 #endif
1038 				ifp->if_ierrors++;
1039 				nge_newbuf(sc, cur_rx, m);
1040 				continue;
1041 #if NVLAN > 0
1042 			}
1043 #endif
1044 		}
1045 
1046 		/*
1047 		 * Ok. NatSemi really screwed up here. This is the
1048 		 * only gigE chip I know of with alignment constraints
1049 		 * on receive buffers. RX buffers must be 64-bit aligned.
1050 		 */
1051 #ifndef __STRICT_ALIGNMENT
1052 		/*
1053 		 * By popular demand, ignore the alignment problems
1054 		 * on the Intel x86 platform. The performance hit
1055 		 * incurred due to unaligned accesses is much smaller
1056 		 * than the hit produced by forcing buffer copies all
1057 		 * the time, especially with jumbo frames. We still
1058 		 * need to fix up the alignment everywhere else though.
1059 		 */
1060 		if (nge_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
1061 #endif
1062 			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN);
1063 			nge_newbuf(sc, cur_rx, m);
1064 			if (m0 == NULL) {
1065 				ifp->if_ierrors++;
1066 				continue;
1067 			}
1068 			m_adj(m0, ETHER_ALIGN);
1069 			m = m0;
1070 #ifndef __STRICT_ALIGNMENT
1071 		} else {
1072 			m->m_pkthdr.len = m->m_len = total_len;
1073 		}
1074 #endif
1075 
1076 #if NVLAN > 0
1077 		if (extsts & NGE_RXEXTSTS_VLANPKT) {
1078 			m->m_pkthdr.ether_vtag =
1079 			    ntohs(extsts & NGE_RXEXTSTS_VTCI);
1080 			m->m_flags |= M_VLANTAG;
1081 		}
1082 #endif
1083 
1084 		/* Do IP checksum checking. */
1085 		if (extsts & NGE_RXEXTSTS_IPPKT) {
1086 			if (!(extsts & NGE_RXEXTSTS_IPCSUMERR))
1087 				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
1088 			if ((extsts & NGE_RXEXTSTS_TCPPKT) &&
1089 			    (!(extsts & NGE_RXEXTSTS_TCPCSUMERR)))
1090 				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
1091 			else if ((extsts & NGE_RXEXTSTS_UDPPKT) &&
1092 				 (!(extsts & NGE_RXEXTSTS_UDPCSUMERR)))
1093 				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
1094 		}
1095 
1096 		ml_enqueue(&ml, m);
1097 	}
1098 
1099 	if_input(ifp, &ml);
1100 
1101 	sc->nge_cdata.nge_rx_prod = i;
1102 }
1103 
1104 /*
1105  * A frame was downloaded to the chip. It's safe for us to clean up
1106  * the list buffers.
1107  */
1108 
1109 void
1110 nge_txeof(struct nge_softc *sc)
1111 {
1112 	struct nge_desc		*cur_tx;
1113 	struct ifnet		*ifp;
1114 	u_int32_t		idx;
1115 
1116 	ifp = &sc->arpcom.ac_if;
1117 
1118 	/*
1119 	 * Go through our tx list and free mbufs for those
1120 	 * frames that have been transmitted.
1121 	 */
1122 	idx = sc->nge_cdata.nge_tx_cons;
1123 	while (idx != sc->nge_cdata.nge_tx_prod) {
1124 		cur_tx = &sc->nge_ldata->nge_tx_list[idx];
1125 
1126 		if (NGE_OWNDESC(cur_tx))
1127 			break;
1128 
1129 		if (cur_tx->nge_ctl & NGE_CMDSTS_MORE) {
1130 			sc->nge_cdata.nge_tx_cnt--;
1131 			NGE_INC(idx, NGE_TX_LIST_CNT);
1132 			continue;
1133 		}
1134 
1135 		if (!(cur_tx->nge_ctl & NGE_CMDSTS_PKT_OK)) {
1136 			ifp->if_oerrors++;
1137 			if (cur_tx->nge_txstat & NGE_TXSTAT_EXCESSCOLLS)
1138 				ifp->if_collisions++;
1139 			if (cur_tx->nge_txstat & NGE_TXSTAT_OUTOFWINCOLL)
1140 				ifp->if_collisions++;
1141 		}
1142 
1143 		ifp->if_collisions +=
1144 		    (cur_tx->nge_txstat & NGE_TXSTAT_COLLCNT) >> 16;
1145 
1146 		ifp->if_opackets++;
1147 		if (cur_tx->nge_mbuf != NULL) {
1148 			m_freem(cur_tx->nge_mbuf);
1149 			cur_tx->nge_mbuf = NULL;
1150 			ifq_clr_oactive(&ifp->if_snd);
1151 		}
1152 
1153 		sc->nge_cdata.nge_tx_cnt--;
1154 		NGE_INC(idx, NGE_TX_LIST_CNT);
1155 	}
1156 
1157 	sc->nge_cdata.nge_tx_cons = idx;
1158 
1159 	if (idx == sc->nge_cdata.nge_tx_prod)
1160 		ifp->if_timer = 0;
1161 }
1162 
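/*
 * One-second timer: while the link is down, poll for link (TBI
 * autonegotiation status, or mii_tick() for copper PHYs) and kick the
 * transmit queue once the link comes up.
 */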
1163 void
1164 nge_tick(void *xsc)
1165 {
1166 	struct nge_softc	*sc = xsc;
1167 	struct mii_data		*mii = &sc->nge_mii;
1168 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1169 	int			s;
1170 
1171 	s = splnet();
1172 
1173 	DPRINTFN(10, ("%s: nge_tick: link=%d\n", sc->sc_dv.dv_xname,
1174 		      sc->nge_link));
1175 
1176 	timeout_add_sec(&sc->nge_timeout, 1);
1177 	if (sc->nge_link) {
1178 		splx(s);
1179 		return;
1180 	}
1181 
1182 	if (sc->nge_tbi) {
1183 		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
1184 		    == IFM_AUTO) {
1185 			u_int32_t bmsr, anlpar, txcfg, rxcfg;
1186 
1187 			bmsr = CSR_READ_4(sc, NGE_TBI_BMSR);
1188 			DPRINTFN(2, ("%s: nge_tick: bmsr=%#x\n",
1189 				     sc->sc_dv.dv_xname, bmsr));
1190 
1191 			if (!(bmsr & NGE_TBIBMSR_ANEG_DONE)) {
1192 				CSR_WRITE_4(sc, NGE_TBI_BMCR, 0);
1193 
1194 				splx(s);
1195 				return;
1196 			}
1197 
1198 			anlpar = CSR_READ_4(sc, NGE_TBI_ANLPAR);
1199 			txcfg = CSR_READ_4(sc, NGE_TX_CFG);
1200 			rxcfg = CSR_READ_4(sc, NGE_RX_CFG);
1201 
1202 			DPRINTFN(2, ("%s: nge_tick: anlpar=%#x, txcfg=%#x, "
1203 				     "rxcfg=%#x\n", sc->sc_dv.dv_xname, anlpar,
1204 				     txcfg, rxcfg));
1205 
1206 			if (anlpar == 0 || anlpar & NGE_TBIANAR_FDX) {
1207 				txcfg |= (NGE_TXCFG_IGN_HBEAT|
1208 					  NGE_TXCFG_IGN_CARR);
1209 				rxcfg |= NGE_RXCFG_RX_FDX;
1210 			} else {
1211 				txcfg &= ~(NGE_TXCFG_IGN_HBEAT|
1212 					   NGE_TXCFG_IGN_CARR);
1213 				rxcfg &= ~(NGE_RXCFG_RX_FDX);
1214 			}
1215 			txcfg |= NGE_TXCFG_AUTOPAD;
1216 			CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
1217 			CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);
1218 		}
1219 
1220 		DPRINTF(("%s: gigabit link up\n", sc->sc_dv.dv_xname));
1221 		sc->nge_link++;
1222 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
1223 			nge_start(ifp);
1224 	} else {
1225 		mii_tick(mii);
1226 		if (mii->mii_media_status & IFM_ACTIVE &&
1227 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1228 			sc->nge_link++;
1229 			if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
1230 				DPRINTF(("%s: gigabit link up\n",
1231 					 sc->sc_dv.dv_xname));
1232 			if (!IFQ_IS_EMPTY(&ifp->if_snd))
1233 				nge_start(ifp);
1234 		}
1235 
1236 	}
1237 
1238 	splx(s);
1239 }
1240 
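/*
 * Interrupt handler: drain the ISR, service TX and RX completions,
 * restart the receiver if it went idle and recover from system errors
 * with a full reset and reinit.
 */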
1241 int
1242 nge_intr(void *arg)
1243 {
1244 	struct nge_softc	*sc;
1245 	struct ifnet		*ifp;
1246 	u_int32_t		status;
1247 	int			claimed = 0;
1248 
1249 	sc = arg;
1250 	ifp = &sc->arpcom.ac_if;
1251 
1252 	/* Suppress unwanted interrupts */
1253 	if (!(ifp->if_flags & IFF_UP)) {
1254 		nge_stop(sc);
1255 		return (0);
1256 	}
1257 
1258 	/* Disable interrupts. */
1259 	CSR_WRITE_4(sc, NGE_IER, 0);
1260 
1261 	/* Data LED on for TBI mode */
1262 	if (sc->nge_tbi)
1263 		 CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
1264 			     | NGE_GPIO_GP3_OUT);
1265 
1266 	for (;;) {
1267 		/* Reading the ISR register clears all interrupts. */
1268 		status = CSR_READ_4(sc, NGE_ISR);
1269 
1270 		if ((status & NGE_INTRS) == 0)
1271 			break;
1272 
1273 		claimed = 1;
1274 
1275 		if ((status & NGE_ISR_TX_DESC_OK) ||
1276 		    (status & NGE_ISR_TX_ERR) ||
1277 		    (status & NGE_ISR_TX_OK) ||
1278 		    (status & NGE_ISR_TX_IDLE))
1279 			nge_txeof(sc);
1280 
1281 		if ((status & NGE_ISR_RX_DESC_OK) ||
1282 		    (status & NGE_ISR_RX_ERR) ||
1283 		    (status & NGE_ISR_RX_OFLOW) ||
1284 		    (status & NGE_ISR_RX_FIFO_OFLOW) ||
1285 		    (status & NGE_ISR_RX_IDLE) ||
1286 		    (status & NGE_ISR_RX_OK))
1287 			nge_rxeof(sc);
1288 
1289 		if ((status & NGE_ISR_RX_IDLE))
1290 			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
1291 
1292 		if (status & NGE_ISR_SYSERR) {
1293 			nge_reset(sc);
1294 			ifp->if_flags &= ~IFF_RUNNING;
1295 			nge_init(sc);
1296 		}
1297 
1298 #if 0
1299 		/*
1300 		 * XXX: nge_tick() is not ready to be called this way
1301 		 * it screws up the aneg timeout because mii_tick() is
1302 		 * only to be called once per second.
1303 		 */
1304 		if (status & NGE_IMR_PHY_INTR) {
1305 			sc->nge_link = 0;
1306 			nge_tick(sc);
1307 		}
1308 #endif
1309 	}
1310 
1311 	/* Re-enable interrupts. */
1312 	CSR_WRITE_4(sc, NGE_IER, 1);
1313 
1314 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1315 		nge_start(ifp);
1316 
1317 	/* Data LED off for TBI mode */
1318 	if (sc->nge_tbi)
1319 		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
1320 			    & ~NGE_GPIO_GP3_OUT);
1321 
1322 	return claimed;
1323 }
1324 
1325 /*
1326  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1327  * pointers to the fragment pointers.
1328  */
1329 int
1330 nge_encap(struct nge_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
1331 {
1332 	struct nge_desc		*f = NULL;
1333 	struct mbuf		*m;
1334 	int			frag, cur, cnt = 0;
1335 
1336 	/*
1337 	 * Start packing the mbufs in this chain into
1338 	 * the fragment pointers. Stop when we run out
1339 	 * of fragments or hit the end of the mbuf chain.
1340 	 */
1341 	m = m_head;
1342 	cur = frag = *txidx;
1343 
1344 	for (m = m_head; m != NULL; m = m->m_next) {
1345 		if (m->m_len != 0) {
1346 			if ((NGE_TX_LIST_CNT -
1347 			    (sc->nge_cdata.nge_tx_cnt + cnt)) < 2)
1348 				return(ENOBUFS);
1349 			f = &sc->nge_ldata->nge_tx_list[frag];
1350 			f->nge_ctl = NGE_CMDSTS_MORE | m->m_len;
1351 			f->nge_ptr = VTOPHYS(mtod(m, vaddr_t));
1352 			DPRINTFN(7,("%s: f->nge_ptr=%#x\n",
1353 				    sc->sc_dv.dv_xname, f->nge_ptr));
1354 			if (cnt != 0)
1355 				f->nge_ctl |= NGE_CMDSTS_OWN;
1356 			cur = frag;
1357 			NGE_INC(frag, NGE_TX_LIST_CNT);
1358 			cnt++;
1359 		}
1360 	}
1361 
1362 	if (m != NULL)
1363 		return(ENOBUFS);
1364 
1365 	sc->nge_ldata->nge_tx_list[*txidx].nge_extsts = 0;
1366 
1367 #if NVLAN > 0
1368 	if (m_head->m_flags & M_VLANTAG) {
1369 		sc->nge_ldata->nge_tx_list[cur].nge_extsts |=
1370 		    (NGE_TXEXTSTS_VLANPKT|htons(m_head->m_pkthdr.ether_vtag));
1371 	}
1372 #endif
1373 
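	/*
	 * Hand the chain to the chip: every descriptor except the first
	 * already has NGE_CMDSTS_OWN set (see the loop above), so clear
	 * MORE on the last descriptor and only then set OWN on the first,
	 * ensuring the chip never sees a partially built chain.
	 */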
1374 	sc->nge_ldata->nge_tx_list[cur].nge_mbuf = m_head;
1375 	sc->nge_ldata->nge_tx_list[cur].nge_ctl &= ~NGE_CMDSTS_MORE;
1376 	sc->nge_ldata->nge_tx_list[*txidx].nge_ctl |= NGE_CMDSTS_OWN;
1377 	sc->nge_cdata.nge_tx_cnt += cnt;
1378 	*txidx = frag;
1379 
1380 	return(0);
1381 }
1382 
1383 /*
1384  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1385  * to the mbuf data regions directly in the transmit lists. We also save a
1386  * copy of the pointers since the transmit list fragment pointers are
1387  * physical addresses.
1388  */
1389 
1390 void
1391 nge_start(struct ifnet *ifp)
1392 {
1393 	struct nge_softc	*sc;
1394 	struct mbuf		*m_head = NULL;
1395 	u_int32_t		idx;
1396 	int			pkts = 0;
1397 
1398 	sc = ifp->if_softc;
1399 
1400 	if (!sc->nge_link)
1401 		return;
1402 
1403 	idx = sc->nge_cdata.nge_tx_prod;
1404 
1405 	if (ifq_is_oactive(&ifp->if_snd))
1406 		return;
1407 
1408 	while (sc->nge_ldata->nge_tx_list[idx].nge_mbuf == NULL) {
1409 		m_head = ifq_deq_begin(&ifp->if_snd);
1410 		if (m_head == NULL)
1411 			break;
1412 
1413 		if (nge_encap(sc, m_head, &idx)) {
1414 			ifq_deq_rollback(&ifp->if_snd, m_head);
1415 			ifq_set_oactive(&ifp->if_snd);
1416 			break;
1417 		}
1418 
1419 		/* now we are committed to transmitting the packet */
1420 		ifq_deq_commit(&ifp->if_snd, m_head);
1421 		pkts++;
1422 
1423 #if NBPFILTER > 0
1424 		/*
1425 		 * If there's a BPF listener, bounce a copy of this frame
1426 		 * to him.
1427 		 */
1428 		if (ifp->if_bpf)
1429 			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
1430 #endif
1431 	}
1432 	if (pkts == 0)
1433 		return;
1434 
1435 	/* Transmit */
1436 	sc->nge_cdata.nge_tx_prod = idx;
1437 	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE);
1438 
1439 	/*
1440 	 * Set a timeout in case the chip goes out to lunch.
1441 	 */
1442 	ifp->if_timer = 5;
1443 }
1444 
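/*
 * Bring the chip up: program the station address and receive filter,
 * initialize the RX/TX descriptor rings, set duplex and gigabit mode
 * from the current media, and enable interrupts, the receiver and the
 * transmitter.
 */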
1445 void
1446 nge_init(void *xsc)
1447 {
1448 	struct nge_softc	*sc = xsc;
1449 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1450 	struct mii_data		*mii;
1451 	u_int32_t		txcfg, rxcfg;
1452 	uint64_t		media;
1453 	int			s;
1454 
1455 	if (ifp->if_flags & IFF_RUNNING)
1456 		return;
1457 
1458 	s = splnet();
1459 
1460 	/*
1461 	 * Cancel pending I/O and free all RX/TX buffers.
1462 	 */
1463 	nge_stop(sc);
1464 
1465 	mii = sc->nge_tbi ? NULL: &sc->nge_mii;
1466 
1467 	/* Set MAC address */
1468 	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0);
1469 	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
1470 	    ((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
1471 	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1);
1472 	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
1473 	    ((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
1474 	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2);
1475 	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
1476 	    ((u_int16_t *)sc->arpcom.ac_enaddr)[2]);
1477 
1478 	/* Init circular RX list. */
1479 	if (nge_list_rx_init(sc) == ENOBUFS) {
1480 		printf("%s: initialization failed: no "
1481 			"memory for rx buffers\n", sc->sc_dv.dv_xname);
1482 		nge_stop(sc);
1483 		splx(s);
1484 		return;
1485 	}
1486 
1487 	/*
1488 	 * Init tx descriptors.
1489 	 */
1490 	nge_list_tx_init(sc);
1491 
1492 	/*
1493 	 * For the NatSemi chip, we have to explicitly enable the
1494 	 * reception of ARP frames, as well as turn on the 'perfect
1495 	 * match' filter where we store the station address, otherwise
1496 	 * we won't receive unicasts meant for this host.
1497 	 */
1498 	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ARP);
1499 	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_PERFECT);
1500 
1501 	 /* If we want promiscuous mode, set the allframes bit. */
1502 	if (ifp->if_flags & IFF_PROMISC)
1503 		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
1504 	else
1505 		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
1506 
1507 	/*
1508 	 * Set the capture broadcast bit to capture broadcast frames.
1509 	 */
1510 	if (ifp->if_flags & IFF_BROADCAST)
1511 		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
1512 	else
1513 		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
1514 
1515 	/*
1516 	 * Load the multicast filter.
1517 	 */
1518 	nge_setmulti(sc);
1519 
1520 	/* Turn the receive filter on */
1521 	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ENABLE);
1522 
1523 	/*
1524 	 * Load the address of the RX and TX lists.
1525 	 */
1526 	CSR_WRITE_4(sc, NGE_RX_LISTPTR,
1527 	    VTOPHYS(&sc->nge_ldata->nge_rx_list[0]));
1528 	CSR_WRITE_4(sc, NGE_TX_LISTPTR,
1529 	    VTOPHYS(&sc->nge_ldata->nge_tx_list[0]));
1530 
1531 	/* Set RX configuration */
1532 	CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG);
1533 
1534 	/*
1535 	 * Enable hardware checksum validation for all IPv4
1536 	 * packets, do not reject packets with bad checksums.
1537 	 */
1538 	CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB);
1539 
1540 	/*
1541 	 * If VLAN support is enabled, tell the chip to detect
1542 	 * and strip VLAN tag info from received frames. The tag
1543 	 * will be provided in the extsts field in the RX descriptors.
1544 	 */
1545 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1546 		NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL,
1547 		    NGE_VIPRXCTL_TAG_DETECT_ENB | NGE_VIPRXCTL_TAG_STRIP_ENB);
1548 
1549 	/* Set TX configuration */
1550 	CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG);
1551 
1552 	/*
1553 	 * If VLAN support is enabled, tell the chip to insert
1554 	 * VLAN tags on a per-packet basis as dictated by the
1555 	 * code in the frame encapsulation routine.
1556 	 */
1557 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1558 		NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT);
1559 
1560 	/* Set full/half duplex mode. */
1561 	if (sc->nge_tbi)
1562 		media = sc->nge_ifmedia.ifm_cur->ifm_media;
1563 	else
1564 		media = mii->mii_media_active;
1565 
1566 	txcfg = CSR_READ_4(sc, NGE_TX_CFG);
1567 	rxcfg = CSR_READ_4(sc, NGE_RX_CFG);
1568 
1569 	DPRINTFN(4, ("%s: nge_init txcfg=%#x, rxcfg=%#x\n",
1570 		     sc->sc_dv.dv_xname, txcfg, rxcfg));
1571 
1572 	if ((media & IFM_GMASK) == IFM_FDX) {
1573 		txcfg |= (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
1574 		rxcfg |= (NGE_RXCFG_RX_FDX);
1575 	} else {
1576 		txcfg &= ~(NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
1577 		rxcfg &= ~(NGE_RXCFG_RX_FDX);
1578 	}
1579 
1580 	txcfg |= NGE_TXCFG_AUTOPAD;
1581 
1582 	CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
1583 	CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);
1584 
1585 	nge_tick(sc);
1586 
1587 	/*
1588 	 * Enable the delivery of PHY interrupts based on
1589 	 * link/speed/duplex status changes and enable return
1590 	 * of extended status information in the DMA descriptors,
1591 	 * required for checksum offloading.
1592 	 */
1593 	NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD|NGE_CFG_PHYINTR_LNK|
1594 		   NGE_CFG_PHYINTR_DUP|NGE_CFG_EXTSTS_ENB);
1595 
1596 	DPRINTFN(1, ("%s: nge_init: config=%#x\n", sc->sc_dv.dv_xname,
1597 		     CSR_READ_4(sc, NGE_CFG)));
1598 
1599 	/*
1600 	 * Configure interrupt holdoff (moderation). We can
1601 	 * have the chip delay interrupt delivery for a certain
1602 	 * period. Units are in 100us, and the max setting
1603 	 * is 25500us (0xFF x 100us). Default is a 100us holdoff.
1604 	 */
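	/*
	 * A value of 0x01 gives the default 100us holdoff; writing 0x0A
	 * instead, for example, would let the chip delay interrupt
	 * delivery by up to 1ms.
	 */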
1605 	CSR_WRITE_4(sc, NGE_IHR, 0x01);
1606 
1607 	/*
1608 	 * Enable interrupts.
1609 	 */
1610 	CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS);
1611 	CSR_WRITE_4(sc, NGE_IER, 1);
1612 
1613 	/* Enable receiver and transmitter. */
1614 	NGE_CLRBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
1615 	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
1616 
1617 	if (sc->nge_tbi)
1618 	    nge_ifmedia_tbi_upd(ifp);
1619 	else
1620 	    nge_ifmedia_mii_upd(ifp);
1621 
1622 	ifp->if_flags |= IFF_RUNNING;
1623 	ifq_clr_oactive(&ifp->if_snd);
1624 
1625 	splx(s);
1626 }
1627 
1628 /*
1629  * Set mii media options.
1630  */
1631 int
1632 nge_ifmedia_mii_upd(struct ifnet *ifp)
1633 {
1634 	struct nge_softc	*sc = ifp->if_softc;
1635 	struct mii_data 	*mii = &sc->nge_mii;
1636 
1637 	DPRINTFN(2, ("%s: nge_ifmedia_mii_upd\n", sc->sc_dv.dv_xname));
1638 
1639 	sc->nge_link = 0;
1640 
1641 	if (mii->mii_instance) {
1642 		struct mii_softc *miisc;
1643 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1644 			mii_phy_reset(miisc);
1645 	}
1646 	mii_mediachg(mii);
1647 
1648 	return(0);
1649 }
1650 
1651 /*
1652  * Report current mii media status.
1653  */
1654 void
1655 nge_ifmedia_mii_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1656 {
1657 	struct nge_softc	*sc = ifp->if_softc;
1658 	struct mii_data *mii = &sc->nge_mii;
1659 
1660 	DPRINTFN(2, ("%s: nge_ifmedia_mii_sts\n", sc->sc_dv.dv_xname));
1661 
1662 	mii_pollstat(mii);
1663 	ifmr->ifm_active = mii->mii_media_active;
1664 	ifmr->ifm_status = mii->mii_media_status;
1665 }
1666 
1667 /*
1668  * Set tbi media options.
1669  */
1670 int
1671 nge_ifmedia_tbi_upd(struct ifnet *ifp)
1672 {
1673 	struct nge_softc	*sc = ifp->if_softc;
1674 
1675 	DPRINTFN(2, ("%s: nge_ifmedia_tbi_upd\n", sc->sc_dv.dv_xname));
1676 
1677 	sc->nge_link = 0;
1678 
1679 	if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
1680 	    == IFM_AUTO) {
1681 		u_int32_t anar, bmcr;
1682 		anar = CSR_READ_4(sc, NGE_TBI_ANAR);
1683 		anar |= (NGE_TBIANAR_HDX | NGE_TBIANAR_FDX);
1684 		CSR_WRITE_4(sc, NGE_TBI_ANAR, anar);
1685 
1686 		bmcr = CSR_READ_4(sc, NGE_TBI_BMCR);
1687 		bmcr |= (NGE_TBIBMCR_ENABLE_ANEG|NGE_TBIBMCR_RESTART_ANEG);
1688 		CSR_WRITE_4(sc, NGE_TBI_BMCR, bmcr);
1689 
1690 		bmcr &= ~(NGE_TBIBMCR_RESTART_ANEG);
1691 		CSR_WRITE_4(sc, NGE_TBI_BMCR, bmcr);
1692 	} else {
1693 		u_int32_t txcfg, rxcfg;
1694 		txcfg = CSR_READ_4(sc, NGE_TX_CFG);
1695 		rxcfg = CSR_READ_4(sc, NGE_RX_CFG);
1696 
1697 		if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
1698 		    == IFM_FDX) {
1699 			txcfg |= NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR;
1700 			rxcfg |= NGE_RXCFG_RX_FDX;
1701 		} else {
1702 			txcfg &= ~(NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
1703 			rxcfg &= ~(NGE_RXCFG_RX_FDX);
1704 		}
1705 
1706 		txcfg |= NGE_TXCFG_AUTOPAD;
1707 		CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
1708 		CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);
1709 	}
1710 
1711 	NGE_CLRBIT(sc, NGE_GPIO, NGE_GPIO_GP3_OUT);
1712 
1713 	return(0);
1714 }
1715 
1716 /*
1717  * Report current tbi media status.
1718  */
1719 void
1720 nge_ifmedia_tbi_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1721 {
1722 	struct nge_softc	*sc = ifp->if_softc;
1723 	u_int32_t		bmcr;
1724 
1725 	bmcr = CSR_READ_4(sc, NGE_TBI_BMCR);
1726 
1727 	if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media) == IFM_AUTO) {
1728 		u_int32_t bmsr = CSR_READ_4(sc, NGE_TBI_BMSR);
1729 		DPRINTFN(2, ("%s: nge_ifmedia_tbi_sts bmsr=%#x, bmcr=%#x\n",
1730 			     sc->sc_dv.dv_xname, bmsr, bmcr));
1731 
1732 		if (!(bmsr & NGE_TBIBMSR_ANEG_DONE)) {
1733 			ifmr->ifm_active = IFM_ETHER|IFM_NONE;
1734 			ifmr->ifm_status = IFM_AVALID;
1735 			return;
1736 		}
1737 	} else {
1738 		DPRINTFN(2, ("%s: nge_ifmedia_tbi_sts bmcr=%#x\n",
1739 			     sc->sc_dv.dv_xname, bmcr));
1740 	}
1741 
1742 	ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
1743 	ifmr->ifm_active = IFM_ETHER|IFM_1000_SX;
1744 
1745 	if (bmcr & NGE_TBIBMCR_LOOPBACK)
1746 		ifmr->ifm_active |= IFM_LOOP;
1747 
1748 	if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media) == IFM_AUTO) {
1749 		u_int32_t anlpar = CSR_READ_4(sc, NGE_TBI_ANLPAR);
1750 		DPRINTFN(2, ("%s: nge_ifmedia_tbi_sts anlpar=%#x\n",
1751 			     sc->sc_dv.dv_xname, anlpar));
1752 
1753 		ifmr->ifm_active |= IFM_AUTO;
1754 		if (anlpar & NGE_TBIANLPAR_FDX) {
1755 			ifmr->ifm_active |= IFM_FDX;
1756 		} else if (anlpar & NGE_TBIANLPAR_HDX) {
1757 			ifmr->ifm_active |= IFM_HDX;
1758 		} else
1759 			ifmr->ifm_active |= IFM_FDX;
1760 
1761 	} else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK) == IFM_FDX)
1762 		ifmr->ifm_active |= IFM_FDX;
1763 	else
1764 		ifmr->ifm_active |= IFM_HDX;
1765 
1766 }
1767 
1768 int
1769 nge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1770 {
1771 	struct nge_softc	*sc = ifp->if_softc;
1772 	struct ifreq		*ifr = (struct ifreq *) data;
1773 	struct mii_data		*mii;
1774 	int			s, error = 0;
1775 
1776 	s = splnet();
1777 
1778 	switch (command) {
1779 	case SIOCSIFADDR:
1780 		ifp->if_flags |= IFF_UP;
1781 		nge_init(sc);
1782 		break;
1783 
1784 	case SIOCSIFFLAGS:
1785 		if (ifp->if_flags & IFF_UP) {
1786 			if (ifp->if_flags & IFF_RUNNING &&
1787 			    ifp->if_flags & IFF_PROMISC &&
1788 			    !(sc->nge_if_flags & IFF_PROMISC)) {
1789 				NGE_SETBIT(sc, NGE_RXFILT_CTL,
1790 				    NGE_RXFILTCTL_ALLPHYS|
1791 				    NGE_RXFILTCTL_ALLMULTI);
1792 			} else if (ifp->if_flags & IFF_RUNNING &&
1793 			    !(ifp->if_flags & IFF_PROMISC) &&
1794 			    sc->nge_if_flags & IFF_PROMISC) {
1795 				NGE_CLRBIT(sc, NGE_RXFILT_CTL,
1796 				    NGE_RXFILTCTL_ALLPHYS);
1797 				if (!(ifp->if_flags & IFF_ALLMULTI))
1798 					NGE_CLRBIT(sc, NGE_RXFILT_CTL,
1799 					    NGE_RXFILTCTL_ALLMULTI);
1800 			} else {
1801 				ifp->if_flags &= ~IFF_RUNNING;
1802 				nge_init(sc);
1803 			}
1804 		} else {
1805 			if (ifp->if_flags & IFF_RUNNING)
1806 				nge_stop(sc);
1807 		}
1808 		sc->nge_if_flags = ifp->if_flags;
1809 		error = 0;
1810 		break;
1811 
1812 	case SIOCGIFMEDIA:
1813 	case SIOCSIFMEDIA:
1814 		if (sc->nge_tbi) {
1815 			error = ifmedia_ioctl(ifp, ifr, &sc->nge_ifmedia,
1816 					      command);
1817 		} else {
1818 			mii = &sc->nge_mii;
1819 			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
1820 					      command);
1821 		}
1822 		break;
1823 
1824 	default:
1825 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
1826 	}
1827 
1828 	if (error == ENETRESET) {
1829 		if (ifp->if_flags & IFF_RUNNING)
1830 			nge_setmulti(sc);
1831 		error = 0;
1832 	}
1833 
1834 	splx(s);
1835 	return(error);
1836 }
1837 
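/*
 * Transmit watchdog (armed for five seconds in nge_start()): if a
 * transmission stalls, log the timeout, reset and reinitialize the chip
 * and restart the transmit queue.
 */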
1838 void
1839 nge_watchdog(struct ifnet *ifp)
1840 {
1841 	struct nge_softc	*sc;
1842 
1843 	sc = ifp->if_softc;
1844 
1845 	ifp->if_oerrors++;
1846 	printf("%s: watchdog timeout\n", sc->sc_dv.dv_xname);
1847 
1848 	nge_stop(sc);
1849 	nge_reset(sc);
1850 	ifp->if_flags &= ~IFF_RUNNING;
1851 	nge_init(sc);
1852 
1853 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1854 		nge_start(ifp);
1855 }
1856 
1857 /*
1858  * Stop the adapter and free any mbufs allocated to the
1859  * RX and TX lists.
1860  */
1861 void
1862 nge_stop(struct nge_softc *sc)
1863 {
1864 	int			i;
1865 	struct ifnet		*ifp;
1866 	struct mii_data		*mii;
1867 
1868 	ifp = &sc->arpcom.ac_if;
1869 	ifp->if_timer = 0;
1870 	if (sc->nge_tbi) {
1871 		mii = NULL;
1872 	} else {
1873 		mii = &sc->nge_mii;
1874 	}
1875 
1876 	timeout_del(&sc->nge_timeout);
1877 
1878 	ifp->if_flags &= ~IFF_RUNNING;
1879 	ifq_clr_oactive(&ifp->if_snd);
1880 
1881 	CSR_WRITE_4(sc, NGE_IER, 0);
1882 	CSR_WRITE_4(sc, NGE_IMR, 0);
1883 	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
1884 	DELAY(1000);
1885 	CSR_WRITE_4(sc, NGE_TX_LISTPTR, 0);
1886 	CSR_WRITE_4(sc, NGE_RX_LISTPTR, 0);
1887 
1888 	if (!sc->nge_tbi)
1889 		mii_down(mii);
1890 
1891 	sc->nge_link = 0;
1892 
1893 	/*
1894 	 * Free data in the RX lists.
1895 	 */
1896 	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
1897 		if (sc->nge_ldata->nge_rx_list[i].nge_mbuf != NULL) {
1898 			m_freem(sc->nge_ldata->nge_rx_list[i].nge_mbuf);
1899 			sc->nge_ldata->nge_rx_list[i].nge_mbuf = NULL;
1900 		}
1901 	}
1902 	bzero(&sc->nge_ldata->nge_rx_list,
1903 		sizeof(sc->nge_ldata->nge_rx_list));
1904 
1905 	/*
1906 	 * Free the TX list buffers.
1907 	 */
1908 	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
1909 		if (sc->nge_ldata->nge_tx_list[i].nge_mbuf != NULL) {
1910 			m_freem(sc->nge_ldata->nge_tx_list[i].nge_mbuf);
1911 			sc->nge_ldata->nge_tx_list[i].nge_mbuf = NULL;
1912 		}
1913 	}
1914 
1915 	bzero(&sc->nge_ldata->nge_tx_list,
1916 		sizeof(sc->nge_ldata->nge_tx_list));
1917 }
1918 
1919 struct cfattach nge_ca = {
1920 	sizeof(struct nge_softc), nge_probe, nge_attach
1921 };
1922 
1923 struct cfdriver nge_cd = {
1924 	NULL, "nge", DV_IFNET
1925 };
1926