1 /*	$OpenBSD: if_nge.c,v 1.71 2011/06/22 16:44:27 tedu Exp $	*/
2 /*
3  * Copyright (c) 2001 Wind River Systems
4  * Copyright (c) 1997, 1998, 1999, 2000, 2001
5  *	Bill Paul <wpaul@bsdi.com>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: if_nge.c,v 1.35 2002/08/08 18:33:28 ambrisko Exp $
35  */
36 
37 /*
38  * National Semiconductor DP83820/DP83821 gigabit ethernet driver
39  * for FreeBSD. Datasheets are available from:
40  *
41  * http://www.national.com/ds/DP/DP83820.pdf
42  * http://www.national.com/ds/DP/DP83821.pdf
43  *
44  * These chips are used on several low cost gigabit ethernet NICs
45  * sold by D-Link, Addtron, SMC and Asante. Both parts are
46  * virtually the same, except the 83820 is a 64-bit/32-bit part,
47  * while the 83821 is 32-bit only.
48  *
49  * Many cards also use National gigE transceivers, such as the
50  * DP83891, DP83861 and DP83862 gigPHYTER parts. The DP83861 datasheet
51  * contains a full register description that applies to all of these
52  * components:
53  *
54  * http://www.national.com/ds/DP/DP83861.pdf
55  *
56  * Written by Bill Paul <wpaul@bsdi.com>
57  * BSDi Open Source Solutions
58  */
59 
60 /*
61  * The NatSemi DP83820 and 83821 controllers are enhanced versions
62  * of the NatSemi MacPHYTER 10/100 devices. They support 10, 100
63  * and 1000Mbps speeds with 1000baseX (ten bit interface), MII and GMII
64  * ports. Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP
65  * hardware checksum offload (IPv4 only), VLAN tagging and filtering,
66  * priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern
67  * matching buffers, one perfect address filter buffer and interrupt
68  * moderation. The 83820 supports both 64-bit and 32-bit addressing
69  * and data transfers: the 64-bit support can be toggled on or off
70  * via software. This affects the size of certain fields in the DMA
71  * descriptors.
72  *
73  * There are two bugs/misfeatures in the 83820/83821 that I have
74  * discovered so far:
75  *
76  * - Receive buffers must be aligned on 64-bit boundaries, which means
77  *   you must resort to copying data in order to fix up the payload
78  *   alignment.
79  *
80  * - In order to transmit jumbo frames larger than 8170 bytes, you have
81  *   to turn off transmit checksum offloading, because the chip can't
82  *   compute the checksum on an outgoing frame unless it fits entirely
83  *   within the TX FIFO, which is only 8192 bytes in size. If you have
84  * TX checksum offload enabled and you attempt to transmit a
85  *   frame larger than 8170 bytes, the transmitter will wedge.
86  *
87  * To work around the latter problem, TX checksum offload is disabled
88  * if the user selects an MTU larger than 8152 (8170 - 18).
89  */
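
/*
 * Editorial sketch, not part of the original driver: the MTU-based
 * checksum cutoff described above could be expressed roughly as
 * below.  nge_csum_enable()/nge_csum_disable() are hypothetical
 * helpers standing in for whatever mechanism toggles the offload.
 */
#if 0
#define NGE_TX_CSUM_MTU_MAX	8152	/* 8170 - 18 (header + CRC) */

static void
nge_csum_policy(struct nge_softc *sc, int mtu)
{
	if (mtu > NGE_TX_CSUM_MTU_MAX)
		nge_csum_disable(sc);	/* frame may overflow 8K TX FIFO */
	else
		nge_csum_enable(sc);
}
#endif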
90 
91 #include "bpfilter.h"
92 #include "vlan.h"
93 
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/sockio.h>
97 #include <sys/mbuf.h>
98 #include <sys/malloc.h>
99 #include <sys/kernel.h>
100 #include <sys/device.h>
101 #include <sys/socket.h>
102 
103 #include <net/if.h>
104 #include <net/if_dl.h>
105 #include <net/if_media.h>
106 
107 #ifdef INET
108 #include <netinet/in.h>
109 #include <netinet/in_systm.h>
110 #include <netinet/in_var.h>
111 #include <netinet/ip.h>
112 #include <netinet/if_ether.h>
113 #endif
114 
115 #if NVLAN > 0
116 #include <net/if_types.h>
117 #include <net/if_vlan_var.h>
118 #endif
119 
120 #if NBPFILTER > 0
121 #include <net/bpf.h>
122 #endif
123 
124 #include <uvm/uvm_extern.h>              /* for vtophys */
125 #define	VTOPHYS(v)	vtophys((vaddr_t)(v))
126 
127 #include <dev/pci/pcireg.h>
128 #include <dev/pci/pcivar.h>
129 #include <dev/pci/pcidevs.h>
130 
131 #include <dev/mii/mii.h>
132 #include <dev/mii/miivar.h>
133 
134 #define NGE_USEIOSPACE
135 
136 #include <dev/pci/if_ngereg.h>
137 
138 int nge_probe(struct device *, void *, void *);
139 void nge_attach(struct device *, struct device *, void *);
140 
141 int nge_alloc_jumbo_mem(struct nge_softc *);
142 void *nge_jalloc(struct nge_softc *);
143 void nge_jfree(caddr_t, u_int, void *);
144 
145 int nge_newbuf(struct nge_softc *, struct nge_desc *,
146 			     struct mbuf *);
147 int nge_encap(struct nge_softc *, struct mbuf *, u_int32_t *);
148 void nge_rxeof(struct nge_softc *);
149 void nge_txeof(struct nge_softc *);
150 int nge_intr(void *);
151 void nge_tick(void *);
152 void nge_start(struct ifnet *);
153 int nge_ioctl(struct ifnet *, u_long, caddr_t);
154 void nge_init(void *);
155 void nge_stop(struct nge_softc *);
156 void nge_watchdog(struct ifnet *);
157 int nge_ifmedia_mii_upd(struct ifnet *);
158 void nge_ifmedia_mii_sts(struct ifnet *, struct ifmediareq *);
159 int nge_ifmedia_tbi_upd(struct ifnet *);
160 void nge_ifmedia_tbi_sts(struct ifnet *, struct ifmediareq *);
161 
162 void nge_delay(struct nge_softc *);
163 void nge_eeprom_idle(struct nge_softc *);
164 void nge_eeprom_putbyte(struct nge_softc *, int);
165 void nge_eeprom_getword(struct nge_softc *, int, u_int16_t *);
166 void nge_read_eeprom(struct nge_softc *, caddr_t, int, int, int);
167 
168 void nge_mii_sync(struct nge_softc *);
169 void nge_mii_send(struct nge_softc *, u_int32_t, int);
170 int nge_mii_readreg(struct nge_softc *, struct nge_mii_frame *);
171 int nge_mii_writereg(struct nge_softc *, struct nge_mii_frame *);
172 
173 int nge_miibus_readreg(struct device *, int, int);
174 void nge_miibus_writereg(struct device *, int, int, int);
175 void nge_miibus_statchg(struct device *);
176 
177 void nge_setmulti(struct nge_softc *);
178 void nge_reset(struct nge_softc *);
179 int nge_list_rx_init(struct nge_softc *);
180 int nge_list_tx_init(struct nge_softc *);
181 
182 #ifdef NGE_USEIOSPACE
183 #define NGE_RES			SYS_RES_IOPORT
184 #define NGE_RID			NGE_PCI_LOIO
185 #else
186 #define NGE_RES			SYS_RES_MEMORY
187 #define NGE_RID			NGE_PCI_LOMEM
188 #endif
189 
190 #ifdef NGE_DEBUG
191 #define DPRINTF(x)	if (ngedebug) printf x
192 #define DPRINTFN(n,x)	if (ngedebug >= (n)) printf x
193 int	ngedebug = 0;
194 #else
195 #define DPRINTF(x)
196 #define DPRINTFN(n,x)
197 #endif
198 
199 #define NGE_SETBIT(sc, reg, x)				\
200 	CSR_WRITE_4(sc, reg,				\
201 		CSR_READ_4(sc, reg) | (x))
202 
203 #define NGE_CLRBIT(sc, reg, x)				\
204 	CSR_WRITE_4(sc, reg,				\
205 		CSR_READ_4(sc, reg) & ~(x))
206 
207 #define SIO_SET(x)					\
208 	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) | (x))
209 
210 #define SIO_CLR(x)					\
211 	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) & ~(x))
212 
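/*
 * Usage note (editorial): the helpers above are plain read-modify-write
 * accessors; e.g. NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000) performs a
 * CSR read, an OR and a CSR write, so none of them is atomic with
 * respect to other register users.
 */
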
213 void
214 nge_delay(sc)
215 	struct nge_softc	*sc;
216 {
217 	int			idx;
218 
219 	for (idx = (300 / 33) + 1; idx > 0; idx--)
220 		CSR_READ_4(sc, NGE_CSR);
221 }
222 
223 void
224 nge_eeprom_idle(sc)
225 	struct nge_softc	*sc;
226 {
227 	int		i;
228 
229 	SIO_SET(NGE_MEAR_EE_CSEL);
230 	nge_delay(sc);
231 	SIO_SET(NGE_MEAR_EE_CLK);
232 	nge_delay(sc);
233 
234 	for (i = 0; i < 25; i++) {
235 		SIO_CLR(NGE_MEAR_EE_CLK);
236 		nge_delay(sc);
237 		SIO_SET(NGE_MEAR_EE_CLK);
238 		nge_delay(sc);
239 	}
240 
241 	SIO_CLR(NGE_MEAR_EE_CLK);
242 	nge_delay(sc);
243 	SIO_CLR(NGE_MEAR_EE_CSEL);
244 	nge_delay(sc);
245 	CSR_WRITE_4(sc, NGE_MEAR, 0x00000000);
246 }
247 
248 /*
249  * Send a read command and address to the EEPROM.
250  */
251 void
252 nge_eeprom_putbyte(sc, addr)
253 	struct nge_softc	*sc;
254 	int			addr;
255 {
256 	int			d, i;
257 
258 	d = addr | NGE_EECMD_READ;
259 
260 	/*
261 	 * Feed in each bit and strobe the clock.
262 	 */
263 	for (i = 0x400; i; i >>= 1) {
264 		if (d & i) {
265 			SIO_SET(NGE_MEAR_EE_DIN);
266 		} else {
267 			SIO_CLR(NGE_MEAR_EE_DIN);
268 		}
269 		nge_delay(sc);
270 		SIO_SET(NGE_MEAR_EE_CLK);
271 		nge_delay(sc);
272 		SIO_CLR(NGE_MEAR_EE_CLK);
273 		nge_delay(sc);
274 	}
275 }
276 
277 /*
278  * Read a word of data stored in the EEPROM at address 'addr.'
279  */
280 void
281 nge_eeprom_getword(sc, addr, dest)
282 	struct nge_softc	*sc;
283 	int			addr;
284 	u_int16_t		*dest;
285 {
286 	int			i;
287 	u_int16_t		word = 0;
288 
289 	/* Force EEPROM to idle state. */
290 	nge_eeprom_idle(sc);
291 
292 	/* Enter EEPROM access mode. */
293 	nge_delay(sc);
294 	SIO_CLR(NGE_MEAR_EE_CLK);
295 	nge_delay(sc);
296 	SIO_SET(NGE_MEAR_EE_CSEL);
297 	nge_delay(sc);
298 
299 	/*
300 	 * Send address of word we want to read.
301 	 */
302 	nge_eeprom_putbyte(sc, addr);
303 
304 	/*
305 	 * Start reading bits from EEPROM.
306 	 */
307 	for (i = 0x8000; i; i >>= 1) {
308 		SIO_SET(NGE_MEAR_EE_CLK);
309 		nge_delay(sc);
310 		if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_EE_DOUT)
311 			word |= i;
312 		nge_delay(sc);
313 		SIO_CLR(NGE_MEAR_EE_CLK);
314 		nge_delay(sc);
315 	}
316 
317 	/* Turn off EEPROM access mode. */
318 	nge_eeprom_idle(sc);
319 
320 	*dest = word;
321 }
322 
323 /*
324  * Read a sequence of words from the EEPROM.
325  */
326 void
327 nge_read_eeprom(sc, dest, off, cnt, swap)
328 	struct nge_softc	*sc;
329 	caddr_t			dest;
330 	int			off;
331 	int			cnt;
332 	int			swap;
333 {
334 	int			i;
335 	u_int16_t		word = 0, *ptr;
336 
337 	for (i = 0; i < cnt; i++) {
338 		nge_eeprom_getword(sc, off + i, &word);
339 		ptr = (u_int16_t *)(dest + (i * 2));
340 		if (swap)
341 			*ptr = ntohs(word);
342 		else
343 			*ptr = word;
344 	}
345 }
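
/*
 * Example (editorial sketch): the attach path below uses
 * nge_read_eeprom() like this to recover the 6-byte station address
 * from three 16-bit EEPROM words; the words are stored in reverse
 * order, hence the descending destination offsets.
 */
#if 0
static void
nge_get_eaddr(struct nge_softc *sc, u_char eaddr[ETHER_ADDR_LEN])
{
	nge_read_eeprom(sc, (caddr_t)&eaddr[4], NGE_EE_NODEADDR, 1, 0);
	nge_read_eeprom(sc, (caddr_t)&eaddr[2], NGE_EE_NODEADDR + 1, 1, 0);
	nge_read_eeprom(sc, (caddr_t)&eaddr[0], NGE_EE_NODEADDR + 2, 1, 0);
}
#endif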
346 
347 /*
348  * Sync the PHYs by setting the data bit and strobing the clock 32 times.
349  */
350 void
351 nge_mii_sync(sc)
352 	struct nge_softc		*sc;
353 {
354 	int			i;
355 
356 	SIO_SET(NGE_MEAR_MII_DIR|NGE_MEAR_MII_DATA);
357 
358 	for (i = 0; i < 32; i++) {
359 		SIO_SET(NGE_MEAR_MII_CLK);
360 		DELAY(1);
361 		SIO_CLR(NGE_MEAR_MII_CLK);
362 		DELAY(1);
363 	}
364 }
365 
366 /*
367  * Clock a series of bits through the MII.
368  */
369 void
370 nge_mii_send(sc, bits, cnt)
371 	struct nge_softc		*sc;
372 	u_int32_t		bits;
373 	int			cnt;
374 {
375 	int			i;
376 
377 	SIO_CLR(NGE_MEAR_MII_CLK);
378 
379 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
380                 if (bits & i) {
381 			SIO_SET(NGE_MEAR_MII_DATA);
382                 } else {
383 			SIO_CLR(NGE_MEAR_MII_DATA);
384                 }
385 		DELAY(1);
386 		SIO_CLR(NGE_MEAR_MII_CLK);
387 		DELAY(1);
388 		SIO_SET(NGE_MEAR_MII_CLK);
389 	}
390 }
391 
392 /*
393  * Read a PHY register through the MII.
394  */
395 int
396 nge_mii_readreg(sc, frame)
397 	struct nge_softc		*sc;
398 	struct nge_mii_frame	*frame;
399 {
400 	int			i, ack, s;
401 
402 	s = splnet();
403 
404 	/*
405 	 * Set up frame for RX.
406 	 */
407 	frame->mii_stdelim = NGE_MII_STARTDELIM;
408 	frame->mii_opcode = NGE_MII_READOP;
409 	frame->mii_turnaround = 0;
410 	frame->mii_data = 0;
411 
412 	CSR_WRITE_4(sc, NGE_MEAR, 0);
413 
414 	/*
415 	 * Turn on data xmit.
416 	 */
417 	SIO_SET(NGE_MEAR_MII_DIR);
418 
419 	nge_mii_sync(sc);
420 
421 	/*
422 	 * Send command/address info.
423 	 */
424 	nge_mii_send(sc, frame->mii_stdelim, 2);
425 	nge_mii_send(sc, frame->mii_opcode, 2);
426 	nge_mii_send(sc, frame->mii_phyaddr, 5);
427 	nge_mii_send(sc, frame->mii_regaddr, 5);
428 
429 	/* Idle bit */
430 	SIO_CLR((NGE_MEAR_MII_CLK|NGE_MEAR_MII_DATA));
431 	DELAY(1);
432 	SIO_SET(NGE_MEAR_MII_CLK);
433 	DELAY(1);
434 
435 	/* Turn off xmit. */
436 	SIO_CLR(NGE_MEAR_MII_DIR);
437 	/* Check for ack */
438 	SIO_CLR(NGE_MEAR_MII_CLK);
439 	DELAY(1);
440 	ack = CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA;
441 	SIO_SET(NGE_MEAR_MII_CLK);
442 	DELAY(1);
443 
444 	/*
445 	 * Now try reading data bits. If the ack failed, we still
446 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
447 	 */
448 	if (ack) {
449 		for(i = 0; i < 16; i++) {
450 			SIO_CLR(NGE_MEAR_MII_CLK);
451 			DELAY(1);
452 			SIO_SET(NGE_MEAR_MII_CLK);
453 			DELAY(1);
454 		}
455 		goto fail;
456 	}
457 
458 	for (i = 0x8000; i; i >>= 1) {
459 		SIO_CLR(NGE_MEAR_MII_CLK);
460 		DELAY(1);
461 		if (!ack) {
462 			if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA)
463 				frame->mii_data |= i;
464 			DELAY(1);
465 		}
466 		SIO_SET(NGE_MEAR_MII_CLK);
467 		DELAY(1);
468 	}
469 
470 fail:
471 
472 	SIO_CLR(NGE_MEAR_MII_CLK);
473 	DELAY(1);
474 	SIO_SET(NGE_MEAR_MII_CLK);
475 	DELAY(1);
476 
477 	splx(s);
478 
479 	if (ack)
480 		return(1);
481 	return(0);
482 }
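
/*
 * Editorial note: the bit-banged sequence above is a standard IEEE
 * 802.3 clause 22 management frame, clocked out MSB first after the
 * 32-bit preamble from nge_mii_sync():
 *
 *	<2-bit start> <2-bit opcode> <5-bit phyaddr> <5-bit regaddr>
 *	<2-bit turnaround> <16-bit data>
 *
 * On reads the PHY drives the turnaround (the ack sampled above);
 * on writes the host drives NGE_MII_TURNAROUND itself.
 */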
483 
484 /*
485  * Write to a PHY register through the MII.
486  */
487 int
488 nge_mii_writereg(sc, frame)
489 	struct nge_softc		*sc;
490 	struct nge_mii_frame	*frame;
491 {
492 	int			s;
493 
494 	s = splnet();
495 	/*
496 	 * Set up frame for TX.
497 	 */
498 
499 	frame->mii_stdelim = NGE_MII_STARTDELIM;
500 	frame->mii_opcode = NGE_MII_WRITEOP;
501 	frame->mii_turnaround = NGE_MII_TURNAROUND;
502 
503 	/*
504 	 * Turn on data output.
505 	 */
506 	SIO_SET(NGE_MEAR_MII_DIR);
507 
508 	nge_mii_sync(sc);
509 
510 	nge_mii_send(sc, frame->mii_stdelim, 2);
511 	nge_mii_send(sc, frame->mii_opcode, 2);
512 	nge_mii_send(sc, frame->mii_phyaddr, 5);
513 	nge_mii_send(sc, frame->mii_regaddr, 5);
514 	nge_mii_send(sc, frame->mii_turnaround, 2);
515 	nge_mii_send(sc, frame->mii_data, 16);
516 
517 	/* Idle bit. */
518 	SIO_SET(NGE_MEAR_MII_CLK);
519 	DELAY(1);
520 	SIO_CLR(NGE_MEAR_MII_CLK);
521 	DELAY(1);
522 
523 	/*
524 	 * Turn off xmit.
525 	 */
526 	SIO_CLR(NGE_MEAR_MII_DIR);
527 
528 	splx(s);
529 
530 	return(0);
531 }
532 
533 int
534 nge_miibus_readreg(dev, phy, reg)
535 	struct device		*dev;
536 	int			phy, reg;
537 {
538 	struct nge_softc	*sc = (struct nge_softc *)dev;
539 	struct nge_mii_frame	frame;
540 
541 	DPRINTFN(9, ("%s: nge_miibus_readreg\n", sc->sc_dv.dv_xname));
542 
543 	bzero(&frame, sizeof(frame));
544 
545 	frame.mii_phyaddr = phy;
546 	frame.mii_regaddr = reg;
547 	nge_mii_readreg(sc, &frame);
548 
549 	return(frame.mii_data);
550 }
551 
552 void
553 nge_miibus_writereg(dev, phy, reg, data)
554 	struct device		*dev;
555 	int			phy, reg, data;
556 {
557 	struct nge_softc	*sc = (struct nge_softc *)dev;
558 	struct nge_mii_frame	frame;
559 
560 
561 	DPRINTFN(9, ("%s: nge_miibus_writereg\n", sc->sc_dv.dv_xname));
562 
563 	bzero(&frame, sizeof(frame));
564 
565 	frame.mii_phyaddr = phy;
566 	frame.mii_regaddr = reg;
567 	frame.mii_data = data;
568 	nge_mii_writereg(sc, &frame);
569 }
570 
571 void
572 nge_miibus_statchg(dev)
573 	struct device		*dev;
574 {
575 	struct nge_softc	*sc = (struct nge_softc *)dev;
576 	struct mii_data		*mii = &sc->nge_mii;
577 	u_int32_t		txcfg, rxcfg;
578 
579 	txcfg = CSR_READ_4(sc, NGE_TX_CFG);
580 	rxcfg = CSR_READ_4(sc, NGE_RX_CFG);
581 
582 	DPRINTFN(4, ("%s: nge_miibus_statchg txcfg=%#x, rxcfg=%#x\n",
583 		     sc->sc_dv.dv_xname, txcfg, rxcfg));
584 
585 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
586 		txcfg |= (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
587 		rxcfg |= (NGE_RXCFG_RX_FDX);
588 	} else {
589 		txcfg &= ~(NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
590 		rxcfg &= ~(NGE_RXCFG_RX_FDX);
591 	}
592 
593 	txcfg |= NGE_TXCFG_AUTOPAD;
594 
595 	CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
596 	CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);
597 
598 	/* If we have a 1000Mbps link, set the mode_1000 bit. */
599 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
600 		NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
601 	else
602 		NGE_CLRBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
603 }
604 
605 void
606 nge_setmulti(sc)
607 	struct nge_softc	*sc;
608 {
609 	struct arpcom		*ac = &sc->arpcom;
610 	struct ifnet		*ifp = &ac->ac_if;
611 	struct ether_multi      *enm;
612 	struct ether_multistep  step;
613 	u_int32_t		h = 0, i, filtsave;
614 	int			bit, index;
615 
616 allmulti:
617 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
618 		NGE_CLRBIT(sc, NGE_RXFILT_CTL,
619 		    NGE_RXFILTCTL_MCHASH|NGE_RXFILTCTL_UCHASH);
620 		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLMULTI);
621 		return;
622 	}
623 
624 	/*
625 	 * We have to explicitly enable the multicast hash table
626 	 * on the NatSemi chip if we want to use it, which we do.
627 	 * We also have to tell it that we don't want to use the
628 	 * hash table for matching unicast addresses.
629 	 */
630 	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_MCHASH);
631 	NGE_CLRBIT(sc, NGE_RXFILT_CTL,
632 	    NGE_RXFILTCTL_ALLMULTI|NGE_RXFILTCTL_UCHASH);
633 
634 	filtsave = CSR_READ_4(sc, NGE_RXFILT_CTL);
635 
636 	/* first, zot all the existing hash bits */
637 	for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) {
638 		CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i);
639 		CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0);
640 	}
641 
642 	/*
643 	 * From the 11 bits returned by the crc routine, the top 7
644 	 * bits represent the 16-bit word in the mcast hash table
645 	 * that needs to be updated, and the lower 4 bits represent
646 	 * which bit within that word needs to be set.
647 	 */
648 	ETHER_FIRST_MULTI(step, ac, enm);
649 	while (enm != NULL) {
650 		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
651 			ifp->if_flags |= IFF_ALLMULTI;
652 			goto allmulti;
653 		}
654 		h = (ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 21) &
655 		    0x00000FFF;
656 		index = (h >> 4) & 0x7F;
657 		bit = h & 0xF;
658 		CSR_WRITE_4(sc, NGE_RXFILT_CTL,
659 		    NGE_FILTADDR_MCAST_LO + (index * 2));
660 		NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit));
661 		ETHER_NEXT_MULTI(step, enm);
662 	}
663 
664 	CSR_WRITE_4(sc, NGE_RXFILT_CTL, filtsave);
665 }
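
/*
 * Worked example (editorial, hypothetical address): if the big-endian
 * CRC32 of a group address is 0xD5C2xxxx, then h = (crc >> 21) & 0xFFF
 * = 0x6AE, index = (h >> 4) & 0x7F = 0x6A (i.e. filter address
 * NGE_FILTADDR_MCAST_LO + 0xD4) and bit = h & 0xF = 0xE within that
 * 16-bit filter word.
 */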
666 
667 void
668 nge_reset(sc)
669 	struct nge_softc	*sc;
670 {
671 	int			i;
672 
673 	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET);
674 
675 	for (i = 0; i < NGE_TIMEOUT; i++) {
676 		if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET))
677 			break;
678 	}
679 
680 	if (i == NGE_TIMEOUT)
681 		printf("%s: reset never completed\n", sc->sc_dv.dv_xname);
682 
683 	/* Wait a little while for the chip to get its brains in order. */
684 	DELAY(1000);
685 
686 	/*
687 	 * If this is a NatSemi chip, make sure to clear
688 	 * PME mode.
689 	 */
690 	CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS);
691 	CSR_WRITE_4(sc, NGE_CLKRUN, 0);
692 }
693 
694 /*
695  * Probe for a NatSemi chip. Check the PCI vendor and
696  * device IDs and return 1 if we find a match.
697  */
698 int
699 nge_probe(parent, match, aux)
700 	struct device *parent;
701 	void *match;
702 	void *aux;
703 {
704 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
705 
706 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NS &&
707 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NS_DP83820)
708 		return (1);
709 
710 	return (0);
711 }
712 
713 /*
714  * Attach the interface. Allocate softc structures, do ifmedia
715  * setup and ethernet/BPF attach.
716  */
717 void
718 nge_attach(parent, self, aux)
719 	struct device *parent, *self;
720 	void *aux;
721 {
722 	struct nge_softc	*sc = (struct nge_softc *)self;
723 	struct pci_attach_args	*pa = aux;
724 	pci_chipset_tag_t	pc = pa->pa_pc;
725 	pci_intr_handle_t	ih;
726 	const char		*intrstr = NULL;
727 	bus_size_t		size;
728 	bus_dma_segment_t	seg;
729 	bus_dmamap_t		dmamap;
730 	int			rseg;
731 	u_char			eaddr[ETHER_ADDR_LEN];
732 	pcireg_t		command;
733 #ifndef NGE_USEIOSPACE
734 	pcireg_t		memtype;
735 #endif
736 	struct ifnet		*ifp;
737 	caddr_t			kva;
738 
739 	/*
740 	 * Handle power management nonsense.
741 	 */
742 	DPRINTFN(5, ("%s: preparing for conf read\n", sc->sc_dv.dv_xname));
743 	command = pci_conf_read(pc, pa->pa_tag, NGE_PCI_CAPID) & 0x000000FF;
744 	if (command == 0x01) {
745 		command = pci_conf_read(pc, pa->pa_tag, NGE_PCI_PWRMGMTCTRL);
746 		if (command & NGE_PSTATE_MASK) {
747 			pcireg_t	iobase, membase, irq;
748 
749 			/* Save important PCI config data. */
750 			iobase = pci_conf_read(pc, pa->pa_tag, NGE_PCI_LOIO);
751 			membase = pci_conf_read(pc, pa->pa_tag, NGE_PCI_LOMEM);
752 			irq = pci_conf_read(pc, pa->pa_tag, NGE_PCI_INTLINE);
753 
754 			/* Reset the power state. */
755 			printf("%s: chip is in D%d power mode "
756 			       "-- setting to D0\n", sc->sc_dv.dv_xname,
757 			       command & NGE_PSTATE_MASK);
758 			command &= 0xFFFFFFFC;
759 			pci_conf_write(pc, pa->pa_tag,
760 				       NGE_PCI_PWRMGMTCTRL, command);
761 
762 			/* Restore PCI config data. */
763 			pci_conf_write(pc, pa->pa_tag, NGE_PCI_LOIO, iobase);
764 			pci_conf_write(pc, pa->pa_tag, NGE_PCI_LOMEM, membase);
765 			pci_conf_write(pc, pa->pa_tag, NGE_PCI_INTLINE, irq);
766 		}
767 	}
768 
769 	/*
770 	 * Map control/status registers.
771 	 */
772 	DPRINTFN(5, ("%s: map control/status regs\n", sc->sc_dv.dv_xname));
773 
774 #ifdef NGE_USEIOSPACE
775 	DPRINTFN(5, ("%s: pci_mapreg_map\n", sc->sc_dv.dv_xname));
776 	if (pci_mapreg_map(pa, NGE_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
777 	    &sc->nge_btag, &sc->nge_bhandle, NULL, &size, 0)) {
778 		printf(": can't map i/o space\n");
779 		return;
780 	}
781 #else
782 	DPRINTFN(5, ("%s: pci_mapreg_map\n", sc->sc_dv.dv_xname));
783 	memtype = pci_mapreg_type(pc, pa->pa_tag, NGE_PCI_LOMEM);
784 	if (pci_mapreg_map(pa, NGE_PCI_LOMEM, memtype, 0, &sc->nge_btag,
785 	    &sc->nge_bhandle, NULL, &size, 0)) {
786 		printf(": can't map mem space\n");
787 		return;
788 	}
789 #endif
790 
791 	/* Disable all interrupts */
792 	CSR_WRITE_4(sc, NGE_IER, 0);
793 
794 	DPRINTFN(5, ("%s: pci_intr_map\n", sc->sc_dv.dv_xname));
795 	if (pci_intr_map(pa, &ih)) {
796 		printf(": couldn't map interrupt\n");
797 		goto fail_1;
798 	}
799 
800 	DPRINTFN(5, ("%s: pci_intr_string\n", sc->sc_dv.dv_xname));
801 	intrstr = pci_intr_string(pc, ih);
802 	DPRINTFN(5, ("%s: pci_intr_establish\n", sc->sc_dv.dv_xname));
803 	sc->nge_intrhand = pci_intr_establish(pc, ih, IPL_NET, nge_intr, sc,
804 					      sc->sc_dv.dv_xname);
805 	if (sc->nge_intrhand == NULL) {
806 		printf(": couldn't establish interrupt");
807 		if (intrstr != NULL)
808 			printf(" at %s", intrstr);
809 		printf("\n");
810 		goto fail_1;
811 	}
812 	printf(": %s", intrstr);
813 
814 	/* Reset the adapter. */
815 	DPRINTFN(5, ("%s: nge_reset\n", sc->sc_dv.dv_xname));
816 	nge_reset(sc);
817 
818 	/*
819 	 * Get station address from the EEPROM.
820 	 */
821 	DPRINTFN(5, ("%s: nge_read_eeprom\n", sc->sc_dv.dv_xname));
822 	nge_read_eeprom(sc, (caddr_t)&eaddr[4], NGE_EE_NODEADDR, 1, 0);
823 	nge_read_eeprom(sc, (caddr_t)&eaddr[2], NGE_EE_NODEADDR + 1, 1, 0);
824 	nge_read_eeprom(sc, (caddr_t)&eaddr[0], NGE_EE_NODEADDR + 2, 1, 0);
825 
826 	/*
827 	 * A NatSemi chip was detected. Inform the world.
828 	 */
829 	printf(", address %s\n", ether_sprintf(eaddr));
830 
831 	bcopy(eaddr, &sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
832 
833 	sc->sc_dmatag = pa->pa_dmat;
834 	DPRINTFN(5, ("%s: bus_dmamem_alloc\n", sc->sc_dv.dv_xname));
835 	if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct nge_list_data),
836 			     PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT |
837 			     BUS_DMA_ZERO)) {
838 		printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname);
839 		goto fail_2;
840 	}
841 	DPRINTFN(5, ("%s: bus_dmamem_map\n", sc->sc_dv.dv_xname));
842 	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
843 			   sizeof(struct nge_list_data), &kva,
844 			   BUS_DMA_NOWAIT)) {
845 		printf("%s: can't map dma buffers (%zu bytes)\n",
846 		       sc->sc_dv.dv_xname, sizeof(struct nge_list_data));
847 		goto fail_3;
848 	}
849 	DPRINTFN(5, ("%s: bus_dmamap_create\n", sc->sc_dv.dv_xname));
850 	if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct nge_list_data), 1,
851 			      sizeof(struct nge_list_data), 0,
852 			      BUS_DMA_NOWAIT, &dmamap)) {
853 		printf("%s: can't create dma map\n", sc->sc_dv.dv_xname);
854 		goto fail_4;
855 	}
856 	DPRINTFN(5, ("%s: bus_dmamap_load\n", sc->sc_dv.dv_xname));
857 	if (bus_dmamap_load(sc->sc_dmatag, dmamap, kva,
858 			    sizeof(struct nge_list_data), NULL,
859 			    BUS_DMA_NOWAIT)) {
860 		goto fail_5;
861 	}
862 
863 	DPRINTFN(5, ("%s: nge_ldata\n", sc->sc_dv.dv_xname));
864 	sc->nge_ldata = (struct nge_list_data *)kva;
865 
866 	/* Try to allocate memory for jumbo buffers. */
867 	DPRINTFN(5, ("%s: nge_alloc_jumbo_mem\n", sc->sc_dv.dv_xname));
868 	if (nge_alloc_jumbo_mem(sc)) {
869 		printf("%s: jumbo buffer allocation failed\n",
870 		       sc->sc_dv.dv_xname);
871 		goto fail_5;
872 	}
873 
874 	ifp = &sc->arpcom.ac_if;
875 	ifp->if_softc = sc;
876 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
877 	ifp->if_ioctl = nge_ioctl;
878 	ifp->if_start = nge_start;
879 	ifp->if_watchdog = nge_watchdog;
880 	ifp->if_baudrate = 1000000000;
881 	ifp->if_hardmtu = NGE_JUMBO_MTU;
882 	IFQ_SET_MAXLEN(&ifp->if_snd, NGE_TX_LIST_CNT - 1);
883 	IFQ_SET_READY(&ifp->if_snd);
884 	DPRINTFN(5, ("%s: bcopy\n", sc->sc_dv.dv_xname));
885 	bcopy(sc->sc_dv.dv_xname, ifp->if_xname, IFNAMSIZ);
886 
887 	ifp->if_capabilities = IFCAP_VLAN_MTU;
888 
889 #if NVLAN > 0
890 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
891 #endif
892 
893 	/*
894 	 * Do MII setup.
895 	 */
896 	DPRINTFN(5, ("%s: mii setup\n", sc->sc_dv.dv_xname));
897 	if (CSR_READ_4(sc, NGE_CFG) & NGE_CFG_TBI_EN) {
898 		DPRINTFN(5, ("%s: TBI mode\n", sc->sc_dv.dv_xname));
899 		sc->nge_tbi = 1;
900 
901 		ifmedia_init(&sc->nge_ifmedia, 0, nge_ifmedia_tbi_upd,
902 			     nge_ifmedia_tbi_sts);
903 
904 		ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_NONE, 0, NULL);
905 		ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
906 		ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
907 			    0, NULL);
908 		ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
909 
910 		ifmedia_set(&sc->nge_ifmedia, IFM_ETHER|IFM_AUTO);
911 
912 		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
913 			    | NGE_GPIO_GP4_OUT
914 			    | NGE_GPIO_GP1_OUTENB | NGE_GPIO_GP2_OUTENB
915 			    | NGE_GPIO_GP3_OUTENB | NGE_GPIO_GP4_OUTENB
916 			    | NGE_GPIO_GP5_OUTENB);
917 
918 		NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
919 	} else {
920 		sc->nge_mii.mii_ifp = ifp;
921 		sc->nge_mii.mii_readreg = nge_miibus_readreg;
922 		sc->nge_mii.mii_writereg = nge_miibus_writereg;
923 		sc->nge_mii.mii_statchg = nge_miibus_statchg;
924 
925 		ifmedia_init(&sc->nge_mii.mii_media, 0, nge_ifmedia_mii_upd,
926 			     nge_ifmedia_mii_sts);
927 		mii_attach(&sc->sc_dv, &sc->nge_mii, 0xffffffff, MII_PHY_ANY,
928 			   MII_OFFSET_ANY, 0);
929 
930 		if (LIST_FIRST(&sc->nge_mii.mii_phys) == NULL) {
931 
932 			printf("%s: no PHY found!\n", sc->sc_dv.dv_xname);
933 			ifmedia_add(&sc->nge_mii.mii_media,
934 				    IFM_ETHER|IFM_MANUAL, 0, NULL);
935 			ifmedia_set(&sc->nge_mii.mii_media,
936 				    IFM_ETHER|IFM_MANUAL);
937 		}
938 		else
939 			ifmedia_set(&sc->nge_mii.mii_media,
940 				    IFM_ETHER|IFM_AUTO);
941 	}
942 
943 	/*
944 	 * Call MI attach routine.
945 	 */
946 	DPRINTFN(5, ("%s: if_attach\n", sc->sc_dv.dv_xname));
947 	if_attach(ifp);
948 	DPRINTFN(5, ("%s: ether_ifattach\n", sc->sc_dv.dv_xname));
949 	ether_ifattach(ifp);
950 	DPRINTFN(5, ("%s: timeout_set\n", sc->sc_dv.dv_xname));
951 	timeout_set(&sc->nge_timeout, nge_tick, sc);
952 	timeout_add_sec(&sc->nge_timeout, 1);
953 	return;
954 
955 fail_5:
956 	bus_dmamap_destroy(sc->sc_dmatag, dmamap);
957 
958 fail_4:
959 	bus_dmamem_unmap(sc->sc_dmatag, kva,
960 	    sizeof(struct nge_list_data));
961 
962 fail_3:
963 	bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
964 
965 fail_2:
966 	pci_intr_disestablish(pc, sc->nge_intrhand);
967 
968 fail_1:
969 	bus_space_unmap(sc->nge_btag, sc->nge_bhandle, size);
970 }
971 
972 /*
973  * Initialize the transmit descriptors.
974  */
975 int
976 nge_list_tx_init(sc)
977 	struct nge_softc	*sc;
978 {
979 	struct nge_list_data	*ld;
980 	struct nge_ring_data	*cd;
981 	int			i;
982 
983 	cd = &sc->nge_cdata;
984 	ld = sc->nge_ldata;
985 
986 	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
987 		if (i == (NGE_TX_LIST_CNT - 1)) {
988 			ld->nge_tx_list[i].nge_nextdesc =
989 			    &ld->nge_tx_list[0];
990 			ld->nge_tx_list[i].nge_next =
991 			    VTOPHYS(&ld->nge_tx_list[0]);
992 		} else {
993 			ld->nge_tx_list[i].nge_nextdesc =
994 			    &ld->nge_tx_list[i + 1];
995 			ld->nge_tx_list[i].nge_next =
996 			    VTOPHYS(&ld->nge_tx_list[i + 1]);
997 		}
998 		ld->nge_tx_list[i].nge_mbuf = NULL;
999 		ld->nge_tx_list[i].nge_ptr = 0;
1000 		ld->nge_tx_list[i].nge_ctl = 0;
1001 	}
1002 
1003 	cd->nge_tx_prod = cd->nge_tx_cons = cd->nge_tx_cnt = 0;
1004 
1005 	return(0);
1006 }
1007 
1008 
1009 /*
1010  * Initialize the RX descriptors and allocate mbufs for them. Note that
1011  * we arrange the descriptors in a closed ring, so that the last descriptor
1012  * points back to the first.
1013  */
1014 int
1015 nge_list_rx_init(sc)
1016 	struct nge_softc	*sc;
1017 {
1018 	struct nge_list_data	*ld;
1019 	struct nge_ring_data	*cd;
1020 	int			i;
1021 
1022 	ld = sc->nge_ldata;
1023 	cd = &sc->nge_cdata;
1024 
1025 	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
1026 		if (nge_newbuf(sc, &ld->nge_rx_list[i], NULL) == ENOBUFS)
1027 			return(ENOBUFS);
1028 		if (i == (NGE_RX_LIST_CNT - 1)) {
1029 			ld->nge_rx_list[i].nge_nextdesc =
1030 			    &ld->nge_rx_list[0];
1031 			ld->nge_rx_list[i].nge_next =
1032 			    VTOPHYS(&ld->nge_rx_list[0]);
1033 		} else {
1034 			ld->nge_rx_list[i].nge_nextdesc =
1035 			    &ld->nge_rx_list[i + 1];
1036 			ld->nge_rx_list[i].nge_next =
1037 			    VTOPHYS(&ld->nge_rx_list[i + 1]);
1038 		}
1039 	}
1040 
1041 	cd->nge_rx_prod = 0;
1042 
1043 	return(0);
1044 }
1045 
1046 /*
1047  * Initialize an RX descriptor and attach an MBUF cluster.
1048  */
1049 int
1050 nge_newbuf(sc, c, m)
1051 	struct nge_softc	*sc;
1052 	struct nge_desc		*c;
1053 	struct mbuf		*m;
1054 {
1055 	struct mbuf		*m_new = NULL;
1056 
1057 	if (m == NULL) {
1058 		caddr_t buf = NULL;
1059 
1060 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1061 		if (m_new == NULL)
1062 			return (ENOBUFS);
1063 
1064 		/* Allocate the jumbo buffer */
1065 		buf = nge_jalloc(sc);
1066 		if (buf == NULL) {
1067 			m_freem(m_new);
1068 			return (ENOBUFS);
1069 		}
1070 
1071 		/* Attach the buffer to the mbuf */
1072 		m_new->m_len = m_new->m_pkthdr.len = NGE_MCLBYTES;
1073 		MEXTADD(m_new, buf, NGE_MCLBYTES, 0, nge_jfree, sc);
1074 	} else {
1075 		/*
1076 		 * We're re-using a previously allocated mbuf;
1077 		 * be sure to re-init pointers and lengths to
1078 		 * default values.
1079 		 */
1080 		m_new = m;
1081 		m_new->m_len = m_new->m_pkthdr.len = NGE_MCLBYTES;
1082 		m_new->m_data = m_new->m_ext.ext_buf;
1083 	}
1084 
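	/*
	 * Editorial note: advancing the data pointer by 8 bytes keeps
	 * the address handed to the chip on a 64-bit boundary, the RX
	 * buffer alignment constraint noted at the top of this file.
	 */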
1085 	m_adj(m_new, sizeof(u_int64_t));
1086 
1087 	c->nge_mbuf = m_new;
1088 	c->nge_ptr = VTOPHYS(mtod(m_new, caddr_t));
1089 	DPRINTFN(7,("%s: c->nge_ptr=%#x\n", sc->sc_dv.dv_xname,
1090 		    c->nge_ptr));
1091 	c->nge_ctl = m_new->m_len;
1092 	c->nge_extsts = 0;
1093 
1094 	return(0);
1095 }
1096 
1097 int
1098 nge_alloc_jumbo_mem(sc)
1099 	struct nge_softc	*sc;
1100 {
1101 	caddr_t			ptr, kva;
1102 	bus_dma_segment_t	seg;
1103 	bus_dmamap_t		dmamap;
1104 	int			i, rseg, state, error;
1105 	struct nge_jpool_entry	*entry;
1106 
1107 	state = error = 0;
1108 
1109 	if (bus_dmamem_alloc(sc->sc_dmatag, NGE_JMEM, PAGE_SIZE, 0,
1110 			     &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
1111 		printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname);
1112 		return (ENOBUFS);
1113 	}
1114 
1115 	state = 1;
1116 	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, NGE_JMEM, &kva,
1117 			   BUS_DMA_NOWAIT)) {
1118 		printf("%s: can't map dma buffers (%d bytes)\n",
1119 		       sc->sc_dv.dv_xname, NGE_JMEM);
1120 		error = ENOBUFS;
1121 		goto out;
1122 	}
1123 
1124 	state = 2;
1125 	if (bus_dmamap_create(sc->sc_dmatag, NGE_JMEM, 1,
1126 			      NGE_JMEM, 0, BUS_DMA_NOWAIT, &dmamap)) {
1127 		printf("%s: can't create dma map\n", sc->sc_dv.dv_xname);
1128 		error = ENOBUFS;
1129 		goto out;
1130 	}
1131 
1132 	state = 3;
1133 	if (bus_dmamap_load(sc->sc_dmatag, dmamap, kva, NGE_JMEM,
1134 			    NULL, BUS_DMA_NOWAIT)) {
1135 		printf("%s: can't load dma map\n", sc->sc_dv.dv_xname);
1136 		error = ENOBUFS;
1137 		goto out;
1138         }
1139 
1140 	state = 4;
1141 	sc->nge_cdata.nge_jumbo_buf = (caddr_t)kva;
1142 	DPRINTFN(1,("%s: nge_jumbo_buf=%#x, NGE_MCLBYTES=%#x\n",
1143 		    sc->sc_dv.dv_xname , sc->nge_cdata.nge_jumbo_buf,
1144 		    NGE_MCLBYTES));
1145 
1146 	LIST_INIT(&sc->nge_jfree_listhead);
1147 	LIST_INIT(&sc->nge_jinuse_listhead);
1148 
1149 	/*
1150 	 * Now divide it up into NGE_MCLBYTES-sized pieces and save
1151 	 * the addresses in an array. nge_jfree() needs the softc
1152 	 * pointer to find the pool again; it is handed over as the
1153 	 * extra argument registered with MEXTADD() in nge_newbuf(),
1154 	 * since the mbuf code will not pass it to us on its own
1155 	 * when the external storage is freed.
1156 	 */
1157 	ptr = sc->nge_cdata.nge_jumbo_buf;
1158 	for (i = 0; i < NGE_JSLOTS; i++) {
1159 		sc->nge_cdata.nge_jslots[i].nge_buf = ptr;
1160 		sc->nge_cdata.nge_jslots[i].nge_inuse = 0;
1161 		ptr += NGE_MCLBYTES;
1162 		entry = malloc(sizeof(struct nge_jpool_entry),
1163 			       M_DEVBUF, M_NOWAIT);
1164 		if (entry == NULL) {
1165 			sc->nge_cdata.nge_jumbo_buf = NULL;
1166 			printf("%s: no memory for jumbo buffer queue!\n",
1167 			       sc->sc_dv.dv_xname);
1168 			error = ENOBUFS;
1169 			goto out;
1170 		}
1171 		entry->slot = i;
1172 		LIST_INSERT_HEAD(&sc->nge_jfree_listhead, entry,
1173 				 jpool_entries);
1174 	}
1175 out:
1176 	if (error != 0) {
1177 		switch (state) {
1178 		case 4:
1179 			bus_dmamap_unload(sc->sc_dmatag, dmamap);
1180 		case 3:
1181 			bus_dmamap_destroy(sc->sc_dmatag, dmamap);
1182 		case 2:
1183 			bus_dmamem_unmap(sc->sc_dmatag, kva, NGE_JMEM);
1184 		case 1:
1185 			bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
1186 			break;
1187 		default:
1188 			break;
1189 		}
1190 	}
1191 
1192 	return (error);
1193 }
1194 
1195 /*
1196  * Allocate a jumbo buffer.
1197  */
1198 void *
1199 nge_jalloc(sc)
1200 	struct nge_softc	*sc;
1201 {
1202 	struct nge_jpool_entry   *entry;
1203 
1204 	entry = LIST_FIRST(&sc->nge_jfree_listhead);
1205 
1206 	if (entry == NULL)
1207 		return (NULL);
1208 
1209 	LIST_REMOVE(entry, jpool_entries);
1210 	LIST_INSERT_HEAD(&sc->nge_jinuse_listhead, entry, jpool_entries);
1211 	sc->nge_cdata.nge_jslots[entry->slot].nge_inuse = 1;
1212 	return(sc->nge_cdata.nge_jslots[entry->slot].nge_buf);
1213 }
1214 
1215 /*
1216  * Release a jumbo buffer.
1217  */
1218 void
1219 nge_jfree(buf, size, arg)
1220 	caddr_t		buf;
1221 	u_int		size;
1222 	void		*arg;
1223 {
1224 	struct nge_softc	*sc;
1225 	int		        i;
1226 	struct nge_jpool_entry *entry;
1227 
1228 	/* Extract the softc struct pointer. */
1229 	sc = (struct nge_softc *)arg;
1230 
1231 	if (sc == NULL)
1232 		panic("nge_jfree: can't find softc pointer!");
1233 
1234 	/* calculate the slot this buffer belongs to */
1235 
1236 	i = ((vaddr_t)buf - (vaddr_t)sc->nge_cdata.nge_jumbo_buf)
1237 	  / NGE_MCLBYTES;
1238 
1239 	if ((i < 0) || (i >= NGE_JSLOTS))
1240 		panic("nge_jfree: asked to free buffer that we don't manage!");
1241 	else if (sc->nge_cdata.nge_jslots[i].nge_inuse == 0)
1242 		panic("nge_jfree: buffer already free!");
1243 	else {
1244 		sc->nge_cdata.nge_jslots[i].nge_inuse--;
1245 		if(sc->nge_cdata.nge_jslots[i].nge_inuse == 0) {
1246 			entry = LIST_FIRST(&sc->nge_jinuse_listhead);
1247 			if (entry == NULL)
1248 				panic("nge_jfree: buffer not in use!");
1249 			entry->slot = i;
1250 			LIST_REMOVE(entry, jpool_entries);
1251 			LIST_INSERT_HEAD(&sc->nge_jfree_listhead,
1252 					 entry, jpool_entries);
1253 		}
1254 	}
1255 }
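
/*
 * Editorial sketch of the pool life cycle: nge_newbuf() pairs each
 * nge_jalloc() with MEXTADD() so that the mbuf layer invokes
 * nge_jfree() once the last reference to the external storage is
 * dropped.  A minimal allocation helper would look like:
 */
#if 0
static struct mbuf *
nge_jumbo_mbuf(struct nge_softc *sc)
{
	struct mbuf	*m;
	caddr_t		buf;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	if ((buf = nge_jalloc(sc)) == NULL) {
		m_freem(m);
		return (NULL);
	}
	m->m_len = m->m_pkthdr.len = NGE_MCLBYTES;
	/* freeing m now ends up calling nge_jfree(buf, NGE_MCLBYTES, sc) */
	MEXTADD(m, buf, NGE_MCLBYTES, 0, nge_jfree, sc);
	return (m);
}
#endif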
1256 
1257 /*
1258  * A frame has been uploaded: pass the resulting mbuf chain up to
1259  * the higher level protocols.
1260  */
1261 void
1262 nge_rxeof(sc)
1263 	struct nge_softc	*sc;
1264 {
1265         struct mbuf		*m;
1266         struct ifnet		*ifp;
1267 	struct nge_desc		*cur_rx;
1268 	int			i, total_len = 0;
1269 	u_int32_t		rxstat;
1270 
1271 	ifp = &sc->arpcom.ac_if;
1272 	i = sc->nge_cdata.nge_rx_prod;
1273 
1274 	while (NGE_OWNDESC(&sc->nge_ldata->nge_rx_list[i])) {
1275 		struct mbuf		*m0 = NULL;
1276 		u_int32_t		extsts;
1277 
1278 		cur_rx = &sc->nge_ldata->nge_rx_list[i];
1279 		rxstat = cur_rx->nge_rxstat;
1280 		extsts = cur_rx->nge_extsts;
1281 		m = cur_rx->nge_mbuf;
1282 		cur_rx->nge_mbuf = NULL;
1283 		total_len = NGE_RXBYTES(cur_rx);
1284 		NGE_INC(i, NGE_RX_LIST_CNT);
1285 
1286 		/*
1287 		 * If an error occurs, update stats, clear the
1288 		 * status word and leave the mbuf cluster in place:
1289 		 * it should simply get re-used next time this descriptor
1290 		 * comes up in the ring.
1291 		 */
1292 		if (!(rxstat & NGE_CMDSTS_PKT_OK)) {
1293 #if NVLAN > 0
1294 			if ((rxstat & NGE_RXSTAT_RUNT) &&
1295 			    total_len >= (ETHER_MIN_LEN - ETHER_CRC_LEN -
1296 			    ETHER_VLAN_ENCAP_LEN)) {
1297 				/*
1298 				 * Work around a hardware bug: accept runt
1299 				 * frames whose length is at least 56, i.e.
1300 				 * ETHER_MIN_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN.
1301 				 */
1302 			} else {
1303 #endif
1304 				ifp->if_ierrors++;
1305 				nge_newbuf(sc, cur_rx, m);
1306 				continue;
1307 #if NVLAN > 0
1308 			}
1309 #endif
1310 		}
1311 
1312 		/*
1313 		 * Ok. NatSemi really screwed up here. This is the
1314 		 * only gigE chip I know of with alignment constraints
1315 		 * on receive buffers. RX buffers must be 64-bit aligned.
1316 		 */
1317 #ifndef __STRICT_ALIGNMENT
1318 		/*
1319 		 * By popular demand, ignore the alignment problems
1320 		 * on the Intel x86 platform. The performance hit
1321 		 * incurred due to unaligned accesses is much smaller
1322 		 * than the hit produced by forcing buffer copies all
1323 		 * the time, especially with jumbo frames. We still
1324 		 * need to fix up the alignment everywhere else though.
1325 		 */
1326 		if (nge_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
1327 #endif
1328 			m0 = m_devget(mtod(m, char *), total_len,
1329 			    ETHER_ALIGN, ifp, NULL);
1330 			nge_newbuf(sc, cur_rx, m);
1331 			if (m0 == NULL) {
1332 				ifp->if_ierrors++;
1333 				continue;
1334 			}
1335 			m_adj(m0, ETHER_ALIGN);
1336 			m = m0;
1337 #ifndef __STRICT_ALIGNMENT
1338 		} else {
1339 			m->m_pkthdr.rcvif = ifp;
1340 			m->m_pkthdr.len = m->m_len = total_len;
1341 		}
1342 #endif
1343 
1344 		ifp->if_ipackets++;
1345 
1346 #if NVLAN > 0
1347 		if (extsts & NGE_RXEXTSTS_VLANPKT) {
1348 			m->m_pkthdr.ether_vtag =
1349 			    ntohs(extsts & NGE_RXEXTSTS_VTCI);
1350 			m->m_flags |= M_VLANTAG;
1351 		}
1352 #endif
1353 
1354 #if NBPFILTER > 0
1355 		/*
1356 		 * Handle BPF listeners. Let the BPF user see the packet.
1357 		 */
1358 		if (ifp->if_bpf)
1359 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
1360 #endif
1361 
1362 		/* Do IP checksum checking. */
1363 		if (extsts & NGE_RXEXTSTS_IPPKT) {
1364 			if (!(extsts & NGE_RXEXTSTS_IPCSUMERR))
1365 				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
1366 			if ((extsts & NGE_RXEXTSTS_TCPPKT) &&
1367 			    (!(extsts & NGE_RXEXTSTS_TCPCSUMERR)))
1368 				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
1369 			else if ((extsts & NGE_RXEXTSTS_UDPPKT) &&
1370 				 (!(extsts & NGE_RXEXTSTS_UDPCSUMERR)))
1371 				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
1372 		}
1373 
1374 		ether_input_mbuf(ifp, m);
1375 	}
1376 
1377 	sc->nge_cdata.nge_rx_prod = i;
1378 }
1379 
1380 /*
1381  * A frame was downloaded to the chip. It's safe for us to clean up
1382  * the list buffers.
1383  */
1384 
1385 void
1386 nge_txeof(sc)
1387 	struct nge_softc	*sc;
1388 {
1389 	struct nge_desc		*cur_tx;
1390 	struct ifnet		*ifp;
1391 	u_int32_t		idx;
1392 
1393 	ifp = &sc->arpcom.ac_if;
1394 
1395 	/*
1396 	 * Go through our tx list and free mbufs for those
1397 	 * frames that have been transmitted.
1398 	 */
1399 	idx = sc->nge_cdata.nge_tx_cons;
1400 	while (idx != sc->nge_cdata.nge_tx_prod) {
1401 		cur_tx = &sc->nge_ldata->nge_tx_list[idx];
1402 
1403 		if (NGE_OWNDESC(cur_tx))
1404 			break;
1405 
1406 		if (cur_tx->nge_ctl & NGE_CMDSTS_MORE) {
1407 			sc->nge_cdata.nge_tx_cnt--;
1408 			NGE_INC(idx, NGE_TX_LIST_CNT);
1409 			continue;
1410 		}
1411 
1412 		if (!(cur_tx->nge_ctl & NGE_CMDSTS_PKT_OK)) {
1413 			ifp->if_oerrors++;
1414 			if (cur_tx->nge_txstat & NGE_TXSTAT_EXCESSCOLLS)
1415 				ifp->if_collisions++;
1416 			if (cur_tx->nge_txstat & NGE_TXSTAT_OUTOFWINCOLL)
1417 				ifp->if_collisions++;
1418 		}
1419 
1420 		ifp->if_collisions +=
1421 		    (cur_tx->nge_txstat & NGE_TXSTAT_COLLCNT) >> 16;
1422 
1423 		ifp->if_opackets++;
1424 		if (cur_tx->nge_mbuf != NULL) {
1425 			m_freem(cur_tx->nge_mbuf);
1426 			cur_tx->nge_mbuf = NULL;
1427 			ifp->if_flags &= ~IFF_OACTIVE;
1428 		}
1429 
1430 		sc->nge_cdata.nge_tx_cnt--;
1431 		NGE_INC(idx, NGE_TX_LIST_CNT);
1432 	}
1433 
1434 	sc->nge_cdata.nge_tx_cons = idx;
1435 
1436 	if (idx == sc->nge_cdata.nge_tx_prod)
1437 		ifp->if_timer = 0;
1438 }
1439 
1440 void
1441 nge_tick(xsc)
1442 	void			*xsc;
1443 {
1444 	struct nge_softc	*sc = xsc;
1445 	struct mii_data		*mii = &sc->nge_mii;
1446 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1447 	int			s;
1448 
1449 	s = splnet();
1450 
1451 	DPRINTFN(10, ("%s: nge_tick: link=%d\n", sc->sc_dv.dv_xname,
1452 		      sc->nge_link));
1453 
1454 	timeout_add_sec(&sc->nge_timeout, 1);
1455 	if (sc->nge_link) {
1456 		splx(s);
1457 		return;
1458 	}
1459 
1460 	if (sc->nge_tbi) {
1461 		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
1462 		    == IFM_AUTO) {
1463 			u_int32_t bmsr, anlpar, txcfg, rxcfg;
1464 
1465 			bmsr = CSR_READ_4(sc, NGE_TBI_BMSR);
1466 			DPRINTFN(2, ("%s: nge_tick: bmsr=%#x\n",
1467 				     sc->sc_dv.dv_xname, bmsr));
1468 
1469 			if (!(bmsr & NGE_TBIBMSR_ANEG_DONE)) {
1470 				CSR_WRITE_4(sc, NGE_TBI_BMCR, 0);
1471 
1472 				splx(s);
1473 				return;
1474 			}
1475 
1476 			anlpar = CSR_READ_4(sc, NGE_TBI_ANLPAR);
1477 			txcfg = CSR_READ_4(sc, NGE_TX_CFG);
1478 			rxcfg = CSR_READ_4(sc, NGE_RX_CFG);
1479 
1480 			DPRINTFN(2, ("%s: nge_tick: anlpar=%#x, txcfg=%#x, "
1481 				     "rxcfg=%#x\n", sc->sc_dv.dv_xname, anlpar,
1482 				     txcfg, rxcfg));
1483 
1484 			if (anlpar == 0 || anlpar & NGE_TBIANAR_FDX) {
1485 				txcfg |= (NGE_TXCFG_IGN_HBEAT|
1486 					  NGE_TXCFG_IGN_CARR);
1487 				rxcfg |= NGE_RXCFG_RX_FDX;
1488 			} else {
1489 				txcfg &= ~(NGE_TXCFG_IGN_HBEAT|
1490 					   NGE_TXCFG_IGN_CARR);
1491 				rxcfg &= ~(NGE_RXCFG_RX_FDX);
1492 			}
1493 			txcfg |= NGE_TXCFG_AUTOPAD;
1494 			CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
1495 			CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);
1496 		}
1497 
1498 		DPRINTF(("%s: gigabit link up\n", sc->sc_dv.dv_xname));
1499 		sc->nge_link++;
1500 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
1501 			nge_start(ifp);
1502 	} else {
1503 		mii_tick(mii);
1504 		if (mii->mii_media_status & IFM_ACTIVE &&
1505 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1506 			sc->nge_link++;
1507 			if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
1508 				DPRINTF(("%s: gigabit link up\n",
1509 					 sc->sc_dv.dv_xname));
1510 			if (!IFQ_IS_EMPTY(&ifp->if_snd))
1511 				nge_start(ifp);
1512 		}
1513 
1514 	}
1515 
1516 	splx(s);
1517 }
1518 
1519 int
1520 nge_intr(arg)
1521 	void			*arg;
1522 {
1523 	struct nge_softc	*sc;
1524 	struct ifnet		*ifp;
1525 	u_int32_t		status;
1526 	int			claimed = 0;
1527 
1528 	sc = arg;
1529 	ifp = &sc->arpcom.ac_if;
1530 
1531 	/* Suppress unwanted interrupts */
1532 	if (!(ifp->if_flags & IFF_UP)) {
1533 		nge_stop(sc);
1534 		return (0);
1535 	}
1536 
1537 	/* Disable interrupts. */
1538 	CSR_WRITE_4(sc, NGE_IER, 0);
1539 
1540 	/* Data LED on for TBI mode */
1541 	if(sc->nge_tbi)
1542 		 CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
1543 			     | NGE_GPIO_GP3_OUT);
1544 
1545 	for (;;) {
1546 		/* Reading the ISR register clears all interrupts. */
1547 		status = CSR_READ_4(sc, NGE_ISR);
1548 
1549 		if ((status & NGE_INTRS) == 0)
1550 			break;
1551 
1552 		claimed = 1;
1553 
1554 		if ((status & NGE_ISR_TX_DESC_OK) ||
1555 		    (status & NGE_ISR_TX_ERR) ||
1556 		    (status & NGE_ISR_TX_OK) ||
1557 		    (status & NGE_ISR_TX_IDLE))
1558 			nge_txeof(sc);
1559 
1560 		if ((status & NGE_ISR_RX_DESC_OK) ||
1561 		    (status & NGE_ISR_RX_ERR) ||
1562 		    (status & NGE_ISR_RX_OFLOW) ||
1563 		    (status & NGE_ISR_RX_FIFO_OFLOW) ||
1564 		    (status & NGE_ISR_RX_IDLE) ||
1565 		    (status & NGE_ISR_RX_OK))
1566 			nge_rxeof(sc);
1567 
1568 		if ((status & NGE_ISR_RX_IDLE))
1569 			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
1570 
1571 		if (status & NGE_ISR_SYSERR) {
1572 			nge_reset(sc);
1573 			ifp->if_flags &= ~IFF_RUNNING;
1574 			nge_init(sc);
1575 		}
1576 
1577 #if 0
1578 		/*
1579 		 * XXX: nge_tick() is not ready to be called this way
1580 		 * it screws up the aneg timeout because mii_tick() is
1581 		 * only to be called once per second.
1582 		 */
1583 		if (status & NGE_IMR_PHY_INTR) {
1584 			sc->nge_link = 0;
1585 			nge_tick(sc);
1586 		}
1587 #endif
1588 	}
1589 
1590 	/* Re-enable interrupts. */
1591 	CSR_WRITE_4(sc, NGE_IER, 1);
1592 
1593 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1594 		nge_start(ifp);
1595 
1596 	/* Data LED off for TBI mode */
1597 	if(sc->nge_tbi)
1598 		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
1599 			    & ~NGE_GPIO_GP3_OUT);
1600 
1601 	return claimed;
1602 }
1603 
1604 /*
1605  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1606  * pointers to the fragment pointers.
1607  */
1608 int
1609 nge_encap(sc, m_head, txidx)
1610 	struct nge_softc	*sc;
1611 	struct mbuf		*m_head;
1612 	u_int32_t		*txidx;
1613 {
1614 	struct nge_desc		*f = NULL;
1615 	struct mbuf		*m;
1616 	int			frag, cur, cnt = 0;
1617 
1618 	/*
1619 	 * Start packing the mbufs in this chain into
1620 	 * the fragment pointers. Stop when we run out
1621 	 * of fragments or hit the end of the mbuf chain.
1622 	 */
1623 	m = m_head;
1624 	cur = frag = *txidx;
1625 
1626 	for (m = m_head; m != NULL; m = m->m_next) {
1627 		if (m->m_len != 0) {
1628 			if ((NGE_TX_LIST_CNT -
1629 			    (sc->nge_cdata.nge_tx_cnt + cnt)) < 2)
1630 				return(ENOBUFS);
1631 			f = &sc->nge_ldata->nge_tx_list[frag];
1632 			f->nge_ctl = NGE_CMDSTS_MORE | m->m_len;
1633 			f->nge_ptr = VTOPHYS(mtod(m, vaddr_t));
1634 			DPRINTFN(7,("%s: f->nge_ptr=%#x\n",
1635 				    sc->sc_dv.dv_xname, f->nge_ptr));
1636 			if (cnt != 0)
1637 				f->nge_ctl |= NGE_CMDSTS_OWN;
1638 			cur = frag;
1639 			NGE_INC(frag, NGE_TX_LIST_CNT);
1640 			cnt++;
1641 		}
1642 	}
1643 
1644 	if (m != NULL)
1645 		return(ENOBUFS);
1646 
1647 	sc->nge_ldata->nge_tx_list[*txidx].nge_extsts = 0;
1648 
1649 #if NVLAN > 0
1650 	if (m_head->m_flags & M_VLANTAG) {
1651 		sc->nge_ldata->nge_tx_list[cur].nge_extsts |=
1652 		    (NGE_TXEXTSTS_VLANPKT|htons(m_head->m_pkthdr.ether_vtag));
1653 	}
1654 #endif
1655 
1656 	sc->nge_ldata->nge_tx_list[cur].nge_mbuf = m_head;
1657 	sc->nge_ldata->nge_tx_list[cur].nge_ctl &= ~NGE_CMDSTS_MORE;
1658 	sc->nge_ldata->nge_tx_list[*txidx].nge_ctl |= NGE_CMDSTS_OWN;
1659 	sc->nge_cdata.nge_tx_cnt += cnt;
1660 	*txidx = frag;
1661 
1662 	return(0);
1663 }
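
/*
 * Editorial note on the ordering above: every descriptor except the
 * first is marked NGE_CMDSTS_OWN as it is filled, and the first one
 * only gets OWN after the whole chain (with NGE_CMDSTS_MORE cleared
 * on the final fragment) is in place, so the chip can never start
 * DMA on a half-built frame.
 */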
1664 
1665 /*
1666  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1667  * to the mbuf data regions directly in the transmit lists. We also save a
1668  * copy of the pointers since the transmit list fragment pointers are
1669  * physical addresses.
1670  */
1671 
1672 void
1673 nge_start(ifp)
1674 	struct ifnet		*ifp;
1675 {
1676 	struct nge_softc	*sc;
1677 	struct mbuf		*m_head = NULL;
1678 	u_int32_t		idx;
1679 	int			pkts = 0;
1680 
1681 	sc = ifp->if_softc;
1682 
1683 	if (!sc->nge_link)
1684 		return;
1685 
1686 	idx = sc->nge_cdata.nge_tx_prod;
1687 
1688 	if (ifp->if_flags & IFF_OACTIVE)
1689 		return;
1690 
1691 	while(sc->nge_ldata->nge_tx_list[idx].nge_mbuf == NULL) {
1692 		IFQ_POLL(&ifp->if_snd, m_head);
1693 		if (m_head == NULL)
1694 			break;
1695 
1696 		if (nge_encap(sc, m_head, &idx)) {
1697 			ifp->if_flags |= IFF_OACTIVE;
1698 			break;
1699 		}
1700 
1701 		/* now we are committed to transmit the packet */
1702 		/* now we are committed to transmitting the packet */
1703 		pkts++;
1704 
1705 #if NBPFILTER > 0
1706 		/*
1707 		 * If there's a BPF listener, bounce a copy of this frame
1708 		 * to him.
1709 		 */
1710 		if (ifp->if_bpf)
1711 			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
1712 #endif
1713 	}
1714 	if (pkts == 0)
1715 		return;
1716 
1717 	/* Transmit */
1718 	sc->nge_cdata.nge_tx_prod = idx;
1719 	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE);
1720 
1721 	/*
1722 	 * Set a timeout in case the chip goes out to lunch.
1723 	 */
1724 	ifp->if_timer = 5;
1725 }
1726 
1727 void
1728 nge_init(xsc)
1729 	void			*xsc;
1730 {
1731 	struct nge_softc	*sc = xsc;
1732 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1733 	struct mii_data		*mii;
1734 	u_int32_t		txcfg, rxcfg;
1735 	int			s, media;
1736 
1737 	if (ifp->if_flags & IFF_RUNNING)
1738 		return;
1739 
1740 	s = splnet();
1741 
1742 	/*
1743 	 * Cancel pending I/O and free all RX/TX buffers.
1744 	 */
1745 	nge_stop(sc);
1746 
1747 	mii = sc->nge_tbi ? NULL: &sc->nge_mii;
1748 
1749 	/* Set MAC address */
1750 	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0);
1751 	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
1752 	    ((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
1753 	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1);
1754 	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
1755 	    ((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
1756 	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2);
1757 	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
1758 	    ((u_int16_t *)sc->arpcom.ac_enaddr)[2]);
1759 
1760 	/* Init circular RX list. */
1761 	if (nge_list_rx_init(sc) == ENOBUFS) {
1762 		printf("%s: initialization failed: no "
1763 			"memory for rx buffers\n", sc->sc_dv.dv_xname);
1764 		nge_stop(sc);
1765 		splx(s);
1766 		return;
1767 	}
1768 
1769 	/*
1770 	 * Init tx descriptors.
1771 	 */
1772 	nge_list_tx_init(sc);
1773 
1774 	/*
1775 	 * For the NatSemi chip, we have to explicitly enable the
1776 	 * reception of ARP frames, as well as turn on the 'perfect
1777 	 * match' filter where we store the station address, otherwise
1778 	 * we won't receive unicasts meant for this host.
1779 	 */
1780 	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ARP);
1781 	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_PERFECT);
1782 
1783 	 /* If we want promiscuous mode, set the allframes bit. */
1784 	if (ifp->if_flags & IFF_PROMISC)
1785 		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
1786 	else
1787 		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
1788 
1789 	/*
1790 	 * Set the capture broadcast bit to capture broadcast frames.
1791 	 */
1792 	if (ifp->if_flags & IFF_BROADCAST)
1793 		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
1794 	else
1795 		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
1796 
1797 	/*
1798 	 * Load the multicast filter.
1799 	 */
1800 	nge_setmulti(sc);
1801 
1802 	/* Turn the receive filter on */
1803 	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ENABLE);
1804 
1805 	/*
1806 	 * Load the address of the RX and TX lists.
1807 	 */
1808 	CSR_WRITE_4(sc, NGE_RX_LISTPTR,
1809 	    VTOPHYS(&sc->nge_ldata->nge_rx_list[0]));
1810 	CSR_WRITE_4(sc, NGE_TX_LISTPTR,
1811 	    VTOPHYS(&sc->nge_ldata->nge_tx_list[0]));
1812 
1813 	/* Set RX configuration */
1814 	CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG);
1815 
1816 	/*
1817 	 * Enable hardware checksum validation for all IPv4
1818 	 * packets, do not reject packets with bad checksums.
1819 	 */
1820 	CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB);
1821 
1822 	/*
1823 	 * If VLAN support is enabled, tell the chip to detect
1824 	 * and strip VLAN tag info from received frames. The tag
1825 	 * will be provided in the extsts field in the RX descriptors.
1826 	 */
1827 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1828 		NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL,
1829 		    NGE_VIPRXCTL_TAG_DETECT_ENB | NGE_VIPRXCTL_TAG_STRIP_ENB);
1830 
1831 	/* Set TX configuration */
1832 	CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG);
1833 
1834 	/*
1835 	 * If VLAN support is enabled, tell the chip to insert
1836 	 * VLAN tags on a per-packet basis as dictated by the
1837 	 * code in the frame encapsulation routine.
1838 	 */
1839 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1840 		NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT);
1841 
1842 	/* Set full/half duplex mode. */
1843 	if (sc->nge_tbi)
1844 		media = sc->nge_ifmedia.ifm_cur->ifm_media;
1845 	else
1846 		media = mii->mii_media_active;
1847 
1848 	txcfg = CSR_READ_4(sc, NGE_TX_CFG);
1849 	rxcfg = CSR_READ_4(sc, NGE_RX_CFG);
1850 
1851 	DPRINTFN(4, ("%s: nge_init txcfg=%#x, rxcfg=%#x\n",
1852 		     sc->sc_dv.dv_xname, txcfg, rxcfg));
1853 
1854 	if ((media & IFM_GMASK) == IFM_FDX) {
1855 		txcfg |= (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
1856 		rxcfg |= (NGE_RXCFG_RX_FDX);
1857 	} else {
1858 		txcfg &= ~(NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
1859 		rxcfg &= ~(NGE_RXCFG_RX_FDX);
1860 	}
1861 
1862 	txcfg |= NGE_TXCFG_AUTOPAD;
1863 
1864 	CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
1865 	CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);
1866 
1867 	nge_tick(sc);
1868 
1869 	/*
1870 	 * Enable the delivery of PHY interrupts based on
1871 	 * link/speed/duplex status changes and enable return
1872 	 * of extended status information in the DMA descriptors,
1873 	 * required for checksum offloading.
1874 	 */
1875 	NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD|NGE_CFG_PHYINTR_LNK|
1876 		   NGE_CFG_PHYINTR_DUP|NGE_CFG_EXTSTS_ENB);
1877 
1878 	DPRINTFN(1, ("%s: nge_init: config=%#x\n", sc->sc_dv.dv_xname,
1879 		     CSR_READ_4(sc, NGE_CFG)));
1880 
1881 	/*
1882 	 * Configure interrupt holdoff (moderation). We can
1883 	 * have the chip delay interrupt delivery for a certain
1884 	 * period. Units are in 100us, and the max setting
1885 	 * is 25500us (0xFF x 100us). Default is a 100us holdoff.
1886 	 */
1887 	CSR_WRITE_4(sc, NGE_IHR, 0x01);
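	/*
	 * Editorial example: a 500us holdoff would be 0x05 here
	 * (value * 100us, capped at 0xFF).
	 */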
1888 
1889 	/*
1890 	 * Enable interrupts.
1891 	 */
1892 	CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS);
1893 	CSR_WRITE_4(sc, NGE_IER, 1);
1894 
1895 	/* Enable receiver and transmitter. */
1896 	NGE_CLRBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
1897 	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
1898 
1899 	if (sc->nge_tbi)
1900 	    nge_ifmedia_tbi_upd(ifp);
1901 	else
1902 	    nge_ifmedia_mii_upd(ifp);
1903 
1904 	ifp->if_flags |= IFF_RUNNING;
1905 	ifp->if_flags &= ~IFF_OACTIVE;
1906 
1907 	splx(s);
1908 }
1909 
1910 /*
1911  * Set mii media options.
1912  */
1913 int
1914 nge_ifmedia_mii_upd(ifp)
1915 	struct ifnet		*ifp;
1916 {
1917 	struct nge_softc	*sc = ifp->if_softc;
1918 	struct mii_data 	*mii = &sc->nge_mii;
1919 
1920 	DPRINTFN(2, ("%s: nge_ifmedia_mii_upd\n", sc->sc_dv.dv_xname));
1921 
1922 	sc->nge_link = 0;
1923 
1924 	if (mii->mii_instance) {
1925 		struct mii_softc *miisc;
1926 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1927 			mii_phy_reset(miisc);
1928 	}
1929 	mii_mediachg(mii);
1930 
1931 	return(0);
1932 }
1933 
1934 /*
1935  * Report current mii media status.
1936  */
1937 void
1938 nge_ifmedia_mii_sts(ifp, ifmr)
1939 	struct ifnet		*ifp;
1940 	struct ifmediareq	*ifmr;
1941 {
1942 	struct nge_softc	*sc = ifp->if_softc;
1943 	struct mii_data *mii = &sc->nge_mii;
1944 
1945 	DPRINTFN(2, ("%s: nge_ifmedia_mii_sts\n", sc->sc_dv.dv_xname));
1946 
1947 	mii_pollstat(mii);
1948 	ifmr->ifm_active = mii->mii_media_active;
1949 	ifmr->ifm_status = mii->mii_media_status;
1950 }
1951 
1952 /*
1953  * Set mii media options.
1954  */
1955 int
1956 nge_ifmedia_tbi_upd(ifp)
1957 	struct ifnet		*ifp;
1958 {
1959 	struct nge_softc	*sc = ifp->if_softc;
1960 
1961 	DPRINTFN(2, ("%s: nge_ifmedia_tbi_upd\n", sc->sc_dv.dv_xname));
1962 
1963 	sc->nge_link = 0;
1964 
1965 	if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
1966 	    == IFM_AUTO) {
1967 		u_int32_t anar, bmcr;
1968 		anar = CSR_READ_4(sc, NGE_TBI_ANAR);
1969 		anar |= (NGE_TBIANAR_HDX | NGE_TBIANAR_FDX);
1970 		CSR_WRITE_4(sc, NGE_TBI_ANAR, anar);
1971 
1972 		bmcr = CSR_READ_4(sc, NGE_TBI_BMCR);
1973 		bmcr |= (NGE_TBIBMCR_ENABLE_ANEG|NGE_TBIBMCR_RESTART_ANEG);
1974 		CSR_WRITE_4(sc, NGE_TBI_BMCR, bmcr);
1975 
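		/*
		 * Deassert the restart strobe again, presumably so
		 * that later BMCR writes do not re-kick
		 * autonegotiation.
		 */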
1976 		bmcr &= ~(NGE_TBIBMCR_RESTART_ANEG);
1977 		CSR_WRITE_4(sc, NGE_TBI_BMCR, bmcr);
1978 	} else {
1979 		u_int32_t txcfg, rxcfg;
1980 		txcfg = CSR_READ_4(sc, NGE_TX_CFG);
1981 		rxcfg = CSR_READ_4(sc, NGE_RX_CFG);
1982 
1983 		if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
1984 		    == IFM_FDX) {
1985 			txcfg |= NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR;
1986 			rxcfg |= NGE_RXCFG_RX_FDX;
1987 		} else {
1988 			txcfg &= ~(NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
1989 			rxcfg &= ~(NGE_RXCFG_RX_FDX);
1990 		}
1991 
1992 		txcfg |= NGE_TXCFG_AUTOPAD;
1993 		CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
1994 		CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);
1995 	}
1996 
1997 	NGE_CLRBIT(sc, NGE_GPIO, NGE_GPIO_GP3_OUT);
1998 
1999 	return(0);
2000 }
2001 
2002 /*
2003  * Report current tbi media status.
2004  */
2005 void
2006 nge_ifmedia_tbi_sts(ifp, ifmr)
2007 	struct ifnet		*ifp;
2008 	struct ifmediareq	*ifmr;
2009 {
2010 	struct nge_softc	*sc = ifp->if_softc;
2011 	u_int32_t		bmcr;
2012 
2013 	bmcr = CSR_READ_4(sc, NGE_TBI_BMCR);
2014 
2015 	if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media) == IFM_AUTO) {
2016 		u_int32_t bmsr = CSR_READ_4(sc, NGE_TBI_BMSR);
2017 		DPRINTFN(2, ("%s: nge_ifmedia_tbi_sts bmsr=%#x, bmcr=%#x\n",
2018 			     sc->sc_dv.dv_xname, bmsr, bmcr));
2019 
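		/*
		 * Until autonegotiation completes there is no link to
		 * report, so claim no active media.
		 */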
2020 		if (!(bmsr & NGE_TBIBMSR_ANEG_DONE)) {
2021 			ifmr->ifm_active = IFM_ETHER|IFM_NONE;
2022 			ifmr->ifm_status = IFM_AVALID;
2023 			return;
2024 		}
2025 	} else {
2026 		DPRINTFN(2, ("%s: nge_ifmedia_tbi_sts bmcr=%#x\n",
2027 			     sc->sc_dv.dv_xname, bmcr));
2028 	}
2029 
2030 	ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
2031 	ifmr->ifm_active = IFM_ETHER|IFM_1000_SX;
2032 
2033 	if (bmcr & NGE_TBIBMCR_LOOPBACK)
2034 		ifmr->ifm_active |= IFM_LOOP;
2035 
2036 	if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media) == IFM_AUTO) {
2037 		u_int32_t anlpar = CSR_READ_4(sc, NGE_TBI_ANLPAR);
2038 		DPRINTFN(2, ("%s: nge_ifmedia_tbi_sts anlpar=%#x\n",
2039 			     sc->sc_dv.dv_xname, anlpar));
2040 
2041 		ifmr->ifm_active |= IFM_AUTO;
2042 		if (anlpar & NGE_TBIANLPAR_FDX) {
2043 			ifmr->ifm_active |= IFM_FDX;
2044 		} else if (anlpar & NGE_TBIANLPAR_HDX) {
2045 			ifmr->ifm_active |= IFM_HDX;
2046 		} else {
2047 			ifmr->ifm_active |= IFM_FDX;
2048 		}
2049 	} else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK) == IFM_FDX)
2050 		ifmr->ifm_active |= IFM_FDX;
2051 	else
2052 		ifmr->ifm_active |= IFM_HDX;
2053 
2054 }
2055 
2056 int
2057 nge_ioctl(ifp, command, data)
2058 	struct ifnet		*ifp;
2059 	u_long			command;
2060 	caddr_t			data;
2061 {
2062 	struct nge_softc	*sc = ifp->if_softc;
2063 	struct ifaddr		*ifa = (struct ifaddr *) data;
2064 	struct ifreq		*ifr = (struct ifreq *) data;
2065 	struct mii_data		*mii;
2066 	int			s, error = 0;
2067 
2068 	s = splnet();
2069 
2070 	switch(command) {
2071 	case SIOCSIFADDR:
2072 		ifp->if_flags |= IFF_UP;
2073 		switch (ifa->ifa_addr->sa_family) {
2074 #ifdef INET
2075 		case AF_INET:
2076 			nge_init(sc);
2077 			arp_ifinit(&sc->arpcom, ifa);
2078 			break;
2079 #endif /* INET */
2080 		default:
2081 			nge_init(sc);
2082 			break;
2083 		}
2084 		break;
2085 
2086 	case SIOCSIFFLAGS:
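		/*
		 * If only the PROMISC flag changed while the chip is
		 * running, just toggle the RX filter bits rather than
		 * doing a full reinit.
		 */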
2087 		if (ifp->if_flags & IFF_UP) {
2088 			if (ifp->if_flags & IFF_RUNNING &&
2089 			    ifp->if_flags & IFF_PROMISC &&
2090 			    !(sc->nge_if_flags & IFF_PROMISC)) {
2091 				NGE_SETBIT(sc, NGE_RXFILT_CTL,
2092 				    NGE_RXFILTCTL_ALLPHYS|
2093 				    NGE_RXFILTCTL_ALLMULTI);
2094 			} else if (ifp->if_flags & IFF_RUNNING &&
2095 			    !(ifp->if_flags & IFF_PROMISC) &&
2096 			    sc->nge_if_flags & IFF_PROMISC) {
2097 				NGE_CLRBIT(sc, NGE_RXFILT_CTL,
2098 				    NGE_RXFILTCTL_ALLPHYS);
2099 				if (!(ifp->if_flags & IFF_ALLMULTI))
2100 					NGE_CLRBIT(sc, NGE_RXFILT_CTL,
2101 					    NGE_RXFILTCTL_ALLMULTI);
2102 			} else {
2103 				ifp->if_flags &= ~IFF_RUNNING;
2104 				nge_init(sc);
2105 			}
2106 		} else {
2107 			if (ifp->if_flags & IFF_RUNNING)
2108 				nge_stop(sc);
2109 		}
2110 		sc->nge_if_flags = ifp->if_flags;
2111 		error = 0;
2112 		break;
2113 
2114 	case SIOCGIFMEDIA:
2115 	case SIOCSIFMEDIA:
2116 		if (sc->nge_tbi) {
2117 			error = ifmedia_ioctl(ifp, ifr, &sc->nge_ifmedia,
2118 					      command);
2119 		} else {
2120 			mii = &sc->nge_mii;
2121 			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
2122 					      command);
2123 		}
2124 		break;
2125 
2126 	default:
2127 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
2128 	}
2129 
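	/*
	 * ENETRESET from ether_ioctl() means the multicast list
	 * changed; reprogram the hash filter, but only if the chip
	 * is up and running.
	 */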
2130 	if (error == ENETRESET) {
2131 		if (ifp->if_flags & IFF_RUNNING)
2132 			nge_setmulti(sc);
2133 		error = 0;
2134 	}
2135 
2136 	splx(s);
2137 	return(error);
2138 }
2139 
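/*
 * A watchdog timeout means the transmitter has likely wedged: reset
 * and reinitialize the chip, then restart any pending transmissions.
 */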
2140 void
2141 nge_watchdog(ifp)
2142 	struct ifnet		*ifp;
2143 {
2144 	struct nge_softc	*sc;
2145 
2146 	sc = ifp->if_softc;
2147 
2148 	ifp->if_oerrors++;
2149 	printf("%s: watchdog timeout\n", sc->sc_dv.dv_xname);
2150 
2151 	nge_stop(sc);
2152 	nge_reset(sc);
2153 	ifp->if_flags &= ~IFF_RUNNING;
2154 	nge_init(sc);
2155 
2156 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
2157 		nge_start(ifp);
2158 }
2159 
2160 /*
2161  * Stop the adapter and free any mbufs allocated to the
2162  * RX and TX lists.
2163  */
2164 void
2165 nge_stop(sc)
2166 	struct nge_softc	*sc;
2167 {
2168 	int			i;
2169 	struct ifnet		*ifp;
2170 	struct mii_data		*mii;
2171 
2172 	ifp = &sc->arpcom.ac_if;
2173 	ifp->if_timer = 0;
2174 	if (sc->nge_tbi) {
2175 		mii = NULL;
2176 	} else {
2177 		mii = &sc->nge_mii;
2178 	}
2179 
2180 	timeout_del(&sc->nge_timeout);
2181 
2182 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2183 
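	/*
	 * Mask interrupts and halt both DMA engines before tearing
	 * down the rings, so the chip cannot touch soon-to-be-freed
	 * mbufs.
	 */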
2184 	CSR_WRITE_4(sc, NGE_IER, 0);
2185 	CSR_WRITE_4(sc, NGE_IMR, 0);
2186 	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
2187 	DELAY(1000);
2188 	CSR_WRITE_4(sc, NGE_TX_LISTPTR, 0);
2189 	CSR_WRITE_4(sc, NGE_RX_LISTPTR, 0);
2190 
2191 	if (!sc->nge_tbi)
2192 		mii_down(mii);
2193 
2194 	sc->nge_link = 0;
2195 
2196 	/*
2197 	 * Free data in the RX lists.
2198 	 */
2199 	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
2200 		if (sc->nge_ldata->nge_rx_list[i].nge_mbuf != NULL) {
2201 			m_freem(sc->nge_ldata->nge_rx_list[i].nge_mbuf);
2202 			sc->nge_ldata->nge_rx_list[i].nge_mbuf = NULL;
2203 		}
2204 	}
2205 	bzero(&sc->nge_ldata->nge_rx_list,
2206 		sizeof(sc->nge_ldata->nge_rx_list));
2207 
2208 	/*
2209 	 * Free the TX list buffers.
2210 	 */
2211 	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
2212 		if (sc->nge_ldata->nge_tx_list[i].nge_mbuf != NULL) {
2213 			m_freem(sc->nge_ldata->nge_tx_list[i].nge_mbuf);
2214 			sc->nge_ldata->nge_tx_list[i].nge_mbuf = NULL;
2215 		}
2216 	}
2217 
2218 	bzero(&sc->nge_ldata->nge_tx_list,
2219 		sizeof(sc->nge_ldata->nge_tx_list));
2220 }
2221 
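/*
 * autoconf(9) glue: nge_ca supplies the softc size and the match and
 * attach routines, while nge_cd names the driver and classes it as a
 * network interface (DV_IFNET).
 */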
2222 struct cfattach nge_ca = {
2223 	sizeof(struct nge_softc), nge_probe, nge_attach
2224 };
2225 
2226 struct cfdriver nge_cd = {
2227 	NULL, "nge", DV_IFNET
2228 };
2229