1 /*	$OpenBSD: if_nge.c,v 1.56 2006/10/25 02:37:50 brad Exp $	*/
2 /*
3  * Copyright (c) 2001 Wind River Systems
4  * Copyright (c) 1997, 1998, 1999, 2000, 2001
5  *	Bill Paul <wpaul@bsdi.com>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: if_nge.c,v 1.35 2002/08/08 18:33:28 ambrisko Exp $
35  */
36 
37 /*
38  * National Semiconductor DP83820/DP83821 gigabit ethernet driver
39  * for FreeBSD. Datasheets are available from:
40  *
41  * http://www.national.com/ds/DP/DP83820.pdf
42  * http://www.national.com/ds/DP/DP83821.pdf
43  *
44  * These chips are used on several low cost gigabit ethernet NICs
45  * sold by D-Link, Addtron, SMC and Asante. Both parts are
46  * virtually the same, except the 83820 is a 64-bit/32-bit part,
47  * while the 83821 is 32-bit only.
48  *
49  * Many cards also use National gigE transceivers, such as the
50  * DP83891, DP83861 and DP83862 gigPHYTER parts. The DP83861 datasheet
51  * contains a full register description that applies to all of these
52  * components:
53  *
54  * http://www.national.com/ds/DP/DP83861.pdf
55  *
56  * Written by Bill Paul <wpaul@bsdi.com>
57  * BSDi Open Source Solutions
58  */
59 
60 /*
61  * The NatSemi DP83820 and 83821 controllers are enhanced versions
62  * of the NatSemi MacPHYTER 10/100 devices. They support 10, 100
63  * and 1000Mbps speeds with 1000baseX (ten bit interface), MII and GMII
64  * ports. Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP
65  * hardware checksum offload (IPv4 only), VLAN tagging and filtering,
66  * priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern
67  * matching buffers, one perfect address filter buffer and interrupt
68  * moderation. The 83820 supports both 64-bit and 32-bit addressing
69  * and data transfers: the 64-bit support can be toggled on or off
70  * via software. This affects the size of certain fields in the DMA
71  * descriptors.
72  *
73  * There are two bugs/misfeatures in the 83820/83821 that I have
74  * discovered so far:
75  *
76  * - Receive buffers must be aligned on 64-bit boundaries, which means
77  *   you must resort to copying data in order to fix up the payload
78  *   alignment.
79  *
80  * - In order to transmit jumbo frames larger than 8170 bytes, you have
81  *   to turn off transmit checksum offloading, because the chip can't
82  *   compute the checksum on an outgoing frame unless it fits entirely
83  *   within the TX FIFO, which is only 8192 bytes in size. If you have
84  *   TX checksum offload enabled and you attempt to transmit a
85  *   frame larger than 8170 bytes, the transmitter will wedge.
86  *
87  * To work around the latter problem, TX checksum offload is disabled
88  * if the user selects an MTU larger than 8152 (8170 - 18).
89  */
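
/*
 * Editor's sketch of that arithmetic (illustrative only, not driver
 * code): an MTU excludes the 14-byte Ethernet header and the 4-byte
 * FCS, so the largest MTU whose frames still fit under the 8170-byte
 * limit is 8170 - 18 = 8152. ETHER_HDR_LEN and ETHER_CRC_LEN come
 * from <netinet/if_ether.h>; the macro and helper names below are
 * hypothetical.
 */
#if 0
#define NGE_TX_CSUM_FRAME_MAX	8170	/* largest frame the FIFO can checksum */

/* Would TX checksum offload be safe at this MTU? */
static int
nge_txcsum_ok(int mtu)
{
	/* 8170 - (ETHER_HDR_LEN + ETHER_CRC_LEN) == 8170 - 18 == 8152 */
	return (mtu <= NGE_TX_CSUM_FRAME_MAX - ETHER_HDR_LEN - ETHER_CRC_LEN);
}
#endif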
90 
91 #include "bpfilter.h"
92 #include "vlan.h"
93 
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/sockio.h>
97 #include <sys/mbuf.h>
98 #include <sys/malloc.h>
99 #include <sys/kernel.h>
100 #include <sys/device.h>
101 #include <sys/socket.h>
102 
103 #include <net/if.h>
104 #include <net/if_dl.h>
105 #include <net/if_media.h>
106 
107 #ifdef INET
108 #include <netinet/in.h>
109 #include <netinet/in_systm.h>
110 #include <netinet/in_var.h>
111 #include <netinet/ip.h>
112 #include <netinet/if_ether.h>
113 #endif
114 
115 #if NVLAN > 0
116 #include <net/if_types.h>
117 #include <net/if_vlan_var.h>
118 #endif
119 
120 #if NBPFILTER > 0
121 #include <net/bpf.h>
122 #endif
123 
124 #include <uvm/uvm_extern.h>              /* for vtophys */
125 #define	VTOPHYS(v)	vtophys((vaddr_t)(v))
126 
127 #include <dev/pci/pcireg.h>
128 #include <dev/pci/pcivar.h>
129 #include <dev/pci/pcidevs.h>
130 
131 #include <dev/mii/mii.h>
132 #include <dev/mii/miivar.h>
133 
134 #define NGE_USEIOSPACE
135 
136 #include <dev/pci/if_ngereg.h>
137 
138 int nge_probe(struct device *, void *, void *);
139 void nge_attach(struct device *, struct device *, void *);
140 
141 int nge_alloc_jumbo_mem(struct nge_softc *);
142 void *nge_jalloc(struct nge_softc *);
143 void nge_jfree(caddr_t, u_int, void *);
144 
145 int nge_newbuf(struct nge_softc *, struct nge_desc *,
146 			     struct mbuf *);
147 int nge_encap(struct nge_softc *, struct mbuf *, u_int32_t *);
148 void nge_rxeof(struct nge_softc *);
149 void nge_txeof(struct nge_softc *);
150 int nge_intr(void *);
151 void nge_tick(void *);
152 void nge_start(struct ifnet *);
153 int nge_ioctl(struct ifnet *, u_long, caddr_t);
154 void nge_init(void *);
155 void nge_stop(struct nge_softc *);
156 void nge_watchdog(struct ifnet *);
157 void nge_shutdown(void *);
158 int nge_ifmedia_mii_upd(struct ifnet *);
159 void nge_ifmedia_mii_sts(struct ifnet *, struct ifmediareq *);
160 int nge_ifmedia_tbi_upd(struct ifnet *);
161 void nge_ifmedia_tbi_sts(struct ifnet *, struct ifmediareq *);
162 
163 void nge_delay(struct nge_softc *);
164 void nge_eeprom_idle(struct nge_softc *);
165 void nge_eeprom_putbyte(struct nge_softc *, int);
166 void nge_eeprom_getword(struct nge_softc *, int, u_int16_t *);
167 void nge_read_eeprom(struct nge_softc *, caddr_t, int, int, int);
168 
169 void nge_mii_sync(struct nge_softc *);
170 void nge_mii_send(struct nge_softc *, u_int32_t, int);
171 int nge_mii_readreg(struct nge_softc *, struct nge_mii_frame *);
172 int nge_mii_writereg(struct nge_softc *, struct nge_mii_frame *);
173 
174 int nge_miibus_readreg(struct device *, int, int);
175 void nge_miibus_writereg(struct device *, int, int, int);
176 void nge_miibus_statchg(struct device *);
177 
178 void nge_setmulti(struct nge_softc *);
179 void nge_reset(struct nge_softc *);
180 int nge_list_rx_init(struct nge_softc *);
181 int nge_list_tx_init(struct nge_softc *);
182 
183 #ifdef NGE_USEIOSPACE
184 #define NGE_RES			SYS_RES_IOPORT
185 #define NGE_RID			NGE_PCI_LOIO
186 #else
187 #define NGE_RES			SYS_RES_MEMORY
188 #define NGE_RID			NGE_PCI_LOMEM
189 #endif
190 
191 #ifdef NGE_DEBUG
192 #define DPRINTF(x)	do { if (ngedebug) printf x; } while (0)
193 #define DPRINTFN(n,x)	do { if (ngedebug >= (n)) printf x; } while (0)
194 int	ngedebug = 0;
195 #else
196 #define DPRINTF(x)
197 #define DPRINTFN(n,x)
198 #endif
199 
200 #define NGE_SETBIT(sc, reg, x)				\
201 	CSR_WRITE_4(sc, reg,				\
202 		CSR_READ_4(sc, reg) | (x))
203 
204 #define NGE_CLRBIT(sc, reg, x)				\
205 	CSR_WRITE_4(sc, reg,				\
206 		CSR_READ_4(sc, reg) & ~(x))
207 
208 #define SIO_SET(x)					\
209 	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) | (x))
210 
211 #define SIO_CLR(x)					\
212 	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) & ~(x))
213 
214 void
215 nge_delay(sc)
216 	struct nge_softc	*sc;
217 {
218 	int			idx;
219 
220 	for (idx = (300 / 33) + 1; idx > 0; idx--)
221 		CSR_READ_4(sc, NGE_CSR);
222 }
223 
224 void
225 nge_eeprom_idle(sc)
226 	struct nge_softc	*sc;
227 {
228 	int		i;
229 
230 	SIO_SET(NGE_MEAR_EE_CSEL);
231 	nge_delay(sc);
232 	SIO_SET(NGE_MEAR_EE_CLK);
233 	nge_delay(sc);
234 
235 	for (i = 0; i < 25; i++) {
236 		SIO_CLR(NGE_MEAR_EE_CLK);
237 		nge_delay(sc);
238 		SIO_SET(NGE_MEAR_EE_CLK);
239 		nge_delay(sc);
240 	}
241 
242 	SIO_CLR(NGE_MEAR_EE_CLK);
243 	nge_delay(sc);
244 	SIO_CLR(NGE_MEAR_EE_CSEL);
245 	nge_delay(sc);
246 	CSR_WRITE_4(sc, NGE_MEAR, 0x00000000);
247 }
248 
249 /*
250  * Send a read command and address to the EEPROM.
251  */
252 void
253 nge_eeprom_putbyte(sc, addr)
254 	struct nge_softc	*sc;
255 	int			addr;
256 {
257 	int			d, i;
258 
259 	d = addr | NGE_EECMD_READ;
260 
261 	/*
262 	 * Feed in each bit and strobe the clock.
263 	 */
264 	for (i = 0x400; i; i >>= 1) {
265 		if (d & i) {
266 			SIO_SET(NGE_MEAR_EE_DIN);
267 		} else {
268 			SIO_CLR(NGE_MEAR_EE_DIN);
269 		}
270 		nge_delay(sc);
271 		SIO_SET(NGE_MEAR_EE_CLK);
272 		nge_delay(sc);
273 		SIO_CLR(NGE_MEAR_EE_CLK);
274 		nge_delay(sc);
275 	}
276 }
277 
278 /*
279  * Read a word of data stored in the EEPROM at address 'addr.'
280  */
281 void
282 nge_eeprom_getword(sc, addr, dest)
283 	struct nge_softc	*sc;
284 	int			addr;
285 	u_int16_t		*dest;
286 {
287 	int			i;
288 	u_int16_t		word = 0;
289 
290 	/* Force EEPROM to idle state. */
291 	nge_eeprom_idle(sc);
292 
293 	/* Enter EEPROM access mode. */
294 	nge_delay(sc);
295 	SIO_CLR(NGE_MEAR_EE_CLK);
296 	nge_delay(sc);
297 	SIO_SET(NGE_MEAR_EE_CSEL);
298 	nge_delay(sc);
299 
300 	/*
301 	 * Send address of word we want to read.
302 	 */
303 	nge_eeprom_putbyte(sc, addr);
304 
305 	/*
306 	 * Start reading bits from EEPROM.
307 	 */
308 	for (i = 0x8000; i; i >>= 1) {
309 		SIO_SET(NGE_MEAR_EE_CLK);
310 		nge_delay(sc);
311 		if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_EE_DOUT)
312 			word |= i;
313 		nge_delay(sc);
314 		SIO_CLR(NGE_MEAR_EE_CLK);
315 		nge_delay(sc);
316 	}
317 
318 	/* Turn off EEPROM access mode. */
319 	nge_eeprom_idle(sc);
320 
321 	*dest = word;
322 }
323 
324 /*
325  * Read a sequence of words from the EEPROM.
326  */
327 void
328 nge_read_eeprom(sc, dest, off, cnt, swap)
329 	struct nge_softc	*sc;
330 	caddr_t			dest;
331 	int			off;
332 	int			cnt;
333 	int			swap;
334 {
335 	int			i;
336 	u_int16_t		word = 0, *ptr;
337 
338 	for (i = 0; i < cnt; i++) {
339 		nge_eeprom_getword(sc, off + i, &word);
340 		ptr = (u_int16_t *)(dest + (i * 2));
341 		if (swap)
342 			*ptr = ntohs(word);
343 		else
344 			*ptr = word;
345 	}
346 }
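
/*
 * Usage sketch (editor's illustration): this is how nge_attach() below
 * recovers the station address, reading three consecutive EEPROM words
 * and filling the array from its tail back to its head:
 */
#if 0
	u_char eaddr[ETHER_ADDR_LEN];

	nge_read_eeprom(sc, (caddr_t)&eaddr[4], NGE_EE_NODEADDR, 1, 0);
	nge_read_eeprom(sc, (caddr_t)&eaddr[2], NGE_EE_NODEADDR + 1, 1, 0);
	nge_read_eeprom(sc, (caddr_t)&eaddr[0], NGE_EE_NODEADDR + 2, 1, 0);
#endif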
347 
348 /*
349  * Sync the PHYs by setting the data bit and strobing the clock 32 times.
350  */
351 void
352 nge_mii_sync(sc)
353 	struct nge_softc		*sc;
354 {
355 	int			i;
356 
357 	SIO_SET(NGE_MEAR_MII_DIR|NGE_MEAR_MII_DATA);
358 
359 	for (i = 0; i < 32; i++) {
360 		SIO_SET(NGE_MEAR_MII_CLK);
361 		DELAY(1);
362 		SIO_CLR(NGE_MEAR_MII_CLK);
363 		DELAY(1);
364 	}
365 }
366 
367 /*
368  * Clock a series of bits through the MII.
369  */
370 void
371 nge_mii_send(sc, bits, cnt)
372 	struct nge_softc		*sc;
373 	u_int32_t		bits;
374 	int			cnt;
375 {
376 	int			i;
377 
378 	SIO_CLR(NGE_MEAR_MII_CLK);
379 
380 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
381 		if (bits & i) {
382 			SIO_SET(NGE_MEAR_MII_DATA);
383 		} else {
384 			SIO_CLR(NGE_MEAR_MII_DATA);
385 		}
386 		DELAY(1);
387 		SIO_CLR(NGE_MEAR_MII_CLK);
388 		DELAY(1);
389 		SIO_SET(NGE_MEAR_MII_CLK);
390 	}
391 }
392 
393 /*
394  * Read a PHY register through the MII.
395  */
396 int
397 nge_mii_readreg(sc, frame)
398 	struct nge_softc		*sc;
399 	struct nge_mii_frame	*frame;
400 {
401 	int			i, ack, s;
402 
403 	s = splnet();
404 
405 	/*
406 	 * Set up frame for RX.
407 	 */
408 	frame->mii_stdelim = NGE_MII_STARTDELIM;
409 	frame->mii_opcode = NGE_MII_READOP;
410 	frame->mii_turnaround = 0;
411 	frame->mii_data = 0;
412 
413 	CSR_WRITE_4(sc, NGE_MEAR, 0);
414 
415 	/*
416 	 * Turn on data xmit.
417 	 */
418 	SIO_SET(NGE_MEAR_MII_DIR);
419 
420 	nge_mii_sync(sc);
421 
422 	/*
423 	 * Send command/address info.
424 	 */
425 	nge_mii_send(sc, frame->mii_stdelim, 2);
426 	nge_mii_send(sc, frame->mii_opcode, 2);
427 	nge_mii_send(sc, frame->mii_phyaddr, 5);
428 	nge_mii_send(sc, frame->mii_regaddr, 5);
429 
430 	/* Idle bit */
431 	SIO_CLR((NGE_MEAR_MII_CLK|NGE_MEAR_MII_DATA));
432 	DELAY(1);
433 	SIO_SET(NGE_MEAR_MII_CLK);
434 	DELAY(1);
435 
436 	/* Turn off xmit. */
437 	SIO_CLR(NGE_MEAR_MII_DIR);
438 	/* Check for ack */
439 	SIO_CLR(NGE_MEAR_MII_CLK);
440 	DELAY(1);
441 	ack = CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA;
442 	SIO_SET(NGE_MEAR_MII_CLK);
443 	DELAY(1);
444 
445 	/*
446 	 * Now try reading data bits. If the ack failed, we still
447 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
448 	 */
449 	if (ack) {
450 		for (i = 0; i < 16; i++) {
451 			SIO_CLR(NGE_MEAR_MII_CLK);
452 			DELAY(1);
453 			SIO_SET(NGE_MEAR_MII_CLK);
454 			DELAY(1);
455 		}
456 		goto fail;
457 	}
458 
459 	for (i = 0x8000; i; i >>= 1) {
460 		SIO_CLR(NGE_MEAR_MII_CLK);
461 		DELAY(1);
462 		if (!ack) {
463 			if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA)
464 				frame->mii_data |= i;
465 			DELAY(1);
466 		}
467 		SIO_SET(NGE_MEAR_MII_CLK);
468 		DELAY(1);
469 	}
470 
471 fail:
472 
473 	SIO_CLR(NGE_MEAR_MII_CLK);
474 	DELAY(1);
475 	SIO_SET(NGE_MEAR_MII_CLK);
476 	DELAY(1);
477 
478 	splx(s);
479 
480 	if (ack)
481 		return(1);
482 	return(0);
483 }
484 
485 /*
486  * Write to a PHY register through the MII.
487  */
488 int
489 nge_mii_writereg(sc, frame)
490 	struct nge_softc		*sc;
491 	struct nge_mii_frame	*frame;
492 {
493 	int			s;
494 
495 	s = splnet();
496 	/*
497 	 * Set up frame for TX.
498 	 */
499 
500 	frame->mii_stdelim = NGE_MII_STARTDELIM;
501 	frame->mii_opcode = NGE_MII_WRITEOP;
502 	frame->mii_turnaround = NGE_MII_TURNAROUND;
503 
504 	/*
505 	 * Turn on data output.
506 	 */
507 	SIO_SET(NGE_MEAR_MII_DIR);
508 
509 	nge_mii_sync(sc);
510 
511 	nge_mii_send(sc, frame->mii_stdelim, 2);
512 	nge_mii_send(sc, frame->mii_opcode, 2);
513 	nge_mii_send(sc, frame->mii_phyaddr, 5);
514 	nge_mii_send(sc, frame->mii_regaddr, 5);
515 	nge_mii_send(sc, frame->mii_turnaround, 2);
516 	nge_mii_send(sc, frame->mii_data, 16);
517 
518 	/* Idle bit. */
519 	SIO_SET(NGE_MEAR_MII_CLK);
520 	DELAY(1);
521 	SIO_CLR(NGE_MEAR_MII_CLK);
522 	DELAY(1);
523 
524 	/*
525 	 * Turn off xmit.
526 	 */
527 	SIO_CLR(NGE_MEAR_MII_DIR);
528 
529 	splx(s);
530 
531 	return(0);
532 }
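
/*
 * Editor's note: the two routines above bit-bang a standard IEEE 802.3
 * clause 22 management frame. Assuming the usual MSB-first layout, the
 * 32 bits that follow the preamble pack as sketched below (the helper
 * is hypothetical and for illustration only):
 */
#if 0
static u_int32_t
nge_mii_frame_bits(u_int32_t op, u_int32_t phy, u_int32_t reg,
    u_int32_t data)
{
	/* <start 01> <op:2> <phy:5> <reg:5> <turnaround:2> <data:16> */
	return ((0x1 << 30) | (op << 28) | (phy << 23) | (reg << 18) |
	    (0x2 << 16) | (data & 0xFFFF));
}
#endif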
533 
534 int
535 nge_miibus_readreg(dev, phy, reg)
536 	struct device		*dev;
537 	int			phy, reg;
538 {
539 	struct nge_softc	*sc = (struct nge_softc *)dev;
540 	struct nge_mii_frame	frame;
541 
542 	DPRINTFN(9, ("%s: nge_miibus_readreg\n", sc->sc_dv.dv_xname));
543 
544 	bzero((char *)&frame, sizeof(frame));
545 
546 	frame.mii_phyaddr = phy;
547 	frame.mii_regaddr = reg;
548 	nge_mii_readreg(sc, &frame);
549 
550 	return(frame.mii_data);
551 }
552 
553 void
554 nge_miibus_writereg(dev, phy, reg, data)
555 	struct device		*dev;
556 	int			phy, reg, data;
557 {
558 	struct nge_softc	*sc = (struct nge_softc *)dev;
559 	struct nge_mii_frame	frame;
560 
561 
562 	DPRINTFN(9, ("%s: nge_miibus_writereg\n", sc->sc_dv.dv_xname));
563 
564 	bzero((char *)&frame, sizeof(frame));
565 
566 	frame.mii_phyaddr = phy;
567 	frame.mii_regaddr = reg;
568 	frame.mii_data = data;
569 	nge_mii_writereg(sc, &frame);
570 }
571 
572 void
573 nge_miibus_statchg(dev)
574 	struct device		*dev;
575 {
576 	struct nge_softc	*sc = (struct nge_softc *)dev;
577 	struct mii_data		*mii = &sc->nge_mii;
578 	u_int32_t		txcfg, rxcfg;
579 
580 	txcfg = CSR_READ_4(sc, NGE_TX_CFG);
581 	rxcfg = CSR_READ_4(sc, NGE_RX_CFG);
582 
583 	DPRINTFN(4, ("%s: nge_miibus_statchg txcfg=%#x, rxcfg=%#x\n",
584 		     sc->sc_dv.dv_xname, txcfg, rxcfg));
585 
586 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
587 		txcfg |= (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
588 		rxcfg |= (NGE_RXCFG_RX_FDX);
589 	} else {
590 		txcfg &= ~(NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
591 		rxcfg &= ~(NGE_RXCFG_RX_FDX);
592 	}
593 
594 	txcfg |= NGE_TXCFG_AUTOPAD;
595 
596 	CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
597 	CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);
598 
599 	/* If we have a 1000Mbps link, set the mode_1000 bit. */
600 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
601 		NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
602 	else
603 		NGE_CLRBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
604 }
605 
606 void
607 nge_setmulti(sc)
608 	struct nge_softc	*sc;
609 {
610 	struct arpcom		*ac = &sc->arpcom;
611 	struct ifnet		*ifp = &ac->ac_if;
612 	struct ether_multi      *enm;
613 	struct ether_multistep  step;
614 	u_int32_t		h = 0, i, filtsave;
615 	int			bit, index;
616 
617 allmulti:
618 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
619 		NGE_CLRBIT(sc, NGE_RXFILT_CTL,
620 		    NGE_RXFILTCTL_MCHASH|NGE_RXFILTCTL_UCHASH);
621 		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLMULTI);
622 		return;
623 	}
624 
625 	/*
626 	 * We have to explicitly enable the multicast hash table
627 	 * on the NatSemi chip if we want to use it, which we do.
628 	 * We also have to tell it that we don't want to use the
629 	 * hash table for matching unicast addresses.
630 	 */
631 	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_MCHASH);
632 	NGE_CLRBIT(sc, NGE_RXFILT_CTL,
633 	    NGE_RXFILTCTL_ALLMULTI|NGE_RXFILTCTL_UCHASH);
634 
635 	filtsave = CSR_READ_4(sc, NGE_RXFILT_CTL);
636 
637 	/* first, zot all the existing hash bits */
638 	for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) {
639 		CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i);
640 		CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0);
641 	}
642 
643 	/*
644 	 * From the 11 bits returned by the crc routine, the top 7
645 	 * bits represent the 16-bit word in the mcast hash table
646 	 * that needs to be updated, and the lower 4 bits represent
647 	 * which bit within that word needs to be set.
648 	 */
649 	ETHER_FIRST_MULTI(step, ac, enm);
650 	while (enm != NULL) {
651 		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
652 			ifp->if_flags |= IFF_ALLMULTI;
653 			goto allmulti;
654 		}
655 		h = (ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 21) &
656 		    0x00000FFF;
657 		index = (h >> 4) & 0x7F;
658 		bit = h & 0xF;
659 		CSR_WRITE_4(sc, NGE_RXFILT_CTL,
660 		    NGE_FILTADDR_MCAST_LO + (index * 2));
661 		NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit));
662 		ETHER_NEXT_MULTI(step, enm);
663 	}
664 
665 	CSR_WRITE_4(sc, NGE_RXFILT_CTL, filtsave);
666 }
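
/*
 * Worked example of the hash decomposition above (editor's sketch):
 * a CRC of 0xDEADBEEF gives h = (0xDEADBEEF >> 21) & 0xFFF = 0x6F5,
 * so the word index is (0x6F5 >> 4) & 0x7F = 0x6F, the bit within
 * that 16-bit word is 0x6F5 & 0xF = 0x5, and the filter RAM address
 * written to NGE_RXFILT_CTL is NGE_FILTADDR_MCAST_LO + 0x6F * 2.
 */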
667 
668 void
669 nge_reset(sc)
670 	struct nge_softc	*sc;
671 {
672 	int			i;
673 
674 	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET);
675 
676 	for (i = 0; i < NGE_TIMEOUT; i++) {
677 		if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET))
678 			break;
679 	}
680 
681 	if (i == NGE_TIMEOUT)
682 		printf("%s: reset never completed\n", sc->sc_dv.dv_xname);
683 
684 	/* Wait a little while for the chip to get its brains in order. */
685 	DELAY(1000);
686 
687 	/*
688 	 * If this is a NatSemi chip, make sure to clear
689 	 * PME mode.
690 	 */
691 	CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS);
692 	CSR_WRITE_4(sc, NGE_CLKRUN, 0);
693 }
694 
695 /*
696  * Probe for a NatSemi chip. Check the PCI vendor and device
697  * IDs against our list and return 1 if we find a match.
698  */
699 int
700 nge_probe(parent, match, aux)
701 	struct device *parent;
702 	void *match;
703 	void *aux;
704 {
705 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
706 
707 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NS &&
708 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NS_DP83820)
709 		return (1);
710 
711 	return (0);
712 }
713 
714 /*
715  * Attach the interface. Allocate softc structures, do ifmedia
716  * setup and ethernet/BPF attach.
717  */
718 void
719 nge_attach(parent, self, aux)
720 	struct device *parent, *self;
721 	void *aux;
722 {
723 	struct nge_softc	*sc = (struct nge_softc *)self;
724 	struct pci_attach_args	*pa = aux;
725 	pci_chipset_tag_t	pc = pa->pa_pc;
726 	pci_intr_handle_t	ih;
727 	const char		*intrstr = NULL;
728 	bus_size_t		size;
729 	bus_dma_segment_t	seg;
730 	bus_dmamap_t		dmamap;
731 	int			rseg;
732 	u_char			eaddr[ETHER_ADDR_LEN];
733 	pcireg_t		command;
734 #ifndef NGE_USEIOSPACE
735 	pcireg_t		memtype;
736 #endif
737 	struct ifnet		*ifp;
738 	caddr_t			kva;
739 
740 	/*
741 	 * Handle power management nonsense.
742 	 */
743 	DPRINTFN(5, ("%s: preparing for conf read\n", sc->sc_dv.dv_xname));
744 	command = pci_conf_read(pc, pa->pa_tag, NGE_PCI_CAPID) & 0x000000FF;
745 	if (command == 0x01) {
746 		command = pci_conf_read(pc, pa->pa_tag, NGE_PCI_PWRMGMTCTRL);
747 		if (command & NGE_PSTATE_MASK) {
748 			pcireg_t	iobase, membase, irq;
749 
750 			/* Save important PCI config data. */
751 			iobase = pci_conf_read(pc, pa->pa_tag, NGE_PCI_LOIO);
752 			membase = pci_conf_read(pc, pa->pa_tag, NGE_PCI_LOMEM);
753 			irq = pci_conf_read(pc, pa->pa_tag, NGE_PCI_INTLINE);
754 
755 			/* Reset the power state. */
756 			printf("%s: chip is in D%d power mode "
757 			       "-- setting to D0\n", sc->sc_dv.dv_xname,
758 			       command & NGE_PSTATE_MASK);
759 			command &= 0xFFFFFFFC;
760 			pci_conf_write(pc, pa->pa_tag,
761 				       NGE_PCI_PWRMGMTCTRL, command);
762 
763 			/* Restore PCI config data. */
764 			pci_conf_write(pc, pa->pa_tag, NGE_PCI_LOIO, iobase);
765 			pci_conf_write(pc, pa->pa_tag, NGE_PCI_LOMEM, membase);
766 			pci_conf_write(pc, pa->pa_tag, NGE_PCI_INTLINE, irq);
767 		}
768 	}
769 
770 	/*
771 	 * Map control/status registers.
772 	 */
773 	DPRINTFN(5, ("%s: map control/status regs\n", sc->sc_dv.dv_xname));
774 
775 #ifdef NGE_USEIOSPACE
776 	DPRINTFN(5, ("%s: pci_mapreg_map\n", sc->sc_dv.dv_xname));
777 	if (pci_mapreg_map(pa, NGE_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
778 	    &sc->nge_btag, &sc->nge_bhandle, NULL, &size, 0)) {
779 		printf(": can't map i/o space\n");
780 		return;
781 	}
782 #else
783 	DPRINTFN(5, ("%s: pci_mapreg_map\n", sc->sc_dv.dv_xname));
784 	memtype = pci_mapreg_type(pc, pa->pa_tag, NGE_PCI_LOMEM);
785 	switch (memtype) {
786 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
787 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
788 		if (pci_mapreg_map(pa, NGE_PCI_LOMEM,
789 				   memtype, 0, &sc->nge_btag, &sc->nge_bhandle,
790 				   NULL, &size, 0) == 0)
791 			break;
792 	default:
793 		printf(": can't map mem space\n");
794 		return;
795 	}
796 #endif
797 
798 	/* Disable all interrupts */
799 	CSR_WRITE_4(sc, NGE_IER, 0);
800 
801 	DPRINTFN(5, ("%s: pci_intr_map\n", sc->sc_dv.dv_xname));
802 	if (pci_intr_map(pa, &ih)) {
803 		printf(": couldn't map interrupt\n");
804 		goto fail_1;
805 	}
806 
807 	DPRINTFN(5, ("%s: pci_intr_string\n", sc->sc_dv.dv_xname));
808 	intrstr = pci_intr_string(pc, ih);
809 	DPRINTFN(5, ("%s: pci_intr_establish\n", sc->sc_dv.dv_xname));
810 	sc->nge_intrhand = pci_intr_establish(pc, ih, IPL_NET, nge_intr, sc,
811 					      sc->sc_dv.dv_xname);
812 	if (sc->nge_intrhand == NULL) {
813 		printf(": couldn't establish interrupt");
814 		if (intrstr != NULL)
815 			printf(" at %s", intrstr);
816 		printf("\n");
817 		goto fail_1;
818 	}
819 	printf(": %s", intrstr);
820 
821 	/* Reset the adapter. */
822 	DPRINTFN(5, ("%s: nge_reset\n", sc->sc_dv.dv_xname));
823 	nge_reset(sc);
824 
825 	/*
826 	 * Get station address from the EEPROM.
827 	 */
828 	DPRINTFN(5, ("%s: nge_read_eeprom\n", sc->sc_dv.dv_xname));
829 	nge_read_eeprom(sc, (caddr_t)&eaddr[4], NGE_EE_NODEADDR, 1, 0);
830 	nge_read_eeprom(sc, (caddr_t)&eaddr[2], NGE_EE_NODEADDR + 1, 1, 0);
831 	nge_read_eeprom(sc, (caddr_t)&eaddr[0], NGE_EE_NODEADDR + 2, 1, 0);
832 
833 	/*
834 	 * A NatSemi chip was detected. Inform the world.
835 	 */
836 	printf(", address %s\n", ether_sprintf(eaddr));
837 
838 	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
839 
840 	sc->sc_dmatag = pa->pa_dmat;
841 	DPRINTFN(5, ("%s: bus_dmamem_alloc\n", sc->sc_dv.dv_xname));
842 	if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct nge_list_data),
843 			     PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
844 		printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname);
845 		goto fail_2;
846 	}
847 	DPRINTFN(5, ("%s: bus_dmamem_map\n", sc->sc_dv.dv_xname));
848 	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
849 			   sizeof(struct nge_list_data), &kva,
850 			   BUS_DMA_NOWAIT)) {
851 		printf("%s: can't map dma buffers (%lu bytes)\n",
852 		       sc->sc_dv.dv_xname, (u_long)sizeof(struct nge_list_data));
853 		goto fail_3;
854 	}
855 	DPRINTFN(5, ("%s: bus_dmamap_create\n", sc->sc_dv.dv_xname));
856 	if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct nge_list_data), 1,
857 			      sizeof(struct nge_list_data), 0,
858 			      BUS_DMA_NOWAIT, &dmamap)) {
859 		printf("%s: can't create dma map\n", sc->sc_dv.dv_xname);
860 		goto fail_4;
861 	}
862 	DPRINTFN(5, ("%s: bus_dmamap_load\n", sc->sc_dv.dv_xname));
863 	if (bus_dmamap_load(sc->sc_dmatag, dmamap, kva,
864 			    sizeof(struct nge_list_data), NULL,
865 			    BUS_DMA_NOWAIT)) {
866 		goto fail_5;
867 	}
868 
869 	DPRINTFN(5, ("%s: bzero\n", sc->sc_dv.dv_xname));
870 	sc->nge_ldata = (struct nge_list_data *)kva;
871 	bzero(sc->nge_ldata, sizeof(struct nge_list_data));
872 
873 	/* Try to allocate memory for jumbo buffers. */
874 	DPRINTFN(5, ("%s: nge_alloc_jumbo_mem\n", sc->sc_dv.dv_xname));
875 	if (nge_alloc_jumbo_mem(sc)) {
876 		printf("%s: jumbo buffer allocation failed\n",
877 		       sc->sc_dv.dv_xname);
878 		goto fail_5;
879 	}
880 
881 	ifp = &sc->arpcom.ac_if;
882 	ifp->if_softc = sc;
883 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
884 	ifp->if_ioctl = nge_ioctl;
885 	ifp->if_start = nge_start;
886 	ifp->if_watchdog = nge_watchdog;
887 	ifp->if_baudrate = 1000000000;
888 	ifp->if_hardmtu = NGE_JUMBO_MTU;
889 	IFQ_SET_MAXLEN(&ifp->if_snd, NGE_TX_LIST_CNT - 1);
890 	IFQ_SET_READY(&ifp->if_snd);
891 	DPRINTFN(5, ("%s: bcopy\n", sc->sc_dv.dv_xname));
892 	bcopy(sc->sc_dv.dv_xname, ifp->if_xname, IFNAMSIZ);
893 
894 	ifp->if_capabilities = IFCAP_VLAN_MTU;
895 
896 #ifdef NGE_VLAN
897 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
898 #endif
899 
900 	/*
901 	 * Do MII setup.
902 	 */
903 	DPRINTFN(5, ("%s: mii setup\n", sc->sc_dv.dv_xname));
904 	if (CSR_READ_4(sc, NGE_CFG) & NGE_CFG_TBI_EN) {
905 		DPRINTFN(5, ("%s: TBI mode\n", sc->sc_dv.dv_xname));
906 		sc->nge_tbi = 1;
907 
908 		ifmedia_init(&sc->nge_ifmedia, 0, nge_ifmedia_tbi_upd,
909 			     nge_ifmedia_tbi_sts);
910 
911 		ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_NONE, 0, NULL);
912 		ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
913 		ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
914 			    0, NULL);
915 		ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
916 
917 		ifmedia_set(&sc->nge_ifmedia, IFM_ETHER|IFM_AUTO);
918 
919 		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
920 			    | NGE_GPIO_GP4_OUT
921 			    | NGE_GPIO_GP1_OUTENB | NGE_GPIO_GP2_OUTENB
922 			    | NGE_GPIO_GP3_OUTENB | NGE_GPIO_GP4_OUTENB
923 			    | NGE_GPIO_GP5_OUTENB);
924 
925 		NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
926 	} else {
927 		sc->nge_mii.mii_ifp = ifp;
928 		sc->nge_mii.mii_readreg = nge_miibus_readreg;
929 		sc->nge_mii.mii_writereg = nge_miibus_writereg;
930 		sc->nge_mii.mii_statchg = nge_miibus_statchg;
931 
932 		ifmedia_init(&sc->nge_mii.mii_media, 0, nge_ifmedia_mii_upd,
933 			     nge_ifmedia_mii_sts);
934 		mii_attach(&sc->sc_dv, &sc->nge_mii, 0xffffffff, MII_PHY_ANY,
935 			   MII_OFFSET_ANY, 0);
936 
937 		if (LIST_FIRST(&sc->nge_mii.mii_phys) == NULL) {
938 
939 			printf("%s: no PHY found!\n", sc->sc_dv.dv_xname);
940 			ifmedia_add(&sc->nge_mii.mii_media,
941 				    IFM_ETHER|IFM_MANUAL, 0, NULL);
942 			ifmedia_set(&sc->nge_mii.mii_media,
943 				    IFM_ETHER|IFM_MANUAL);
944 		}
945 		else
946 			ifmedia_set(&sc->nge_mii.mii_media,
947 				    IFM_ETHER|IFM_AUTO);
948 	}
949 
950 	/*
951 	 * Call MI attach routine.
952 	 */
953 	DPRINTFN(5, ("%s: if_attach\n", sc->sc_dv.dv_xname));
954 	if_attach(ifp);
955 	DPRINTFN(5, ("%s: ether_ifattach\n", sc->sc_dv.dv_xname));
956 	ether_ifattach(ifp);
957 	DPRINTFN(5, ("%s: timeout_set\n", sc->sc_dv.dv_xname));
958 	timeout_set(&sc->nge_timeout, nge_tick, sc);
959 	timeout_add(&sc->nge_timeout, hz);
960 	return;
961 
962 fail_5:
963 	bus_dmamap_destroy(sc->sc_dmatag, dmamap);
964 
965 fail_4:
966 	bus_dmamem_unmap(sc->sc_dmatag, kva,
967 	    sizeof(struct nge_list_data));
968 
969 fail_3:
970 	bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
971 
972 fail_2:
973 	pci_intr_disestablish(pc, sc->nge_intrhand);
974 
975 fail_1:
976 	bus_space_unmap(sc->nge_btag, sc->nge_bhandle, size);
977 }
978 
979 /*
980  * Initialize the transmit descriptors.
981  */
982 int
983 nge_list_tx_init(sc)
984 	struct nge_softc	*sc;
985 {
986 	struct nge_list_data	*ld;
987 	struct nge_ring_data	*cd;
988 	int			i;
989 
990 	cd = &sc->nge_cdata;
991 	ld = sc->nge_ldata;
992 
993 	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
994 		if (i == (NGE_TX_LIST_CNT - 1)) {
995 			ld->nge_tx_list[i].nge_nextdesc =
996 			    &ld->nge_tx_list[0];
997 			ld->nge_tx_list[i].nge_next =
998 			    VTOPHYS(&ld->nge_tx_list[0]);
999 		} else {
1000 			ld->nge_tx_list[i].nge_nextdesc =
1001 			    &ld->nge_tx_list[i + 1];
1002 			ld->nge_tx_list[i].nge_next =
1003 			    VTOPHYS(&ld->nge_tx_list[i + 1]);
1004 		}
1005 		ld->nge_tx_list[i].nge_mbuf = NULL;
1006 		ld->nge_tx_list[i].nge_ptr = 0;
1007 		ld->nge_tx_list[i].nge_ctl = 0;
1008 	}
1009 
1010 	cd->nge_tx_prod = cd->nge_tx_cons = cd->nge_tx_cnt = 0;
1011 
1012 	return(0);
1013 }
1014 
1015 
1016 /*
1017  * Initialize the RX descriptors and allocate mbufs for them. Note that
1018  * we arrange the descriptors in a closed ring, so that the last descriptor
1019  * points back to the first.
1020  */
1021 int
1022 nge_list_rx_init(sc)
1023 	struct nge_softc	*sc;
1024 {
1025 	struct nge_list_data	*ld;
1026 	struct nge_ring_data	*cd;
1027 	int			i;
1028 
1029 	ld = sc->nge_ldata;
1030 	cd = &sc->nge_cdata;
1031 
1032 	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
1033 		if (nge_newbuf(sc, &ld->nge_rx_list[i], NULL) == ENOBUFS)
1034 			return(ENOBUFS);
1035 		if (i == (NGE_RX_LIST_CNT - 1)) {
1036 			ld->nge_rx_list[i].nge_nextdesc =
1037 			    &ld->nge_rx_list[0];
1038 			ld->nge_rx_list[i].nge_next =
1039 			    VTOPHYS(&ld->nge_rx_list[0]);
1040 		} else {
1041 			ld->nge_rx_list[i].nge_nextdesc =
1042 			    &ld->nge_rx_list[i + 1];
1043 			ld->nge_rx_list[i].nge_next =
1044 			    VTOPHYS(&ld->nge_rx_list[i + 1]);
1045 		}
1046 	}
1047 
1048 	cd->nge_rx_prod = 0;
1049 
1050 	return(0);
1051 }
1052 
1053 /*
1054  * Initialize an RX descriptor and attach an MBUF cluster.
1055  */
1056 int
1057 nge_newbuf(sc, c, m)
1058 	struct nge_softc	*sc;
1059 	struct nge_desc		*c;
1060 	struct mbuf		*m;
1061 {
1062 	struct mbuf		*m_new = NULL;
1063 
1064 	if (m == NULL) {
1065 		caddr_t buf = NULL;
1066 
1067 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1068 		if (m_new == NULL)
1069 			return (ENOBUFS);
1070 
1071 		/* Allocate the jumbo buffer */
1072 		buf = nge_jalloc(sc);
1073 		if (buf == NULL) {
1074 			m_freem(m_new);
1075 			return (ENOBUFS);
1076 		}
1077 
1078 		/* Attach the buffer to the mbuf */
1079 		m_new->m_len = m_new->m_pkthdr.len = NGE_MCLBYTES;
1080 		MEXTADD(m_new, buf, NGE_MCLBYTES, 0, nge_jfree, sc);
1081 	} else {
1082 		/*
1083 		 * We're re-using a previously allocated mbuf;
1084 		 * be sure to re-init pointers and lengths to
1085 		 * default values.
1086 		 */
1087 		m_new = m;
1088 		m_new->m_len = m_new->m_pkthdr.len = NGE_MCLBYTES;
1089 		m_new->m_data = m_new->m_ext.ext_buf;
1090 	}
1091 
1092 	m_adj(m_new, sizeof(u_int64_t));
1093 
1094 	c->nge_mbuf = m_new;
1095 	c->nge_ptr = VTOPHYS(mtod(m_new, caddr_t));
1096 	DPRINTFN(7,("%s: c->nge_ptr=%#x\n", sc->sc_dv.dv_xname,
1097 		    c->nge_ptr));
1098 	c->nge_ctl = m_new->m_len;
1099 	c->nge_extsts = 0;
1100 
1101 	return(0);
1102 }
1103 
1104 int
1105 nge_alloc_jumbo_mem(sc)
1106 	struct nge_softc	*sc;
1107 {
1108 	caddr_t			ptr, kva;
1109 	bus_dma_segment_t	seg;
1110 	bus_dmamap_t		dmamap;
1111 	int			i, rseg, state, error;
1112 	struct nge_jpool_entry	*entry;
1113 
1114 	state = error = 0;
1115 
1116 	if (bus_dmamem_alloc(sc->sc_dmatag, NGE_JMEM, PAGE_SIZE, 0,
1117 			     &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
1118 		printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname);
1119 		return (ENOBUFS);
1120 	}
1121 
1122 	state = 1;
1123 	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, NGE_JMEM, &kva,
1124 			   BUS_DMA_NOWAIT)) {
1125 		printf("%s: can't map dma buffers (%d bytes)\n",
1126 		       sc->sc_dv.dv_xname, NGE_JMEM);
1127 		error = ENOBUFS;
1128 		goto out;
1129 	}
1130 
1131 	state = 2;
1132 	if (bus_dmamap_create(sc->sc_dmatag, NGE_JMEM, 1,
1133 			      NGE_JMEM, 0, BUS_DMA_NOWAIT, &dmamap)) {
1134 		printf("%s: can't create dma map\n", sc->sc_dv.dv_xname);
1135 		error = ENOBUFS;
1136 		goto out;
1137 	}
1138 
1139 	state = 3;
1140 	if (bus_dmamap_load(sc->sc_dmatag, dmamap, kva, NGE_JMEM,
1141 			    NULL, BUS_DMA_NOWAIT)) {
1142 		printf("%s: can't load dma map\n", sc->sc_dv.dv_xname);
1143 		error = ENOBUFS;
1144 		goto out;
1145 	}
1146 
1147 	state = 4;
1148 	sc->nge_cdata.nge_jumbo_buf = (caddr_t)kva;
1149 	DPRINTFN(1,("%s: nge_jumbo_buf=%#x, NGE_MCLBYTES=%#x\n",
1150 		    sc->sc_dv.dv_xname , sc->nge_cdata.nge_jumbo_buf,
1151 		    NGE_MCLBYTES));
1152 
1153 	LIST_INIT(&sc->nge_jfree_listhead);
1154 	LIST_INIT(&sc->nge_jinuse_listhead);
1155 
1156 	/*
1157 	 * Now divide it up into 9K pieces and save the addresses
1158 	 * in an array. nge_jfree() needs the softc pointer, but the
1159 	 * mbuf management code will not pass it to us explicitly;
1160 	 * instead it is handed back through the opaque argument that
1161 	 * MEXTADD() registers along with each external buffer in
1162 	 * nge_newbuf() above.
1163 	 */
1164 	ptr = sc->nge_cdata.nge_jumbo_buf;
1165 	for (i = 0; i < NGE_JSLOTS; i++) {
1166 		sc->nge_cdata.nge_jslots[i].nge_buf = ptr;
1167 		sc->nge_cdata.nge_jslots[i].nge_inuse = 0;
1168 		ptr += NGE_MCLBYTES;
1169 		entry = malloc(sizeof(struct nge_jpool_entry),
1170 			       M_DEVBUF, M_NOWAIT);
1171 		if (entry == NULL) {
1172 			sc->nge_cdata.nge_jumbo_buf = NULL;
1173 			printf("%s: no memory for jumbo buffer queue!\n",
1174 			       sc->sc_dv.dv_xname);
1175 			error = ENOBUFS;
1176 			goto out;
1177 		}
1178 		entry->slot = i;
1179 		LIST_INSERT_HEAD(&sc->nge_jfree_listhead, entry,
1180 				 jpool_entries);
1181 	}
1182 out:
1183 	if (error != 0) {
1184 		switch (state) {
1185 		case 4:
1186 			bus_dmamap_unload(sc->sc_dmatag, dmamap);
1187 		case 3:
1188 			bus_dmamap_destroy(sc->sc_dmatag, dmamap);
1189 		case 2:
1190 			bus_dmamem_unmap(sc->sc_dmatag, kva, NGE_JMEM);
1191 		case 1:
1192 			bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
1193 			break;
1194 		default:
1195 			break;
1196 		}
1197 	}
1198 
1199 	return (error);
1200 }
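
/*
 * Editor's sketch of the pool layout built above: slot i starts at
 * nge_jumbo_buf + i * NGE_MCLBYTES, and nge_jfree() below inverts that
 * mapping with a division to recover a slot from a buffer pointer
 * (illustrative fragment only):
 */
#if 0
	caddr_t buf = sc->nge_cdata.nge_jumbo_buf + i * NGE_MCLBYTES;
	int slot = ((vaddr_t)buf - (vaddr_t)sc->nge_cdata.nge_jumbo_buf) /
	    NGE_MCLBYTES;	/* slot == i */
#endif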
1201 
1202 /*
1203  * Allocate a jumbo buffer.
1204  */
1205 void *
1206 nge_jalloc(sc)
1207 	struct nge_softc	*sc;
1208 {
1209 	struct nge_jpool_entry   *entry;
1210 
1211 	entry = LIST_FIRST(&sc->nge_jfree_listhead);
1212 
1213 	if (entry == NULL)
1214 		return (NULL);
1215 
1216 	LIST_REMOVE(entry, jpool_entries);
1217 	LIST_INSERT_HEAD(&sc->nge_jinuse_listhead, entry, jpool_entries);
1218 	sc->nge_cdata.nge_jslots[entry->slot].nge_inuse = 1;
1219 	return(sc->nge_cdata.nge_jslots[entry->slot].nge_buf);
1220 }
1221 
1222 /*
1223  * Release a jumbo buffer.
1224  */
1225 void
1226 nge_jfree(buf, size, arg)
1227 	caddr_t		buf;
1228 	u_int		size;
1229 	void		*arg;
1230 {
1231 	struct nge_softc	*sc;
1232 	int		        i;
1233 	struct nge_jpool_entry *entry;
1234 
1235 	/* Extract the softc struct pointer. */
1236 	sc = (struct nge_softc *)arg;
1237 
1238 	if (sc == NULL)
1239 		panic("nge_jfree: can't find softc pointer!");
1240 
1241 	/* calculate the slot this buffer belongs to */
1242 
1243 	i = ((vaddr_t)buf - (vaddr_t)sc->nge_cdata.nge_jumbo_buf)
1244 	  / NGE_MCLBYTES;
1245 
1246 	if ((i < 0) || (i >= NGE_JSLOTS))
1247 		panic("nge_jfree: asked to free buffer that we don't manage!");
1248 	else if (sc->nge_cdata.nge_jslots[i].nge_inuse == 0)
1249 		panic("nge_jfree: buffer already free!");
1250 	else {
1251 		sc->nge_cdata.nge_jslots[i].nge_inuse--;
1252 		if (sc->nge_cdata.nge_jslots[i].nge_inuse == 0) {
1253 			entry = LIST_FIRST(&sc->nge_jinuse_listhead);
1254 			if (entry == NULL)
1255 				panic("nge_jfree: buffer not in use!");
1256 			entry->slot = i;
1257 			LIST_REMOVE(entry, jpool_entries);
1258 			LIST_INSERT_HEAD(&sc->nge_jfree_listhead,
1259 					 entry, jpool_entries);
1260 		}
1261 	}
1262 }
1263 
1264 /*
1265  * A frame has been uploaded: pass the resulting mbuf chain up to
1266  * the higher level protocols.
1267  */
1268 void
1269 nge_rxeof(sc)
1270 	struct nge_softc	*sc;
1271 {
1272 	struct mbuf		*m;
1273 	struct ifnet		*ifp;
1274 	struct nge_desc		*cur_rx;
1275 	int			i, total_len = 0;
1276 	u_int32_t		rxstat;
1277 
1278 	ifp = &sc->arpcom.ac_if;
1279 	i = sc->nge_cdata.nge_rx_prod;
1280 
1281 	while (NGE_OWNDESC(&sc->nge_ldata->nge_rx_list[i])) {
1282 		struct mbuf		*m0 = NULL;
1283 		u_int32_t		extsts;
1284 
1285 		cur_rx = &sc->nge_ldata->nge_rx_list[i];
1286 		rxstat = cur_rx->nge_rxstat;
1287 		extsts = cur_rx->nge_extsts;
1288 		m = cur_rx->nge_mbuf;
1289 		cur_rx->nge_mbuf = NULL;
1290 		total_len = NGE_RXBYTES(cur_rx);
1291 		NGE_INC(i, NGE_RX_LIST_CNT);
1292 
1293 		/*
1294 		 * If an error occurs, update stats, clear the
1295 		 * status word and leave the mbuf cluster in place:
1296 		 * it should simply get re-used next time this descriptor
1297 		 * comes up in the ring.
1298 		 */
1299 		if (!(rxstat & NGE_CMDSTS_PKT_OK)) {
1300 			ifp->if_ierrors++;
1301 			nge_newbuf(sc, cur_rx, m);
1302 			continue;
1303 		}
1304 
1305 		/*
1306 		 * Ok. NatSemi really screwed up here. This is the
1307 		 * only gigE chip I know of with alignment constraints
1308 		 * on receive buffers. RX buffers must be 64-bit aligned.
1309 		 */
1310 #ifndef __STRICT_ALIGNMENT
1311 		/*
1312 		 * By popular demand, ignore the alignment problems
1313 		 * on the Intel x86 platform. The performance hit
1314 		 * incurred due to unaligned accesses is much smaller
1315 		 * than the hit produced by forcing buffer copies all
1316 		 * the time, especially with jumbo frames. We still
1317 		 * need to fix up the alignment everywhere else though.
1318 		 */
1319 		if (nge_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
1320 #endif
1321 			m0 = m_devget(mtod(m, char *), total_len,
1322 			    ETHER_ALIGN, ifp, NULL);
1323 			nge_newbuf(sc, cur_rx, m);
1324 			if (m0 == NULL) {
1325 				ifp->if_ierrors++;
1326 				continue;
1327 			}
1328 			m_adj(m0, ETHER_ALIGN);
1329 			m = m0;
1330 #ifndef __STRICT_ALIGNMENT
1331 		} else {
1332 			m->m_pkthdr.rcvif = ifp;
1333 			m->m_pkthdr.len = m->m_len = total_len;
1334 		}
1335 #endif
1336 
1337 		ifp->if_ipackets++;
1338 
1339 #if NBPFILTER > 0
1340 		/*
1341 		 * Handle BPF listeners. Let the BPF user see the packet.
1342 		 */
1343 		if (ifp->if_bpf)
1344 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
1345 #endif
1346 
1347 		/* Do IP checksum checking. */
1348 		if (extsts & NGE_RXEXTSTS_IPPKT) {
1349 			if (!(extsts & NGE_RXEXTSTS_IPCSUMERR))
1350 				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
1351 			if ((extsts & NGE_RXEXTSTS_TCPPKT) &&
1352 			    (!(extsts & NGE_RXEXTSTS_TCPCSUMERR)))
1353 				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
1354 			else if ((extsts & NGE_RXEXTSTS_UDPPKT) &&
1355 				 (!(extsts & NGE_RXEXTSTS_UDPCSUMERR)))
1356 				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
1357 		}
1358 
1359 		ether_input_mbuf(ifp, m);
1360 	}
1361 
1362 	sc->nge_cdata.nge_rx_prod = i;
1363 }
1364 
1365 /*
1366  * A frame was downloaded to the chip. It's safe for us to clean up
1367  * the list buffers.
1368  */
1369 
1370 void
1371 nge_txeof(sc)
1372 	struct nge_softc	*sc;
1373 {
1374 	struct nge_desc		*cur_tx;
1375 	struct ifnet		*ifp;
1376 	u_int32_t		idx;
1377 
1378 	ifp = &sc->arpcom.ac_if;
1379 
1380 	/*
1381 	 * Go through our tx list and free mbufs for those
1382 	 * frames that have been transmitted.
1383 	 */
1384 	idx = sc->nge_cdata.nge_tx_cons;
1385 	while (idx != sc->nge_cdata.nge_tx_prod) {
1386 		cur_tx = &sc->nge_ldata->nge_tx_list[idx];
1387 
1388 		if (NGE_OWNDESC(cur_tx))
1389 			break;
1390 
1391 		if (cur_tx->nge_ctl & NGE_CMDSTS_MORE) {
1392 			sc->nge_cdata.nge_tx_cnt--;
1393 			NGE_INC(idx, NGE_TX_LIST_CNT);
1394 			continue;
1395 		}
1396 
1397 		if (!(cur_tx->nge_ctl & NGE_CMDSTS_PKT_OK)) {
1398 			ifp->if_oerrors++;
1399 			if (cur_tx->nge_txstat & NGE_TXSTAT_EXCESSCOLLS)
1400 				ifp->if_collisions++;
1401 			if (cur_tx->nge_txstat & NGE_TXSTAT_OUTOFWINCOLL)
1402 				ifp->if_collisions++;
1403 		}
1404 
1405 		ifp->if_collisions +=
1406 		    (cur_tx->nge_txstat & NGE_TXSTAT_COLLCNT) >> 16;
1407 
1408 		ifp->if_opackets++;
1409 		if (cur_tx->nge_mbuf != NULL) {
1410 			m_freem(cur_tx->nge_mbuf);
1411 			cur_tx->nge_mbuf = NULL;
1412 			ifp->if_flags &= ~IFF_OACTIVE;
1413 		}
1414 
1415 		sc->nge_cdata.nge_tx_cnt--;
1416 		NGE_INC(idx, NGE_TX_LIST_CNT);
1417 	}
1418 
1419 	sc->nge_cdata.nge_tx_cons = idx;
1420 
1421 	if (idx == sc->nge_cdata.nge_tx_prod)
1422 		ifp->if_timer = 0;
1423 }
1424 
1425 void
1426 nge_tick(xsc)
1427 	void			*xsc;
1428 {
1429 	struct nge_softc	*sc = xsc;
1430 	struct mii_data		*mii = &sc->nge_mii;
1431 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1432 	int			s;
1433 
1434 	s = splnet();
1435 
1436 	DPRINTFN(10, ("%s: nge_tick: link=%d\n", sc->sc_dv.dv_xname,
1437 		      sc->nge_link));
1438 
1439 	timeout_add(&sc->nge_timeout, hz);
1440 	if (sc->nge_link) {
1441 		splx(s);
1442 		return;
1443 	}
1444 
1445 	if (sc->nge_tbi) {
1446 		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
1447 		    == IFM_AUTO) {
1448 			u_int32_t bmsr, anlpar, txcfg, rxcfg;
1449 
1450 			bmsr = CSR_READ_4(sc, NGE_TBI_BMSR);
1451 			DPRINTFN(2, ("%s: nge_tick: bmsr=%#x\n",
1452 				     sc->sc_dv.dv_xname, bmsr));
1453 
1454 			if (!(bmsr & NGE_TBIBMSR_ANEG_DONE)) {
1455 				CSR_WRITE_4(sc, NGE_TBI_BMCR, 0);
1456 
1457 				splx(s);
1458 				return;
1459 			}
1460 
1461 			anlpar = CSR_READ_4(sc, NGE_TBI_ANLPAR);
1462 			txcfg = CSR_READ_4(sc, NGE_TX_CFG);
1463 			rxcfg = CSR_READ_4(sc, NGE_RX_CFG);
1464 
1465 			DPRINTFN(2, ("%s: nge_tick: anlpar=%#x, txcfg=%#x, "
1466 				     "rxcfg=%#x\n", sc->sc_dv.dv_xname, anlpar,
1467 				     txcfg, rxcfg));
1468 
1469 			if (anlpar == 0 || anlpar & NGE_TBIANAR_FDX) {
1470 				txcfg |= (NGE_TXCFG_IGN_HBEAT|
1471 					  NGE_TXCFG_IGN_CARR);
1472 				rxcfg |= NGE_RXCFG_RX_FDX;
1473 			} else {
1474 				txcfg &= ~(NGE_TXCFG_IGN_HBEAT|
1475 					   NGE_TXCFG_IGN_CARR);
1476 				rxcfg &= ~(NGE_RXCFG_RX_FDX);
1477 			}
1478 			txcfg |= NGE_TXCFG_AUTOPAD;
1479 			CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
1480 			CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);
1481 		}
1482 
1483 		DPRINTF(("%s: gigabit link up\n", sc->sc_dv.dv_xname));
1484 		sc->nge_link++;
1485 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
1486 			nge_start(ifp);
1487 	} else {
1488 		mii_tick(mii);
1489 		if (mii->mii_media_status & IFM_ACTIVE &&
1490 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1491 			sc->nge_link++;
1492 			if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
1493 				DPRINTF(("%s: gigabit link up\n",
1494 					 sc->sc_dv.dv_xname));
1495 			if (!IFQ_IS_EMPTY(&ifp->if_snd))
1496 				nge_start(ifp);
1497 		}
1498 
1499 	}
1500 
1501 	splx(s);
1502 }
1503 
1504 int
1505 nge_intr(arg)
1506 	void			*arg;
1507 {
1508 	struct nge_softc	*sc;
1509 	struct ifnet		*ifp;
1510 	u_int32_t		status;
1511 	int			claimed = 0;
1512 
1513 	sc = arg;
1514 	ifp = &sc->arpcom.ac_if;
1515 
1516 	/* Suppress unwanted interrupts */
1517 	if (!(ifp->if_flags & IFF_UP)) {
1518 		nge_stop(sc);
1519 		return (0);
1520 	}
1521 
1522 	/* Disable interrupts. */
1523 	CSR_WRITE_4(sc, NGE_IER, 0);
1524 
1525 	/* Data LED on for TBI mode */
1526 	if (sc->nge_tbi)
1527 		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
1528 			    | NGE_GPIO_GP3_OUT);
1529 
1530 	for (;;) {
1531 		/* Reading the ISR register clears all interrupts. */
1532 		status = CSR_READ_4(sc, NGE_ISR);
1533 
1534 		if ((status & NGE_INTRS) == 0)
1535 			break;
1536 
1537 		claimed = 1;
1538 
1539 		if ((status & NGE_ISR_TX_DESC_OK) ||
1540 		    (status & NGE_ISR_TX_ERR) ||
1541 		    (status & NGE_ISR_TX_OK) ||
1542 		    (status & NGE_ISR_TX_IDLE))
1543 			nge_txeof(sc);
1544 
1545 		if ((status & NGE_ISR_RX_DESC_OK) ||
1546 		    (status & NGE_ISR_RX_ERR) ||
1547 		    (status & NGE_ISR_RX_OFLOW) ||
1548 		    (status & NGE_ISR_RX_FIFO_OFLOW) ||
1549 		    (status & NGE_ISR_RX_IDLE) ||
1550 		    (status & NGE_ISR_RX_OK))
1551 			nge_rxeof(sc);
1552 
1553 		if ((status & NGE_ISR_RX_IDLE))
1554 			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
1555 
1556 		if (status & NGE_ISR_SYSERR) {
1557 			nge_reset(sc);
1558 			ifp->if_flags &= ~IFF_RUNNING;
1559 			nge_init(sc);
1560 		}
1561 
1562 #if 0
1563 		/*
1564 		 * XXX: nge_tick() is not ready to be called this way
1565 		 * it screws up the aneg timeout because mii_tick() is
1566 		 * only to be called once per second.
1567 		 */
1568 		if (status & NGE_IMR_PHY_INTR) {
1569 			sc->nge_link = 0;
1570 			nge_tick(sc);
1571 		}
1572 #endif
1573 	}
1574 
1575 	/* Re-enable interrupts. */
1576 	CSR_WRITE_4(sc, NGE_IER, 1);
1577 
1578 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1579 		nge_start(ifp);
1580 
1581 	/* Data LED off for TBI mode */
1582 	if (sc->nge_tbi)
1583 		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
1584 			    & ~NGE_GPIO_GP3_OUT);
1585 
1586 	return claimed;
1587 }
1588 
1589 /*
1590  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1591  * pointers to the fragment pointers.
1592  */
1593 int
1594 nge_encap(sc, m_head, txidx)
1595 	struct nge_softc	*sc;
1596 	struct mbuf		*m_head;
1597 	u_int32_t		*txidx;
1598 {
1599 	struct nge_desc		*f = NULL;
1600 	struct mbuf		*m;
1601 	int			frag, cur, cnt = 0;
1602 #if NVLAN > 0
1603 	struct ifvlan		*ifv = NULL;
1604 
1605 	if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
1606 	    m_head->m_pkthdr.rcvif != NULL)
1607 		ifv = m_head->m_pkthdr.rcvif->if_softc;
1608 #endif
1609 
1610 	/*
1611 	 * Start packing the mbufs in this chain into
1612 	 * the fragment pointers. Stop when we run out
1613 	 * of fragments or hit the end of the mbuf chain.
1614 	 */
1615 	m = m_head;
1616 	cur = frag = *txidx;
1617 
1618 	for (m = m_head; m != NULL; m = m->m_next) {
1619 		if (m->m_len != 0) {
1620 			if ((NGE_TX_LIST_CNT -
1621 			    (sc->nge_cdata.nge_tx_cnt + cnt)) < 2)
1622 				return(ENOBUFS);
1623 			f = &sc->nge_ldata->nge_tx_list[frag];
1624 			f->nge_ctl = NGE_CMDSTS_MORE | m->m_len;
1625 			f->nge_ptr = VTOPHYS(mtod(m, vaddr_t));
1626 			DPRINTFN(7,("%s: f->nge_ptr=%#x\n",
1627 				    sc->sc_dv.dv_xname, f->nge_ptr));
1628 			if (cnt != 0)
1629 				f->nge_ctl |= NGE_CMDSTS_OWN;
1630 			cur = frag;
1631 			NGE_INC(frag, NGE_TX_LIST_CNT);
1632 			cnt++;
1633 		}
1634 	}
1635 
1636 	if (m != NULL)
1637 		return(ENOBUFS);
1638 
1639 	sc->nge_ldata->nge_tx_list[*txidx].nge_extsts = 0;
1640 
1641 #if NVLAN > 0
1642 	if (ifv != NULL) {
1643 		sc->nge_ldata->nge_tx_list[cur].nge_extsts |=
1644 			(NGE_TXEXTSTS_VLANPKT|ifv->ifv_tag);
1645 	}
1646 #endif
1647 
1648 	sc->nge_ldata->nge_tx_list[cur].nge_mbuf = m_head;
1649 	sc->nge_ldata->nge_tx_list[cur].nge_ctl &= ~NGE_CMDSTS_MORE;
1650 	sc->nge_ldata->nge_tx_list[*txidx].nge_ctl |= NGE_CMDSTS_OWN;
1651 	sc->nge_cdata.nge_tx_cnt += cnt;
1652 	*txidx = frag;
1653 
1654 	return(0);
1655 }
1656 
1657 /*
1658  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1659  * to the mbuf data regions directly in the transmit lists. We also save a
1660  * copy of the pointers since the transmit list fragment pointers are
1661  * physical addresses.
1662  */
1663 
1664 void
1665 nge_start(ifp)
1666 	struct ifnet		*ifp;
1667 {
1668 	struct nge_softc	*sc;
1669 	struct mbuf		*m_head = NULL;
1670 	u_int32_t		idx;
1671 	int			pkts = 0;
1672 
1673 	sc = ifp->if_softc;
1674 
1675 	if (!sc->nge_link)
1676 		return;
1677 
1678 	idx = sc->nge_cdata.nge_tx_prod;
1679 
1680 	if (ifp->if_flags & IFF_OACTIVE)
1681 		return;
1682 
1683 	while (sc->nge_ldata->nge_tx_list[idx].nge_mbuf == NULL) {
1684 		IFQ_POLL(&ifp->if_snd, m_head);
1685 		if (m_head == NULL)
1686 			break;
1687 
1688 		if (nge_encap(sc, m_head, &idx)) {
1689 			ifp->if_flags |= IFF_OACTIVE;
1690 			break;
1691 		}
1692 
1693 		/* now we are committed to transmit the packet */
1694 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1695 		pkts++;
1696 
1697 #if NBPFILTER > 0
1698 		/*
1699 		 * If there's a BPF listener, bounce a copy of this frame
1700 		 * to him.
1701 		 */
1702 		if (ifp->if_bpf)
1703 			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
1704 #endif
1705 	}
1706 	if (pkts == 0)
1707 		return;
1708 
1709 	/* Transmit */
1710 	sc->nge_cdata.nge_tx_prod = idx;
1711 	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE);
1712 
1713 	/*
1714 	 * Set a timeout in case the chip goes out to lunch.
1715 	 */
1716 	ifp->if_timer = 5;
1717 }
1718 
1719 void
1720 nge_init(xsc)
1721 	void			*xsc;
1722 {
1723 	struct nge_softc	*sc = xsc;
1724 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1725 	struct mii_data		*mii;
1726 	u_int32_t		txcfg, rxcfg;
1727 	int			s, media;
1728 
1729 	if (ifp->if_flags & IFF_RUNNING)
1730 		return;
1731 
1732 	s = splnet();
1733 
1734 	/*
1735 	 * Cancel pending I/O and free all RX/TX buffers.
1736 	 */
1737 	nge_stop(sc);
1738 
1739 	mii = sc->nge_tbi ? NULL : &sc->nge_mii;
1740 
1741 	/* Set MAC address */
1742 	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0);
1743 	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
1744 	    ((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
1745 	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1);
1746 	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
1747 	    ((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
1748 	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2);
1749 	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
1750 	    ((u_int16_t *)sc->arpcom.ac_enaddr)[2]);
1751 
1752 	/* Init circular RX list. */
1753 	if (nge_list_rx_init(sc) == ENOBUFS) {
1754 		printf("%s: initialization failed: no "
1755 			"memory for rx buffers\n", sc->sc_dv.dv_xname);
1756 		nge_stop(sc);
1757 		splx(s);
1758 		return;
1759 	}
1760 
1761 	/*
1762 	 * Init tx descriptors.
1763 	 */
1764 	nge_list_tx_init(sc);
1765 
1766 	/*
1767 	 * For the NatSemi chip, we have to explicitly enable the
1768 	 * reception of ARP frames, as well as turn on the 'perfect
1769 	 * match' filter where we store the station address, otherwise
1770 	 * we won't receive unicasts meant for this host.
1771 	 */
1772 	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ARP);
1773 	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_PERFECT);
1774 
1775 	 /* If we want promiscuous mode, set the allframes bit. */
1776 	if (ifp->if_flags & IFF_PROMISC)
1777 		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
1778 	else
1779 		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
1780 
1781 	/*
1782 	 * Set the capture broadcast bit to capture broadcast frames.
1783 	 */
1784 	if (ifp->if_flags & IFF_BROADCAST)
1785 		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
1786 	else
1787 		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
1788 
1789 	/*
1790 	 * Load the multicast filter.
1791 	 */
1792 	nge_setmulti(sc);
1793 
1794 	/* Turn the receive filter on */
1795 	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ENABLE);
1796 
1797 	/*
1798 	 * Load the address of the RX and TX lists.
1799 	 */
1800 	CSR_WRITE_4(sc, NGE_RX_LISTPTR,
1801 	    VTOPHYS(&sc->nge_ldata->nge_rx_list[0]));
1802 	CSR_WRITE_4(sc, NGE_TX_LISTPTR,
1803 	    VTOPHYS(&sc->nge_ldata->nge_tx_list[0]));
1804 
1805 	/* Set RX configuration */
1806 	CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG);
1807 
1808 	/*
1809 	 * Enable hardware checksum validation for all IPv4
1810 	 * packets, do not reject packets with bad checksums.
1811 	 */
1812 	CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB);
1813 
1814 	/* Set TX configuration */
1815 	CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG);
1816 
1817 #if NVLAN > 0
1818 	/*
1819 	 * If VLAN support is enabled, tell the chip to insert
1820 	 * VLAN tags on a per-packet basis as dictated by the
1821 	 * code in the frame encapsulation routine.
1822 	 */
1823 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1824 		NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT);
1825 #endif
1826 
1827 	/* Set full/half duplex mode. */
1828 	if (sc->nge_tbi)
1829 		media = sc->nge_ifmedia.ifm_cur->ifm_media;
1830 	else
1831 		media = mii->mii_media_active;
1832 
1833 	txcfg = CSR_READ_4(sc, NGE_TX_CFG);
1834 	rxcfg = CSR_READ_4(sc, NGE_RX_CFG);
1835 
1836 	DPRINTFN(4, ("%s: nge_init txcfg=%#x, rxcfg=%#x\n",
1837 		     sc->sc_dv.dv_xname, txcfg, rxcfg));
1838 
1839 	if ((media & IFM_GMASK) == IFM_FDX) {
1840 		txcfg |= (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
1841 		rxcfg |= (NGE_RXCFG_RX_FDX);
1842 	} else {
1843 		txcfg &= ~(NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
1844 		rxcfg &= ~(NGE_RXCFG_RX_FDX);
1845 	}
1846 
1847 	txcfg |= NGE_TXCFG_AUTOPAD;
1848 
1849 	CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
1850 	CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);
1851 
1852 	nge_tick(sc);
1853 
1854 	/*
1855 	 * Enable the delivery of PHY interrupts based on
1856 	 * link/speed/duplex status changes and enable return
1857 	 * of extended status information in the DMA descriptors,
1858 	 * required for checksum offloading.
1859 	 */
1860 	NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD|NGE_CFG_PHYINTR_LNK|
1861 		   NGE_CFG_PHYINTR_DUP|NGE_CFG_EXTSTS_ENB);
1862 
1863 	DPRINTFN(1, ("%s: nge_init: config=%#x\n", sc->sc_dv.dv_xname,
1864 		     CSR_READ_4(sc, NGE_CFG)));
1865 
1866 	/*
1867 	 * Configure interrupt holdoff (moderation). We can
1868 	 * have the chip delay interrupt delivery for a certain
1869 	 * period. Units are in 100us, and the max setting
1870 	 * is 25500us (0xFF x 100us). Default is a 100us holdoff.
1871 	 */
1872 	CSR_WRITE_4(sc, NGE_IHR, 0x01);
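
#if 0
	/*
	 * Editor's sketch: converting a desired holdoff in microseconds
	 * into a register value (units of 100us, capped at the maximum
	 * of 0xFF == 25500us). "holdoff_us" is a hypothetical variable;
	 * the driver simply uses the 100us default above.
	 */
	CSR_WRITE_4(sc, NGE_IHR, MIN(holdoff_us / 100, 0xFF));
#endif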
1873 
1874 	/*
1875 	 * Enable interrupts.
1876 	 */
1877 	CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS);
1878 	CSR_WRITE_4(sc, NGE_IER, 1);
1879 
1880 	/* Enable receiver and transmitter. */
1881 	NGE_CLRBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
1882 	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
1883 
1884 	if (sc->nge_tbi)
1885 		nge_ifmedia_tbi_upd(ifp);
1886 	else
1887 		nge_ifmedia_mii_upd(ifp);
1888 
1889 	ifp->if_flags |= IFF_RUNNING;
1890 	ifp->if_flags &= ~IFF_OACTIVE;
1891 
1892 	splx(s);
1893 }
1894 
1895 /*
1896  * Set mii media options.
1897  */
1898 int
1899 nge_ifmedia_mii_upd(ifp)
1900 	struct ifnet		*ifp;
1901 {
1902 	struct nge_softc	*sc = ifp->if_softc;
1903 	struct mii_data 	*mii = &sc->nge_mii;
1904 
1905 	DPRINTFN(2, ("%s: nge_ifmedia_mii_upd\n", sc->sc_dv.dv_xname));
1906 
1907 	sc->nge_link = 0;
1908 
1909 	if (mii->mii_instance) {
1910 		struct mii_softc *miisc;
1911 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1912 			mii_phy_reset(miisc);
1913 	}
1914 	mii_mediachg(mii);
1915 
1916 	return(0);
1917 }
1918 
1919 /*
1920  * Report current mii media status.
1921  */
1922 void
1923 nge_ifmedia_mii_sts(ifp, ifmr)
1924 	struct ifnet		*ifp;
1925 	struct ifmediareq	*ifmr;
1926 {
1927 	struct nge_softc	*sc = ifp->if_softc;
1928 	struct mii_data *mii = &sc->nge_mii;
1929 
1930 	DPRINTFN(2, ("%s: nge_ifmedia_mii_sts\n", sc->sc_dv.dv_xname));
1931 
1932 	mii_pollstat(mii);
1933 	ifmr->ifm_active = mii->mii_media_active;
1934 	ifmr->ifm_status = mii->mii_media_status;
1935 }
1936 
1937 /*
1938  * Set tbi media options.
1939  */
1940 int
1941 nge_ifmedia_tbi_upd(ifp)
1942 	struct ifnet		*ifp;
1943 {
1944 	struct nge_softc	*sc = ifp->if_softc;
1945 
1946 	DPRINTFN(2, ("%s: nge_ifmedia_tbi_upd\n", sc->sc_dv.dv_xname));
1947 
1948 	sc->nge_link = 0;
1949 
	if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
	    == IFM_AUTO) {
		u_int32_t anar, bmcr;
		anar = CSR_READ_4(sc, NGE_TBI_ANAR);
		anar |= (NGE_TBIANAR_HDX | NGE_TBIANAR_FDX);
		CSR_WRITE_4(sc, NGE_TBI_ANAR, anar);

		bmcr = CSR_READ_4(sc, NGE_TBI_BMCR);
		bmcr |= (NGE_TBIBMCR_ENABLE_ANEG|NGE_TBIBMCR_RESTART_ANEG);
		CSR_WRITE_4(sc, NGE_TBI_BMCR, bmcr);

		bmcr &= ~(NGE_TBIBMCR_RESTART_ANEG);
		CSR_WRITE_4(sc, NGE_TBI_BMCR, bmcr);
	} else {
		u_int32_t txcfg, rxcfg;
		txcfg = CSR_READ_4(sc, NGE_TX_CFG);
		rxcfg = CSR_READ_4(sc, NGE_RX_CFG);

		if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
		    == IFM_FDX) {
			txcfg |= NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR;
			rxcfg |= NGE_RXCFG_RX_FDX;
		} else {
			txcfg &= ~(NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
			rxcfg &= ~(NGE_RXCFG_RX_FDX);
		}

		txcfg |= NGE_TXCFG_AUTOPAD;
		CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
		CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);
	}

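	/*
	 * Drive GPIO pin 3 low. What GP3 controls is board specific;
	 * on the TBI cards handled here it appears to be part of
	 * selecting the optical transceiver path (an assumption, not
	 * something the code above documents).
	 */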
	NGE_CLRBIT(sc, NGE_GPIO, NGE_GPIO_GP3_OUT);

	return(0);
}

/*
 * Report current tbi media status.
 */
void
nge_ifmedia_tbi_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct nge_softc	*sc = ifp->if_softc;
	u_int32_t		bmcr;

	bmcr = CSR_READ_4(sc, NGE_TBI_BMCR);

	if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media) == IFM_AUTO) {
		u_int32_t bmsr = CSR_READ_4(sc, NGE_TBI_BMSR);
		DPRINTFN(2, ("%s: nge_ifmedia_tbi_sts bmsr=%#x, bmcr=%#x\n",
			     sc->sc_dv.dv_xname, bmsr, bmcr));

		if (!(bmsr & NGE_TBIBMSR_ANEG_DONE)) {
			ifmr->ifm_active = IFM_ETHER|IFM_NONE;
			ifmr->ifm_status = IFM_AVALID;
			return;
		}
	} else {
		DPRINTFN(2, ("%s: nge_ifmedia_tbi_sts bmcr=%#x\n",
			     sc->sc_dv.dv_xname, bmcr));
	}

	ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER|IFM_1000_SX;

	if (bmcr & NGE_TBIBMCR_LOOPBACK)
		ifmr->ifm_active |= IFM_LOOP;

	if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media) == IFM_AUTO) {
		u_int32_t anlpar = CSR_READ_4(sc, NGE_TBI_ANLPAR);
		DPRINTFN(2, ("%s: nge_ifmedia_tbi_sts anlpar=%#x\n",
			     sc->sc_dv.dv_xname, anlpar));

		ifmr->ifm_active |= IFM_AUTO;
		if (anlpar & NGE_TBIANLPAR_FDX) {
			ifmr->ifm_active |= IFM_FDX;
		} else if (anlpar & NGE_TBIANLPAR_HDX) {
			ifmr->ifm_active |= IFM_HDX;
		} else {
			ifmr->ifm_active |= IFM_FDX;
		}
	} else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK) == IFM_FDX)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;
}

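/*
 * Handle socket ioctl requests.
 */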
int
nge_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct nge_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *)data;
	struct ifaddr		*ifa = (struct ifaddr *)data;
	struct mii_data		*mii;
	int			s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) {
		splx(s);
		return (error);
	}

	switch (command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			nge_init(sc);
			arp_ifinit(&sc->arpcom, ifa);
			break;
#endif /* INET */
		default:
			nge_init(sc);
			break;
		}
		break;
	case SIOCSIFFLAGS:
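		/*
		 * If the interface is up and running and only the
		 * PROMISC flag changed, toggle the receive filter
		 * bits in place rather than reinitializing the chip.
		 */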
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->nge_if_flags & IFF_PROMISC)) {
				NGE_SETBIT(sc, NGE_RXFILT_CTL,
				    NGE_RXFILTCTL_ALLPHYS|
				    NGE_RXFILTCTL_ALLMULTI);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->nge_if_flags & IFF_PROMISC) {
				NGE_CLRBIT(sc, NGE_RXFILT_CTL,
				    NGE_RXFILTCTL_ALLPHYS);
				if (!(ifp->if_flags & IFF_ALLMULTI))
					NGE_CLRBIT(sc, NGE_RXFILT_CTL,
					    NGE_RXFILTCTL_ALLMULTI);
			} else {
				ifp->if_flags &= ~IFF_RUNNING;
				nge_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nge_stop(sc);
		}
		sc->nge_if_flags = ifp->if_flags;
		error = 0;
		break;
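	/*
	 * ether_addmulti()/ether_delmulti() return ENETRESET when
	 * the hardware multicast filter has to be reprogrammed to
	 * match the new group membership.
	 */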
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI)
			? ether_addmulti(ifr, &sc->arpcom)
			: ether_delmulti(ifr, &sc->arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nge_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		if (sc->nge_tbi) {
			error = ifmedia_ioctl(ifp, ifr, &sc->nge_ifmedia,
					      command);
		} else {
			mii = &sc->nge_mii;
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
					      command);
		}
		break;
	default:
		error = ENOTTY;
		break;
	}

	splx(s);

	return(error);
}

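/*
 * A transmission has been pending longer than the watchdog allows;
 * assume the chip has wedged, reset and reinitialize it, then
 * restart any output still queued.
 */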
void
nge_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct nge_softc	*sc;

	sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", sc->sc_dv.dv_xname);

	nge_stop(sc);
	nge_reset(sc);
	ifp->if_flags &= ~IFF_RUNNING;
	nge_init(sc);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		nge_start(ifp);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
nge_stop(sc)
	struct nge_softc	*sc;
{
	int			i;
	struct ifnet		*ifp;
	struct mii_data		*mii;

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;
	if (sc->nge_tbi) {
		mii = NULL;
	} else {
		mii = &sc->nge_mii;
	}

	timeout_del(&sc->nge_timeout);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	CSR_WRITE_4(sc, NGE_IER, 0);
	CSR_WRITE_4(sc, NGE_IMR, 0);
	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
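	/* Give the receiver and transmitter a moment to go idle. */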
	DELAY(1000);
	CSR_WRITE_4(sc, NGE_TX_LISTPTR, 0);
	CSR_WRITE_4(sc, NGE_RX_LISTPTR, 0);

	if (!sc->nge_tbi)
		mii_down(mii);

	sc->nge_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
		if (sc->nge_ldata->nge_rx_list[i].nge_mbuf != NULL) {
			m_freem(sc->nge_ldata->nge_rx_list[i].nge_mbuf);
			sc->nge_ldata->nge_rx_list[i].nge_mbuf = NULL;
		}
	}
	bzero(&sc->nge_ldata->nge_rx_list,
		sizeof(sc->nge_ldata->nge_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
		if (sc->nge_ldata->nge_tx_list[i].nge_mbuf != NULL) {
			m_freem(sc->nge_ldata->nge_tx_list[i].nge_mbuf);
			sc->nge_ldata->nge_tx_list[i].nge_mbuf = NULL;
		}
	}

	bzero(&sc->nge_ldata->nge_tx_list,
		sizeof(sc->nge_ldata->nge_tx_list));
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
void
nge_shutdown(xsc)
	void *xsc;
{
	struct nge_softc *sc = (struct nge_softc *)xsc;

	nge_reset(sc);
	nge_stop(sc);
}

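/*
 * Autoconfiguration glue: the cfattach binds nge_probe() and
 * nge_attach() to the driver, and the cfdriver registers it with
 * the kernel under the name "nge" as a network interface device.
 */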
struct cfattach nge_ca = {
	sizeof(struct nge_softc), nge_probe, nge_attach
};

struct cfdriver nge_cd = {
	NULL, "nge", DV_IFNET
};