1 /*	$OpenBSD: if_nge.c,v 1.25 2003/01/15 06:31:24 art Exp $	*/
2 /*
3  * Copyright (c) 2001 Wind River Systems
4  * Copyright (c) 1997, 1998, 1999, 2000, 2001
5  *	Bill Paul <wpaul@bsdi.com>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: if_nge.c,v 1.35 2002/08/08 18:33:28 ambrisko Exp $
35  */
36 
37 /*
38  * National Semiconductor DP83820/DP83821 gigabit ethernet driver
39  * for FreeBSD. Datasheets are available from:
40  *
41  * http://www.national.com/ds/DP/DP83820.pdf
42  * http://www.national.com/ds/DP/DP83821.pdf
43  *
44  * These chips are used on several low cost gigabit ethernet NICs
45  * sold by D-Link, Addtron, SMC and Asante. Both parts are
46  * virtually the same, except the 83820 is a 64-bit/32-bit part,
47  * while the 83821 is 32-bit only.
48  *
49  * Many cards also use National gigE transceivers, such as the
50  * DP83891, DP83861 and DP83862 gigPHYTER parts. The DP83861 datasheet
51  * contains a full register description that applies to all of these
52  * components:
53  *
54  * http://www.national.com/ds/DP/DP83861.pdf
55  *
56  * Written by Bill Paul <wpaul@bsdi.com>
57  * BSDi Open Source Solutions
58  */
59 
60 /*
61  * The NatSemi DP83820 and 83821 controllers are enhanced versions
62  * of the NatSemi MacPHYTER 10/100 devices. They support 10, 100
 * and 1000Mbps speeds with 1000baseX (ten-bit interface, TBI), MII and GMII
64  * ports. Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP
65  * hardware checksum offload (IPv4 only), VLAN tagging and filtering,
66  * priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern
67  * matching buffers, one perfect address filter buffer and interrupt
68  * moderation. The 83820 supports both 64-bit and 32-bit addressing
69  * and data transfers: the 64-bit support can be toggled on or off
70  * via software. This affects the size of certain fields in the DMA
71  * descriptors.
72  *
73  * There are two bugs/misfeatures in the 83820/83821 that I have
74  * discovered so far:
75  *
76  * - Receive buffers must be aligned on 64-bit boundaries, which means
77  *   you must resort to copying data in order to fix up the payload
78  *   alignment.
79  *
80  * - In order to transmit jumbo frames larger than 8170 bytes, you have
81  *   to turn off transmit checksum offloading, because the chip can't
82  *   compute the checksum on an outgoing frame unless it fits entirely
83  *   within the TX FIFO, which is only 8192 bytes in size. If you have
 *   TX checksum offload enabled and you attempt to transmit a
 *   frame larger than 8170 bytes, the transmitter will wedge.
86  *
 * To work around the latter problem, TX checksum offload is disabled
 * if the user selects an MTU larger than 8152 (8170 minus the 14-byte
 * ethernet header and 4-byte FCS).
 */
90 
91 #include "bpfilter.h"
92 #include "vlan.h"
93 
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/sockio.h>
97 #include <sys/mbuf.h>
98 #include <sys/malloc.h>
99 #include <sys/kernel.h>
100 #include <sys/device.h>
101 #include <sys/socket.h>
102 
103 #include <net/if.h>
104 #include <net/if_dl.h>
105 #include <net/if_media.h>
106 
107 #ifdef INET
108 #include <netinet/in.h>
109 #include <netinet/in_systm.h>
110 #include <netinet/in_var.h>
111 #include <netinet/ip.h>
112 #include <netinet/if_ether.h>
113 #endif
114 
115 #if NVLAN > 0
116 #include <net/if_types.h>
117 #include <net/if_vlan_var.h>
118 #endif
119 
120 #if NBPFILTER > 0
121 #include <net/bpf.h>
122 #endif
123 
124 #include <uvm/uvm_extern.h>              /* for vtophys */
125 
126 #include <dev/pci/pcireg.h>
127 #include <dev/pci/pcivar.h>
128 #include <dev/pci/pcidevs.h>
129 
130 #include <dev/mii/mii.h>
131 #include <dev/mii/miivar.h>
132 
133 #define NGE_USEIOSPACE
134 
135 #include <dev/pci/if_ngereg.h>
136 
137 int nge_probe(struct device *, void *, void *);
138 void nge_attach(struct device *, struct device *, void *);
139 
140 int nge_alloc_jumbo_mem(struct nge_softc *);
141 void *nge_jalloc(struct nge_softc *);
142 void nge_jfree(caddr_t, u_int, void *);
143 
144 int nge_newbuf(struct nge_softc *, struct nge_desc *,
145 			     struct mbuf *);
146 int nge_encap(struct nge_softc *, struct mbuf *, u_int32_t *);
147 void nge_rxeof(struct nge_softc *);
148 void nge_txeof(struct nge_softc *);
149 int nge_intr(void *);
150 void nge_tick(void *);
151 void nge_start(struct ifnet *);
152 int nge_ioctl(struct ifnet *, u_long, caddr_t);
153 void nge_init(void *);
154 void nge_stop(struct nge_softc *);
155 void nge_watchdog(struct ifnet *);
156 void nge_shutdown(void *);
157 int nge_ifmedia_mii_upd(struct ifnet *);
158 void nge_ifmedia_mii_sts(struct ifnet *, struct ifmediareq *);
159 int nge_ifmedia_tbi_upd(struct ifnet *);
160 void nge_ifmedia_tbi_sts(struct ifnet *, struct ifmediareq *);
161 
162 void nge_delay(struct nge_softc *);
163 void nge_eeprom_idle(struct nge_softc *);
164 void nge_eeprom_putbyte(struct nge_softc *, int);
165 void nge_eeprom_getword(struct nge_softc *, int, u_int16_t *);
166 void nge_read_eeprom(struct nge_softc *, caddr_t, int, int, int);
167 
168 void nge_mii_sync(struct nge_softc *);
169 void nge_mii_send(struct nge_softc *, u_int32_t, int);
170 int nge_mii_readreg(struct nge_softc *, struct nge_mii_frame *);
171 int nge_mii_writereg(struct nge_softc *, struct nge_mii_frame *);
172 
173 int nge_miibus_readreg(struct device *, int, int);
174 void nge_miibus_writereg(struct device *, int, int, int);
175 void nge_miibus_statchg(struct device *);
176 
177 void nge_setmulti(struct nge_softc *);
178 u_int32_t nge_crc(struct nge_softc *, caddr_t);
179 void nge_reset(struct nge_softc *);
180 int nge_list_rx_init(struct nge_softc *);
181 int nge_list_tx_init(struct nge_softc *);
182 
183 #ifdef NGE_USEIOSPACE
184 #define NGE_RES			SYS_RES_IOPORT
185 #define NGE_RID			NGE_PCI_LOIO
186 #else
187 #define NGE_RES			SYS_RES_MEMORY
188 #define NGE_RID			NGE_PCI_LOMEM
189 #endif
190 
191 #ifdef NGE_DEBUG
192 #define DPRINTF(x)	if (ngedebug) printf x
193 #define DPRINTFN(n,x)	if (ngedebug >= (n)) printf x
194 int	ngedebug = 0;
195 #else
196 #define DPRINTF(x)
197 #define DPRINTFN(n,x)
198 #endif
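
/*
 * Note the extra parentheses at call sites, e.g.
 * DPRINTFN(5, ("%s: foo\n", sc->sc_dv.dv_xname)): the macro pastes
 * its second argument directly after printf, so the inner parens
 * become the argument list.
 */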
199 
200 #define NGE_SETBIT(sc, reg, x)				\
201 	CSR_WRITE_4(sc, reg,				\
202 		CSR_READ_4(sc, reg) | (x))
203 
204 #define NGE_CLRBIT(sc, reg, x)				\
205 	CSR_WRITE_4(sc, reg,				\
206 		CSR_READ_4(sc, reg) & ~(x))
207 
208 #define SIO_SET(x)					\
209 	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) | x)
210 
211 #define SIO_CLR(x)					\
212 	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) & ~x)
213 
214 void
215 nge_delay(sc)
216 	struct nge_softc	*sc;
217 {
218 	int			idx;
219 
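	/*
	 * Busy-wait by issuing harmless reads of the CSR register;
	 * each bus read is slow enough that a handful of them covers
	 * the EEPROM timing requirements (the 300/33 constants
	 * presumably approximate a 300ns delay at ~33ns per read).
	 */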
220 	for (idx = (300 / 33) + 1; idx > 0; idx--)
221 		CSR_READ_4(sc, NGE_CSR);
222 }
223 
224 void
225 nge_eeprom_idle(sc)
226 	struct nge_softc	*sc;
227 {
228 	int		i;
229 
230 	SIO_SET(NGE_MEAR_EE_CSEL);
231 	nge_delay(sc);
232 	SIO_SET(NGE_MEAR_EE_CLK);
233 	nge_delay(sc);
234 
235 	for (i = 0; i < 25; i++) {
236 		SIO_CLR(NGE_MEAR_EE_CLK);
237 		nge_delay(sc);
238 		SIO_SET(NGE_MEAR_EE_CLK);
239 		nge_delay(sc);
240 	}
241 
242 	SIO_CLR(NGE_MEAR_EE_CLK);
243 	nge_delay(sc);
244 	SIO_CLR(NGE_MEAR_EE_CSEL);
245 	nge_delay(sc);
246 	CSR_WRITE_4(sc, NGE_MEAR, 0x00000000);
247 }
248 
/*
 * Send a read command and address to the EEPROM.
 */
252 void
253 nge_eeprom_putbyte(sc, addr)
254 	struct nge_softc	*sc;
255 	int			addr;
256 {
257 	int			d, i;
258 
259 	d = addr | NGE_EECMD_READ;
260 
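	/*
	 * The command word is shifted out MSB first; starting the
	 * mask at 0x400 clocks out 11 bits: the NGE_EECMD_READ opcode
	 * bits together with the word address.
	 */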
261 	/*
262 	 * Feed in each bit and strobe the clock.
263 	 */
264 	for (i = 0x400; i; i >>= 1) {
265 		if (d & i) {
266 			SIO_SET(NGE_MEAR_EE_DIN);
267 		} else {
268 			SIO_CLR(NGE_MEAR_EE_DIN);
269 		}
270 		nge_delay(sc);
271 		SIO_SET(NGE_MEAR_EE_CLK);
272 		nge_delay(sc);
273 		SIO_CLR(NGE_MEAR_EE_CLK);
274 		nge_delay(sc);
275 	}
276 }
277 
278 /*
279  * Read a word of data stored in the EEPROM at address 'addr.'
280  */
281 void
282 nge_eeprom_getword(sc, addr, dest)
283 	struct nge_softc	*sc;
284 	int			addr;
285 	u_int16_t		*dest;
286 {
287 	int			i;
288 	u_int16_t		word = 0;
289 
290 	/* Force EEPROM to idle state. */
291 	nge_eeprom_idle(sc);
292 
293 	/* Enter EEPROM access mode. */
294 	nge_delay(sc);
295 	SIO_CLR(NGE_MEAR_EE_CLK);
296 	nge_delay(sc);
297 	SIO_SET(NGE_MEAR_EE_CSEL);
298 	nge_delay(sc);
299 
300 	/*
301 	 * Send address of word we want to read.
302 	 */
303 	nge_eeprom_putbyte(sc, addr);
304 
305 	/*
306 	 * Start reading bits from EEPROM.
307 	 */
308 	for (i = 0x8000; i; i >>= 1) {
309 		SIO_SET(NGE_MEAR_EE_CLK);
310 		nge_delay(sc);
311 		if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_EE_DOUT)
312 			word |= i;
313 		nge_delay(sc);
314 		SIO_CLR(NGE_MEAR_EE_CLK);
315 		nge_delay(sc);
316 	}
317 
318 	/* Turn off EEPROM access mode. */
319 	nge_eeprom_idle(sc);
320 
321 	*dest = word;
322 }
323 
324 /*
325  * Read a sequence of words from the EEPROM.
326  */
327 void
328 nge_read_eeprom(sc, dest, off, cnt, swap)
329 	struct nge_softc	*sc;
330 	caddr_t			dest;
331 	int			off;
332 	int			cnt;
333 	int			swap;
334 {
335 	int			i;
336 	u_int16_t		word = 0, *ptr;
337 
338 	for (i = 0; i < cnt; i++) {
339 		nge_eeprom_getword(sc, off + i, &word);
340 		ptr = (u_int16_t *)(dest + (i * 2));
341 		if (swap)
342 			*ptr = ntohs(word);
343 		else
344 			*ptr = word;
345 	}
346 }
347 
348 /*
 * Sync the PHYs by setting the data bit and strobing the clock 32 times.
350  */
351 void
352 nge_mii_sync(sc)
353 	struct nge_softc		*sc;
354 {
355 	int			i;
356 
357 	SIO_SET(NGE_MEAR_MII_DIR|NGE_MEAR_MII_DATA);
358 
359 	for (i = 0; i < 32; i++) {
360 		SIO_SET(NGE_MEAR_MII_CLK);
361 		DELAY(1);
362 		SIO_CLR(NGE_MEAR_MII_CLK);
363 		DELAY(1);
364 	}
365 }
366 
367 /*
368  * Clock a series of bits through the MII.
369  */
370 void
371 nge_mii_send(sc, bits, cnt)
372 	struct nge_softc		*sc;
373 	u_int32_t		bits;
374 	int			cnt;
375 {
376 	int			i;
377 
378 	SIO_CLR(NGE_MEAR_MII_CLK);
379 
380 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(NGE_MEAR_MII_DATA);
		} else {
			SIO_CLR(NGE_MEAR_MII_DATA);
		}
386 		DELAY(1);
387 		SIO_CLR(NGE_MEAR_MII_CLK);
388 		DELAY(1);
389 		SIO_SET(NGE_MEAR_MII_CLK);
390 	}
391 }
392 
393 /*
 * Read a PHY register through the MII.
395  */
396 int
397 nge_mii_readreg(sc, frame)
398 	struct nge_softc		*sc;
399 	struct nge_mii_frame	*frame;
400 {
401 	int			i, ack, s;
402 
403 	s = splimp();
404 
405 	/*
406 	 * Set up frame for RX.
407 	 */
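	/*
	 * The bit-bang sequence below follows the standard IEEE 802.3
	 * clause 22 management frame format:
	 *
	 *	<ST:2> <OP:2> <PHYAD:5> <REGAD:5> <TA:2> <DATA:16>
	 *
	 * For a read, the turnaround and data bits are driven by the
	 * PHY rather than by the host.
	 */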
408 	frame->mii_stdelim = NGE_MII_STARTDELIM;
409 	frame->mii_opcode = NGE_MII_READOP;
410 	frame->mii_turnaround = 0;
411 	frame->mii_data = 0;
412 
413 	CSR_WRITE_4(sc, NGE_MEAR, 0);
414 
415 	/*
416 	 * Turn on data xmit.
417 	 */
418 	SIO_SET(NGE_MEAR_MII_DIR);
419 
420 	nge_mii_sync(sc);
421 
422 	/*
423 	 * Send command/address info.
424 	 */
425 	nge_mii_send(sc, frame->mii_stdelim, 2);
426 	nge_mii_send(sc, frame->mii_opcode, 2);
427 	nge_mii_send(sc, frame->mii_phyaddr, 5);
428 	nge_mii_send(sc, frame->mii_regaddr, 5);
429 
430 	/* Idle bit */
431 	SIO_CLR((NGE_MEAR_MII_CLK|NGE_MEAR_MII_DATA));
432 	DELAY(1);
433 	SIO_SET(NGE_MEAR_MII_CLK);
434 	DELAY(1);
435 
436 	/* Turn off xmit. */
437 	SIO_CLR(NGE_MEAR_MII_DIR);
438 	/* Check for ack */
439 	SIO_CLR(NGE_MEAR_MII_CLK);
440 	DELAY(1);
441 	SIO_SET(NGE_MEAR_MII_CLK);
442 	DELAY(1);
443 	ack = CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA;
444 
445 	/*
446 	 * Now try reading data bits. If the ack failed, we still
447 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
448 	 */
449 	if (ack) {
450 		for(i = 0; i < 16; i++) {
451 			SIO_CLR(NGE_MEAR_MII_CLK);
452 			DELAY(1);
453 			SIO_SET(NGE_MEAR_MII_CLK);
454 			DELAY(1);
455 		}
456 		goto fail;
457 	}
458 
459 	for (i = 0x8000; i; i >>= 1) {
460 		SIO_CLR(NGE_MEAR_MII_CLK);
461 		DELAY(1);
462 		if (!ack) {
463 			if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA)
464 				frame->mii_data |= i;
465 			DELAY(1);
466 		}
467 		SIO_SET(NGE_MEAR_MII_CLK);
468 		DELAY(1);
469 	}
470 
471 fail:
472 
473 	SIO_CLR(NGE_MEAR_MII_CLK);
474 	DELAY(1);
475 	SIO_SET(NGE_MEAR_MII_CLK);
476 	DELAY(1);
477 
478 	splx(s);
479 
480 	if (ack)
481 		return(1);
482 	return(0);
483 }
484 
485 /*
486  * Write to a PHY register through the MII.
487  */
488 int
489 nge_mii_writereg(sc, frame)
490 	struct nge_softc		*sc;
491 	struct nge_mii_frame	*frame;
492 {
493 	int			s;
494 
495 	s = splimp();
496 	/*
497 	 * Set up frame for TX.
498 	 */
499 
500 	frame->mii_stdelim = NGE_MII_STARTDELIM;
501 	frame->mii_opcode = NGE_MII_WRITEOP;
502 	frame->mii_turnaround = NGE_MII_TURNAROUND;
503 
504 	/*
505 	 * Turn on data output.
506 	 */
507 	SIO_SET(NGE_MEAR_MII_DIR);
508 
509 	nge_mii_sync(sc);
510 
511 	nge_mii_send(sc, frame->mii_stdelim, 2);
512 	nge_mii_send(sc, frame->mii_opcode, 2);
513 	nge_mii_send(sc, frame->mii_phyaddr, 5);
514 	nge_mii_send(sc, frame->mii_regaddr, 5);
515 	nge_mii_send(sc, frame->mii_turnaround, 2);
516 	nge_mii_send(sc, frame->mii_data, 16);
517 
518 	/* Idle bit. */
519 	SIO_SET(NGE_MEAR_MII_CLK);
520 	DELAY(1);
521 	SIO_CLR(NGE_MEAR_MII_CLK);
522 	DELAY(1);
523 
524 	/*
525 	 * Turn off xmit.
526 	 */
527 	SIO_CLR(NGE_MEAR_MII_DIR);
528 
529 	splx(s);
530 
531 	return(0);
532 }
533 
534 int
535 nge_miibus_readreg(dev, phy, reg)
536 	struct device		*dev;
537 	int			phy, reg;
538 {
539 	struct nge_softc	*sc = (struct nge_softc *)dev;
540 	struct nge_mii_frame	frame;
541 
542 	DPRINTFN(9, ("%s: nge_miibus_readreg\n", sc->sc_dv.dv_xname));
543 
544 	bzero((char *)&frame, sizeof(frame));
545 
546 	frame.mii_phyaddr = phy;
547 	frame.mii_regaddr = reg;
548 	nge_mii_readreg(sc, &frame);
549 
550 	return(frame.mii_data);
551 }
552 
553 void
554 nge_miibus_writereg(dev, phy, reg, data)
555 	struct device		*dev;
556 	int			phy, reg, data;
557 {
558 	struct nge_softc	*sc = (struct nge_softc *)dev;
559 	struct nge_mii_frame	frame;
560 
561 
562 	DPRINTFN(9, ("%s: nge_miibus_writereg\n", sc->sc_dv.dv_xname));
563 
564 	bzero((char *)&frame, sizeof(frame));
565 
566 	frame.mii_phyaddr = phy;
567 	frame.mii_regaddr = reg;
568 	frame.mii_data = data;
569 	nge_mii_writereg(sc, &frame);
570 }
571 
572 void
573 nge_miibus_statchg(dev)
574 	struct device		*dev;
575 {
576 	struct nge_softc	*sc = (struct nge_softc *)dev;
577 	struct mii_data		*mii = &sc->nge_mii;
578 	u_int32_t		txcfg, rxcfg;
579 
580 	txcfg = CSR_READ_4(sc, NGE_TX_CFG);
581 	rxcfg = CSR_READ_4(sc, NGE_RX_CFG);
582 
583 	DPRINTFN(4, ("%s: nge_miibus_statchg txcfg=%#x, rxcfg=%#x\n",
584 		     sc->sc_dv.dv_xname, txcfg, rxcfg));
585 
586 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
587 		txcfg |= (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
588 		rxcfg |= (NGE_RXCFG_RX_FDX);
589 	} else {
590 		txcfg &= ~(NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
591 		rxcfg &= ~(NGE_RXCFG_RX_FDX);
592 	}
593 
594 	txcfg |= NGE_TXCFG_AUTOPAD;
595 
596 	CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
597 	CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);
598 
599 	/* If we have a 1000Mbps link, set the mode_1000 bit. */
600 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
601 		NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
602 	else
603 		NGE_CLRBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
604 }
605 
606 u_int32_t
607 nge_crc(sc, addr)
608 	struct nge_softc	*sc;
609 	caddr_t			addr;
610 {
611 	u_int32_t		crc, carry;
612 	int			i, j;
613 	u_int8_t		c;
614 
615 	/* Compute CRC for the address value. */
616 	crc = 0xFFFFFFFF; /* initial value */
617 
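	/*
	 * This is the standard ethernet CRC-32, computed one bit at
	 * a time. The XOR constant below is the 802.3 polynomial
	 * 0x04c11db7 with bit 0 cleared; OR'ing in `carry' (always 1
	 * on that path) restores the full polynomial.
	 */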
618 	for (i = 0; i < 6; i++) {
619 		c = *(addr + i);
620 		for (j = 0; j < 8; j++) {
621 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
622 			crc <<= 1;
623 			c >>= 1;
624 			if (carry)
625 				crc = (crc ^ 0x04c11db6) | carry;
626 		}
627 	}
628 
629 	/*
630 	 * return the filter bit position
631 	 */
632 
633 	return((crc >> 21) & 0x00000FFF);
634 }
635 
636 void
637 nge_setmulti(sc)
638 	struct nge_softc	*sc;
639 {
640 	struct arpcom		*ac = &sc->arpcom;
641 	struct ifnet		*ifp = &ac->ac_if;
642 	struct ether_multi      *enm;
643 	struct ether_multistep  step;
644 	u_int32_t		h = 0, i, filtsave;
645 	int			bit, index;
646 
647 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
648 		NGE_CLRBIT(sc, NGE_RXFILT_CTL,
649 		    NGE_RXFILTCTL_MCHASH|NGE_RXFILTCTL_UCHASH);
650 		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLMULTI);
651 		return;
652 	}
653 
654 	/*
655 	 * We have to explicitly enable the multicast hash table
656 	 * on the NatSemi chip if we want to use it, which we do.
657 	 * We also have to tell it that we don't want to use the
658 	 * hash table for matching unicast addresses.
659 	 */
660 	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_MCHASH);
661 	NGE_CLRBIT(sc, NGE_RXFILT_CTL,
662 	    NGE_RXFILTCTL_ALLMULTI|NGE_RXFILTCTL_UCHASH);
663 
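	/*
	 * NGE_RXFILT_CTL doubles as the address register for the
	 * filter memory, so save the control bits here and restore
	 * them once the hash table has been rewritten.
	 */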
664 	filtsave = CSR_READ_4(sc, NGE_RXFILT_CTL);
665 
666 	/* first, zot all the existing hash bits */
667 	for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) {
668 		CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i);
669 		CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0);
670 	}
671 
672 	/*
673 	 * From the 11 bits returned by the crc routine, the top 7
674 	 * bits represent the 16-bit word in the mcast hash table
675 	 * that needs to be updated, and the lower 4 bits represent
	 * which bit within that word needs to be set.
677 	 */
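	/*
	 * Worked example (hypothetical hash value): if nge_crc()
	 * returns h = 0x4d3, then index = (h >> 4) & 0x7F = 0x4d
	 * selects the word at NGE_FILTADDR_MCAST_LO + 0x9a, and
	 * bit = h & 0xF = 3 is the bit to set within it.
	 */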
678 	ETHER_FIRST_MULTI(step, ac, enm);
679 	while (enm != NULL) {
680 		h = nge_crc(sc, LLADDR((struct sockaddr_dl *)enm->enm_addrlo));
681 		index = (h >> 4) & 0x7F;
682 		bit = h & 0xF;
683 		CSR_WRITE_4(sc, NGE_RXFILT_CTL,
684 		    NGE_FILTADDR_MCAST_LO + (index * 2));
685 		NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit));
686 		ETHER_NEXT_MULTI(step, enm);
687 	}
688 
689 	CSR_WRITE_4(sc, NGE_RXFILT_CTL, filtsave);
690 }
691 
692 void
693 nge_reset(sc)
694 	struct nge_softc	*sc;
695 {
696 	int			i;
697 
698 	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET);
699 
700 	for (i = 0; i < NGE_TIMEOUT; i++) {
701 		if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET))
702 			break;
703 	}
704 
705 	if (i == NGE_TIMEOUT)
706 		printf("%s: reset never completed\n", sc->sc_dv.dv_xname);
707 
708 	/* Wait a little while for the chip to get its brains in order. */
709 	DELAY(1000);
710 
711 	/*
	 * If this is a NatSemi chip, make sure to clear
713 	 * PME mode.
714 	 */
715 	CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS);
716 	CSR_WRITE_4(sc, NGE_CLKRUN, 0);
717 }
718 
719 /*
 * Probe for a NatSemi chip. Check the PCI vendor and device
 * IDs and return 1 if we find a match.
722  */
723 int
724 nge_probe(parent, match, aux)
725 	struct device *parent;
726 	void *match;
727 	void *aux;
728 {
729 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
730 
731 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NS &&
732 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NS_DP83820)
733 		return (1);
734 
735 	return (0);
736 }
737 
738 /*
739  * Attach the interface. Allocate softc structures, do ifmedia
740  * setup and ethernet/BPF attach.
741  */
742 void
743 nge_attach(parent, self, aux)
744 	struct device *parent, *self;
745 	void *aux;
746 {
747 	struct nge_softc	*sc = (struct nge_softc *)self;
748 	struct pci_attach_args	*pa = aux;
749 	pci_chipset_tag_t	pc = pa->pa_pc;
750 	pci_intr_handle_t	ih;
751 	const char		*intrstr = NULL;
752 	bus_addr_t		iobase;
753 	bus_size_t		iosize;
754 	bus_dma_segment_t	seg;
755 	bus_dmamap_t		dmamap;
756 	int			s, rseg;
757 	u_char			eaddr[ETHER_ADDR_LEN];
758 	u_int32_t		command;
759 	struct ifnet		*ifp;
760 	int			error = 0;
761 	caddr_t			kva;
762 
763 	s = splimp();
764 
765 	/*
766 	 * Handle power management nonsense.
767 	 */
768 	DPRINTFN(5, ("%s: preparing for conf read\n", sc->sc_dv.dv_xname));
769 	command = pci_conf_read(pc, pa->pa_tag, NGE_PCI_CAPID) & 0x000000FF;
770 	if (command == 0x01) {
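		/* Capability ID 0x01 is PCI power management. */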
771 		command = pci_conf_read(pc, pa->pa_tag, NGE_PCI_PWRMGMTCTRL);
772 		if (command & NGE_PSTATE_MASK) {
773 			u_int32_t		iobase, membase, irq;
774 
775 			/* Save important PCI config data. */
776 			iobase = pci_conf_read(pc, pa->pa_tag, NGE_PCI_LOIO);
777 			membase = pci_conf_read(pc, pa->pa_tag, NGE_PCI_LOMEM);
778 			irq = pci_conf_read(pc, pa->pa_tag, NGE_PCI_INTLINE);
779 
780 			/* Reset the power state. */
781 			printf("%s: chip is in D%d power mode "
782 			       "-- setting to D0\n", sc->sc_dv.dv_xname,
783 			       command & NGE_PSTATE_MASK);
784 			command &= 0xFFFFFFFC;
785 			pci_conf_write(pc, pa->pa_tag,
786 				       NGE_PCI_PWRMGMTCTRL, command);
787 
788 			/* Restore PCI config data. */
789 			pci_conf_write(pc, pa->pa_tag, NGE_PCI_LOIO, iobase);
790 			pci_conf_write(pc, pa->pa_tag, NGE_PCI_LOMEM, membase);
791 			pci_conf_write(pc, pa->pa_tag, NGE_PCI_INTLINE, irq);
792 		}
793 	}
794 
795 	/*
796 	 * Map control/status registers.
797 	 */
798 	DPRINTFN(5, ("%s: map control/status regs\n", sc->sc_dv.dv_xname));
799 	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
800 	command |= PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE |
801 	  PCI_COMMAND_MASTER_ENABLE;
802 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
803 	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
804 
805 #ifdef NGE_USEIOSPACE
806 	if (!(command & PCI_COMMAND_IO_ENABLE)) {
807 		printf("%s: failed to enable I/O ports!\n",
808 		       sc->sc_dv.dv_xname);
		error = ENXIO;
810 		goto fail;
811 	}
812 	/*
813 	 * Map control/status registers.
814 	 */
815 	DPRINTFN(5, ("%s: pci_io_find\n", sc->sc_dv.dv_xname));
816 	if (pci_io_find(pc, pa->pa_tag, NGE_PCI_LOIO, &iobase, &iosize)) {
817 		printf(": can't find i/o space\n");
818 		goto fail;
819 	}
820 	DPRINTFN(5, ("%s: bus_space_map\n", sc->sc_dv.dv_xname));
821 	if (bus_space_map(pa->pa_iot, iobase, iosize, 0, &sc->nge_bhandle)) {
822 		printf(": can't map i/o space\n");
823 		goto fail;
824 	}
825 	sc->nge_btag = pa->pa_iot;
826 #else
827 	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
828 		printf("%s: failed to enable memory mapping!\n",
829 		       sc->sc_dv.dv_xname);
830 		error = ENXIO;
831 		goto fail;
832 	}
833 	DPRINTFN(5, ("%s: pci_mem_find\n", sc->sc_dv.dv_xname));
834 	if (pci_mem_find(pc, pa->pa_tag, NGE_PCI_LOMEM, &iobase,
835 			 &iosize, NULL)) {
836 		printf(": can't find mem space\n");
837 		goto fail;
838 	}
839 	DPRINTFN(5, ("%s: bus_space_map\n", sc->sc_dv.dv_xname));
840 	if (bus_space_map(pa->pa_memt, iobase, iosize, 0, &sc->nge_bhandle)) {
841 		printf(": can't map mem space\n");
842 		goto fail;
843 	}
844 
845 	sc->nge_btag = pa->pa_memt;
846 #endif
847 
848 	/* Disable all interrupts */
849 	CSR_WRITE_4(sc, NGE_IER, 0);
850 
851 	DPRINTFN(5, ("%s: pci_intr_map\n", sc->sc_dv.dv_xname));
852 	if (pci_intr_map(pa, &ih)) {
853 		printf(": couldn't map interrupt\n");
854 		goto fail;
855 	}
856 
857 	DPRINTFN(5, ("%s: pci_intr_string\n", sc->sc_dv.dv_xname));
858 	intrstr = pci_intr_string(pc, ih);
859 	DPRINTFN(5, ("%s: pci_intr_establish\n", sc->sc_dv.dv_xname));
860 	sc->nge_intrhand = pci_intr_establish(pc, ih, IPL_NET, nge_intr, sc,
861 					      sc->sc_dv.dv_xname);
862 	if (sc->nge_intrhand == NULL) {
863 		printf(": couldn't establish interrupt");
864 		if (intrstr != NULL)
865 			printf(" at %s", intrstr);
866 		printf("\n");
867 		goto fail;
868 	}
869 	printf(": %s", intrstr);
870 
871 	/* Reset the adapter. */
872 	DPRINTFN(5, ("%s: nge_reset\n", sc->sc_dv.dv_xname));
873 	nge_reset(sc);
874 
875 	/*
876 	 * Get station address from the EEPROM.
877 	 */
878 	DPRINTFN(5, ("%s: nge_read_eeprom\n", sc->sc_dv.dv_xname));
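	/*
	 * The three station address words are stored in reverse
	 * order in the EEPROM, hence eaddr[] is filled from the
	 * back forward.
	 */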
879 	nge_read_eeprom(sc, (caddr_t)&eaddr[4], NGE_EE_NODEADDR, 1, 0);
880 	nge_read_eeprom(sc, (caddr_t)&eaddr[2], NGE_EE_NODEADDR + 1, 1, 0);
881 	nge_read_eeprom(sc, (caddr_t)&eaddr[0], NGE_EE_NODEADDR + 2, 1, 0);
882 
883 	/*
884 	 * A NatSemi chip was detected. Inform the world.
885 	 */
886 	printf(": address: %s\n", ether_sprintf(eaddr));
887 
888 	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
889 
890 	sc->sc_dmatag = pa->pa_dmat;
891 	DPRINTFN(5, ("%s: bus_dmamem_alloc\n", sc->sc_dv.dv_xname));
892 	if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct nge_list_data),
893 			     PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
894 		printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname);
895 		goto fail;
896 	}
897 	DPRINTFN(5, ("%s: bus_dmamem_map\n", sc->sc_dv.dv_xname));
898 	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
899 			   sizeof(struct nge_list_data), &kva,
900 			   BUS_DMA_NOWAIT)) {
		printf("%s: can't map dma buffers (%d bytes)\n",
		       sc->sc_dv.dv_xname, (int)sizeof(struct nge_list_data));
903 		bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
904 		goto fail;
905 	}
	DPRINTFN(5, ("%s: bus_dmamap_create\n", sc->sc_dv.dv_xname));
907 	if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct nge_list_data), 1,
908 			      sizeof(struct nge_list_data), 0,
909 			      BUS_DMA_NOWAIT, &dmamap)) {
910 		printf("%s: can't create dma map\n", sc->sc_dv.dv_xname);
911 		bus_dmamem_unmap(sc->sc_dmatag, kva,
912 				 sizeof(struct nge_list_data));
913 		bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
914 		goto fail;
915 	}
	DPRINTFN(5, ("%s: bus_dmamap_load\n", sc->sc_dv.dv_xname));
917 	if (bus_dmamap_load(sc->sc_dmatag, dmamap, kva,
918 			    sizeof(struct nge_list_data), NULL,
919 			    BUS_DMA_NOWAIT)) {
920 		bus_dmamap_destroy(sc->sc_dmatag, dmamap);
921 		bus_dmamem_unmap(sc->sc_dmatag, kva,
922 				 sizeof(struct nge_list_data));
923 		bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
924 		goto fail;
925 	}
926 
927 	DPRINTFN(5, ("%s: bzero\n", sc->sc_dv.dv_xname));
928 	sc->nge_ldata = (struct nge_list_data *)kva;
929 	bzero(sc->nge_ldata, sizeof(struct nge_list_data));
930 
931 	/* Try to allocate memory for jumbo buffers. */
932 	DPRINTFN(5, ("%s: nge_alloc_jumbo_mem\n", sc->sc_dv.dv_xname));
933 	if (nge_alloc_jumbo_mem(sc)) {
934 		printf("%s: jumbo buffer allocation failed\n",
935 		       sc->sc_dv.dv_xname);
936 		goto fail;
937 	}
938 
939 	ifp = &sc->arpcom.ac_if;
940 	ifp->if_softc = sc;
941 	ifp->if_mtu = ETHERMTU;
942 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
943 	ifp->if_ioctl = nge_ioctl;
944 	ifp->if_output = ether_output;
945 	ifp->if_start = nge_start;
946 	ifp->if_watchdog = nge_watchdog;
947 	ifp->if_baudrate = 1000000000;
948 	IFQ_SET_MAXLEN(&ifp->if_snd, NGE_TX_LIST_CNT - 1);
949 	IFQ_SET_READY(&ifp->if_snd);
950 	ifp->if_capabilities =
951 	    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
952 #if NVLAN > 0
953 	ifp->if_capabilities |= IFCAP_VLAN_MTU;
954 #endif
955 	DPRINTFN(5, ("%s: bcopy\n", sc->sc_dv.dv_xname));
956 	bcopy(sc->sc_dv.dv_xname, ifp->if_xname, IFNAMSIZ);
957 
958 	/*
959 	 * Do MII setup.
960 	 */
961 	DPRINTFN(5, ("%s: mii setup\n", sc->sc_dv.dv_xname));
962 	if (CSR_READ_4(sc, NGE_CFG) & NGE_CFG_TBI_EN) {
963 		DPRINTFN(5, ("%s: TBI mode\n", sc->sc_dv.dv_xname));
964 		sc->nge_tbi = 1;
965 
966 		ifmedia_init(&sc->nge_ifmedia, 0, nge_ifmedia_tbi_upd,
967 			     nge_ifmedia_tbi_sts);
968 
		ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_NONE, 0, NULL);
970 		ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
971 		ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
972 			    0, NULL);
973 		ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
974 
975 		ifmedia_set(&sc->nge_ifmedia, IFM_ETHER|IFM_AUTO);
976 
977 		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
978 			    | NGE_GPIO_GP4_OUT
979 			    | NGE_GPIO_GP1_OUTENB | NGE_GPIO_GP2_OUTENB
980 			    | NGE_GPIO_GP3_OUTENB | NGE_GPIO_GP4_OUTENB
981 			    | NGE_GPIO_GP5_OUTENB);
982 
983 		NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
984 	} else {
985 		sc->nge_mii.mii_ifp = ifp;
986 		sc->nge_mii.mii_readreg = nge_miibus_readreg;
987 		sc->nge_mii.mii_writereg = nge_miibus_writereg;
988 		sc->nge_mii.mii_statchg = nge_miibus_statchg;
989 
990 		ifmedia_init(&sc->nge_mii.mii_media, 0, nge_ifmedia_mii_upd,
991 			     nge_ifmedia_mii_sts);
992 		mii_attach(&sc->sc_dv, &sc->nge_mii, 0xffffffff, MII_PHY_ANY,
993 			   MII_OFFSET_ANY, 0);
994 
995 		if (LIST_FIRST(&sc->nge_mii.mii_phys) == NULL) {
996 
997 			printf("%s: no PHY found!\n", sc->sc_dv.dv_xname);
998 			ifmedia_add(&sc->nge_mii.mii_media,
999 				    IFM_ETHER|IFM_MANUAL, 0, NULL);
1000 			ifmedia_set(&sc->nge_mii.mii_media,
1001 				    IFM_ETHER|IFM_MANUAL);
1002 		}
1003 		else
1004 			ifmedia_set(&sc->nge_mii.mii_media,
1005 				    IFM_ETHER|IFM_AUTO);
1006 	}
1007 
1008 	/*
1009 	 * Call MI attach routine.
1010 	 */
1011 	DPRINTFN(5, ("%s: if_attach\n", sc->sc_dv.dv_xname));
1012 	if_attach(ifp);
1013 	DPRINTFN(5, ("%s: ether_ifattach\n", sc->sc_dv.dv_xname));
1014 	ether_ifattach(ifp);
1015 	DPRINTFN(5, ("%s: timeout_set\n", sc->sc_dv.dv_xname));
1016 	timeout_set(&sc->nge_timeout, nge_tick, sc);
1017 	timeout_add(&sc->nge_timeout, hz);
1018 
1019 fail:
1020 	splx(s);
1021 }
1022 
1023 /*
1024  * Initialize the transmit descriptors.
1025  */
1026 int
1027 nge_list_tx_init(sc)
1028 	struct nge_softc	*sc;
1029 {
1030 	struct nge_list_data	*ld;
1031 	struct nge_ring_data	*cd;
1032 	int			i;
1033 
1034 	cd = &sc->nge_cdata;
1035 	ld = sc->nge_ldata;
1036 
1037 	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
1038 		if (i == (NGE_TX_LIST_CNT - 1)) {
1039 			ld->nge_tx_list[i].nge_nextdesc =
1040 			    &ld->nge_tx_list[0];
1041 			ld->nge_tx_list[i].nge_next =
1042 			    vtophys(&ld->nge_tx_list[0]);
1043 		} else {
1044 			ld->nge_tx_list[i].nge_nextdesc =
1045 			    &ld->nge_tx_list[i + 1];
1046 			ld->nge_tx_list[i].nge_next =
1047 			    vtophys(&ld->nge_tx_list[i + 1]);
1048 		}
1049 		ld->nge_tx_list[i].nge_mbuf = NULL;
1050 		ld->nge_tx_list[i].nge_ptr = 0;
1051 		ld->nge_tx_list[i].nge_ctl = 0;
1052 	}
1053 
1054 	cd->nge_tx_prod = cd->nge_tx_cons = cd->nge_tx_cnt = 0;
1055 
1056 	return(0);
1057 }
1058 
1059 
1060 /*
1061  * Initialize the RX descriptors and allocate mbufs for them. Note that
1062  * we arrange the descriptors in a closed ring, so that the last descriptor
1063  * points back to the first.
1064  */
1065 int
1066 nge_list_rx_init(sc)
1067 	struct nge_softc	*sc;
1068 {
1069 	struct nge_list_data	*ld;
1070 	struct nge_ring_data	*cd;
1071 	int			i;
1072 
1073 	ld = sc->nge_ldata;
1074 	cd = &sc->nge_cdata;
1075 
1076 	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
1077 		if (nge_newbuf(sc, &ld->nge_rx_list[i], NULL) == ENOBUFS)
1078 			return(ENOBUFS);
1079 		if (i == (NGE_RX_LIST_CNT - 1)) {
1080 			ld->nge_rx_list[i].nge_nextdesc =
1081 			    &ld->nge_rx_list[0];
1082 			ld->nge_rx_list[i].nge_next =
1083 			    vtophys(&ld->nge_rx_list[0]);
1084 		} else {
1085 			ld->nge_rx_list[i].nge_nextdesc =
1086 			    &ld->nge_rx_list[i + 1];
1087 			ld->nge_rx_list[i].nge_next =
1088 			    vtophys(&ld->nge_rx_list[i + 1]);
1089 		}
1090 	}
1091 
1092 	cd->nge_rx_prod = 0;
1093 
1094 	return(0);
1095 }
1096 
1097 /*
 * Initialize an RX descriptor and attach an mbuf with a jumbo buffer.
1099  */
1100 int
1101 nge_newbuf(sc, c, m)
1102 	struct nge_softc	*sc;
1103 	struct nge_desc		*c;
1104 	struct mbuf		*m;
1105 {
1106 	struct mbuf		*m_new = NULL;
	caddr_t			buf = NULL;
1108 
1109 	if (m == NULL) {
1110 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1111 		if (m_new == NULL) {
1112 			printf("%s: no memory for rx list "
1113 			       "-- packet dropped!\n", sc->sc_dv.dv_xname);
1114 			return(ENOBUFS);
1115 		}
1116 
1117 		/* Allocate the jumbo buffer */
1118 		buf = nge_jalloc(sc);
1119 		if (buf == NULL) {
1120 #ifdef NGE_VERBOSE
1121 			printf("%s: jumbo allocation failed "
1122 			       "-- packet dropped!\n", sc->sc_dv.dv_xname);
1123 #endif
1124 			m_freem(m_new);
1125 			return(ENOBUFS);
1126 		}
1127 		/* Attach the buffer to the mbuf */
1128 		m_new->m_data = m_new->m_ext.ext_buf = (void *)buf;
1129 		m_new->m_flags |= M_EXT;
1130 		m_new->m_ext.ext_size = m_new->m_pkthdr.len =
1131 			m_new->m_len = NGE_MCLBYTES;
1132 		m_new->m_ext.ext_free = nge_jfree;
1133 		m_new->m_ext.ext_arg = sc;
1134 		MCLINITREFERENCE(m_new);
1135 	} else {
1136 		m_new = m;
1137 		m_new->m_len = m_new->m_pkthdr.len = NGE_MCLBYTES;
1138 		m_new->m_data = m_new->m_ext.ext_buf;
1139 	}
1140 
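	/*
	 * Trim an even 8 bytes off the front, presumably to leave a
	 * little headroom while keeping the payload address 64-bit
	 * aligned, as the chip requires for RX buffers (see the note
	 * at the top of this file).
	 */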
1141 	m_adj(m_new, sizeof(u_int64_t));
1142 
1143 	c->nge_mbuf = m_new;
1144 	c->nge_ptr = vtophys(mtod(m_new, caddr_t));
1145 	DPRINTFN(7,("%s: c->nge_ptr=%#x\n", sc->sc_dv.dv_xname,
1146 		    c->nge_ptr));
1147 	c->nge_ctl = m_new->m_len;
1148 	c->nge_extsts = 0;
1149 
1150 	return(0);
1151 }
1152 
1153 int
1154 nge_alloc_jumbo_mem(sc)
1155 	struct nge_softc	*sc;
1156 {
1157 	caddr_t			ptr, kva;
1158 	bus_dma_segment_t	seg;
1159 	bus_dmamap_t		dmamap;
1160 	int			i, rseg;
1161 	struct nge_jpool_entry	*entry;
1162 
1163 	if (bus_dmamem_alloc(sc->sc_dmatag, NGE_JMEM, PAGE_SIZE, 0,
1164 			     &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
1165 		printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname);
1166 		return (ENOBUFS);
1167 	}
1168 	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, NGE_JMEM, &kva,
1169 			   BUS_DMA_NOWAIT)) {
1170 		printf("%s: can't map dma buffers (%d bytes)\n",
1171 		       sc->sc_dv.dv_xname, NGE_JMEM);
1172 		bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
1173 		return (ENOBUFS);
1174 	}
1175 	if (bus_dmamap_create(sc->sc_dmatag, NGE_JMEM, 1,
1176 			      NGE_JMEM, 0, BUS_DMA_NOWAIT, &dmamap)) {
1177 		printf("%s: can't create dma map\n", sc->sc_dv.dv_xname);
1178 		bus_dmamem_unmap(sc->sc_dmatag, kva, NGE_JMEM);
1179 		bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
1180 		return (ENOBUFS);
1181 	}
1182 	if (bus_dmamap_load(sc->sc_dmatag, dmamap, kva, NGE_JMEM,
1183 			    NULL, BUS_DMA_NOWAIT)) {
1184 		printf("%s: can't load dma map\n", sc->sc_dv.dv_xname);
1185 		bus_dmamap_destroy(sc->sc_dmatag, dmamap);
1186 		bus_dmamem_unmap(sc->sc_dmatag, kva, NGE_JMEM);
1187 		bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
1188 		return (ENOBUFS);
	}
1190 	sc->nge_cdata.nge_jumbo_buf = (caddr_t)kva;
1191 	DPRINTFN(1,("%s: nge_jumbo_buf=%#x, NGE_MCLBYTES=%#x\n",
1192 		    sc->sc_dv.dv_xname , sc->nge_cdata.nge_jumbo_buf,
1193 		    NGE_MCLBYTES));
1194 
1195 	LIST_INIT(&sc->nge_jfree_listhead);
1196 	LIST_INIT(&sc->nge_jinuse_listhead);
1197 
	/*
	 * Now divide the region up into 9K pieces and save the
	 * addresses in an array. nge_jfree() needs the softc pointer
	 * to find its pool again; it gets it through the mbuf
	 * external-storage argument (ext_arg), which nge_newbuf()
	 * points at sc when attaching a buffer.
	 */
1206 	ptr = sc->nge_cdata.nge_jumbo_buf;
1207 	for (i = 0; i < NGE_JSLOTS; i++) {
1208 		sc->nge_cdata.nge_jslots[i].nge_buf = ptr;
1209 		sc->nge_cdata.nge_jslots[i].nge_inuse = 0;
1210 		ptr += NGE_MCLBYTES;
1211 		entry = malloc(sizeof(struct nge_jpool_entry),
1212 			       M_DEVBUF, M_NOWAIT);
1213 		if (entry == NULL) {
1214 			bus_dmamap_unload(sc->sc_dmatag, dmamap);
1215 			bus_dmamap_destroy(sc->sc_dmatag, dmamap);
1216 			bus_dmamem_unmap(sc->sc_dmatag, kva, NGE_JMEM);
1217 			bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
1218 			sc->nge_cdata.nge_jumbo_buf = NULL;
1219 			printf("%s: no memory for jumbo buffer queue!\n",
1220 			       sc->sc_dv.dv_xname);
1221 			return(ENOBUFS);
1222 		}
1223 		entry->slot = i;
1224 		LIST_INSERT_HEAD(&sc->nge_jfree_listhead, entry,
1225 				 jpool_entries);
1226 	}
1227 
1228 	return(0);
1229 }
1230 
1231 /*
1232  * Allocate a jumbo buffer.
1233  */
1234 void *
1235 nge_jalloc(sc)
1236 	struct nge_softc	*sc;
1237 {
1238 	struct nge_jpool_entry   *entry;
1239 
1240 	entry = LIST_FIRST(&sc->nge_jfree_listhead);
1241 
1242 	if (entry == NULL) {
1243 #ifdef NGE_VERBOSE
1244 		printf("%s: no free jumbo buffers\n", sc->sc_dv.dv_xname);
1245 #endif
1246 		return(NULL);
1247 	}
1248 
1249 	LIST_REMOVE(entry, jpool_entries);
1250 	LIST_INSERT_HEAD(&sc->nge_jinuse_listhead, entry, jpool_entries);
1251 	sc->nge_cdata.nge_jslots[entry->slot].nge_inuse = 1;
1252 	return(sc->nge_cdata.nge_jslots[entry->slot].nge_buf);
1253 }
1254 
1255 /*
1256  * Release a jumbo buffer.
1257  */
1258 void
1259 nge_jfree(buf, size, arg)
1260 	caddr_t		buf;
1261 	u_int		size;
1262 	void		*arg;
1263 {
1264 	struct nge_softc	*sc;
1265 	int		        i;
1266 	struct nge_jpool_entry *entry;
1267 
1268 	/* Extract the softc struct pointer. */
1269 	sc = (struct nge_softc *)arg;
1270 
1271 	if (sc == NULL)
1272 		panic("nge_jfree: can't find softc pointer!");
1273 
1274 	/* calculate the slot this buffer belongs to */
1275 
1276 	i = ((vaddr_t)buf - (vaddr_t)sc->nge_cdata.nge_jumbo_buf)
1277 	  / NGE_MCLBYTES;
1278 
1279 	if ((i < 0) || (i >= NGE_JSLOTS))
1280 		panic("nge_jfree: asked to free buffer that we don't manage!");
1281 	else if (sc->nge_cdata.nge_jslots[i].nge_inuse == 0)
1282 		panic("nge_jfree: buffer already free!");
1283 	else {
1284 		sc->nge_cdata.nge_jslots[i].nge_inuse--;
1285 		if(sc->nge_cdata.nge_jslots[i].nge_inuse == 0) {
1286 			entry = LIST_FIRST(&sc->nge_jinuse_listhead);
1287 			if (entry == NULL)
1288 				panic("nge_jfree: buffer not in use!");
1289 			entry->slot = i;
1290 			LIST_REMOVE(entry, jpool_entries);
1291 			LIST_INSERT_HEAD(&sc->nge_jfree_listhead,
1292 					 entry, jpool_entries);
1293 		}
1294 	}
1295 }
1296 
1297 /*
1298  * A frame has been uploaded: pass the resulting mbuf chain up to
1299  * the higher level protocols.
1300  */
1301 void
1302 nge_rxeof(sc)
1303 	struct nge_softc	*sc;
1304 {
1305         struct mbuf		*m;
1306         struct ifnet		*ifp;
1307 	struct nge_desc		*cur_rx;
1308 	int			i, total_len = 0;
1309 	u_int32_t		rxstat;
1310 
1311 	ifp = &sc->arpcom.ac_if;
1312 	i = sc->nge_cdata.nge_rx_prod;
1313 
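	/*
	 * Walk the ring from where we left off, handling every
	 * descriptor the chip has completed and handed back to the
	 * host.
	 */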
1314 	while(NGE_OWNDESC(&sc->nge_ldata->nge_rx_list[i])) {
1315 		struct mbuf		*m0 = NULL;
1316 		u_int32_t		extsts;
1317 
1318 		cur_rx = &sc->nge_ldata->nge_rx_list[i];
1319 		rxstat = cur_rx->nge_rxstat;
1320 		extsts = cur_rx->nge_extsts;
1321 		m = cur_rx->nge_mbuf;
1322 		cur_rx->nge_mbuf = NULL;
1323 		total_len = NGE_RXBYTES(cur_rx);
1324 		NGE_INC(i, NGE_RX_LIST_CNT);
1325 
1326 		/*
1327 		 * If an error occurs, update stats, clear the
1328 		 * status word and leave the mbuf cluster in place:
1329 		 * it should simply get re-used next time this descriptor
1330 		 * comes up in the ring.
1331 		 */
1332 		if (!(rxstat & NGE_CMDSTS_PKT_OK)) {
1333 			ifp->if_ierrors++;
1334 			nge_newbuf(sc, cur_rx, m);
1335 			continue;
1336 		}
1337 
1338 		/*
1339 		 * Ok. NatSemi really screwed up here. This is the
1340 		 * only gigE chip I know of with alignment constraints
1341 		 * on receive buffers. RX buffers must be 64-bit aligned.
1342 		 */
1343 #ifndef __STRICT_ALIGNMENT
1344 		/*
1345 		 * By popular demand, ignore the alignment problems
1346 		 * on the Intel x86 platform. The performance hit
1347 		 * incurred due to unaligned accesses is much smaller
1348 		 * than the hit produced by forcing buffer copies all
1349 		 * the time, especially with jumbo frames. We still
1350 		 * need to fix up the alignment everywhere else though.
1351 		 */
1352 		if (nge_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
1353 #endif
1354 			m0 = m_devget(mtod(m, char *), total_len,
1355 			    ETHER_ALIGN, ifp, NULL);
1356 			nge_newbuf(sc, cur_rx, m);
1357 			if (m0 == NULL) {
1358 				printf("%s: no receive buffers "
1359 				    "available -- packet dropped!\n",
1360 				    sc->sc_dv.dv_xname);
1361 				ifp->if_ierrors++;
1362 				continue;
1363 			}
1364 			m_adj(m0, ETHER_ALIGN);
1365 			m = m0;
1366 #ifndef __STRICT_ALIGNMENT
1367 		} else {
1368 			m->m_pkthdr.rcvif = ifp;
1369 			m->m_pkthdr.len = m->m_len = total_len;
1370 		}
1371 #endif
1372 
1373 		ifp->if_ipackets++;
1374 
1375 #if NBPFILTER > 0
1376 		/*
1377 		 * Handle BPF listeners. Let the BPF user see the packet.
1378 		 */
1379 		if (ifp->if_bpf)
1380 			bpf_mtap(ifp->if_bpf, m);
1381 #endif
1382 
1383 		/* Do IP checksum checking. */
1384 		if (extsts & NGE_RXEXTSTS_IPPKT) {
1385 			if (extsts & NGE_RXEXTSTS_IPCSUMERR)
1386 				m->m_pkthdr.csum |= M_IPV4_CSUM_IN_BAD;
1387 			else
1388 				m->m_pkthdr.csum |= M_IPV4_CSUM_IN_OK;
1389 		}
1390 		if (extsts & NGE_RXEXTSTS_TCPPKT) {
1391 			if (extsts & NGE_RXEXTSTS_TCPCSUMERR)
1392 				m->m_pkthdr.csum |= M_TCP_CSUM_IN_BAD;
1393 			else
1394 				m->m_pkthdr.csum |= M_TCP_CSUM_IN_OK;
1395 		}
1396 		if (extsts & NGE_RXEXTSTS_UDPPKT) {
1397 			if (extsts & NGE_RXEXTSTS_UDPCSUMERR)
1398 				m->m_pkthdr.csum |= M_UDP_CSUM_IN_BAD;
1399 			else
1400 				m->m_pkthdr.csum |= M_UDP_CSUM_IN_OK;
1401 		}
1402 
1403 #if NVLAN > 0
1404 		/*
1405 		 * If we received a packet with a vlan tag, pass it
1406 		 * to vlan_input() instead of ether_input().
1407 		 */
1408 		if (extsts & NGE_RXEXTSTS_VLANPKT) {
1409 			if (vlan_input_tag(m, extsts & NGE_RXEXTSTS_VTCI) < 0)
1410 				ifp->if_data.ifi_noproto++;
			continue;
		}
1413 #endif
1414 
1415 		ether_input_mbuf(ifp, m);
1416 	}
1417 
1418 	sc->nge_cdata.nge_rx_prod = i;
1419 }
1420 
1421 /*
1422  * A frame was downloaded to the chip. It's safe for us to clean up
1423  * the list buffers.
1424  */
1425 
1426 void
1427 nge_txeof(sc)
1428 	struct nge_softc	*sc;
1429 {
1430 	struct nge_desc		*cur_tx = NULL;
1431 	struct ifnet		*ifp;
1432 	u_int32_t		idx;
1433 
1434 	ifp = &sc->arpcom.ac_if;
1435 
1436 	/* Clear the timeout timer. */
1437 	ifp->if_timer = 0;
1438 
1439 	/*
1440 	 * Go through our tx list and free mbufs for those
1441 	 * frames that have been transmitted.
1442 	 */
1443 	idx = sc->nge_cdata.nge_tx_cons;
1444 	while (idx != sc->nge_cdata.nge_tx_prod) {
1445 		cur_tx = &sc->nge_ldata->nge_tx_list[idx];
1446 
1447 		if (NGE_OWNDESC(cur_tx))
1448 			break;
1449 
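		/*
		 * A set MORE bit marks an intermediate fragment of a
		 * multi-descriptor frame; only the final fragment
		 * carries the frame's status bits and mbuf pointer.
		 */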
1450 		if (cur_tx->nge_ctl & NGE_CMDSTS_MORE) {
1451 			sc->nge_cdata.nge_tx_cnt--;
1452 			NGE_INC(idx, NGE_TX_LIST_CNT);
1453 			continue;
1454 		}
1455 
1456 		if (!(cur_tx->nge_ctl & NGE_CMDSTS_PKT_OK)) {
1457 			ifp->if_oerrors++;
1458 			if (cur_tx->nge_txstat & NGE_TXSTAT_EXCESSCOLLS)
1459 				ifp->if_collisions++;
1460 			if (cur_tx->nge_txstat & NGE_TXSTAT_OUTOFWINCOLL)
1461 				ifp->if_collisions++;
1462 		}
1463 
1464 		ifp->if_collisions +=
1465 		    (cur_tx->nge_txstat & NGE_TXSTAT_COLLCNT) >> 16;
1466 
1467 		ifp->if_opackets++;
1468 		if (cur_tx->nge_mbuf != NULL) {
1469 			m_freem(cur_tx->nge_mbuf);
1470 			cur_tx->nge_mbuf = NULL;
1471 		}
1472 
1473 		sc->nge_cdata.nge_tx_cnt--;
1474 		NGE_INC(idx, NGE_TX_LIST_CNT);
1475 		ifp->if_timer = 0;
1476 	}
1477 
1478 	sc->nge_cdata.nge_tx_cons = idx;
1479 
1480 	if (cur_tx != NULL)
1481 		ifp->if_flags &= ~IFF_OACTIVE;
1482 }
1483 
1484 void
1485 nge_tick(xsc)
1486 	void			*xsc;
1487 {
1488 	struct nge_softc	*sc = xsc;
1489 	struct mii_data		*mii = &sc->nge_mii;
1490 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1491 	int			s;
1492 
1493 	s = splimp();
1494 
1495 	DPRINTFN(10, ("%s: nge_tick: link=%d\n", sc->sc_dv.dv_xname,
1496 		      sc->nge_link));
1497 
1498 	timeout_add(&sc->nge_timeout, hz);
1499 	if (sc->nge_link) {
1500 		splx(s);
1501 		return;
1502 	}
1503 
1504 	if (sc->nge_tbi) {
1505 		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
1506 		    == IFM_AUTO) {
1507 			u_int32_t bmsr, anlpar, txcfg, rxcfg;
1508 
1509 			bmsr = CSR_READ_4(sc, NGE_TBI_BMSR);
1510 			DPRINTFN(2, ("%s: nge_tick: bmsr=%#x\n",
1511 				     sc->sc_dv.dv_xname, bmsr));
1512 
1513 			if (!(bmsr & NGE_TBIBMSR_ANEG_DONE)) {
1514 				CSR_WRITE_4(sc, NGE_TBI_BMCR, 0);
1515 
1516 				splx(s);
1517 				return;
1518 			}
1519 
1520 			anlpar = CSR_READ_4(sc, NGE_TBI_ANLPAR);
1521 			txcfg = CSR_READ_4(sc, NGE_TX_CFG);
1522 			rxcfg = CSR_READ_4(sc, NGE_RX_CFG);
1523 
1524 			DPRINTFN(2, ("%s: nge_tick: anlpar=%#x, txcfg=%#x, "
1525 				     "rxcfg=%#x\n", sc->sc_dv.dv_xname, anlpar,
1526 				     txcfg, rxcfg));
1527 
1528 			if (anlpar == 0 || anlpar & NGE_TBIANAR_FDX) {
1529 				txcfg |= (NGE_TXCFG_IGN_HBEAT|
1530 					  NGE_TXCFG_IGN_CARR);
1531 				rxcfg |= NGE_RXCFG_RX_FDX;
1532 			} else {
1533 				txcfg &= ~(NGE_TXCFG_IGN_HBEAT|
1534 					   NGE_TXCFG_IGN_CARR);
1535 				rxcfg &= ~(NGE_RXCFG_RX_FDX);
1536 			}
1537 			txcfg |= NGE_TXCFG_AUTOPAD;
1538 			CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
1539 			CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);
1540 		}
1541 
1542 		DPRINTF(("%s: gigabit link up\n", sc->sc_dv.dv_xname));
1543 		sc->nge_link++;
1544 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
1545 			nge_start(ifp);
1546 	} else {
1547 		mii_tick(mii);
1548 		mii_pollstat(mii);
1549 		if (mii->mii_media_status & IFM_ACTIVE &&
1550 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1551 			sc->nge_link++;
1552 			if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
1553 				DPRINTF(("%s: gigabit link up\n",
1554 					 sc->sc_dv.dv_xname));
1555 			if (!IFQ_IS_EMPTY(&ifp->if_snd))
1556 				nge_start(ifp);
1557 		}
1558 
1559 	}
1560 
1561 	splx(s);
1562 }
1563 
1564 int
1565 nge_intr(arg)
1566 	void			*arg;
1567 {
1568 	struct nge_softc	*sc;
1569 	struct ifnet		*ifp;
1570 	u_int32_t		status;
1571 	int			claimed = 0;
1572 
1573 	sc = arg;
1574 	ifp = &sc->arpcom.ac_if;
1575 
	/* Suppress unwanted interrupts */
1577 	if (!(ifp->if_flags & IFF_UP)) {
1578 		nge_stop(sc);
1579 		return (0);
1580 	}
1581 
1582 	/* Disable interrupts. */
1583 	CSR_WRITE_4(sc, NGE_IER, 0);
1584 
1585 	/* Data LED on for TBI mode */
1586 	if(sc->nge_tbi)
1587 		 CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
1588 			     | NGE_GPIO_GP3_OUT);
1589 
1590 	for (;;) {
1591 		/* Reading the ISR register clears all interrupts. */
1592 		status = CSR_READ_4(sc, NGE_ISR);
1593 
1594 		if ((status & NGE_INTRS) == 0)
1595 			break;
1596 
1597 		claimed = 1;
1598 
1599 		if ((status & NGE_ISR_TX_DESC_OK) ||
1600 		    (status & NGE_ISR_TX_ERR) ||
1601 		    (status & NGE_ISR_TX_OK) ||
1602 		    (status & NGE_ISR_TX_IDLE))
1603 			nge_txeof(sc);
1604 
1605 		if ((status & NGE_ISR_RX_DESC_OK) ||
1606 		    (status & NGE_ISR_RX_ERR) ||
1607 		    (status & NGE_ISR_RX_OFLOW) ||
1608 		    (status & NGE_ISR_RX_FIFO_OFLOW) ||
1609 		    (status & NGE_ISR_RX_IDLE) ||
1610 		    (status & NGE_ISR_RX_OK))
1611 			nge_rxeof(sc);
1612 
1613 		if ((status & NGE_ISR_RX_IDLE))
1614 			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
1615 
1616 		if (status & NGE_ISR_SYSERR) {
1617 			nge_reset(sc);
1618 			ifp->if_flags &= ~IFF_RUNNING;
1619 			nge_init(sc);
1620 		}
1621 
1622 #if 0
1623 		/*
1624 		 * XXX: nge_tick() is not ready to be called this way
1625 		 * it screws up the aneg timeout because mii_tick() is
1626 		 * only to be called once per second.
1627 		 */
1628 		if (status & NGE_IMR_PHY_INTR) {
1629 			sc->nge_link = 0;
1630 			nge_tick(sc);
1631 		}
1632 #endif
1633 	}
1634 
1635 	/* Re-enable interrupts. */
1636 	CSR_WRITE_4(sc, NGE_IER, 1);
1637 
1638 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1639 		nge_start(ifp);
1640 
1641 	/* Data LED off for TBI mode */
1642 	if(sc->nge_tbi)
1643 		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
1644 			    & ~NGE_GPIO_GP3_OUT);
1645 
1646 	return claimed;
1647 }
1648 
1649 /*
1650  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1651  * pointers to the fragment pointers.
1652  */
1653 int
1654 nge_encap(sc, m_head, txidx)
1655 	struct nge_softc	*sc;
1656 	struct mbuf		*m_head;
1657 	u_int32_t		*txidx;
1658 {
1659 	struct nge_desc		*f = NULL;
1660 	struct mbuf		*m;
1661 	int			frag, cur, cnt = 0;
1662 #if NVLAN > 0
1663 	struct ifvlan		*ifv = NULL;
1664 
1665 	if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
1666 	    m_head->m_pkthdr.rcvif != NULL)
1667 		ifv = m_head->m_pkthdr.rcvif->if_softc;
1668 #endif
1669 
1670 	/*
1671 	 * Start packing the mbufs in this chain into
1672 	 * the fragment pointers. Stop when we run out
1673 	 * of fragments or hit the end of the mbuf chain.
1674 	 */
1675 	m = m_head;
1676 	cur = frag = *txidx;
1677 
1678 	for (m = m_head; m != NULL; m = m->m_next) {
1679 		if (m->m_len != 0) {
1680 			if ((NGE_TX_LIST_CNT -
1681 			    (sc->nge_cdata.nge_tx_cnt + cnt)) < 2)
1682 				return(ENOBUFS);
1683 			f = &sc->nge_ldata->nge_tx_list[frag];
1684 			f->nge_ctl = NGE_CMDSTS_MORE | m->m_len;
1685 			f->nge_ptr = vtophys(mtod(m, vaddr_t));
1686 			DPRINTFN(7,("%s: f->nge_ptr=%#x\n",
1687 				    sc->sc_dv.dv_xname, f->nge_ptr));
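			/*
			 * Hand every fragment except the first to the
			 * chip right away; the first descriptor's OWN
			 * bit is set last (below), so the chip cannot
			 * start on a half-built chain.
			 */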
1688 			if (cnt != 0)
1689 				f->nge_ctl |= NGE_CMDSTS_OWN;
1690 			cur = frag;
1691 			NGE_INC(frag, NGE_TX_LIST_CNT);
1692 			cnt++;
1693 		}
1694 	}
1695 
1696 	if (m != NULL)
1697 		return(ENOBUFS);
1698 
1699 	/*
1700 	 * Card handles checksumming on a packet by packet
1701 	 * basis.
1702 	 */
1703 	sc->nge_ldata->nge_tx_list[*txidx].nge_extsts = 0;
1704 	if (m_head->m_pkthdr.csum) {
1705 		if (m_head->m_pkthdr.csum & M_IPV4_CSUM_OUT)
1706 			sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |=
1707 			    NGE_TXEXTSTS_IPCSUM;
1708 		if (m_head->m_pkthdr.csum & M_TCPV4_CSUM_OUT)
1709 			sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |=
1710 			    NGE_TXEXTSTS_TCPCSUM;
1711 		if (m_head->m_pkthdr.csum & M_UDPV4_CSUM_OUT)
1712 			sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |=
1713 			    NGE_TXEXTSTS_UDPCSUM;
1714 	}
1715 
1716 #if NVLAN > 0
1717 	if (ifv != NULL) {
1718 		sc->nge_ldata->nge_tx_list[cur].nge_extsts |=
1719 			(NGE_TXEXTSTS_VLANPKT|ifv->ifv_tag);
1720 	}
1721 #endif
1722 
1723 	sc->nge_ldata->nge_tx_list[cur].nge_mbuf = m_head;
1724 	sc->nge_ldata->nge_tx_list[cur].nge_ctl &= ~NGE_CMDSTS_MORE;
1725 	sc->nge_ldata->nge_tx_list[*txidx].nge_ctl |= NGE_CMDSTS_OWN;
1726 	sc->nge_cdata.nge_tx_cnt += cnt;
1727 	*txidx = frag;
1728 
1729 	return(0);
1730 }
1731 
1732 /*
1733  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1734  * to the mbuf data regions directly in the transmit lists. We also save a
1735  * copy of the pointers since the transmit list fragment pointers are
1736  * physical addresses.
1737  */
1738 
1739 void
1740 nge_start(ifp)
1741 	struct ifnet		*ifp;
1742 {
1743 	struct nge_softc	*sc;
1744 	struct mbuf		*m_head = NULL;
1745 	u_int32_t		idx;
1746 	int			pkts = 0;
1747 
1748 	sc = ifp->if_softc;
1749 
1750 	if (!sc->nge_link)
1751 		return;
1752 
1753 	idx = sc->nge_cdata.nge_tx_prod;
1754 
1755 	if (ifp->if_flags & IFF_OACTIVE)
1756 		return;
1757 
1758 	while(sc->nge_ldata->nge_tx_list[idx].nge_mbuf == NULL) {
1759 		IFQ_POLL(&ifp->if_snd, m_head);
1760 		if (m_head == NULL)
1761 			break;
1762 
1763 		if (nge_encap(sc, m_head, &idx)) {
1764 			ifp->if_flags |= IFF_OACTIVE;
1765 			break;
1766 		}
1767 
1768 		/* now we are committed to transmit the packet */
1769 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1770 		pkts++;
1771 
1772 #if NBPFILTER > 0
1773 		/*
1774 		 * If there's a BPF listener, bounce a copy of this frame
1775 		 * to him.
1776 		 */
1777 		if (ifp->if_bpf)
1778 			bpf_mtap(ifp->if_bpf, m_head);
1779 #endif
1780 	}
1781 	if (pkts == 0)
1782 		return;
1783 
1784 	/* Transmit */
1785 	sc->nge_cdata.nge_tx_prod = idx;
1786 	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE);
1787 
1788 	/*
1789 	 * Set a timeout in case the chip goes out to lunch.
1790 	 */
1791 	ifp->if_timer = 5;
1792 }
1793 
1794 void
1795 nge_init(xsc)
1796 	void			*xsc;
1797 {
1798 	struct nge_softc	*sc = xsc;
1799 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1800 	struct mii_data		*mii;
1801 	u_int32_t		txcfg, rxcfg;
1802 	int			s, media;
1803 
1804 	if (ifp->if_flags & IFF_RUNNING)
1805 		return;
1806 
1807 	s = splimp();
1808 
1809 	/*
1810 	 * Cancel pending I/O and free all RX/TX buffers.
1811 	 */
1812 	nge_stop(sc);
1813 
	mii = sc->nge_tbi ? NULL : &sc->nge_mii;
1815 
1816 	/* Set MAC address */
1817 	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0);
1818 	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
1819 	    ((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
1820 	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1);
1821 	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
1822 	    ((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
1823 	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2);
1824 	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
1825 	    ((u_int16_t *)sc->arpcom.ac_enaddr)[2]);
1826 
1827 	/* Init circular RX list. */
1828 	if (nge_list_rx_init(sc) == ENOBUFS) {
1829 		printf("%s: initialization failed: no "
1830 			"memory for rx buffers\n", sc->sc_dv.dv_xname);
1831 		nge_stop(sc);
1832 		splx(s);
1833 		return;
1834 	}
1835 
1836 	/*
1837 	 * Init tx descriptors.
1838 	 */
1839 	nge_list_tx_init(sc);
1840 
1841 	/*
1842 	 * For the NatSemi chip, we have to explicitly enable the
1843 	 * reception of ARP frames, as well as turn on the 'perfect
1844 	 * match' filter where we store the station address, otherwise
1845 	 * we won't receive unicasts meant for this host.
1846 	 */
1847 	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ARP);
1848 	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_PERFECT);
1849 
	/* If we want promiscuous mode, set the allframes bit. */
1851 	if (ifp->if_flags & IFF_PROMISC) {
1852 		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
1853 	} else {
1854 		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
1855 	}
1856 
1857 	/*
1858 	 * Set the capture broadcast bit to capture broadcast frames.
1859 	 */
1860 	if (ifp->if_flags & IFF_BROADCAST) {
1861 		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
1862 	} else {
1863 		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
1864 	}
1865 
1866 	/*
1867 	 * Load the multicast filter.
1868 	 */
1869 	nge_setmulti(sc);
1870 
1871 	/* Turn the receive filter on */
1872 	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ENABLE);
1873 
1874 	/*
1875 	 * Load the address of the RX and TX lists.
1876 	 */
1877 	CSR_WRITE_4(sc, NGE_RX_LISTPTR,
1878 	    vtophys(&sc->nge_ldata->nge_rx_list[0]));
1879 	CSR_WRITE_4(sc, NGE_TX_LISTPTR,
1880 	    vtophys(&sc->nge_ldata->nge_tx_list[0]));
1881 
1882 	/* Set RX configuration */
1883 	CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG);
1884 
1885 	/*
1886 	 * Enable hardware checksum validation for all IPv4
1887 	 * packets, do not reject packets with bad checksums.
1888 	 */
1889 	CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB);
1890 
1891 #if NVLAN > 0
1892 	/*
1893 	 * If VLAN support is enabled, tell the chip to detect
1894 	 * and strip VLAN tag info from received frames. The tag
1895 	 * will be provided in the extsts field in the RX descriptors.
1896 	 */
1897 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1898 		NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL,
1899 		    NGE_VIPRXCTL_TAG_DETECT_ENB|NGE_VIPRXCTL_TAG_STRIP_ENB);
1900 #endif
1901 
1902 	/* Set TX configuration */
1903 	CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG);
1904 
1905 	/*
1906 	 * Enable TX IPv4 checksumming on a per-packet basis.
1907 	 */
1908 	CSR_WRITE_4(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_CSUM_PER_PKT);
1909 
1910 #if NVLAN > 0
1911 	/*
1912 	 * If VLAN support is enabled, tell the chip to insert
1913 	 * VLAN tags on a per-packet basis as dictated by the
1914 	 * code in the frame encapsulation routine.
1915 	 */
1916 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1917 		NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT);
1918 #endif
1919 
1920 	/* Set full/half duplex mode. */
1921 	if (sc->nge_tbi)
1922 		media = sc->nge_ifmedia.ifm_cur->ifm_media;
1923 	else
1924 		media = mii->mii_media_active;
1925 
1926 	txcfg = CSR_READ_4(sc, NGE_TX_CFG);
1927 	rxcfg = CSR_READ_4(sc, NGE_RX_CFG);
1928 
1929 	DPRINTFN(4, ("%s: nge_init txcfg=%#x, rxcfg=%#x\n",
1930 		     sc->sc_dv.dv_xname, txcfg, rxcfg));
1931 
1932 	if ((media & IFM_GMASK) == IFM_FDX) {
1933 		txcfg |= (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
1934 		rxcfg |= (NGE_RXCFG_RX_FDX);
1935 	} else {
1936 		txcfg &= ~(NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
1937 		rxcfg &= ~(NGE_RXCFG_RX_FDX);
1938 	}
1939 
1940 	txcfg |= NGE_TXCFG_AUTOPAD;
1941 
1942 	CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
1943 	CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);
1944 
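	/* Start the one second timer that monitors link state. */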
1945 	nge_tick(sc);
1946 
1947 	/*
1948 	 * Enable the delivery of PHY interrupts based on
1949 	 * link/speed/duplex status changes and enable return
1950 	 * of extended status information in the DMA descriptors,
1951 	 * required for checksum offloading.
1952 	 */
1953 	NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD|NGE_CFG_PHYINTR_LNK|
1954 		   NGE_CFG_PHYINTR_DUP|NGE_CFG_EXTSTS_ENB);
1955 
1956 	DPRINTFN(1, ("%s: nge_init: config=%#x\n", sc->sc_dv.dv_xname,
1957 		     CSR_READ_4(sc, NGE_CFG)));
1958 
	/*
	 * Configure interrupt holdoff (moderation). We can
	 * have the chip delay interrupt delivery for a certain
	 * period. Units are in 100us, and the max setting
	 * is 25500us (0xFF x 100us). We program a 100us holdoff
	 * (a register value of 0x01).
	 */
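	/* E.g. a value of 0x0A would defer interrupts for up to 1ms. */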
1965 	CSR_WRITE_4(sc, NGE_IHR, 0x01);
1966 
1967 	/*
1968 	 * Enable interrupts.
1969 	 */
1970 	CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS);
1971 	CSR_WRITE_4(sc, NGE_IER, 1);
1972 
	/*
	 * Clear the TX and RX disable bits and enable the receiver.
	 * The transmitter is enabled by nge_start() once there are
	 * frames queued for output.
	 */
	NGE_CLRBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
1976 
	if (sc->nge_tbi)
		nge_ifmedia_tbi_upd(ifp);
	else
		nge_ifmedia_mii_upd(ifp);
1981 
1982 	ifp->if_flags |= IFF_RUNNING;
1983 	ifp->if_flags &= ~IFF_OACTIVE;
1984 
1985 	splx(s);
1986 }
1987 
1988 /*
1989  * Set mii media options.
1990  */
1991 int
1992 nge_ifmedia_mii_upd(ifp)
1993 	struct ifnet		*ifp;
1994 {
1995 	struct nge_softc	*sc = ifp->if_softc;
1996 	struct mii_data 	*mii = &sc->nge_mii;
1997 
1998 	DPRINTFN(2, ("%s: nge_ifmedia_mii_upd\n", sc->sc_dv.dv_xname));
1999 
2000 	sc->nge_link = 0;
2001 
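	/*
	 * If there are multiple PHY instances, reset each PHY before
	 * selecting the new media so they all start from a known state.
	 */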
2002 	if (mii->mii_instance) {
2003 		struct mii_softc	*miisc;
2004 		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
2005 		     miisc = LIST_NEXT(miisc, mii_list))
2006 			mii_phy_reset(miisc);
2007 	}
2008 	mii_mediachg(mii);
2009 
2010 	return(0);
2011 }
2012 
2013 /*
2014  * Report current mii media status.
2015  */
2016 void
2017 nge_ifmedia_mii_sts(ifp, ifmr)
2018 	struct ifnet		*ifp;
2019 	struct ifmediareq	*ifmr;
2020 {
2021 	struct nge_softc	*sc = ifp->if_softc;
2022 	struct mii_data *mii = &sc->nge_mii;
2023 
2024 	DPRINTFN(2, ("%s: nge_ifmedia_mii_sts\n", sc->sc_dv.dv_xname));
2025 
2026 	mii_pollstat(mii);
2027 	ifmr->ifm_active = mii->mii_media_active;
2028 	ifmr->ifm_status = mii->mii_media_status;
2029 }
2030 
/*
 * Set tbi media options.
 */
2034 int
2035 nge_ifmedia_tbi_upd(ifp)
2036 	struct ifnet		*ifp;
2037 {
2038 	struct nge_softc	*sc = ifp->if_softc;
2039 
2040 	DPRINTFN(2, ("%s: nge_ifmedia_tbi_upd\n", sc->sc_dv.dv_xname));
2041 
2042 	sc->nge_link = 0;
2043 
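	/*
	 * With autoselect, advertise both duplex modes and (re)start
	 * autonegotiation on the internal TBI; otherwise force the
	 * duplex setting directly in the TX/RX config registers.
	 */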
2044 	if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
2045 	    == IFM_AUTO) {
2046 		u_int32_t anar, bmcr;
2047 		anar = CSR_READ_4(sc, NGE_TBI_ANAR);
2048 		anar |= (NGE_TBIANAR_HDX | NGE_TBIANAR_FDX);
2049 		CSR_WRITE_4(sc, NGE_TBI_ANAR, anar);
2050 
2051 		bmcr = CSR_READ_4(sc, NGE_TBI_BMCR);
2052 		bmcr |= (NGE_TBIBMCR_ENABLE_ANEG|NGE_TBIBMCR_RESTART_ANEG);
2053 		CSR_WRITE_4(sc, NGE_TBI_BMCR, bmcr);
2054 
2055 		bmcr &= ~(NGE_TBIBMCR_RESTART_ANEG);
2056 		CSR_WRITE_4(sc, NGE_TBI_BMCR, bmcr);
2057 	} else {
2058 		u_int32_t txcfg, rxcfg;
2059 		txcfg = CSR_READ_4(sc, NGE_TX_CFG);
2060 		rxcfg = CSR_READ_4(sc, NGE_RX_CFG);
2061 
2062 		if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
2063 		    == IFM_FDX) {
2064 			txcfg |= NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR;
2065 			rxcfg |= NGE_RXCFG_RX_FDX;
2066 		} else {
2067 			txcfg &= ~(NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
2068 			rxcfg &= ~(NGE_RXCFG_RX_FDX);
2069 		}
2070 
2071 		txcfg |= NGE_TXCFG_AUTOPAD;
2072 		CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
2073 		CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);
2074 	}
2075 
2076 	NGE_CLRBIT(sc, NGE_GPIO, NGE_GPIO_GP3_OUT);
2077 
2078 	return(0);
2079 }
2080 
2081 /*
2082  * Report current tbi media status.
2083  */
2084 void
2085 nge_ifmedia_tbi_sts(ifp, ifmr)
2086 	struct ifnet		*ifp;
2087 	struct ifmediareq	*ifmr;
2088 {
2089 	struct nge_softc	*sc = ifp->if_softc;
2090 	u_int32_t		bmcr;
2091 
2092 	bmcr = CSR_READ_4(sc, NGE_TBI_BMCR);
2093 
2094 	if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media) == IFM_AUTO) {
2095 		u_int32_t bmsr = CSR_READ_4(sc, NGE_TBI_BMSR);
2096 		DPRINTFN(2, ("%s: nge_ifmedia_tbi_sts bmsr=%#x, bmcr=%#x\n",
2097 			     sc->sc_dv.dv_xname, bmsr, bmcr));
2098 
2099 		if (!(bmsr & NGE_TBIBMSR_ANEG_DONE)) {
2100 			ifmr->ifm_active = IFM_ETHER|IFM_NONE;
2101 			ifmr->ifm_status = IFM_AVALID;
2102 			return;
2103 		}
2104 	} else {
2105 		DPRINTFN(2, ("%s: nge_ifmedia_tbi_sts bmcr=%#x\n",
2106 			     sc->sc_dv.dv_xname, bmcr));
2107 	}
2108 
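	/* Report the link as up; TBI media always runs at 1000baseSX. */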
2109 	ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
2110 	ifmr->ifm_active = IFM_ETHER|IFM_1000_SX;
2111 
2112 	if (bmcr & NGE_TBIBMCR_LOOPBACK)
2113 		ifmr->ifm_active |= IFM_LOOP;
2114 
2115 	if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media) == IFM_AUTO) {
2116 		u_int32_t anlpar = CSR_READ_4(sc, NGE_TBI_ANLPAR);
2117 		DPRINTFN(2, ("%s: nge_ifmedia_tbi_sts anlpar=%#x\n",
2118 			     sc->sc_dv.dv_xname, anlpar));
2119 
2120 		ifmr->ifm_active |= IFM_AUTO;
2121 		if (anlpar & NGE_TBIANLPAR_FDX) {
2122 			ifmr->ifm_active |= IFM_FDX;
2123 		} else if (anlpar & NGE_TBIANLPAR_HDX) {
2124 			ifmr->ifm_active |= IFM_HDX;
		} else {
			/* The link partner advertised neither duplex mode. */
			ifmr->ifm_active |= IFM_FDX;
		}
2128 	} else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK) == IFM_FDX)
2129 		ifmr->ifm_active |= IFM_FDX;
2130 	else
2131 		ifmr->ifm_active |= IFM_HDX;
}
2134 
2135 int
2136 nge_ioctl(ifp, command, data)
2137 	struct ifnet		*ifp;
2138 	u_long			command;
2139 	caddr_t			data;
2140 {
2141 	struct nge_softc	*sc = ifp->if_softc;
2142 	struct ifreq		*ifr = (struct ifreq *) data;
2143 	struct ifaddr		*ifa = (struct ifaddr *)data;
2144 	struct mii_data		*mii;
2145 	int			s, error = 0;
2146 
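	/* Block network interrupts while the interface state changes. */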
2147 	s = splimp();
2148 
2149 	if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) {
2150 		splx(s);
2151 		return (error);
2152 	}
2153 
2154 	switch(command) {
2155 	case SIOCSIFMTU:
2156 		if (ifr->ifr_mtu > NGE_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN)
2157 			error = EINVAL;
2158 		else {
2159 			ifp->if_mtu = ifr->ifr_mtu;
			/*
			 * Workaround: if the MTU is 8152 or larger
			 * (TX FIFO size minus 64 minus 18), turn off
			 * TX checksum offloading.
			 */
2165 			if (ifr->ifr_mtu >= 8152)
2166 				ifp->if_capabilities &= ~(IFCAP_CSUM_IPv4 |
2167 				    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4);
2168 			else
2169 				ifp->if_capabilities = IFCAP_CSUM_IPv4 |
2170 					IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
2171 		}
2172 		break;
2173 	case SIOCSIFADDR:
2174 		ifp->if_flags |= IFF_UP;
2175 		switch (ifa->ifa_addr->sa_family) {
2176 #ifdef INET
2177 		case AF_INET:
2178 			nge_init(sc);
2179 			arp_ifinit(&sc->arpcom, ifa);
2180 			break;
2181 #endif /* INET */
2182 		default:
2183 			nge_init(sc);
2184 			break;
		}
2186 		break;
2187 	case SIOCSIFFLAGS:
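		/*
		 * If the interface is running and only the promiscuous
		 * flag changed, update the RX filter in place instead
		 * of reinitializing the whole chip.
		 */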
2188 		if (ifp->if_flags & IFF_UP) {
2189 			if (ifp->if_flags & IFF_RUNNING &&
2190 			    ifp->if_flags & IFF_PROMISC &&
2191 			    !(sc->nge_if_flags & IFF_PROMISC)) {
2192 				NGE_SETBIT(sc, NGE_RXFILT_CTL,
2193 				    NGE_RXFILTCTL_ALLPHYS|
2194 				    NGE_RXFILTCTL_ALLMULTI);
2195 			} else if (ifp->if_flags & IFF_RUNNING &&
2196 			    !(ifp->if_flags & IFF_PROMISC) &&
2197 			    sc->nge_if_flags & IFF_PROMISC) {
2198 				NGE_CLRBIT(sc, NGE_RXFILT_CTL,
2199 				    NGE_RXFILTCTL_ALLPHYS);
2200 				if (!(ifp->if_flags & IFF_ALLMULTI))
2201 					NGE_CLRBIT(sc, NGE_RXFILT_CTL,
2202 					    NGE_RXFILTCTL_ALLMULTI);
2203 			} else {
2204 				ifp->if_flags &= ~IFF_RUNNING;
2205 				nge_init(sc);
2206 			}
2207 		} else {
2208 			if (ifp->if_flags & IFF_RUNNING)
2209 				nge_stop(sc);
2210 		}
2211 		sc->nge_if_flags = ifp->if_flags;
2212 		error = 0;
2213 		break;
2214 	case SIOCADDMULTI:
2215 	case SIOCDELMULTI:
2216 		error = (command == SIOCADDMULTI)
2217 			? ether_addmulti(ifr, &sc->arpcom)
2218 			: ether_delmulti(ifr, &sc->arpcom);
2219 
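		/*
		 * ENETRESET means the multicast list changed; reload
		 * the hardware filter if the interface is running.
		 */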
2220 		if (error == ENETRESET) {
2221 			if (ifp->if_flags & IFF_RUNNING)
2222 				nge_setmulti(sc);
2223 			error = 0;
2224 		}
2225 		break;
2226 	case SIOCGIFMEDIA:
2227 	case SIOCSIFMEDIA:
2228 		if (sc->nge_tbi) {
2229 			error = ifmedia_ioctl(ifp, ifr, &sc->nge_ifmedia,
2230 					      command);
2231 		} else {
2232 			mii = &sc->nge_mii;
2233 			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
2234 					      command);
2235 		}
2236 		break;
2237 	default:
2238 		error = EINVAL;
2239 		break;
2240 	}
2241 
2242 	splx(s);
2243 
2244 	return(error);
2245 }
2246 
2247 void
2248 nge_watchdog(ifp)
2249 	struct ifnet		*ifp;
2250 {
2251 	struct nge_softc	*sc;
2252 
2253 	sc = ifp->if_softc;
2254 
2255 	ifp->if_oerrors++;
2256 	printf("%s: watchdog timeout\n", sc->sc_dv.dv_xname);
2257 
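	/* Reset and reinitialize the chip, then restart any queued output. */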
2258 	nge_stop(sc);
2259 	nge_reset(sc);
2260 	ifp->if_flags &= ~IFF_RUNNING;
2261 	nge_init(sc);
2262 
2263 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
2264 		nge_start(ifp);
2265 }
2266 
2267 /*
2268  * Stop the adapter and free any mbufs allocated to the
2269  * RX and TX lists.
2270  */
2271 void
2272 nge_stop(sc)
2273 	struct nge_softc	*sc;
2274 {
2275 	int			i;
2276 	struct ifnet		*ifp;
2277 	struct mii_data		*mii;
2278 
2279 	ifp = &sc->arpcom.ac_if;
2280 	ifp->if_timer = 0;
2281 	if (sc->nge_tbi) {
2282 		mii = NULL;
2283 	} else {
2284 		mii = &sc->nge_mii;
2285 	}
2286 
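	/* Cancel the tick timeout, mask interrupts and halt the DMA engines. */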
2287 	timeout_del(&sc->nge_timeout);
2288 	CSR_WRITE_4(sc, NGE_IER, 0);
2289 	CSR_WRITE_4(sc, NGE_IMR, 0);
2290 	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
2291 	DELAY(1000);
2292 	CSR_WRITE_4(sc, NGE_TX_LISTPTR, 0);
2293 	CSR_WRITE_4(sc, NGE_RX_LISTPTR, 0);
2294 
2295 	if (!sc->nge_tbi)
2296 		mii_down(mii);
2297 
2298 	sc->nge_link = 0;
2299 
2300 	/*
2301 	 * Free data in the RX lists.
2302 	 */
2303 	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
2304 		if (sc->nge_ldata->nge_rx_list[i].nge_mbuf != NULL) {
2305 			m_freem(sc->nge_ldata->nge_rx_list[i].nge_mbuf);
2306 			sc->nge_ldata->nge_rx_list[i].nge_mbuf = NULL;
2307 		}
2308 	}
2309 	bzero((char *)&sc->nge_ldata->nge_rx_list,
2310 		sizeof(sc->nge_ldata->nge_rx_list));
2311 
2312 	/*
2313 	 * Free the TX list buffers.
2314 	 */
2315 	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
2316 		if (sc->nge_ldata->nge_tx_list[i].nge_mbuf != NULL) {
2317 			m_freem(sc->nge_ldata->nge_tx_list[i].nge_mbuf);
2318 			sc->nge_ldata->nge_tx_list[i].nge_mbuf = NULL;
2319 		}
2320 	}
2321 
2322 	bzero((char *)&sc->nge_ldata->nge_tx_list,
2323 		sizeof(sc->nge_ldata->nge_tx_list));
2324 
2325 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2326 }
2327 
2328 /*
2329  * Stop all chip I/O so that the kernel's probe routines don't
2330  * get confused by errant DMAs when rebooting.
2331  */
2332 void
2333 nge_shutdown(xsc)
2334 	void *xsc;
2335 {
2336 	struct nge_softc *sc = (struct nge_softc *)xsc;
2337 
2338 	nge_reset(sc);
2339 	nge_stop(sc);
2340 }
2341 
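/*
 * Autoconf glue: the attachment (softc size plus the match and attach
 * entry points) and the driver definition (device name and class).
 */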
2342 struct cfattach nge_ca = {
2343 	sizeof(struct nge_softc), nge_probe, nge_attach
2344 };
2345 
2346 struct cfdriver nge_cd = {
	NULL, "nge", DV_IFNET
2348 };
2349