xref: /netbsd-src/sys/dev/pci/if_bge.c (revision 08c81a9c2dc8c7300e893321eb65c0925d60871c)
1 /*	$NetBSD: if_bge.c,v 1.19 2002/07/18 02:07:13 mjl Exp $	*/
2 
3 /*
4  * Copyright (c) 2001 Wind River Systems
5  * Copyright (c) 1997, 1998, 1999, 2001
6  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by Bill Paul.
19  * 4. Neither the name of the author nor the names of any co-contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
27  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33  * THE POSSIBILITY OF SUCH DAMAGE.
34  *
35  * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
36  */
37 
38 /*
39  * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
40  *
41  * NetBSD version by:
42  *
43  *	Frank van der Linden <fvdl@wasabisystems.com>
44  *	Jason Thorpe <thorpej@wasabisystems.com>
45  *
46  * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com>
47  * Senior Engineer, Wind River Systems
48  */
49 
50 /*
51  * The Broadcom BCM5700 is based on technology originally developed by
52  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
53  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
54  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
55  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
56  * frames, highly configurable RX filtering, and 16 RX and TX queues
57  * (which, along with RX filter rules, can be used for QOS applications).
58  * Other features, such as TCP segmentation, may be available as part
59  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
60  * firmware images can be stored in hardware and need not be compiled
61  * into the driver.
62  *
63  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
64  * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
65  *
66  * The BCM5701 is a single-chip solution incorporating both the BCM5700
67  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
68  * does not support external SSRAM.
69  *
70  * Broadcom also produces a variation of the BCM5700 under the "Altima"
71  * brand name, which is functionally similar but lacks PCI-X support.
72  *
73  * Without external SSRAM, you can only have at most 4 TX rings,
74  * and the use of the mini RX ring is disabled. This seems to imply
75  * that these features are simply not available on the BCM5701. As a
76  * result, this driver does not implement any support for the mini RX
77  * ring.
78  */
79 
80 #include "bpfilter.h"
81 #include "vlan.h"
82 
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/callout.h>
86 #include <sys/sockio.h>
87 #include <sys/mbuf.h>
88 #include <sys/malloc.h>
89 #include <sys/kernel.h>
90 #include <sys/device.h>
91 #include <sys/socket.h>
92 
93 #include <net/if.h>
94 #include <net/if_dl.h>
95 #include <net/if_media.h>
96 #include <net/if_ether.h>
97 
98 #ifdef INET
99 #include <netinet/in.h>
100 #include <netinet/in_systm.h>
101 #include <netinet/in_var.h>
102 #include <netinet/ip.h>
103 #endif
104 
105 #if NBPFILTER > 0
106 #include <net/bpf.h>
107 #endif
108 
109 #include <dev/pci/pcireg.h>
110 #include <dev/pci/pcivar.h>
111 #include <dev/pci/pcidevs.h>
112 
113 #include <dev/mii/mii.h>
114 #include <dev/mii/miivar.h>
115 #include <dev/mii/miidevs.h>
116 #include <dev/mii/brgphyreg.h>
117 
118 #include <dev/pci/if_bgereg.h>
119 
120 #include <uvm/uvm_extern.h>
121 
122 int bge_probe(struct device *, struct cfdata *, void *);
123 void bge_attach(struct device *, struct device *, void *);
124 void bge_release_resources(struct bge_softc *);
125 void bge_txeof(struct bge_softc *);
126 void bge_rxeof(struct bge_softc *);
127 
128 void bge_tick(void *);
129 void bge_stats_update(struct bge_softc *);
130 int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *);
131 
132 int bge_intr(void *);
133 void bge_start(struct ifnet *);
134 int bge_ioctl(struct ifnet *, u_long, caddr_t);
135 int bge_init(struct ifnet *);
136 void bge_stop(struct bge_softc *);
137 void bge_watchdog(struct ifnet *);
138 void bge_shutdown(void *);
139 int bge_ifmedia_upd(struct ifnet *);
140 void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
141 
142 u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *);
143 int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
144 
145 void bge_setmulti(struct bge_softc *);
146 
147 void bge_handle_events(struct bge_softc *);
148 int bge_alloc_jumbo_mem(struct bge_softc *);
149 void bge_free_jumbo_mem(struct bge_softc *);
150 void *bge_jalloc(struct bge_softc *);
151 void bge_jfree(struct mbuf *, caddr_t, u_int, void *);
152 int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, bus_dmamap_t);
153 int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
154 int bge_init_rx_ring_std(struct bge_softc *);
155 void bge_free_rx_ring_std(struct bge_softc *);
156 int bge_init_rx_ring_jumbo(struct bge_softc *);
157 void bge_free_rx_ring_jumbo(struct bge_softc *);
158 void bge_free_tx_ring(struct bge_softc *);
159 int bge_init_tx_ring(struct bge_softc *);
160 
161 int bge_chipinit(struct bge_softc *);
162 int bge_blockinit(struct bge_softc *);
163 
164 #ifdef notdef
165 u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
166 void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, int);
167 void bge_vpd_read(struct bge_softc *);
168 #endif
169 
170 u_int32_t bge_readmem_ind(struct bge_softc *, int);
171 void bge_writemem_ind(struct bge_softc *, int, int);
172 #ifdef notdef
173 u_int32_t bge_readreg_ind(struct bge_softc *, int);
174 #endif
175 void bge_writereg_ind(struct bge_softc *, int, int);
176 
177 int bge_miibus_readreg(struct device *, int, int);
178 void bge_miibus_writereg(struct device *, int, int, int);
179 void bge_miibus_statchg(struct device *);
180 
181 void bge_reset(struct bge_softc *);
182 
183 void bge_dump_status(struct bge_softc *);
184 void bge_dump_rxbd(struct bge_rx_bd *);
185 
186 #define BGE_DEBUG
187 #ifdef BGE_DEBUG
188 #define DPRINTF(x)	if (bgedebug) printf x
189 #define DPRINTFN(n,x)	if (bgedebug >= (n)) printf x
190 int	bgedebug = 0;
191 #else
192 #define DPRINTF(x)
193 #define DPRINTFN(n,x)
194 #endif
195 
196 /* Various chip quirks. */
197 #define	BGE_QUIRK_LINK_STATE_BROKEN	0x00000001
198 #define	BGE_QUIRK_CSUM_BROKEN		0x00000002
199 
200 struct cfattach bge_ca = {
201 	sizeof(struct bge_softc), bge_probe, bge_attach
202 };
203 
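/*
 * Indirect access to NIC-internal memory: an offset written to the
 * BGE_PCI_MEMWIN_BASEADDR register in PCI configuration space aims a
 * window at that region of internal RAM, whose contents then appear
 * at BGE_PCI_MEMWIN_DATA.  bge_writereg_ind() below uses the analogous
 * register window (BGE_PCI_REG_BASEADDR/BGE_PCI_REG_DATA).
 */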
204 u_int32_t
205 bge_readmem_ind(sc, off)
206 	struct bge_softc *sc;
207 	int off;
208 {
209 	struct pci_attach_args	*pa = &(sc->bge_pa);
210 	pcireg_t val;
211 
212 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
213 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA);
214 	return val;
215 }
216 
217 void
218 bge_writemem_ind(sc, off, val)
219 	struct bge_softc *sc;
220 	int off, val;
221 {
222 	struct pci_attach_args	*pa = &(sc->bge_pa);
223 
224 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
225 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val);
226 }
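
#ifdef notdef
/*
 * Illustrative sketch only (not part of the driver): dump a range of
 * NIC-internal memory words through the window accessors above.  The
 * helper name and its assumption of a 4-byte aligned 'off' are ours.
 */
static void
bge_dump_mem_ind(struct bge_softc *sc, int off, int cnt)
{
	int i;

	for (i = 0; i < cnt; i++, off += sizeof(u_int32_t))
		printf("%s: 0x%08x: 0x%08x\n", sc->bge_dev.dv_xname,
		    off, bge_readmem_ind(sc, off));
}
#endif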
227 
228 #ifdef notdef
229 u_int32_t
230 bge_readreg_ind(sc, off)
231 	struct bge_softc *sc;
232 	int off;
233 {
234 	struct pci_attach_args	*pa = &(sc->bge_pa);
235 
236 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
237 	return(pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA));
238 }
239 #endif
240 
241 void
242 bge_writereg_ind(sc, off, val)
243 	struct bge_softc *sc;
244 	int off, val;
245 {
246 	struct pci_attach_args	*pa = &(sc->bge_pa);
247 
248 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
249 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val);
250 }
251 
252 #ifdef notdef
253 u_int8_t
254 bge_vpd_readbyte(sc, addr)
255 	struct bge_softc *sc;
256 	int addr;
257 {
258 	int i;
259 	u_int32_t val;
260 	struct pci_attach_args	*pa = &(sc->bge_pa);
261 
262 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR, addr);
263 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
264 		DELAY(10);
265 		if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR) &
266 		    BGE_VPD_FLAG)
267 			break;
268 	}
269 
270 	if (i == BGE_TIMEOUT * 10) {
271 		printf("%s: VPD read timed out\n", sc->bge_dev.dv_xname);
272 		return(0);
273 	}
274 
275 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_DATA);
276 
277 	return((val >> ((addr % 4) * 8)) & 0xFF);
278 }
279 
280 void
281 bge_vpd_read_res(sc, res, addr)
282 	struct bge_softc *sc;
283 	struct vpd_res *res;
284 	int addr;
285 {
286 	int i;
287 	u_int8_t *ptr;
288 
289 	ptr = (u_int8_t *)res;
290 	for (i = 0; i < sizeof(struct vpd_res); i++)
291 		ptr[i] = bge_vpd_readbyte(sc, i + addr);
292 }
293 
294 void
295 bge_vpd_read(sc)
296 	struct bge_softc *sc;
297 {
298 	int pos = 0, i;
299 	struct vpd_res res;
300 
301 	if (sc->bge_vpd_prodname != NULL)
302 		free(sc->bge_vpd_prodname, M_DEVBUF);
303 	if (sc->bge_vpd_readonly != NULL)
304 		free(sc->bge_vpd_readonly, M_DEVBUF);
305 	sc->bge_vpd_prodname = NULL;
306 	sc->bge_vpd_readonly = NULL;
307 
308 	bge_vpd_read_res(sc, &res, pos);
309 
310 	if (res.vr_id != VPD_RES_ID) {
311 		printf("%s: bad VPD resource id: expected %x got %x\n",
312 			sc->bge_dev.dv_xname, VPD_RES_ID, res.vr_id);
313 		return;
314 	}
315 
316 	pos += sizeof(res);
317 	sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
318 	if (sc->bge_vpd_prodname == NULL)
319 		panic("bge_vpd_read");
320 	for (i = 0; i < res.vr_len; i++)
321 		sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
322 	sc->bge_vpd_prodname[i] = '\0';
323 	pos += i;
324 
325 	bge_vpd_read_res(sc, &res, pos);
326 
327 	if (res.vr_id != VPD_RES_READ) {
328 		printf("%s: bad VPD resource id: expected %x got %x\n",
329 		    sc->bge_dev.dv_xname, VPD_RES_READ, res.vr_id);
330 		return;
331 	}
332 
333 	pos += sizeof(res);
334 	sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
335 	if (sc->bge_vpd_readonly == NULL)
336 		panic("bge_vpd_read");
337 	for (i = 0; i < res.vr_len; i++)
338 		sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
339 }
340 #endif
341 
342 /*
343  * Read a byte of data stored in the EEPROM at address 'addr.' The
344  * BCM570x supports both the traditional bitbang interface and an
345  * auto access interface for reading the EEPROM. We use the auto
346  * access method.
347  */
348 u_int8_t
349 bge_eeprom_getbyte(sc, addr, dest)
350 	struct bge_softc *sc;
351 	int addr;
352 	u_int8_t *dest;
353 {
354 	int i;
355 	u_int32_t byte = 0;
356 
357 	/*
358 	 * Enable use of auto EEPROM access so we can avoid
359 	 * having to use the bitbang method.
360 	 */
361 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
362 
363 	/* Reset the EEPROM, load the clock period. */
364 	CSR_WRITE_4(sc, BGE_EE_ADDR,
365 	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
366 	DELAY(20);
367 
368 	/* Issue the read EEPROM command. */
369 	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
370 
371 	/* Wait for completion */
372 	for(i = 0; i < BGE_TIMEOUT * 10; i++) {
373 		DELAY(10);
374 		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
375 			break;
376 	}
377 
378 	if (i == BGE_TIMEOUT * 10) {
379 		printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
380 		return(0);
381 	}
382 
383 	/* Get result. */
384 	byte = CSR_READ_4(sc, BGE_EE_DATA);
385 
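	/*
	 * BGE_EE_DATA holds a 32-bit word containing four consecutive
	 * EEPROM bytes; (addr % 4) selects the byte lane, e.g. addr 6
	 * selects lane 2, shifted down 16 bits.
	 */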
386 	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;
387 
388 	return(0);
389 }
390 
391 /*
392  * Read a sequence of bytes from the EEPROM.
393  */
394 int
395 bge_read_eeprom(sc, dest, off, cnt)
396 	struct bge_softc *sc;
397 	caddr_t dest;
398 	int off;
399 	int cnt;
400 {
401 	int err = 0, i;
402 	u_int8_t byte = 0;
403 
404 	for (i = 0; i < cnt; i++) {
405 		err = bge_eeprom_getbyte(sc, off + i, &byte);
406 		if (err)
407 			break;
408 		*(dest + i) = byte;
409 	}
410 
411 	return(err ? 1 : 0);
412 }
413 
414 int
415 bge_miibus_readreg(dev, phy, reg)
416 	struct device *dev;
417 	int phy, reg;
418 {
419 	struct bge_softc *sc = (struct bge_softc *)dev;
420 	struct ifnet *ifp;
421 	u_int32_t val;
422 	int i;
423 
424 	ifp = &sc->ethercom.ec_if;
425 
426 	if (sc->bge_asicrev == BGE_ASICREV_BCM5701_B5 && phy != 1)
427 		return(0);
428 
429 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
430 	    BGE_MIPHY(phy)|BGE_MIREG(reg));
431 
432 	for (i = 0; i < BGE_TIMEOUT; i++) {
433 		val = CSR_READ_4(sc, BGE_MI_COMM);
434 		if (!(val & BGE_MICOMM_BUSY))
435 			break;
436 		delay(10);
437 	}
438 
439 	if (i == BGE_TIMEOUT) {
440 		printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
441 		return(0);
442 	}
443 
444 	val = CSR_READ_4(sc, BGE_MI_COMM);
445 
446 	if (val & BGE_MICOMM_READFAIL)
447 		return(0);
448 
449 	return(val & 0xFFFF);
450 }
451 
452 void
453 bge_miibus_writereg(dev, phy, reg, val)
454 	struct device *dev;
455 	int phy, reg, val;
456 {
457 	struct bge_softc *sc = (struct bge_softc *)dev;
458 	int i;
459 
460 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
461 	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
462 
463 	for (i = 0; i < BGE_TIMEOUT; i++) {
464 		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
465 			break;
466 		delay(10);
467 	}
468 
469 	if (i == BGE_TIMEOUT) {
470 		printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
471 	}
472 }
473 
474 void
475 bge_miibus_statchg(dev)
476 	struct device *dev;
477 {
478 	struct bge_softc *sc = (struct bge_softc *)dev;
479 	struct mii_data *mii = &sc->bge_mii;
480 
481 	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
482 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
483 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
484 	} else {
485 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
486 	}
487 
488 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
489 		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
490 	} else {
491 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
492 	}
493 }
494 
495 /*
496  * Handle events that have triggered interrupts (currently a no-op).
497  */
498 void
499 bge_handle_events(sc)
500 	struct bge_softc		*sc;
501 {
502 
503 	return;
504 }
505 
506 /*
507  * Memory management for jumbo frames.
508  */
509 
510 int
511 bge_alloc_jumbo_mem(sc)
512 	struct bge_softc		*sc;
513 {
514 	caddr_t			ptr, kva;
515 	bus_dma_segment_t	seg;
516 	int		i, rseg, state, error;
517 	struct bge_jpool_entry   *entry;
518 
519 	state = error = 0;
520 
521 	/* Grab a big chunk o' storage. */
522 	if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
523 	     &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
524 		printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname);
525 		return ENOBUFS;
526 	}
527 
528 	state = 1;
529 	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva,
530 	    BUS_DMA_NOWAIT)) {
531 		printf("%s: can't map dma buffers (%d bytes)\n",
532 		    sc->bge_dev.dv_xname, (int)BGE_JMEM);
533 		error = ENOBUFS;
534 		goto out;
535 	}
536 
537 	state = 2;
538 	if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
539 	    BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
540 		printf("%s: can't create dma map\n", sc->bge_dev.dv_xname);
541 		error = ENOBUFS;
542 		goto out;
543 	}
544 
545 	state = 3;
546 	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
547 	    kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
548 		printf("%s: can't load dma map\n", sc->bge_dev.dv_xname);
549 		error = ENOBUFS;
550 		goto out;
551 	}
552 
553 	state = 4;
554 	sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva;
555 	DPRINTFN(1,("bge_jumbo_buf = 0x%p\n", sc->bge_cdata.bge_jumbo_buf));
556 
557 	SLIST_INIT(&sc->bge_jfree_listhead);
558 	SLIST_INIT(&sc->bge_jinuse_listhead);
559 
560 	/*
561 	 * Now divide it up into 9K pieces and save the addresses
562 	 * in an array.
563 	 */
564 	ptr = sc->bge_cdata.bge_jumbo_buf;
565 	for (i = 0; i < BGE_JSLOTS; i++) {
566 		sc->bge_cdata.bge_jslots[i] = ptr;
567 		ptr += BGE_JLEN;
568 		entry = malloc(sizeof(struct bge_jpool_entry),
569 		    M_DEVBUF, M_NOWAIT);
570 		if (entry == NULL) {
571 			printf("%s: no memory for jumbo buffer queue!\n",
572 			    sc->bge_dev.dv_xname);
573 			error = ENOBUFS;
574 			goto out;
575 		}
576 		entry->slot = i;
577 		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
578 				 entry, jpool_entries);
579 	}
580 out:
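	/*
	 * Unwind in the reverse order of setup; each case deliberately
	 * falls through so that everything acquired before the failure
	 * point recorded in 'state' is released.
	 */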
581 	if (error != 0) {
582 		switch (state) {
583 		case 4:
584 			bus_dmamap_unload(sc->bge_dmatag,
585 			    sc->bge_cdata.bge_rx_jumbo_map);
586 		case 3:
587 			bus_dmamap_destroy(sc->bge_dmatag,
588 			    sc->bge_cdata.bge_rx_jumbo_map);
589 		case 2:
590 			bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
591 		case 1:
592 			bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
593 			break;
594 		default:
595 			break;
596 		}
597 	}
598 
599 	return error;
600 }
601 
602 /*
603  * Allocate a jumbo buffer.
604  */
605 void *
606 bge_jalloc(sc)
607 	struct bge_softc		*sc;
608 {
609 	struct bge_jpool_entry   *entry;
610 
611 	entry = SLIST_FIRST(&sc->bge_jfree_listhead);
612 
613 	if (entry == NULL) {
614 		printf("%s: no free jumbo buffers\n", sc->bge_dev.dv_xname);
615 		return(NULL);
616 	}
617 
618 	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
619 	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
620 	return(sc->bge_cdata.bge_jslots[entry->slot]);
621 }
622 
623 /*
624  * Release a jumbo buffer.
625  */
626 void
627 bge_jfree(m, buf, size, arg)
628 	struct mbuf	*m;
629 	caddr_t		buf;
630 	u_int		size;
631 	void		*arg;
632 {
633 	struct bge_jpool_entry *entry;
634 	struct bge_softc *sc;
635 	int i, s;
636 
637 	/* Extract the softc struct pointer. */
638 	sc = (struct bge_softc *)arg;
639 
640 	if (sc == NULL)
641 		panic("bge_jfree: can't find softc pointer!");
642 
643 	/* calculate the slot this buffer belongs to */
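	/*
	 * All jumbo buffers come from one contiguous allocation carved
	 * into BGE_JLEN-sized pieces (see bge_alloc_jumbo_mem()), so
	 * pointer arithmetic recovers the slot index directly.
	 */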
644 
645 	i = ((caddr_t)buf
646 	     - (caddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;
647 
648 	if ((i < 0) || (i >= BGE_JSLOTS))
649 		panic("bge_jfree: asked to free buffer that we don't manage!");
650 
651 	s = splvm();
652 	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
653 	if (entry == NULL)
654 		panic("bge_jfree: buffer not in use!");
655 	entry->slot = i;
656 	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
657 	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
658 
659 	if (__predict_true(m != NULL))
660   		pool_cache_put(&mbpool_cache, m);
661 	splx(s);
662 }
663 
664 
665 /*
666  * Initialize a standard receive ring descriptor.
667  */
668 int
669 bge_newbuf_std(sc, i, m, dmamap)
670 	struct bge_softc	*sc;
671 	int			i;
672 	struct mbuf		*m;
673 	bus_dmamap_t dmamap;
674 {
675 	struct mbuf		*m_new = NULL;
676 	struct bge_rx_bd	*r;
677 	int			error;
678 
679 	if (dmamap == NULL) {
680 		error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
681 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
682 		if (error != 0)
683 			return error;
684 	}
685 
686 	sc->bge_cdata.bge_rx_std_map[i] = dmamap;
687 
688 	if (m == NULL) {
689 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
690 		if (m_new == NULL) {
691 			return(ENOBUFS);
692 		}
693 
694 		MCLGET(m_new, M_DONTWAIT);
695 		if (!(m_new->m_flags & M_EXT)) {
696 			m_freem(m_new);
697 			return(ENOBUFS);
698 		}
699 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
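		/*
		 * Shift the payload by ETHER_ALIGN (2 bytes) so the IP
		 * header that follows the 14-byte Ethernet header lands
		 * on a 32-bit boundary.
		 */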
700 		m_adj(m_new, ETHER_ALIGN);
701 
702 		if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
703 		    BUS_DMA_READ|BUS_DMA_NOWAIT))
704 			return(ENOBUFS);
705 	} else {
706 		m_new = m;
707 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
708 		m_new->m_data = m_new->m_ext.ext_buf;
709 		m_adj(m_new, ETHER_ALIGN);
710 	}
711 
712 	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
713 	r = &sc->bge_rdata->bge_rx_std_ring[i];
714 	bge_set_hostaddr(&r->bge_addr,
715 	    dmamap->dm_segs[0].ds_addr);
716 	r->bge_flags = BGE_RXBDFLAG_END;
717 	r->bge_len = m_new->m_len;
718 	r->bge_idx = i;
719 
720 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
721 	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
722 		i * sizeof (struct bge_rx_bd),
723 	    sizeof (struct bge_rx_bd),
724 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
725 
726 	return(0);
727 }
728 
729 /*
730  * Initialize a jumbo receive ring descriptor. This allocates
731  * a jumbo buffer from the pool managed internally by the driver.
732  */
733 int
734 bge_newbuf_jumbo(sc, i, m)
735 	struct bge_softc *sc;
736 	int i;
737 	struct mbuf *m;
738 {
739 	struct mbuf *m_new = NULL;
740 	struct bge_rx_bd *r;
741 
742 	if (m == NULL) {
743 		caddr_t			buf = NULL;
744 
745 		/* Allocate the mbuf. */
746 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
747 		if (m_new == NULL) {
748 			return(ENOBUFS);
749 		}
750 
751 		/* Allocate the jumbo buffer */
752 		buf = bge_jalloc(sc);
753 		if (buf == NULL) {
754 			m_freem(m_new);
755 			printf("%s: jumbo allocation failed "
756 			    "-- packet dropped!\n", sc->bge_dev.dv_xname);
757 			return(ENOBUFS);
758 		}
759 
760 		/* Attach the buffer to the mbuf. */
761 		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
762 		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
763 		    bge_jfree, sc);
764 	} else {
765 		m_new = m;
766 		m_new->m_data = m_new->m_ext.ext_buf;
767 		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
768 	}
769 
770 	m_adj(m_new, ETHER_ALIGN);
771 	/* Set up the descriptor. */
772 	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
773 	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
774 	bge_set_hostaddr(&r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
775 	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
776 	r->bge_len = m_new->m_len;
777 	r->bge_idx = i;
778 
779 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
780 	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
781 		i * sizeof (struct bge_rx_bd),
782 	    sizeof (struct bge_rx_bd),
783 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
784 
785 	return(0);
786 }
787 
788 /*
789  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
790  * that's 1MB of memory, which is a lot. For now, we fill only the first
791  * 256 ring entries and hope that our CPU is fast enough to keep up with
792  * the NIC.
793  */
794 int
795 bge_init_rx_ring_std(sc)
796 	struct bge_softc *sc;
797 {
798 	int i;
799 
800 	if (sc->bge_flags & BGE_RXRING_VALID)
801 		return 0;
802 
803 	for (i = 0; i < BGE_SSLOTS; i++) {
804 		if (bge_newbuf_std(sc, i, NULL, NULL) == ENOBUFS)
805 			return(ENOBUFS);
806 	}
807 
808 	sc->bge_std = i - 1;
809 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
810 
811 	sc->bge_flags |= BGE_RXRING_VALID;
812 
813 	return(0);
814 }
815 
816 void
817 bge_free_rx_ring_std(sc)
818 	struct bge_softc *sc;
819 {
820 	int i;
821 
822 	if (!(sc->bge_flags & BGE_RXRING_VALID))
823 		return;
824 
825 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
826 		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
827 			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
828 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
829 			bus_dmamap_destroy(sc->bge_dmatag,
830 			    sc->bge_cdata.bge_rx_std_map[i]);
831 		}
832 		memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
833 		    sizeof(struct bge_rx_bd));
834 	}
835 
836 	sc->bge_flags &= ~BGE_RXRING_VALID;
837 }
838 
839 int
840 bge_init_rx_ring_jumbo(sc)
841 	struct bge_softc *sc;
842 {
843 	int i;
844 	struct bge_rcb *rcb;
845 	struct bge_rcb_opaque *rcbo;
846 
847 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
848 		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
849 			return(ENOBUFS);
850 	}
851 
852 	sc->bge_jumbo = i - 1;
853 
854 	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
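	/*
	 * The "opaque" overlay views the RCB as raw 32-bit words in
	 * register order; bge_reg2 corresponds to the MAXLEN_FLAGS
	 * register written below.
	 */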
855 	rcbo = (struct bge_rcb_opaque *)rcb;
856 	rcb->bge_flags = 0;
857 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcbo->bge_reg2);
858 
859 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
860 
861 	return(0);
862 }
863 
864 void
865 bge_free_rx_ring_jumbo(sc)
866 	struct bge_softc *sc;
867 {
868 	int i;
869 
870 	if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
871 		return;
872 
873 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
874 		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
875 			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
876 			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
877 		}
878 		memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
879 		    sizeof(struct bge_rx_bd));
880 	}
881 
882 	sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
883 }
884 
885 void
886 bge_free_tx_ring(sc)
887 	struct bge_softc *sc;
888 {
889 	int i, freed;
890 	struct txdmamap_pool_entry *dma;
891 
892 	if (!(sc->bge_flags & BGE_TXRING_VALID))
893 		return;
894 
895 	freed = 0;
896 
897 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
898 		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
899 			freed++;
900 			m_freem(sc->bge_cdata.bge_tx_chain[i]);
901 			sc->bge_cdata.bge_tx_chain[i] = NULL;
902 			SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
903 					    link);
904 			sc->txdma[i] = 0;
905 		}
906 		memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
907 		    sizeof(struct bge_tx_bd));
908 	}
909 
910 	while ((dma = SLIST_FIRST(&sc->txdma_list))) {
911 		SLIST_REMOVE_HEAD(&sc->txdma_list, link);
912 		bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
913 		free(dma, M_DEVBUF);
914 	}
915 
916 	sc->bge_flags &= ~BGE_TXRING_VALID;
917 }
918 
919 int
920 bge_init_tx_ring(sc)
921 	struct bge_softc *sc;
922 {
923 	int i;
924 	bus_dmamap_t dmamap;
925 	struct txdmamap_pool_entry *dma;
926 
927 	if (sc->bge_flags & BGE_TXRING_VALID)
928 		return 0;
929 
930 	sc->bge_txcnt = 0;
931 	sc->bge_tx_saved_considx = 0;
932 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
933 	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
934 
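	/*
	 * Pre-create a pool of TX DMA maps, each able to map a jumbo
	 * frame in up to BGE_NTXSEG segments, so the transmit path
	 * never has to create a map on the fly.
	 */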
935 	SLIST_INIT(&sc->txdma_list);
936 	for (i = 0; i < BGE_RSLOTS; i++) {
937 		if (bus_dmamap_create(sc->bge_dmatag, ETHER_MAX_LEN_JUMBO,
938 		    BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT,
939 		    &dmamap))
940 			return(ENOBUFS);
941 		if (dmamap == NULL)
942 			panic("dmamap NULL in bge_init_tx_ring");
943 		dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
944 		if (dma == NULL) {
945 			printf("%s: can't alloc txdmamap_pool_entry\n",
946 			    sc->bge_dev.dv_xname);
947 			bus_dmamap_destroy(sc->bge_dmatag, dmamap);
948 			return (ENOMEM);
949 		}
950 		dma->dmamap = dmamap;
951 		SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
952 	}
953 
954 	sc->bge_flags |= BGE_TXRING_VALID;
955 
956 	return(0);
957 }
958 
959 void
960 bge_setmulti(sc)
961 	struct bge_softc *sc;
962 {
963 	struct ethercom		*ac = &sc->ethercom;
964 	struct ifnet		*ifp = &ac->ec_if;
965 	struct ether_multi	*enm;
966 	struct ether_multistep  step;
967 	u_int32_t		hashes[4] = { 0, 0, 0, 0 };
968 	u_int32_t		h;
969 	int			i;
970 
971 	if (ifp->if_flags & IFF_PROMISC)
972 		goto allmulti;
973 
974 	/* Now program new ones. */
975 	ETHER_FIRST_MULTI(step, ac, enm);
976 	while (enm != NULL) {
977 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
978 			/*
979 			 * We must listen to a range of multicast addresses.
980 			 * For now, just accept all multicasts, rather than
981 			 * trying to set only those filter bits needed to match
982 			 * the range.  (At this time, the only use of address
983 			 * ranges is for IP multicast routing, for which the
984 			 * range is big enough to require all bits set.)
985 			 */
986 			goto allmulti;
987 		}
988 
989 		h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
990 
991 		/* Just want the 7 least-significant bits. */
992 		h &= 0x7f;
993 
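		/*
		 * The 7-bit hash indexes a 128-bit filter spread over
		 * four 32-bit registers: bits 6-5 select the register,
		 * bits 4-0 the bit within it.
		 */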
994 		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
995 		ETHER_NEXT_MULTI(step, enm);
996 	}
997 
998 	ifp->if_flags &= ~IFF_ALLMULTI;
999 	goto setit;
1000 
1001  allmulti:
1002 	ifp->if_flags |= IFF_ALLMULTI;
1003 	hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff;
1004 
1005  setit:
1006 	for (i = 0; i < 4; i++)
1007 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1008 }
1009 
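/*
 * Every combination of the mode-control byte/word-swap bits,
 * apparently retained as an aid for experimenting with DMA
 * endianness settings.
 */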
1010 int bge_swapbits[] = {
1011 	0,
1012 	BGE_MODECTL_BYTESWAP_DATA,
1013 	BGE_MODECTL_WORDSWAP_DATA,
1014 	BGE_MODECTL_BYTESWAP_NONFRAME,
1015 	BGE_MODECTL_WORDSWAP_NONFRAME,
1016 
1017 	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA,
1018 	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
1019 	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,
1020 
1021 	BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
1022 	BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,
1023 
1024 	BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,
1025 
1026 	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1027 	    BGE_MODECTL_BYTESWAP_NONFRAME,
1028 	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1029 	    BGE_MODECTL_WORDSWAP_NONFRAME,
1030 	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
1031 	    BGE_MODECTL_WORDSWAP_NONFRAME,
1032 	BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
1033 	    BGE_MODECTL_WORDSWAP_NONFRAME,
1034 
1035 	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1036 	    BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,
1037 };
1038 
1039 int bge_swapindex = 0;
1040 
1041 /*
1042  * Do endian, PCI and DMA initialization. Also check the on-board ROM
1043  * self-test results.
1044  */
1045 int
1046 bge_chipinit(sc)
1047 	struct bge_softc *sc;
1048 {
1049 	u_int32_t		cachesize;
1050 	int			i;
1051 	struct pci_attach_args	*pa = &(sc->bge_pa);
1052 
1053 
1054 	/* Set endianness before we access any non-PCI registers. */
1055 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
1056 	    BGE_INIT);
1057 
1058 	/*
1059 	 * Check the 'ROM failed' bit on the RX CPU to see if
1060 	 * self-tests passed.
1061 	 */
1062 	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1063 		printf("%s: RX CPU self-diagnostics failed!\n",
1064 		    sc->bge_dev.dv_xname);
1065 		return(ENODEV);
1066 	}
1067 
1068 	/* Clear the MAC control register */
1069 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1070 
1071 	/*
1072 	 * Clear the MAC statistics block in the NIC's
1073 	 * internal memory.
1074 	 */
1075 	for (i = BGE_STATS_BLOCK;
1076 	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1077 		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
1078 
1079 	for (i = BGE_STATUS_BLOCK;
1080 	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1081 		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
1082 
1083 	/* Set up the PCI DMA control register. */
1084 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1085 	    BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD|0x0F);
1086 
1087 	/*
1088 	 * Set up general mode register.
1089 	 */
1090 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1091 		    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1092 		    BGE_MODECTL_NO_RX_CRC|BGE_MODECTL_TX_NO_PHDR_CSUM|
1093 		    BGE_MODECTL_RX_NO_PHDR_CSUM);
1094 
1095 	/* Get cache line size. */
1096 	cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
1097 
1098 	/*
1099 	 * Avoid violating PCI spec on certain chip revs.
1100 	 */
1101 	if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD) &
1102 	    PCIM_CMD_MWIEN) {
1103 		switch(cachesize) {
1104 		case 1:
1105 			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1106 				   BGE_PCI_WRITE_BNDRY_16BYTES);
1107 			break;
1108 		case 2:
1109 			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1110 				   BGE_PCI_WRITE_BNDRY_32BYTES);
1111 			break;
1112 		case 4:
1113 			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1114 				   BGE_PCI_WRITE_BNDRY_64BYTES);
1115 			break;
1116 		case 8:
1117 			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1118 				   BGE_PCI_WRITE_BNDRY_128BYTES);
1119 			break;
1120 		case 16:
1121 			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1122 				   BGE_PCI_WRITE_BNDRY_256BYTES);
1123 			break;
1124 		case 32:
1125 			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1126 				   BGE_PCI_WRITE_BNDRY_512BYTES);
1127 			break;
1128 		case 64:
1129 			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1130 				   BGE_PCI_WRITE_BNDRY_1024BYTES);
1131 			break;
1132 		default:
1133 		/* Disable PCI memory write and invalidate. */
1134 #if 0
1135 			if (bootverbose)
1136 				printf("%s: cache line size %d not "
1137 				    "supported; disabling PCI MWI\n",
1138 				    sc->bge_dev.dv_xname, cachesize);
1139 #endif
1140 			PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD,
1141 			    PCIM_CMD_MWIEN);
1142 			break;
1143 		}
1144 	}
1145 
1146 #ifdef __brokenalpha__
1147 	/*
1148 	 * Must ensure that we do not cross an 8K (bytes) boundary
1149 	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
1150 	 * restriction on some ALPHA platforms with early revision
1151 	 * 21174 PCI chipsets, such as the AlphaPC 164lx
1152 	 */
1153 	PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
1154 #endif
1155 
1156 	/* Set the timer prescaler (always 66MHz) */
1157 	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1158 
1159 	return(0);
1160 }
1161 
1162 int
1163 bge_blockinit(sc)
1164 	struct bge_softc *sc;
1165 {
1166 	struct bge_rcb		*rcb;
1167 	struct bge_rcb_opaque	*rcbo;
1168 	bus_size_t		rcb_addr;
1169 	int			i;
1170 	struct ifnet		*ifp = &sc->ethercom.ec_if;
1171 	bge_hostaddr		taddr;
1172 
1173 	/*
1174 	 * Initialize the memory window pointer register so that
1175 	 * we can access the first 32K of internal NIC RAM. This will
1176 	 * allow us to set up the TX send ring RCBs and the RX return
1177 	 * ring RCBs, plus other things which live in NIC memory.
1178 	 */
1179 
1180 	pci_conf_write(sc->bge_pa.pa_pc, sc->bge_pa.pa_tag,
1181 	    BGE_PCI_MEMWIN_BASEADDR, 0);
1182 
1183 	/* Configure mbuf memory pool */
1184 	if (sc->bge_extram) {
1185 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_EXT_SSRAM);
1186 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1187 	} else {
1188 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1189 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1190 	}
1191 
1192 	/* Configure DMA resource pool */
1193 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, BGE_DMA_DESCRIPTORS);
1194 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1195 
1196 	/* Configure mbuf pool watermarks */
1197 	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24);
1198 	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24);
1199 	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48);
1200 
1201 	/* Configure DMA resource watermarks */
1202 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1203 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1204 
1205 	/* Enable buffer manager */
1206 	CSR_WRITE_4(sc, BGE_BMAN_MODE,
1207 	    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1208 
1209 	/* Poll for buffer manager start indication */
1210 	for (i = 0; i < BGE_TIMEOUT; i++) {
1211 		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1212 			break;
1213 		DELAY(10);
1214 	}
1215 
1216 	if (i == BGE_TIMEOUT) {
1217 		printf("%s: buffer manager failed to start\n",
1218 		    sc->bge_dev.dv_xname);
1219 		return(ENXIO);
1220 	}
1221 
1222 	/* Enable flow-through queues */
1223 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1224 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1225 
1226 	/* Wait until queue initialization is complete */
1227 	for (i = 0; i < BGE_TIMEOUT; i++) {
1228 		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1229 			break;
1230 		DELAY(10);
1231 	}
1232 
1233 	if (i == BGE_TIMEOUT) {
1234 		printf("%s: flow-through queue init failed\n",
1235 		    sc->bge_dev.dv_xname);
1236 		return(ENXIO);
1237 	}
1238 
1239 	/* Initialize the standard RX ring control block */
1240 	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
1241 	bge_set_hostaddr(&rcb->bge_hostaddr,
1242 	    BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
1243 	rcb->bge_max_len = BGE_MAX_FRAMELEN;
1244 	if (sc->bge_extram)
1245 		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1246 	else
1247 		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1248 	rcb->bge_flags = 0;
1249 	rcbo = (struct bge_rcb_opaque *)rcb;
1250 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcbo->bge_reg0);
1251 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcbo->bge_reg1);
1252 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcbo->bge_reg2);
1253 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcbo->bge_reg3);
1254 
1255 	/*
1256 	 * Initialize the jumbo RX ring control block
1257 	 * We set the 'ring disabled' bit in the flags
1258 	 * field until we're actually ready to start
1259 	 * using this ring (i.e. once we set the MTU
1260 	 * high enough to require it).
1261 	 */
1262 	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1263 	bge_set_hostaddr(&rcb->bge_hostaddr,
1264 	    BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
1265 	rcb->bge_max_len = BGE_MAX_FRAMELEN;
1266 	if (sc->bge_extram)
1267 		rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1268 	else
1269 		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1270 	rcb->bge_flags = BGE_RCB_FLAG_RING_DISABLED;
1271 
1272 	rcbo = (struct bge_rcb_opaque *)rcb;
1273 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, rcbo->bge_reg0);
1274 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, rcbo->bge_reg1);
1275 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcbo->bge_reg2);
1276 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcbo->bge_reg3);
1277 
1278 	/* Set up dummy disabled mini ring RCB */
1279 	rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
1280 	rcb->bge_flags = BGE_RCB_FLAG_RING_DISABLED;
1281 	rcbo = (struct bge_rcb_opaque *)rcb;
1282 	CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, rcbo->bge_reg2);
1283 
1284 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1285 	    offsetof(struct bge_ring_data, bge_info), sizeof (struct bge_gib),
1286 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1287 
1288 	/*
1289 	 * Set the BD ring replenish thresholds. The recommended
1290 	 * values are 1/8th the number of descriptors allocated to
1291 	 * each ring.
1292 	 */
1293 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1294 	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1295 
1296 	/*
1297 	 * Disable all unused send rings by setting the 'ring disabled'
1298 	 * bit in the flags field of all the TX send ring control blocks.
1299 	 * These are located in NIC memory.
1300 	 */
1301 	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1302 	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1303 		RCB_WRITE_2(sc, rcb_addr, bge_flags,
1304 			    BGE_RCB_FLAG_RING_DISABLED);
1305 		RCB_WRITE_2(sc, rcb_addr, bge_max_len, 0);
1306 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
1307 		rcb_addr += sizeof(struct bge_rcb);
1308 	}
1309 
1310 	/* Configure TX RCB 0 (we use only the first ring) */
1311 	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1312 	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
1313 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1314 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1315 	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
1316 		    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1317 	RCB_WRITE_2(sc, rcb_addr, bge_max_len, BGE_TX_RING_CNT);
1318 	RCB_WRITE_2(sc, rcb_addr, bge_flags, 0);
1319 
1320 	/* Disable all unused RX return rings */
1321 	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1322 	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1323 		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
1324 		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
1325 		RCB_WRITE_2(sc, rcb_addr, bge_flags,
1326 			    BGE_RCB_FLAG_RING_DISABLED);
1327 		RCB_WRITE_2(sc, rcb_addr, bge_max_len, BGE_RETURN_RING_CNT);
1328 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
1329 		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1330 		    (i * (sizeof(u_int64_t))), 0);
1331 		rcb_addr += sizeof(struct bge_rcb);
1332 	}
1333 
1334 	/* Initialize RX ring indexes */
1335 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1336 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1337 	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1338 
1339 	/*
1340 	 * Set up RX return ring 0
1341 	 * Note that the NIC address for RX return rings is 0x00000000.
1342 	 * The return rings live entirely within the host, so the
1343 	 * nicaddr field in the RCB isn't used.
1344 	 */
1345 	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1346 	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
1347 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1348 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1349 	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
1350 	RCB_WRITE_2(sc, rcb_addr, bge_max_len, BGE_RETURN_RING_CNT);
1351 	RCB_WRITE_2(sc, rcb_addr, bge_flags, 0);
1352 
1353 	/* Set random backoff seed for TX */
1354 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1355 	    LLADDR(ifp->if_sadl)[0] + LLADDR(ifp->if_sadl)[1] +
1356 	    LLADDR(ifp->if_sadl)[2] + LLADDR(ifp->if_sadl)[3] +
1357 	    LLADDR(ifp->if_sadl)[4] + LLADDR(ifp->if_sadl)[5] +
1358 	    BGE_TX_BACKOFF_SEED_MASK);
1359 
1360 	/* Set inter-packet gap */
1361 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1362 
1363 	/*
1364 	 * Specify which ring to use for packets that don't match
1365 	 * any RX rules.
1366 	 */
1367 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1368 
1369 	/*
1370 	 * Configure number of RX lists. One interrupt distribution
1371 	 * list, sixteen active lists, one bad frames class.
1372 	 */
1373 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1374 
1375 	/* Initialize RX list placement stats mask. */
1376 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1377 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1378 
1379 	/* Disable host coalescing until we get it set up */
1380 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1381 
1382 	/* Poll to make sure it's shut down. */
1383 	for (i = 0; i < BGE_TIMEOUT; i++) {
1384 		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1385 			break;
1386 		DELAY(10);
1387 	}
1388 
1389 	if (i == BGE_TIMEOUT) {
1390 		printf("%s: host coalescing engine failed to idle\n",
1391 		    sc->bge_dev.dv_xname);
1392 		return(ENXIO);
1393 	}
1394 
1395 	/* Set up host coalescing defaults */
1396 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1397 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1398 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1399 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1400 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1401 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1402 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1403 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1404 	CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1405 
1406 	/* Set up address of statistics block */
1407 	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
1408 	CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1409 	CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
1410 	CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);
1411 
1412 	/* Set up address of status block */
1413 	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
1414 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1415 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
1416 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
1417 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
1418 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
1419 
1420 	/* Turn on host coalescing state machine */
1421 	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1422 
1423 	/* Turn on RX BD completion state machine and enable attentions */
1424 	CSR_WRITE_4(sc, BGE_RBDC_MODE,
1425 	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1426 
1427 	/* Turn on RX list placement state machine */
1428 	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1429 
1430 	/* Turn on RX list selector state machine. */
1431 	CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1432 
1433 	/* Turn on DMA, clear stats */
1434 	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1435 	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1436 	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1437 	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1438 	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1439 
1440 	/* Set misc. local control, enable interrupts on attentions */
1441 	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1442 
1443 #ifdef notdef
1444 	/* Assert GPIO pins for PHY reset */
1445 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1446 	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1447 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1448 	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1449 #endif
1450 
1451 	/* Turn on DMA completion state machine */
1452 	CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1453 
1454 	/* Turn on write DMA state machine */
1455 	CSR_WRITE_4(sc, BGE_WDMA_MODE,
1456 	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1457 
1458 	/* Turn on read DMA state machine */
1459 	CSR_WRITE_4(sc, BGE_RDMA_MODE,
1460 	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1461 
1462 	/* Turn on RX data completion state machine */
1463 	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1464 
1465 	/* Turn on RX BD initiator state machine */
1466 	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1467 
1468 	/* Turn on RX data and RX BD initiator state machine */
1469 	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1470 
1471 	/* Turn on Mbuf cluster free state machine */
1472 	CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1473 
1474 	/* Turn on send BD completion state machine */
1475 	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1476 
1477 	/* Turn on send data completion state machine */
1478 	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1479 
1480 	/* Turn on send data initiator state machine */
1481 	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1482 
1483 	/* Turn on send BD initiator state machine */
1484 	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1485 
1486 	/* Turn on send BD selector state machine */
1487 	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1488 
1489 	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1490 	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1491 	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1492 
1493 	/* init LED register */
1494 	CSR_WRITE_4(sc, BGE_MAC_LED_CTL, 0x00000000);
1495 
1496 	/* ack/clear link change events */
1497 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1498 	    BGE_MACSTAT_CFG_CHANGED);
1499 	CSR_WRITE_4(sc, BGE_MI_STS, 0);
1500 
1501 	/* Enable PHY auto polling (for MII/GMII only) */
1502 	if (sc->bge_tbi) {
1503 		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1504 	} else {
1505 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1506 		if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN)
1507 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1508 			    BGE_EVTENB_MI_INTERRUPT);
1509 	}
1510 
1511 	/* Enable link state change attentions. */
1512 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1513 
1514 	return(0);
1515 }
1516 
1517 static const struct bge_revision {
1518 	uint32_t		br_asicrev;
1519 	uint32_t		br_quirks;
1520 	const char		*br_name;
1521 } bge_revisions[] = {
1522 	{ BGE_ASICREV_BCM5700_A0,
1523 	  BGE_QUIRK_LINK_STATE_BROKEN,
1524 	  "BCM5700 A0" },
1525 
1526 	{ BGE_ASICREV_BCM5700_A1,
1527 	  BGE_QUIRK_LINK_STATE_BROKEN,
1528 	  "BCM5700 A1" },
1529 
1530 	{ BGE_ASICREV_BCM5700_B0,
1531 	  BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_CSUM_BROKEN,
1532 	  "BCM5700 B0" },
1533 
1534 	{ BGE_ASICREV_BCM5700_B1,
1535 	  BGE_QUIRK_LINK_STATE_BROKEN,
1536 	  "BCM5700 B1" },
1537 
1538 	{ BGE_ASICREV_BCM5700_B2,
1539 	  BGE_QUIRK_LINK_STATE_BROKEN,
1540 	  "BCM5700 B2" },
1541 
1542 	/* This is treated like a BCM5700 Bx */
1543 	{ BGE_ASICREV_BCM5700_ALTIMA,
1544 	  BGE_QUIRK_LINK_STATE_BROKEN,
1545 	  "BCM5700 Altima" },
1546 
1547 	{ BGE_ASICREV_BCM5700_C0,
1548 	  0,
1549 	  "BCM5700 C0" },
1550 
1551 	{ BGE_ASICREV_BCM5701_A0,
1552 	  0,
1553 	  "BCM5701 A0" },
1554 
1555 	{ BGE_ASICREV_BCM5701_B0,
1556 	  0,
1557 	  "BCM5701 B0" },
1558 
1559 	{ BGE_ASICREV_BCM5701_B2,
1560 	  0,
1561 	  "BCM5701 B2" },
1562 
1563 	{ BGE_ASICREV_BCM5701_B5,
1564 	  0,
1565 	  "BCM5701 B5" },
1566 
1567 	{ BGE_ASICREV_BCM5703_A0,
1568 	  0,
1569 	  "BCM5703 A0" },
1570 
1571 	{ BGE_ASICREV_BCM5703_A1,
1572 	  0,
1573 	  "BCM5703 A1" },
1574 
1575 	{ BGE_ASICREV_BCM5703_A2,
1576 	  0,
1577 	  "BCM5703 A2" },
1578 
1579 	{ 0, 0, NULL }
1580 };
1581 
1582 static const struct bge_revision *
1583 bge_lookup_rev(uint32_t asicrev)
1584 {
1585 	const struct bge_revision *br;
1586 
1587 	for (br = bge_revisions; br->br_name != NULL; br++) {
1588 		if (br->br_asicrev == asicrev)
1589 			return (br);
1590 	}
1591 
1592 	return (NULL);
1593 }
1594 
1595 static const struct bge_product {
1596 	pci_vendor_id_t		bp_vendor;
1597 	pci_product_id_t	bp_product;
1598 	const char		*bp_name;
1599 } bge_products[] = {
1600 	/*
1601 	 * The BCM5700 documentation seems to indicate that the hardware
1602 	 * still has the Alteon vendor ID burned into it, though it
1603 	 * should always be overridden by the value in the EEPROM.  We'll
1604 	 * check for it anyway.
1605 	 */
1606 	{ PCI_VENDOR_ALTEON,
1607 	  PCI_PRODUCT_ALTEON_BCM5700,
1608 	  "Broadcom BCM5700 Gigabit Ethernet" },
1609 	{ PCI_VENDOR_ALTEON,
1610 	  PCI_PRODUCT_ALTEON_BCM5701,
1611 	  "Broadcom BCM5701 Gigabit Ethernet" },
1612 
1613 	{ PCI_VENDOR_ALTIMA,
1614 	  PCI_PRODUCT_ALTIMA_AC1000,
1615 	  "Altima AC1000 Gigabit Ethernet" },
1616 	{ PCI_VENDOR_ALTIMA,
1617 	  PCI_PRODUCT_ALTIMA_AC1001,
1618 	  "Altima AC1001 Gigabit Ethernet" },
1619 	{ PCI_VENDOR_ALTIMA,
1620 	  PCI_PRODUCT_ALTIMA_AC9100,
1621 	  "Altima AC9100 Gigabit Ethernet" },
1622 
1623 	{ PCI_VENDOR_BROADCOM,
1624 	  PCI_PRODUCT_BROADCOM_BCM5700,
1625 	  "Broadcom BCM5700 Gigabit Ethernet" },
1626 	{ PCI_VENDOR_BROADCOM,
1627 	  PCI_PRODUCT_BROADCOM_BCM5701,
1628 	  "Broadcom BCM5700 Gigabit Ethernet" },
1629 
1630 	{ PCI_VENDOR_SCHNEIDERKOCH,
1631 	  PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
1632 	  "SysKonnect SK-9Dx1 Gigabit Ethernet" },
1633 
1634 	{ PCI_VENDOR_3COM,
1635 	  PCI_PRODUCT_3COM_3C996,
1636 	  "3Com 3c996 Gigabit Ethernet" },
1637 
1638 	{ 0,
1639 	  0,
1640 	  NULL },
1641 };
1642 
1643 static const struct bge_product *
1644 bge_lookup(const struct pci_attach_args *pa)
1645 {
1646 	const struct bge_product *bp;
1647 
1648 	for (bp = bge_products; bp->bp_name != NULL; bp++) {
1649 		if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor &&
1650 		    PCI_PRODUCT(pa->pa_id) == bp->bp_product)
1651 			return (bp);
1652 	}
1653 
1654 	return (NULL);
1655 }
1656 
1657 /*
1658  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1659  * against our list and return its name if we find a match. (The
1660  * controller has VPD support, which could in principle supply the
1661  * product name string from the device itself, but the VPD code in
1662  * this driver is compiled out, so the compiled-in string is used.)
1664  */
1665 int
1666 bge_probe(parent, match, aux)
1667 	struct device *parent;
1668 	struct cfdata *match;
1669 	void *aux;
1670 {
1671 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
1672 
1673 	if (bge_lookup(pa) != NULL)
1674 		return (1);
1675 
1676 	return (0);
1677 }
1678 
1679 void
1680 bge_attach(parent, self, aux)
1681 	struct device *parent, *self;
1682 	void *aux;
1683 {
1684 	struct bge_softc	*sc = (struct bge_softc *)self;
1685 	struct pci_attach_args	*pa = aux;
1686 	const struct bge_product *bp;
1687 	const struct bge_revision *br;
1688 	pci_chipset_tag_t	pc = pa->pa_pc;
1689 	pci_intr_handle_t	ih;
1690 	const char		*intrstr = NULL;
1691 	bus_dma_segment_t	seg;
1692 	int			rseg;
1693 	u_int32_t		hwcfg = 0;
1694 	u_int32_t		command;
1695 	struct ifnet		*ifp;
1697 	caddr_t			kva;
1698 	u_char			eaddr[ETHER_ADDR_LEN];
1699 	pcireg_t		memtype;
1700 	bus_addr_t		memaddr;
1701 	bus_size_t		memsize;
1702 
1703 	bp = bge_lookup(pa);
1704 	KASSERT(bp != NULL);
1705 
1706 	sc->bge_pa = *pa;
1707 
1708 	printf(": %s\n", bp->bp_name);
1709 
1710 	/*
1711 	 * Map control/status registers.
1712 	 */
1713 	DPRINTFN(5, ("Map control/status regs\n"));
1714 	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1715 	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
1716 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
1717 	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1718 
1719 	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
1720 		printf("%s: failed to enable memory mapping!\n",
1721 		    sc->bge_dev.dv_xname);
1722 		return;
1723 	}
1724 
1725 	DPRINTFN(5, ("pci_mem_find\n"));
1726 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0);
1727 	switch (memtype) {
1728 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1729 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1730 		if (pci_mapreg_map(pa, BGE_PCI_BAR0,
1731 		    memtype, 0, &sc->bge_btag, &sc->bge_bhandle,
1732 		    &memaddr, &memsize) == 0)
1733 			break;
1734 	default:
1735 		printf("%s: can't find mem space\n",
1736 		    sc->bge_dev.dv_xname);
1737 		return;
1738 	}
1739 
1740 	DPRINTFN(5, ("pci_intr_map\n"));
1741 	if (pci_intr_map(pa, &ih)) {
1742 		printf("%s: couldn't map interrupt\n",
1743 		    sc->bge_dev.dv_xname);
1744 		return;
1745 	}
1746 
1747 	DPRINTFN(5, ("pci_intr_string\n"));
1748 	intrstr = pci_intr_string(pc, ih);
1749 
1750 	DPRINTFN(5, ("pci_intr_establish\n"));
1751 	sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc);
1752 
1753 	if (sc->bge_intrhand == NULL) {
1754 		printf("%s: couldn't establish interrupt",
1755 		    sc->bge_dev.dv_xname);
1756 		if (intrstr != NULL)
1757 			printf(" at %s", intrstr);
1758 		printf("\n");
1759 		return;
1760 	}
1761 	printf("%s: interrupting at %s\n", sc->bge_dev.dv_xname, intrstr);
1762 
1763 	/* Try to reset the chip. */
1764 	DPRINTFN(5, ("bge_reset\n"));
1765 	bge_reset(sc);
1766 
1767 	if (bge_chipinit(sc)) {
1768 		printf("%s: chip initializatino failed\n",
1769 		    sc->bge_dev.dv_xname);
1770 		bge_release_resources(sc);
1771 		return;
1772 	}
1773 
1774 	/*
1775 	 * Get station address from the EEPROM.
1776 	 */
1777 	if (bge_read_eeprom(sc, (caddr_t)eaddr,
1778 	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
1779 		printf("bge%d: failed to read station address\n", unit);
1780 		bge_release_resources(sc);
1781 		return;
1782 	}
1783 
1784 	/*
1785 	 * Save ASIC rev.  Look up any quirks associated with this
1786 	 * ASIC.
1787 	 */
1788 	sc->bge_asicrev =
1789 	    pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) &
1790 	    BGE_PCIMISCCTL_ASICREV;
1791 	br = bge_lookup_rev(sc->bge_asicrev);
1792 
1793 	printf("%s: ", sc->bge_dev.dv_xname);
1794 	if (br == NULL) {
1795 		printf("unknown ASIC 0x%08x", sc->bge_asicrev);
1796 		sc->bge_quirks = 0;
1797 	} else {
1798 		printf("ASIC %s", br->br_name);
1799 		sc->bge_quirks = br->br_quirks;
1800 	}
1801 	printf(", Ethernet address %s\n", ether_sprintf(eaddr));
1802 
1803 	/* Allocate the general information block and ring buffers. */
1804 	sc->bge_dmatag = pa->pa_dmat;
1805 	DPRINTFN(5, ("bus_dmamem_alloc\n"));
1806 	if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
1807 			     PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
1808 		printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname);
1809 		return;
1810 	}
1811 	DPRINTFN(5, ("bus_dmamem_map\n"));
1812 	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg,
1813 			   sizeof(struct bge_ring_data), &kva,
1814 			   BUS_DMA_NOWAIT)) {
1815 		printf("%s: can't map dma buffers (%d bytes)\n",
1816 		    sc->bge_dev.dv_xname, (int)sizeof(struct bge_ring_data));
1817 		bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
1818 		return;
1819 	}
1820 	DPRINTFN(5, ("bus_dmamap_create\n"));
1821 	if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
1822 	    sizeof(struct bge_ring_data), 0,
1823 	    BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
1824 		printf("%s: can't create dma map\n", sc->bge_dev.dv_xname);
1825 		bus_dmamem_unmap(sc->bge_dmatag, kva,
1826 				 sizeof(struct bge_ring_data));
1827 		bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
1828 		return;
1829 	}
1830 	DPRINTFN(5, ("bus_dmamap_load\n"));
1831 	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
1832 			    sizeof(struct bge_ring_data), NULL,
1833 			    BUS_DMA_NOWAIT)) {
1834 		bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
1835 		bus_dmamem_unmap(sc->bge_dmatag, kva,
1836 				 sizeof(struct bge_ring_data));
1837 		bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
1838 		return;
1839 	}
1840 
1841 	DPRINTFN(5, ("memset\n"));
1842 	sc->bge_rdata = (struct bge_ring_data *)kva;
1843 
1844 	memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data));
1845 
1846 	/* Try to allocate memory for jumbo buffers. */
1847 	if (bge_alloc_jumbo_mem(sc)) {
1848 		printf("%s: jumbo buffer allocation failed\n",
1849 		    sc->bge_dev.dv_xname);
1850 	} else
1851 		sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1852 
1853 	/* Set default tuneable values. */
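	/*
	 * Note: these pair up as (ticks, max BDs) thresholds for host
	 * interrupt coalescing; the chip is expected to interrupt once
	 * either member of a pair is reached, whichever comes first.
	 */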
1854 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
1855 	sc->bge_rx_coal_ticks = 150;
1856 	sc->bge_tx_coal_ticks = 150;
1857 	sc->bge_rx_max_coal_bds = 64;
1858 	sc->bge_tx_max_coal_bds = 128;
1859 
1860 	/* Set up ifnet structure */
1861 	ifp = &sc->ethercom.ec_if;
1862 	ifp->if_softc = sc;
1863 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1864 	ifp->if_ioctl = bge_ioctl;
1865 	ifp->if_start = bge_start;
1866 	ifp->if_init = bge_init;
1867 	ifp->if_watchdog = bge_watchdog;
1868 	IFQ_SET_MAXLEN(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1869 	IFQ_SET_READY(&ifp->if_snd);
1870 	DPRINTFN(5, ("strcpy\n"));
1871 	strcpy(ifp->if_xname, sc->bge_dev.dv_xname);
1872 
1873 	if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0)
1874 		sc->ethercom.ec_if.if_capabilities |=
1875 		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
1876 	sc->ethercom.ec_capabilities |=
1877 	    ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
1878 
1879 	/*
1880 	 * Do MII setup.
1881 	 */
1882 	DPRINTFN(5, ("mii setup\n"));
1883 	sc->bge_mii.mii_ifp = ifp;
1884 	sc->bge_mii.mii_readreg = bge_miibus_readreg;
1885 	sc->bge_mii.mii_writereg = bge_miibus_writereg;
1886 	sc->bge_mii.mii_statchg = bge_miibus_statchg;
1887 
1888 	/*
1889 	 * Figure out what sort of media we have by checking the
1890 	 * hardware config word in the EEPROM. Note: on some BCM5700
1891 	 * cards, this value appears to be unset. If that's the
1892 	 * case, we have to rely on identifying the NIC by its PCI
1893 	 * subsystem ID, as we do below for the SysKonnect SK-9D41.
1894 	 */
1895 	bge_read_eeprom(sc, (caddr_t)&hwcfg,
1896 		    BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
1897 	if ((be32toh(hwcfg) & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
1898 		sc->bge_tbi = 1;
1899 
1900 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
1901 	if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_SUBSYS) >> 16) ==
1902 	    SK_SUBSYSID_9D41)
1903 		sc->bge_tbi = 1;
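	/*
	 * bge_tbi selects the ten-bit interface (serdes) path used on
	 * fiber cards; media is then managed directly below rather
	 * than through an MII PHY attachment.
	 */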
1904 
1905 	if (sc->bge_tbi) {
1906 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
1907 		    bge_ifmedia_sts);
1908 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
1909 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
1910 			    0, NULL);
1911 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1912 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
1913 	} else {
1914 		/*
1915 		 * Do transceiver setup.
1916 		 */
1917 		ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
1918 			     bge_ifmedia_sts);
1919 		mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff,
1920 			   MII_PHY_ANY, MII_OFFSET_ANY, 0);
1921 
1922 		if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) {
1923 			printf("%s: no PHY found!\n", sc->bge_dev.dv_xname);
1924 			ifmedia_add(&sc->bge_mii.mii_media,
1925 				    IFM_ETHER|IFM_MANUAL, 0, NULL);
1926 			ifmedia_set(&sc->bge_mii.mii_media,
1927 				    IFM_ETHER|IFM_MANUAL);
1928 		} else
1929 			ifmedia_set(&sc->bge_mii.mii_media,
1930 				    IFM_ETHER|IFM_AUTO);
1931 	}
1932 
1933 	/*
1934 	 * Call MI attach routine.
1935 	 */
1936 	DPRINTFN(5, ("if_attach\n"));
1937 	if_attach(ifp);
1938 	DPRINTFN(5, ("ether_ifattach\n"));
1939 	ether_ifattach(ifp, eaddr);
1940 	DPRINTFN(5, ("callout_init\n"));
1941 	callout_init(&sc->bge_timeout);
1942 }
1943 
1944 void
1945 bge_release_resources(sc)
1946 	struct bge_softc *sc;
1947 {
1948 	if (sc->bge_vpd_prodname != NULL)
1949 		free(sc->bge_vpd_prodname, M_DEVBUF);
1950 
1951 	if (sc->bge_vpd_readonly != NULL)
1952 		free(sc->bge_vpd_readonly, M_DEVBUF);
1953 }
1954 
1955 void
1956 bge_reset(sc)
1957 	struct bge_softc *sc;
1958 {
1959 	struct pci_attach_args *pa = &sc->bge_pa;
1960 	u_int32_t cachesize, command, pcistate;
1961 	int i, val = 0;
1962 
1963 	/* Save some important PCI state. */
1964 	cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
1965 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD);
1966 	pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
1967 
1968 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
1969 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
1970 	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW);
1971 
1972 	/* Issue global reset */
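	/*
	 * The (65 << 1) field below appears to program the misc-config
	 * timer prescaler (65 matching a 66MHz core clock).
	 */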
1973 	bge_writereg_ind(sc, BGE_MISC_CFG,
1974 	    BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1));
1975 
1976 	DELAY(1000);
1977 
1978 	/* Reset some of the PCI state that got zapped by reset */
1979 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
1980 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
1981 	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW);
1982 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);
1983 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
1984 	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
1985 
1986 	/* Enable memory arbiter. */
1987 	CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
1988 
1989 	/*
1990 	 * Prevent PXE restart: write a magic number to the
1991 	 * general communications memory at 0xB50.
1992 	 */
1993 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1994 
1995 	/*
1996 	 * Poll the value location we just wrote until
1997 	 * we see the 1's complement of the magic number.
1998 	 * This indicates that the firmware initialization
1999 	 * is complete.
2000 	 */
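	/* Poll budget: 750 iterations x DELAY(1000) ~= 750ms worst case. */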
2001 	for (i = 0; i < 750; i++) {
2002 		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2003 		if (val == ~BGE_MAGIC_NUMBER)
2004 			break;
2005 		DELAY(1000);
2006 	}
2007 
2008 	if (i == 750) {
2009 		printf("%s: firmware handshake timed out, val = %x\n",
2010 		    sc->bge_dev.dv_xname, val);
2011 		return;
2012 	}
2013 
2014 	/*
2015 	 * XXX Wait for the value of the PCISTATE register to
2016 	 * return to its original pre-reset state. This is a
2017 	 * fairly good indicator of reset completion. If we don't
2018 	 * wait for the reset to fully complete, trying to read
2019 	 * from the device's non-PCI registers may yield garbage
2020 	 * results.
2021 	 */
2022 	for (i = 0; i < BGE_TIMEOUT; i++) {
2023 		if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) ==
2024 		    pcistate)
2025 			break;
2026 		DELAY(10);
2027 	}
2028 
2029 	/* Enable memory arbiter. */
2030 	CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2031 
2032 	/* Fix up byte swapping */
2033 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
2034 
2035 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2036 
2037 	DELAY(10000);
2038 }
2039 
2040 /*
2041  * Frame reception handling. This is called if there's a frame
2042  * on the receive return list.
2043  *
2044  * Note: we have to be able to handle two possibilities here:
2045  * 1) the frame is from the jumbo receive ring
2046  * 2) the frame is from the standard receive ring
2047  */
2048 
2049 void
2050 bge_rxeof(sc)
2051 	struct bge_softc *sc;
2052 {
2053 	struct ifnet *ifp;
2054 	int stdcnt = 0, jumbocnt = 0;
2055 	int have_tag = 0;
2056 	u_int16_t vlan_tag = 0;
2057 	bus_dmamap_t dmamap;
2058 	bus_addr_t offset, toff;
2059 	bus_size_t tlen;
2060 	int tosync;
2061 
2062 	ifp = &sc->ethercom.ec_if;
2063 
2064 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2065 	    offsetof(struct bge_ring_data, bge_status_block),
2066 	    sizeof (struct bge_status_block),
2067 	    BUS_DMASYNC_POSTREAD);
2068 
2069 	offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
2070 	tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx -
2071 	    sc->bge_rx_saved_considx;
2072 
2073 	toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd));
2074 
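	/*
	 * A negative count means the producer index has wrapped past
	 * the end of the return ring; sync the tail of the ring here,
	 * then sync again from the ring base below.
	 */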
2075 	if (tosync < 0) {
2076 		tlen = (BGE_RETURN_RING_CNT - sc->bge_rx_saved_considx) *
2077 		    sizeof (struct bge_rx_bd);
2078 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2079 		    toff, tlen, BUS_DMASYNC_POSTREAD);
2080 		tosync = -tosync;
2081 	}
2082 
2083 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2084 	    offset, tosync * sizeof (struct bge_rx_bd),
2085 	    BUS_DMASYNC_POSTREAD);
2086 
2087 	while (sc->bge_rx_saved_considx !=
2088 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
2089 		struct bge_rx_bd	*cur_rx;
2090 		u_int32_t		rxidx;
2091 		struct mbuf		*m = NULL;
2092 
2093 		cur_rx = &sc->bge_rdata->
2094 			bge_rx_return_ring[sc->bge_rx_saved_considx];
2095 
2096 		rxidx = cur_rx->bge_idx;
2097 		BGE_INC(sc->bge_rx_saved_considx, BGE_RETURN_RING_CNT);
2098 
2099 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2100 			have_tag = 1;
2101 			vlan_tag = cur_rx->bge_vlan_tag;
2102 		}
2103 
2104 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2105 			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2106 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2107 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2108 			jumbocnt++;
2109 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2110 				ifp->if_ierrors++;
2111 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2112 				continue;
2113 			}
2114 			if (bge_newbuf_jumbo(sc, sc->bge_jumbo,
2115 					     NULL) == ENOBUFS) {
2116 				ifp->if_ierrors++;
2117 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2118 				continue;
2119 			}
2120 		} else {
2121 			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2122 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2123 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2124 			stdcnt++;
2125 			dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
2126 			sc->bge_cdata.bge_rx_std_map[rxidx] = NULL;
2127 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2128 				ifp->if_ierrors++;
2129 				bge_newbuf_std(sc, sc->bge_std, m, dmamap);
2130 				continue;
2131 			}
2132 			if (bge_newbuf_std(sc, sc->bge_std,
2133 			    NULL, dmamap) == ENOBUFS) {
2134 				ifp->if_ierrors++;
2135 				bge_newbuf_std(sc, sc->bge_std, m, dmamap);
2136 				continue;
2137 			}
2138 		}
2139 
2140 		ifp->if_ipackets++;
2141 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len;
2142 		m->m_pkthdr.rcvif = ifp;
2143 
2144 #if NBPFILTER > 0
2145 		/*
2146 		 * Handle BPF listeners. Let the BPF user see the packet.
2147 		 */
2148 		if (ifp->if_bpf)
2149 			bpf_mtap(ifp->if_bpf, m);
2150 #endif
2151 
2152 		if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0) {
2153 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
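			/*
			 * The chip reports 0xffff in bge_ip_csum for a
			 * good IP header checksum, so any other value
			 * marks the header as bad.
			 */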
2154 			if ((cur_rx->bge_ip_csum ^ 0xffff) != 0)
2155 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
2156 #if 0	/* XXX appears to be broken */
2157 			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2158 				m->m_pkthdr.csum_data =
2159 				    cur_rx->bge_tcp_udp_csum;
2160 				m->m_pkthdr.csum_flags |=
2161 				    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_DATA);
2162 			}
2163 #endif
2164 		}
2165 
2166 		/*
2167 		 * If we received a packet with a vlan tag, pass it
2168 		 * to vlan_input() instead of ether_input().
2169 		 */
2170 		if (have_tag) {
2171 			struct mbuf *n;
2172 
2173 			n = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
2174 			if (n != NULL) {
2175 				*mtod(n, int *) = vlan_tag;
2176 				n->m_len = sizeof(int);
2177 				have_tag = vlan_tag = 0;
2178 			} else {
2179 				printf("%s: no mbuf for tag\n", ifp->if_xname);
2180 				m_freem(m);
2181 				have_tag = vlan_tag = 0;
2182 				continue;
2183 			}
2184 		}
2185 		(*ifp->if_input)(ifp, m);
2186 	}
2187 
2188 	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2189 	if (stdcnt)
2190 		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2191 	if (jumbocnt)
2192 		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2193 }
2194 
2195 void
2196 bge_txeof(sc)
2197 	struct bge_softc *sc;
2198 {
2199 	struct bge_tx_bd *cur_tx = NULL;
2200 	struct ifnet *ifp;
2201 	struct txdmamap_pool_entry *dma;
2202 	bus_addr_t offset, toff;
2203 	bus_size_t tlen;
2204 	int tosync;
2205 	struct mbuf *m;
2206 
2207 	ifp = &sc->ethercom.ec_if;
2208 
2209 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2210 	    offsetof(struct bge_ring_data, bge_status_block),
2211 	    sizeof (struct bge_status_block),
2212 	    BUS_DMASYNC_POSTREAD);
2213 
2214 	offset = offsetof(struct bge_ring_data, bge_tx_ring);
2215 	tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
2216 	    sc->bge_tx_saved_considx;
2217 
2218 	toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
2219 
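	/*
	 * As in bge_rxeof(), a negative count means the hardware index
	 * wrapped; sync the tail of the ring here, then sync again
	 * from the ring base below.
	 */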
2220 	if (tosync < 0) {
2221 		tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
2222 		    sizeof (struct bge_tx_bd);
2223 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2224 		    toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2225 		tosync = -tosync;
2226 	}
2227 
2228 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2229 	    offset, tosync * sizeof (struct bge_tx_bd),
2230 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2231 
2232 	/*
2233 	 * Go through our tx ring and free mbufs for those
2234 	 * frames that have been sent.
2235 	 */
2236 	while (sc->bge_tx_saved_considx !=
2237 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
2238 		u_int32_t		idx = 0;
2239 
2240 		idx = sc->bge_tx_saved_considx;
2241 		cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2242 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2243 			ifp->if_opackets++;
2244 		m = sc->bge_cdata.bge_tx_chain[idx];
2245 		if (m != NULL) {
2246 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2247 			dma = sc->txdma[idx];
2248 			bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
2249 			    dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2250 			bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
2251 			SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
2252 			sc->txdma[idx] = NULL;
2253 
2254 			m_freem(m);
2255 		}
2256 		sc->bge_txcnt--;
2257 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2258 		ifp->if_timer = 0;
2259 	}
2260 
2261 	if (cur_tx != NULL)
2262 		ifp->if_flags &= ~IFF_OACTIVE;
2263 }
2264 
2265 int
2266 bge_intr(xsc)
2267 	void *xsc;
2268 {
2269 	struct bge_softc *sc;
2270 	struct ifnet *ifp;
2271 
2272 	sc = xsc;
2273 	ifp = &sc->ethercom.ec_if;
2274 
2275 #ifdef notdef
2276 	/* Avoid this for now -- checking this register is expensive. */
2277 	/* Make sure this is really our interrupt. */
2278 	if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2279 		return (0);
2280 #endif
2281 	/* Ack interrupt and stop others from occurring. */
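	/*
	 * Writing a non-zero value to the IRQ0 mailbox should keep the
	 * chip from raising further interrupts until the handler
	 * writes 0 back (done near the end below) to unmask them.
	 */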
2282 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2283 
2284 	/*
2285 	 * Process link state changes.
2286 	 * Grrr. The link status word in the status block does
2287 	 * not work correctly on the BCM5700 rev AX and BX chips,
2288 	 * according to all available information. Hence, we have
2289 	 * to enable MII interrupts in order to properly obtain
2290 	 * async link changes. Unfortunately, this also means that
2291 	 * we have to read the MAC status register to detect link
2292 	 * changes, thereby adding an additional register access to
2293 	 * the interrupt handler.
2294 	 */
2295 
2296 	if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) {
2297 		u_int32_t		status;
2298 
2299 		status = CSR_READ_4(sc, BGE_MAC_STS);
2300 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
2301 			sc->bge_link = 0;
2302 			callout_stop(&sc->bge_timeout);
2303 			bge_tick(sc);
2304 			/* Clear the interrupt */
2305 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2306 			    BGE_EVTENB_MI_INTERRUPT);
2307 			bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR);
2308 			bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR,
2309 			    BRGPHY_INTRS);
2310 		}
2311 	} else {
2312 		if (sc->bge_rdata->bge_status_block.bge_status &
2313 		    BGE_STATFLAG_LINKSTATE_CHANGED) {
2314 			sc->bge_link = 0;
2315 			callout_stop(&sc->bge_timeout);
2316 			bge_tick(sc);
2317 			/* Clear the interrupt */
2318 			CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2319 			    BGE_MACSTAT_CFG_CHANGED);
2320 		}
2321 	}
2322 
2323 	if (ifp->if_flags & IFF_RUNNING) {
2324 		/* Check RX return ring producer/consumer */
2325 		bge_rxeof(sc);
2326 
2327 		/* Check TX ring producer/consumer */
2328 		bge_txeof(sc);
2329 	}
2330 
2331 	bge_handle_events(sc);
2332 
2333 	/* Re-enable interrupts. */
2334 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2335 
2336 	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
2337 		bge_start(ifp);
2338 
2339 	return (1);
2340 }
2341 
2342 void
2343 bge_tick(xsc)
2344 	void *xsc;
2345 {
2346 	struct bge_softc *sc = xsc;
2347 	struct mii_data *mii = &sc->bge_mii;
2348 	struct ifmedia *ifm = NULL;
2349 	struct ifnet *ifp = &sc->ethercom.ec_if;
2350 	int s;
2351 
2352 	s = splnet();
2353 
2354 	bge_stats_update(sc);
2355 	callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
2356 	if (sc->bge_link) {
2357 		splx(s);
2358 		return;
2359 	}
2360 
2361 	if (sc->bge_tbi) {
2362 		ifm = &sc->bge_ifmedia;
2363 		if (CSR_READ_4(sc, BGE_MAC_STS) &
2364 		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
2365 			sc->bge_link++;
2366 			CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
2367 			printf("%s: gigabit link up\n", sc->bge_dev.dv_xname);
2368 			if (!IFQ_IS_EMPTY(&ifp->if_snd))
2369 				bge_start(ifp);
2370 		}
2371 		splx(s);
2372 		return;
2373 	}
2374 
2375 	mii_tick(mii);
2376 
2377 	if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
2378 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2379 		sc->bge_link++;
2380 		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
2381 		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
2382 			printf("%s: gigabit link up\n", sc->bge_dev.dv_xname);
2383 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
2384 			bge_start(ifp);
2385 	}
2386 
2387 	splx(s);
2388 }
2389 
2390 void
2391 bge_stats_update(sc)
2392 	struct bge_softc *sc;
2393 {
2394 	struct ifnet *ifp = &sc->ethercom.ec_if;
2395 	bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2396 
2397 #define READ_STAT(sc, stats, stat) \
2398 	  CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
2399 
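	/*
	 * The statistics block lives in NIC memory and is read back
	 * through the memory window, hence READ_STAT; only the low
	 * word of each 64-bit counter (bge_addr_lo) is read.  The
	 * subtraction makes if_collisions track the chip's running
	 * total instead of accumulating it twice.
	 */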
2400 	ifp->if_collisions +=
2401 	  (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) +
2402 	   READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) +
2403 	   READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) +
2404 	   READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) -
2405 	  ifp->if_collisions;
2406 
2407 #undef READ_STAT
2408 
2409 #ifdef notdef
2410 	ifp->if_collisions +=
2411 	   (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2412 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2413 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2414 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2415 	   ifp->if_collisions;
2416 #endif
2417 }
2418 
2419 /*
2420  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2421  * pointers to descriptors.
2422  */
2423 int
2424 bge_encap(sc, m_head, txidx)
2425 	struct bge_softc *sc;
2426 	struct mbuf *m_head;
2427 	u_int32_t *txidx;
2428 {
2429 	struct bge_tx_bd	*f = NULL;
2430 	u_int32_t		frag, cur, cnt = 0;
2431 	u_int16_t		csum_flags = 0;
2432 	struct txdmamap_pool_entry *dma;
2433 	bus_dmamap_t dmamap;
2434 	int			i = 0;
2435 	struct mbuf		*n;
2436 
2437 	cur = frag = *txidx;
2438 
2439 	if (m_head->m_pkthdr.csum_flags) {
2440 		if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
2441 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2442 		if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
2443 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2444 	}
2445 
2446 	dma = SLIST_FIRST(&sc->txdma_list);
2447 	if (dma == NULL)
2448 		return ENOBUFS;
2449 	dmamap = dma->dmamap;
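	/*
	 * txdma_list is a free list of preallocated transmit DMA maps;
	 * an empty list means every map is attached to an in-flight
	 * packet and the caller must back off.
	 */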
2450 
2451 	/*
2452 	 * Start packing the mbufs in this chain into
2453 	 * the fragment pointers. Stop when we run out
2454 	 * of fragments or hit the end of the mbuf chain.
2455 	 */
2456 	if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
2457 	    BUS_DMA_NOWAIT))
2458 		return(ENOBUFS);
2459 
2460 	n = sc->ethercom.ec_nvlans ?
2461 	    m_aux_find(m_head, AF_LINK, ETHERTYPE_VLAN) : NULL;
2462 
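	/*
	 * If any vlans are configured, look for the 802.1Q tag that
	 * vlan(4) stashes in an AF_LINK/ETHERTYPE_VLAN aux mbuf; a
	 * non-NULL result is copied into each descriptor below.
	 */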
2463 	for (i = 0; i < dmamap->dm_nsegs; i++) {
2464 		f = &sc->bge_rdata->bge_tx_ring[frag];
2465 		if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
2466 			break;
2467 		bge_set_hostaddr(&f->bge_addr, dmamap->dm_segs[i].ds_addr);
2468 		f->bge_len = dmamap->dm_segs[i].ds_len;
2469 		f->bge_flags = csum_flags;
2470 
2471 		if (n != NULL) {
2472 			f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2473 			f->bge_vlan_tag = *mtod(n, int *);
2474 		} else {
2475 			f->bge_vlan_tag = 0;
2476 		}
2477 		/*
2478 		 * Sanity check: avoid coming within 16 descriptors
2479 		 * of the end of the ring.
2480 		 */
2481 		if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16) {
			/* Unwind the DMA map load before bailing. */
			bus_dmamap_unload(sc->bge_dmatag, dmamap);
			return(ENOBUFS);
		}
2483 		cur = frag;
2484 		BGE_INC(frag, BGE_TX_RING_CNT);
2485 		cnt++;
2486 	}
2487 
2488 	if (i < dmamap->dm_nsegs) {
		/* Ran out of free descriptors: unwind the DMA map load. */
		bus_dmamap_unload(sc->bge_dmatag, dmamap);
		return ENOBUFS;
	}
2490 
2491 	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
2492 	    BUS_DMASYNC_PREWRITE);
2493 
2494 	if (frag == sc->bge_tx_saved_considx) {
		/* Ring would wrap onto unreclaimed entries: unwind. */
		bus_dmamap_unload(sc->bge_dmatag, dmamap);
		return(ENOBUFS);
	}
2496 
2497 	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
2498 	sc->bge_cdata.bge_tx_chain[cur] = m_head;
2499 	SLIST_REMOVE_HEAD(&sc->txdma_list, link);
2500 	sc->txdma[cur] = dma;
2501 	sc->bge_txcnt += cnt;
2502 
2503 	*txidx = frag;
2504 
2505 	return(0);
2506 }
2507 
2508 /*
2509  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2510  * to the mbuf data regions directly in the transmit descriptors.
2511  */
2512 void
2513 bge_start(ifp)
2514 	struct ifnet *ifp;
2515 {
2516 	struct bge_softc *sc;
2517 	struct mbuf *m_head = NULL;
2518 	u_int32_t prodidx = 0;
2519 	int pkts = 0;
2520 
2521 	sc = ifp->if_softc;
2522 
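	/*
	 * While the link is down, hold off transmitting unless the
	 * send queue has built up a backlog of 10 or more frames.
	 */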
2523 	if (!sc->bge_link && ifp->if_snd.ifq_len < 10)
2524 		return;
2525 
2526 	prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
2527 
2528 	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
2529 		IFQ_POLL(&ifp->if_snd, m_head);
2530 		if (m_head == NULL)
2531 			break;
2532 
2533 #if 0
2534 		/*
2535 		 * XXX
2536 		 * safety overkill.  If this is a fragmented packet chain
2537 		 * with delayed TCP/UDP checksums, then only encapsulate
2538 		 * it if we have enough descriptors to handle the entire
2539 		 * chain at once.
2540 		 * (paranoia -- may not actually be needed)
2541 		 */
2542 		if (m_head->m_flags & M_FIRSTFRAG &&
2543 		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
2544 			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2545 			    m_head->m_pkthdr.csum_data + 16) {
2546 				ifp->if_flags |= IFF_OACTIVE;
2547 				break;
2548 			}
2549 		}
2550 #endif
2551 
2552 		/*
2553 		 * Pack the data into the transmit ring. If we
2554 		 * don't have room, set the OACTIVE flag and wait
2555 		 * for the NIC to drain the ring.
2556 		 */
2557 		if (bge_encap(sc, m_head, &prodidx)) {
2558 			ifp->if_flags |= IFF_OACTIVE;
2559 			break;
2560 		}
2561 
2562 		/* now we are committed to transmit the packet */
2563 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
2564 		pkts++;
2565 
2566 #if NBPFILTER > 0
2567 		/*
2568 		 * If there's a BPF listener, bounce a copy of this frame
2569 		 * to him.
2570 		 */
2571 		if (ifp->if_bpf)
2572 			bpf_mtap(ifp->if_bpf, m_head);
2573 #endif
2574 	}
2575 	if (pkts == 0)
2576 		return;
2577 
2578 	/* Transmit */
2579 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2580 
2581 	/*
2582 	 * Set a timeout in case the chip goes out to lunch.
2583 	 */
2584 	ifp->if_timer = 5;
2585 }
2586 
2587 int
2588 bge_init(ifp)
2589 	struct ifnet *ifp;
2590 {
2591 	struct bge_softc *sc = ifp->if_softc;
2592 	u_int16_t *m;
2593 	int s, error;
2594 
2595 	s = splnet();
2596 
2597 	ifp = &sc->ethercom.ec_if;
2598 
2599 	/* Cancel pending I/O and flush buffers. */
2600 	bge_stop(sc);
2601 	bge_reset(sc);
2602 	bge_chipinit(sc);
2603 
2604 	/*
2605 	 * Init the various state machines, ring
2606 	 * control blocks and firmware.
2607 	 */
2608 	error = bge_blockinit(sc);
2609 	if (error != 0) {
2610 		printf("%s: initialization error %d\n", sc->bge_dev.dv_xname,
2611 		    error);
2612 		splx(s);
2613 		return error;
2614 	}
2615 
2618 	/* Specify MTU. */
2619 	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2620 	    ETHER_HDR_LEN + ETHER_CRC_LEN);
2621 
2622 	/* Load our MAC address. */
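	/*
	 * The station address is written as three 16-bit words, each
	 * converted with htons() to the byte order the MAC address
	 * registers appear to expect.
	 */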
2623 	m = (u_int16_t *)&(LLADDR(ifp->if_sadl)[0]);
2624 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2625 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
2626 
2627 	/* Enable or disable promiscuous mode as needed. */
2628 	if (ifp->if_flags & IFF_PROMISC) {
2629 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
2630 	} else {
2631 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
2632 	}
2633 
2634 	/* Program multicast filter. */
2635 	bge_setmulti(sc);
2636 
2637 	/* Init RX ring. */
2638 	bge_init_rx_ring_std(sc);
2639 
2640 	/* Init jumbo RX ring. */
2641 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2642 		bge_init_rx_ring_jumbo(sc);
2643 
2644 	/* Init our RX return ring index */
2645 	sc->bge_rx_saved_considx = 0;
2646 
2647 	/* Init TX ring. */
2648 	bge_init_tx_ring(sc);
2649 
2650 	/* Turn on transmitter */
2651 	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
2652 
2653 	/* Turn on receiver */
2654 	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2655 
2656 	/* Tell firmware we're alive. */
2657 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2658 
2659 	/* Enable host interrupts. */
2660 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
2661 	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2662 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2663 
2664 	bge_ifmedia_upd(ifp);
2665 
2666 	ifp->if_flags |= IFF_RUNNING;
2667 	ifp->if_flags &= ~IFF_OACTIVE;
2668 
2669 	splx(s);
2670 
2671 	callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
2672 
2673 	return 0;
2674 }
2675 
2676 /*
2677  * Set media options.
2678  */
2679 int
2680 bge_ifmedia_upd(ifp)
2681 	struct ifnet *ifp;
2682 {
2683 	struct bge_softc *sc = ifp->if_softc;
2684 	struct mii_data *mii = &sc->bge_mii;
2685 	struct ifmedia *ifm = &sc->bge_ifmedia;
2686 
2687 	/* If this is a 1000baseX NIC, enable the TBI port. */
2688 	if (sc->bge_tbi) {
2689 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2690 			return(EINVAL);
2691 		switch (IFM_SUBTYPE(ifm->ifm_media)) {
2692 		case IFM_AUTO:
2693 			break;
2694 		case IFM_1000_SX:
2695 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
2696 				BGE_CLRBIT(sc, BGE_MAC_MODE,
2697 				    BGE_MACMODE_HALF_DUPLEX);
2698 			} else {
2699 				BGE_SETBIT(sc, BGE_MAC_MODE,
2700 				    BGE_MACMODE_HALF_DUPLEX);
2701 			}
2702 			break;
2703 		default:
2704 			return(EINVAL);
2705 		}
2706 		return(0);
2707 	}
2708 
2709 	sc->bge_link = 0;
2710 	mii_mediachg(mii);
2711 
2712 	return(0);
2713 }
2714 
2715 /*
2716  * Report current media status.
2717  */
2718 void
2719 bge_ifmedia_sts(ifp, ifmr)
2720 	struct ifnet *ifp;
2721 	struct ifmediareq *ifmr;
2722 {
2723 	struct bge_softc *sc = ifp->if_softc;
2724 	struct mii_data *mii = &sc->bge_mii;
2725 
2726 	if (sc->bge_tbi) {
2727 		ifmr->ifm_status = IFM_AVALID;
2728 		ifmr->ifm_active = IFM_ETHER;
2729 		if (CSR_READ_4(sc, BGE_MAC_STS) &
2730 		    BGE_MACSTAT_TBI_PCS_SYNCHED)
2731 			ifmr->ifm_status |= IFM_ACTIVE;
2732 		ifmr->ifm_active |= IFM_1000_SX;
2733 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
2734 			ifmr->ifm_active |= IFM_HDX;
2735 		else
2736 			ifmr->ifm_active |= IFM_FDX;
2737 		return;
2738 	}
2739 
2740 	mii_pollstat(mii);
2741 	ifmr->ifm_active = mii->mii_media_active;
2742 	ifmr->ifm_status = mii->mii_media_status;
2743 }
2744 
2745 int
2746 bge_ioctl(ifp, command, data)
2747 	struct ifnet *ifp;
2748 	u_long command;
2749 	caddr_t data;
2750 {
2751 	struct bge_softc *sc = ifp->if_softc;
2752 	struct ifreq *ifr = (struct ifreq *) data;
2753 	int s, error = 0;
2754 	struct mii_data *mii;
2755 
2756 	s = splnet();
2757 
2758 	switch (command) {
2759 	case SIOCSIFFLAGS:
2760 		if (ifp->if_flags & IFF_UP) {
2761 			/*
2762 			 * If only the state of the PROMISC flag changed,
2763 			 * then just use the 'set promisc mode' command
2764 			 * instead of reinitializing the entire NIC. Doing
2765 			 * a full re-init means reloading the firmware and
2766 			 * waiting for it to start up, which may take a
2767 			 * second or two.
2768 			 */
2769 			if (ifp->if_flags & IFF_RUNNING &&
2770 			    ifp->if_flags & IFF_PROMISC &&
2771 			    !(sc->bge_if_flags & IFF_PROMISC)) {
2772 				BGE_SETBIT(sc, BGE_RX_MODE,
2773 				    BGE_RXMODE_RX_PROMISC);
2774 			} else if (ifp->if_flags & IFF_RUNNING &&
2775 			    !(ifp->if_flags & IFF_PROMISC) &&
2776 			    sc->bge_if_flags & IFF_PROMISC) {
2777 				BGE_CLRBIT(sc, BGE_RX_MODE,
2778 				    BGE_RXMODE_RX_PROMISC);
2779 			} else
2780 				bge_init(ifp);
2781 		} else {
2782 			if (ifp->if_flags & IFF_RUNNING) {
2783 				bge_stop(sc);
2784 			}
2785 		}
2786 		sc->bge_if_flags = ifp->if_flags;
2787 		error = 0;
2788 		break;
2789 	case SIOCSIFMEDIA:
2790 	case SIOCGIFMEDIA:
2791 		if (sc->bge_tbi) {
2792 			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
2793 			    command);
2794 		} else {
2795 			mii = &sc->bge_mii;
2796 			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
2797 			    command);
2798 		}
2800 		break;
2801 	default:
2802 		error = ether_ioctl(ifp, command, data);
2803 		if (error == ENETRESET) {
2804 			bge_setmulti(sc);
2805 			error = 0;
2806 		}
2807 		break;
2808 	}
2809 
2810 	splx(s);
2811 
2812 	return(error);
2813 }
2814 
2815 void
2816 bge_watchdog(ifp)
2817 	struct ifnet *ifp;
2818 {
2819 	struct bge_softc *sc;
2820 
2821 	sc = ifp->if_softc;
2822 
2823 	printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);
2824 
2825 	ifp->if_flags &= ~IFF_RUNNING;
2826 	bge_init(ifp);
2827 
2828 	ifp->if_oerrors++;
2829 }
2830 
2831 static void
2832 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
2833 {
2834 	int i;
2835 
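	/*
	 * Clear the block's enable bit, then poll until the hardware
	 * reads the bit back as zero, i.e. the block has gone idle.
	 */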
2836 	BGE_CLRBIT(sc, reg, bit);
2837 
2838 	for (i = 0; i < BGE_TIMEOUT; i++) {
2839 		if ((CSR_READ_4(sc, reg) & bit) == 0)
2840 			return;
2841 		delay(100);
2842 	}
2843 
2844 	printf("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
2845 	    sc->bge_dev.dv_xname, (u_long) reg, bit);
2846 }
2847 
2848 /*
2849  * Stop the adapter and free any mbufs allocated to the
2850  * RX and TX lists.
2851  */
2852 void
2853 bge_stop(sc)
2854 	struct bge_softc *sc;
2855 {
2856 	struct ifnet *ifp = &sc->ethercom.ec_if;
2857 
2858 	callout_stop(&sc->bge_timeout);
2859 
2860 	/*
2861 	 * Disable all of the receiver blocks
2862 	 */
2863 	bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2864 	bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2865 	bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2866 	bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2867 	bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
2868 	bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2869 	bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
2870 
2871 	/*
2872 	 * Disable all of the transmit blocks
2873 	 */
2874 	bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2875 	bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2876 	bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2877 	bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
2878 	bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
2879 	bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2880 	bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2881 
2882 	/*
2883 	 * Shut down all of the memory managers and related
2884 	 * state machines.
2885 	 */
2886 	bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
2887 	bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
2888 	bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2889 
2890 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
2891 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
2892 
2893 	bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
2894 	bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2895 
2896 	/* Disable host interrupts. */
2897 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2898 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2899 
2900 	/*
2901 	 * Tell firmware we're shutting down.
2902 	 */
2903 	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2904 
2905 	/* Free the RX lists. */
2906 	bge_free_rx_ring_std(sc);
2907 
2908 	/* Free jumbo RX list. */
2909 	bge_free_rx_ring_jumbo(sc);
2910 
2911 	/* Free TX buffers. */
2912 	bge_free_tx_ring(sc);
2913 
2914 	/*
2915 	 * Isolate/power down the PHY.
2916 	 */
2917 	if (!sc->bge_tbi)
2918 		mii_down(&sc->bge_mii);
2919 
2920 	sc->bge_link = 0;
2921 
2922 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
2923 
2924 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2925 }
2926 
2927 /*
2928  * Stop all chip I/O so that the kernel's probe routines don't
2929  * get confused by errant DMAs when rebooting.
2930  */
2931 void
2932 bge_shutdown(xsc)
2933 	void *xsc;
2934 {
2935 	struct bge_softc *sc = (struct bge_softc *)xsc;
2936 
2937 	bge_stop(sc);
2938 	bge_reset(sc);
2939 }
2940