xref: /dflybsd-src/sys/dev/netif/bge/if_bge.c (revision 78195a764d5e70464a6d4f49bc08332a2a8bb4d0)
1 /*
2  * Copyright (c) 2001 Wind River Systems
3  * Copyright (c) 1997, 1998, 1999, 2001
4  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.29 2003/12/01 21:06:59 ambrisko Exp $
34  * $DragonFly: src/sys/dev/netif/bge/if_bge.c,v 1.50 2005/11/28 17:13:41 dillon Exp $
35  *
36  */
37 
38 /*
39  * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
40  *
41  * Written by Bill Paul <wpaul@windriver.com>
42  * Senior Engineer, Wind River Systems
43  */
44 
45 /*
46  * The Broadcom BCM5700 is based on technology originally developed by
47  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
48  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
49  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
50  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
51  * frames, highly configurable RX filtering, and 16 RX and TX queues
52  * (which, along with RX filter rules, can be used for QOS applications).
53  * Other features, such as TCP segmentation, may be available as part
54  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
55  * firmware images can be stored in hardware and need not be compiled
56  * into the driver.
57  *
58  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
59  * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
60  *
61  * The BCM5701 is a single-chip solution incorporating both the BCM5700
62  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
63  * does not support external SSRAM.
64  *
65  * Broadcom also produces a variation of the BCM5700 under the "Altima"
66  * brand name, which is functionally similar but lacks PCI-X support.
67  *
68  * Without external SSRAM, you can only have at most 4 TX rings,
69  * and the use of the mini RX ring is disabled. This seems to imply
70  * that these features are simply not available on the BCM5701. As a
71  * result, this driver does not implement any support for the mini RX
72  * ring.
73  */
74 
75 #include "opt_bge.h"
76 
77 #include <sys/param.h>
78 #include <sys/systm.h>
79 #include <sys/sockio.h>
80 #include <sys/mbuf.h>
81 #include <sys/malloc.h>
82 #include <sys/kernel.h>
83 #include <sys/socket.h>
84 #include <sys/queue.h>
85 #include <sys/thread2.h>
86 
87 #include <net/if.h>
88 #include <net/ifq_var.h>
89 #include <net/if_arp.h>
90 #include <net/ethernet.h>
91 #include <net/if_dl.h>
92 #include <net/if_media.h>
93 
94 #include <net/bpf.h>
95 
96 #include <net/if_types.h>
97 #include <net/vlan/if_vlan_var.h>
98 
99 #include <netinet/in_systm.h>
100 #include <netinet/in.h>
101 #include <netinet/ip.h>
102 
103 #include <vm/vm.h>              /* for vtophys */
104 #include <vm/pmap.h>            /* for vtophys */
105 #include <machine/resource.h>
106 #include <sys/bus.h>
107 #include <sys/rman.h>
108 
109 #include <dev/netif/mii_layer/mii.h>
110 #include <dev/netif/mii_layer/miivar.h>
111 #include <dev/netif/mii_layer/miidevs.h>
112 #include <dev/netif/mii_layer/brgphyreg.h>
113 
114 #include <bus/pci/pcidevs.h>
115 #include <bus/pci/pcireg.h>
116 #include <bus/pci/pcivar.h>
117 
118 #include "if_bgereg.h"
119 
120 #define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
121 
122 /* "controller miibus0" required.  See GENERIC if you get errors here. */
123 #include "miibus_if.h"
124 
125 /*
126  * Various supported device vendors/types and their names. Note: the
127  * spec seems to indicate that the hardware still has Alteon's vendor
128  * ID burned into it, though it will always be overridden by the vendor
129  * ID in the EEPROM. Just to be safe, we cover all possibilities.
130  */
131 #define BGE_DEVDESC_MAX		64	/* Maximum device description length */
132 
133 static struct bge_type bge_devs[] = {
134 	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
135 		"Alteon BCM5700 Gigabit Ethernet" },
136 	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
137 		"Alteon BCM5701 Gigabit Ethernet" },
138 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
139 		"Broadcom BCM5700 Gigabit Ethernet" },
140 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
141 		"Broadcom BCM5701 Gigabit Ethernet" },
142 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
143 		"Broadcom BCM5702X Gigabit Ethernet" },
144 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT,
145 		"Broadcom BCM5702 Gigabit Ethernet" },
146 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
147 		"Broadcom BCM5703X Gigabit Ethernet" },
148 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3,
149 		"Broadcom BCM5703 Gigabit Ethernet" },
150 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
151 		"Broadcom BCM5704C Dual Gigabit Ethernet" },
152 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
153 		"Broadcom BCM5704S Dual Gigabit Ethernet" },
154 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
155 		"Broadcom BCM5705 Gigabit Ethernet" },
156 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K,
157 		"Broadcom BCM5705K Gigabit Ethernet" },
158 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
159 		"Broadcom BCM5705M Gigabit Ethernet" },
160 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
161 		"Broadcom BCM5705M Gigabit Ethernet" },
162 	{ PCI_VENDOR_BROADCOM, BCOM_DEVICEID_BCM5714C,
163 		"Broadcom BCM5714C Gigabit Ethernet" },
164 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721,
165 		"Broadcom BCM5721 Gigabit Ethernet" },
166 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750,
167 		"Broadcom BCM5750 Gigabit Ethernet" },
168 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M,
169 		"Broadcom BCM5750M Gigabit Ethernet" },
170 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751,
171 		"Broadcom BCM5751 Gigabit Ethernet" },
172 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M,
173 		"Broadcom BCM5751M Gigabit Ethernet" },
174 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
175 		"Broadcom BCM5782 Gigabit Ethernet" },
176 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788,
177 		"Broadcom BCM5788 Gigabit Ethernet" },
178 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789,
179 		"Broadcom BCM5789 Gigabit Ethernet" },
180 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
181 		"Broadcom BCM5901 Fast Ethernet" },
182 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
183 		"Broadcom BCM5901A2 Fast Ethernet" },
184 	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
185 		"SysKonnect Gigabit Ethernet" },
186 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
187 		"Altima AC1000 Gigabit Ethernet" },
188 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
189 		"Altima AC1002 Gigabit Ethernet" },
190 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
191 		"Altima AC9100 Gigabit Ethernet" },
192 	{ 0, 0, NULL }
193 };
194 
195 static int	bge_probe(device_t);
196 static int	bge_attach(device_t);
197 static int	bge_detach(device_t);
198 static void	bge_release_resources(struct bge_softc *);
199 static void	bge_txeof(struct bge_softc *);
200 static void	bge_rxeof(struct bge_softc *);
201 
202 static void	bge_tick(void *);
203 static void	bge_tick_serialized(void *);
204 static void	bge_stats_update(struct bge_softc *);
205 static void	bge_stats_update_regs(struct bge_softc *);
206 static int	bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);
207 
208 static void	bge_intr(void *);
209 static void	bge_start(struct ifnet *);
210 static int	bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
211 static void	bge_init(void *);
212 static void	bge_stop(struct bge_softc *);
213 static void	bge_watchdog(struct ifnet *);
214 static void	bge_shutdown(device_t);
215 static int	bge_ifmedia_upd(struct ifnet *);
216 static void	bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
217 
218 static uint8_t	bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
219 static int	bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);
220 
221 static void	bge_setmulti(struct bge_softc *);
222 
223 static void	bge_handle_events(struct bge_softc *);
224 static int	bge_alloc_jumbo_mem(struct bge_softc *);
225 static void	bge_free_jumbo_mem(struct bge_softc *);
226 static struct bge_jslot
227 		*bge_jalloc(struct bge_softc *);
228 static void	bge_jfree(void *);
229 static void	bge_jref(void *);
230 static int	bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
231 static int	bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
232 static int	bge_init_rx_ring_std(struct bge_softc *);
233 static void	bge_free_rx_ring_std(struct bge_softc *);
234 static int	bge_init_rx_ring_jumbo(struct bge_softc *);
235 static void	bge_free_rx_ring_jumbo(struct bge_softc *);
236 static void	bge_free_tx_ring(struct bge_softc *);
237 static int	bge_init_tx_ring(struct bge_softc *);
238 
239 static int	bge_chipinit(struct bge_softc *);
240 static int	bge_blockinit(struct bge_softc *);
241 
242 #ifdef notdef
243 static uint8_t	bge_vpd_readbyte(struct bge_softc *, uint32_t);
244 static void	bge_vpd_read_res(struct bge_softc *, struct vpd_res *, uint32_t);
245 static void	bge_vpd_read(struct bge_softc *);
246 #endif
247 
248 static uint32_t	bge_readmem_ind(struct bge_softc *, uint32_t);
249 static void	bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);
250 #ifdef notdef
251 static uint32_t	bge_readreg_ind(struct bge_softc *, uint32_t);
252 #endif
253 static void	bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);
254 
255 static int	bge_miibus_readreg(device_t, int, int);
256 static int	bge_miibus_writereg(device_t, int, int, int);
257 static void	bge_miibus_statchg(device_t);
258 
259 static void	bge_reset(struct bge_softc *);
260 
261 static device_method_t bge_methods[] = {
262 	/* Device interface */
263 	DEVMETHOD(device_probe,		bge_probe),
264 	DEVMETHOD(device_attach,	bge_attach),
265 	DEVMETHOD(device_detach,	bge_detach),
266 	DEVMETHOD(device_shutdown,	bge_shutdown),
267 
268 	/* bus interface */
269 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
270 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
271 
272 	/* MII interface */
273 	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
274 	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
275 	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),
276 
277 	{ 0, 0 }
278 };
279 
280 static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
281 static devclass_t bge_devclass;
282 
283 DECLARE_DUMMY_MODULE(if_bge);
284 DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0);
285 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
286 
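/*
 * Indirect memory access.  The chip maps a window of its internal
 * memory into PCI space: writing an offset to BGE_PCI_MEMWIN_BASEADDR
 * positions the window, and the data then appears at
 * BGE_PCI_MEMWIN_DATA.  This lets us reach NIC-local memory with
 * nothing but PCI config cycles, even before the BAR0 mapping is
 * usable.
 */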
287 static uint32_t
288 bge_readmem_ind(struct bge_softc *sc, uint32_t off)
289 {
290 	device_t dev = sc->bge_dev;
291 
292 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
293 	return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
294 }
295 
296 static void
297 bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
298 {
299 	device_t dev = sc->bge_dev;
300 
301 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
302 	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
303 }
304 
305 #ifdef notdef
306 static uint32_t
307 bge_readreg_ind(struct bge_softc *sc, uint32_t off)
308 {
309 	device_t dev = sc->bge_dev;
310 
311 	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
312 	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
313 }
314 #endif
315 
316 static void
317 bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
318 {
319 	device_t dev = sc->bge_dev;
320 
321 	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
322 	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
323 }
324 
325 #ifdef notdef
326 static uint8_t
327 bge_vpd_readbyte(struct bge_softc *sc, uint32_t addr)
328 {
329 	device_t dev = sc->bge_dev;
330 	uint32_t val;
331 	int i;
332 
333 	pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
334 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
335 		DELAY(10);
336 		if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
337 			break;
338 	}
339 
340 	if (i == BGE_TIMEOUT * 10) {
341 		device_printf(sc->bge_dev, "VPD read timed out\n");
342 		return(0);
343 	}
344 
345 	val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);
346 
347 	return((val >> ((addr % 4) * 8)) & 0xFF);
348 }
349 
350 static void
351 bge_vpd_read_res(struct bge_softc *sc, struct vpd_res *res, uint32_t addr)
352 {
353 	size_t i;
354 	uint8_t *ptr;
355 
356 	ptr = (uint8_t *)res;
357 	for (i = 0; i < sizeof(struct vpd_res); i++)
358 		ptr[i] = bge_vpd_readbyte(sc, i + addr);
359 
360 	return;
361 }
362 
363 static void
364 bge_vpd_read(struct bge_softc *sc)
365 {
366 	int pos = 0, i;
367 	struct vpd_res res;
368 
369 	if (sc->bge_vpd_prodname != NULL)
370 		free(sc->bge_vpd_prodname, M_DEVBUF);
371 	if (sc->bge_vpd_readonly != NULL)
372 		free(sc->bge_vpd_readonly, M_DEVBUF);
373 	sc->bge_vpd_prodname = NULL;
374 	sc->bge_vpd_readonly = NULL;
375 
376 	bge_vpd_read_res(sc, &res, pos);
377 
378 	if (res.vr_id != VPD_RES_ID) {
379 		device_printf(sc->bge_dev,
380 			      "bad VPD resource id: expected %x got %x\n",
381 			      VPD_RES_ID, res.vr_id);
382 		return;
383 	}
384 
385 	pos += sizeof(res);
386 	sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_INTWAIT);
387 	for (i = 0; i < res.vr_len; i++)
388 		sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
389 	sc->bge_vpd_prodname[i] = '\0';
390 	pos += i;
391 
392 	bge_vpd_read_res(sc, &res, pos);
393 
394 	if (res.vr_id != VPD_RES_READ) {
395 		device_printf(sc->bge_dev,
396 			      "bad VPD resource id: expected %x got %x\n",
397 			      VPD_RES_READ, res.vr_id);
398 		return;
399 	}
400 
401 	pos += sizeof(res);
402 	sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_INTWAIT);
403 	for (i = 0; i < res.vr_len; i++)
404 		sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
405 }
406 #endif
407 
408 /*
409  * Read a byte of data stored in the EEPROM at address 'addr.' The
410  * BCM570x supports both the traditional bitbang interface and an
411  * auto access interface for reading the EEPROM. We use the auto
412  * access method.
413  */
414 static uint8_t
415 bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
416 {
417 	int i;
418 	uint32_t byte = 0;
419 
420 	/*
421 	 * Enable use of auto EEPROM access so we can avoid
422 	 * having to use the bitbang method.
423 	 */
424 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
425 
426 	/* Reset the EEPROM, load the clock period. */
427 	CSR_WRITE_4(sc, BGE_EE_ADDR,
428 	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
429 	DELAY(20);
430 
431 	/* Issue the read EEPROM command. */
432 	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
433 
434 	/* Wait for completion */
435 	for(i = 0; i < BGE_TIMEOUT * 10; i++) {
436 		DELAY(10);
437 		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
438 			break;
439 	}
440 
441 	if (i == BGE_TIMEOUT * 10) {
442 		if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
443 		return(1);
444 	}
445 
446 	/* Get result. */
447 	byte = CSR_READ_4(sc, BGE_EE_DATA);
448 
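	/*
	 * The controller returns a whole 32-bit word; shift right to
	 * select the byte lane corresponding to addr % 4.
	 */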
449 	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;
450 
451 	return(0);
452 }
453 
454 /*
455  * Read a sequence of bytes from the EEPROM.
456  */
457 static int
458 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
459 {
460 	size_t i;
461 	int err;
462 	uint8_t byte;
463 
464 	for (byte = 0, err = 0, i = 0; i < len; i++) {
465 		err = bge_eeprom_getbyte(sc, off + i, &byte);
466 		if (err)
467 			break;
468 		*(dest + i) = byte;
469 	}
470 
471 	return(err ? 1 : 0);
472 }
473 
474 static int
475 bge_miibus_readreg(device_t dev, int phy, int reg)
476 {
477 	struct bge_softc *sc;
478 	struct ifnet *ifp;
479 	uint32_t val, autopoll;
480 	int i;
481 
482 	sc = device_get_softc(dev);
483 	ifp = &sc->arpcom.ac_if;
484 
485 	/*
486 	 * Broadcom's own driver always assumes the internal
487 	 * PHY is at GMII address 1. On some chips, the PHY responds
488 	 * to accesses at all addresses, which could cause us to
489 	 * bogusly attach the PHY 32 times at probe time. Always
490 	 * restricting the lookup to address 1 is simpler than
491 	 * trying to figure out which chips revisions should be
492 	 * special-cased.
493 	 */
494 	if (phy != 1)
495 		return(0);
496 
497 	/* Reading with autopolling on may trigger PCI errors */
498 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
499 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
500 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
501 		DELAY(40);
502 	}
503 
504 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
505 	    BGE_MIPHY(phy)|BGE_MIREG(reg));
506 
507 	for (i = 0; i < BGE_TIMEOUT; i++) {
508 		val = CSR_READ_4(sc, BGE_MI_COMM);
509 		if (!(val & BGE_MICOMM_BUSY))
510 			break;
511 	}
512 
513 	if (i == BGE_TIMEOUT) {
514 		if_printf(ifp, "PHY read timed out\n");
515 		val = 0;
516 		goto done;
517 	}
518 
519 	val = CSR_READ_4(sc, BGE_MI_COMM);
520 
521 done:
522 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
523 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
524 		DELAY(40);
525 	}
526 
527 	if (val & BGE_MICOMM_READFAIL)
528 		return(0);
529 
530 	return(val & 0xFFFF);
531 }
532 
533 static int
534 bge_miibus_writereg(device_t dev, int phy, int reg, int val)
535 {
536 	struct bge_softc *sc;
537 	uint32_t autopoll;
538 	int i;
539 
540 	sc = device_get_softc(dev);
541 
542 	/* Writing with autopolling on may trigger PCI errors */
543 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
544 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
545 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
546 		DELAY(40);
547 	}
548 
549 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
550 	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
551 
552 	for (i = 0; i < BGE_TIMEOUT; i++) {
553 		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
554 			break;
555 	}
556 
557 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
558 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
559 		DELAY(40);
560 	}
561 
562 	if (i == BGE_TIMEOUT) {
563 		if_printf(&sc->arpcom.ac_if, "PHY read timed out\n");
564 		return(0);
565 	}
566 
567 	return(0);
568 }
569 
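/*
 * Link state change callback, invoked by the MII layer.  Mirror the
 * PHY's negotiated speed and duplex into the MAC mode register so the
 * MAC's port mode (GMII for gigabit, MII otherwise) and duplex
 * setting stay in step with the PHY.
 */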
570 static void
571 bge_miibus_statchg(device_t dev)
572 {
573 	struct bge_softc *sc;
574 	struct mii_data *mii;
575 
576 	sc = device_get_softc(dev);
577 	mii = device_get_softc(sc->bge_miibus);
578 
579 	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
580 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
581 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
582 	} else {
583 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
584 	}
585 
586 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
587 		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
588 	} else {
589 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
590 	}
591 }
592 
593 /*
594  * Handle events that have triggered interrupts.
595  */
596 static void
597 bge_handle_events(struct bge_softc *sc)
598 {
599 }
600 
601 /*
602  * Memory management for jumbo frames.
603  */
604 static int
605 bge_alloc_jumbo_mem(struct bge_softc *sc)
606 {
607 	struct bge_jslot *entry;
608 	caddr_t ptr;
609 	int i;
610 
611 	/* Grab a big chunk o' storage. */
612 	sc->bge_cdata.bge_jumbo_buf = contigmalloc(BGE_JMEM, M_DEVBUF,
613 		M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0);
614 
615 	if (sc->bge_cdata.bge_jumbo_buf == NULL) {
616 		if_printf(&sc->arpcom.ac_if, "no memory for jumbo buffers!\n");
617 		return(ENOBUFS);
618 	}
619 
620 	SLIST_INIT(&sc->bge_jfree_listhead);
621 
622 	/*
623 	 * Now divide it up into 9K pieces and save the addresses
624 	 * in an array. Note that we play an evil trick here by using
625 	 * the first few bytes in the buffer to hold the address
626 	 * of the softc structure for this interface. This is because
627 	 * bge_jfree() needs it, but it is called by the mbuf management
628 	 * code which will not pass it to us explicitly.
629 	 */
630 	ptr = sc->bge_cdata.bge_jumbo_buf;
631 	for (i = 0; i < BGE_JSLOTS; i++) {
632 		entry = &sc->bge_cdata.bge_jslots[i];
633 		entry->bge_sc = sc;
634 		entry->bge_buf = ptr;
635 		entry->bge_inuse = 0;
636 		entry->bge_slot = i;
637 		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);
638 		ptr += BGE_JLEN;
639 	}
640 
641 	return(0);
642 }
643 
644 static void
645 bge_free_jumbo_mem(struct bge_softc *sc)
646 {
647 	if (sc->bge_cdata.bge_jumbo_buf)
648 		contigfree(sc->bge_cdata.bge_jumbo_buf, BGE_JMEM, M_DEVBUF);
649 }
650 
651 /*
652  * Allocate a jumbo buffer.
653  */
654 static struct bge_jslot *
655 bge_jalloc(struct bge_softc *sc)
656 {
657 	struct bge_jslot *entry;
658 
659 	entry = SLIST_FIRST(&sc->bge_jfree_listhead);
660 
661 	if (entry == NULL) {
662 		if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
663 		return(NULL);
664 	}
665 
666 	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link);
667 	entry->bge_inuse = 1;
668 	return(entry);
669 }
670 
671 /*
672  * Adjust usage count on a jumbo buffer.
673  */
674 static void
675 bge_jref(void *arg)
676 {
677 	struct bge_jslot *entry = (struct bge_jslot *)arg;
678 	struct bge_softc *sc = entry->bge_sc;
679 
680 	if (sc == NULL)
681 		panic("bge_jref: can't find softc pointer!");
682 
683 	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry)
684 		panic("bge_jref: asked to reference buffer "
685 		    "that we don't manage!");
686 	else if (entry->bge_inuse == 0)
687 		panic("bge_jref: buffer already free!");
688 	else
689 		entry->bge_inuse++;
690 }
691 
692 /*
693  * Release a jumbo buffer.
694  */
695 static void
696 bge_jfree(void *arg)
697 {
698 	struct bge_jslot *entry = (struct bge_jslot *)arg;
699 	struct bge_softc *sc = entry->bge_sc;
700 
701 	if (sc == NULL)
702 		panic("bge_jfree: can't find softc pointer!");
703 
704 	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry)
705 		panic("bge_jfree: asked to free buffer that we don't manage!");
706 	else if (entry->bge_inuse == 0)
707 		panic("bge_jfree: buffer already free!");
708 	else if (--entry->bge_inuse == 0)
709 		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);
710 }
711 
712 
713 /*
714  * Initialize a standard receive ring descriptor.
715  */
716 static int
717 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
718 {
719 	struct mbuf *m_new = NULL;
720 	struct bge_rx_bd *r;
721 
722 	if (m == NULL) {
723 		m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
724 		if (m_new == NULL)
725 			return (ENOBUFS);
726 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
727 	} else {
728 		m_new = m;
729 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
730 		m_new->m_data = m_new->m_ext.ext_buf;
731 	}
732 
733 	if (!sc->bge_rx_alignment_bug)
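	/*
	 * Offset the buffer by ETHER_ALIGN (2 bytes) so the IP header
	 * that follows the 14-byte Ethernet header ends up longword
	 * aligned.  Chips with the RX alignment bug must DMA to an
	 * aligned buffer, so there we skip this and instead realign
	 * affected payloads by copying after reception.
	 */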
734 		m_adj(m_new, ETHER_ALIGN);
735 	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
736 	r = &sc->bge_rdata->bge_rx_std_ring[i];
737 	BGE_HOSTADDR(r->bge_addr, vtophys(mtod(m_new, caddr_t)));
738 	r->bge_flags = BGE_RXBDFLAG_END;
739 	r->bge_len = m_new->m_len;
740 	r->bge_idx = i;
741 
742 	return(0);
743 }
744 
745 /*
746  * Initialize a jumbo receive ring descriptor. This allocates
747  * a jumbo buffer from the pool managed internally by the driver.
748  */
749 static int
750 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
751 {
752 	struct mbuf *m_new = NULL;
753 	struct bge_rx_bd *r;
754 
755 	if (m == NULL) {
756 		struct bge_jslot *buf;
757 
758 		/* Allocate the mbuf. */
759 		MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
760 		if (m_new == NULL)
761 			return(ENOBUFS);
762 
763 		/* Allocate the jumbo buffer */
764 		buf = bge_jalloc(sc);
765 		if (buf == NULL) {
766 			m_freem(m_new);
767 			if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
768 			    "-- packet dropped!\n");
769 			return(ENOBUFS);
770 		}
771 
772 		/* Attach the buffer to the mbuf. */
773 		m_new->m_ext.ext_arg = buf;
774 		m_new->m_ext.ext_buf = buf->bge_buf;
775 		m_new->m_ext.ext_free = bge_jfree;
776 		m_new->m_ext.ext_ref = bge_jref;
777 		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
778 
779 		m_new->m_data = m_new->m_ext.ext_buf;
780 		m_new->m_flags |= M_EXT;
781 		m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;
782 	} else {
783 		m_new = m;
784 		m_new->m_data = m_new->m_ext.ext_buf;
785 		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
786 	}
787 
788 	if (!sc->bge_rx_alignment_bug)
789 		m_adj(m_new, ETHER_ALIGN);
790 	/* Set up the descriptor. */
791 	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
792 	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
793 	BGE_HOSTADDR(r->bge_addr, vtophys(mtod(m_new, caddr_t)));
794 	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
795 	r->bge_len = m_new->m_len;
796 	r->bge_idx = i;
797 
798 	return(0);
799 }
800 
801 /*
802  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
803  * that's 1MB of memory, which is a lot. For now, we fill only the first
804  * 256 ring entries and hope that our CPU is fast enough to keep up with
805  * the NIC.
806  */
807 static int
808 bge_init_rx_ring_std(struct bge_softc *sc)
809 {
810 	int i;
811 
812 	for (i = 0; i < BGE_SSLOTS; i++) {
813 		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
814 			return(ENOBUFS);
815 	}
816 
817 	sc->bge_std = i - 1;
818 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
819 
820 	return(0);
821 }
822 
823 static void
824 bge_free_rx_ring_std(struct bge_softc *sc)
825 {
826 	int i;
827 
828 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
829 		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
830 			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
831 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
832 		}
833 		bzero(&sc->bge_rdata->bge_rx_std_ring[i],
834 		    sizeof(struct bge_rx_bd));
835 	}
836 }
837 
838 static int
839 bge_init_rx_ring_jumbo(struct bge_softc *sc)
840 {
841 	int i;
842 	struct bge_rcb *rcb;
843 
844 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
845 		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
846 			return(ENOBUFS);
847 	}
848 
849 	sc->bge_jumbo = i - 1;
850 
851 	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
852 	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
853 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
854 
855 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
856 
857 	return(0);
858 }
859 
860 static void
861 bge_free_rx_ring_jumbo(struct bge_softc *sc)
862 {
863 	int i;
864 
865 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
866 		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
867 			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
868 			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
869 		}
870 		bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i],
871 		    sizeof(struct bge_rx_bd));
872 	}
873 }
874 
875 static void
876 bge_free_tx_ring(struct bge_softc *sc)
877 {
878 	int i;
879 
880 	if (sc->bge_rdata->bge_tx_ring == NULL)
881 		return;
882 
883 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
884 		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
885 			m_freem(sc->bge_cdata.bge_tx_chain[i]);
886 			sc->bge_cdata.bge_tx_chain[i] = NULL;
887 		}
888 		bzero(&sc->bge_rdata->bge_tx_ring[i],
889 		    sizeof(struct bge_tx_bd));
890 	}
891 }
892 
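/*
 * The TX ring needs no buffers allocated up front; just reset the
 * producer/consumer bookkeeping and push a zero producer index to
 * both the host and NIC mailboxes.
 */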
893 static int
894 bge_init_tx_ring(struct bge_softc *sc)
895 {
896 	sc->bge_txcnt = 0;
897 	sc->bge_tx_saved_considx = 0;
898 
899 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
900 	/* 5700 b2 errata */
901 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
902 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
903 
904 	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
905 	/* 5700 b2 errata */
906 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
907 		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
908 
909 	return(0);
910 }
911 
912 static void
913 bge_setmulti(struct bge_softc *sc)
914 {
915 	struct ifnet *ifp;
916 	struct ifmultiaddr *ifma;
917 	uint32_t hashes[4] = { 0, 0, 0, 0 };
918 	int h, i;
919 
920 	ifp = &sc->arpcom.ac_if;
921 
922 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
923 		for (i = 0; i < 4; i++)
924 			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
925 		return;
926 	}
927 
928 	/* First, zot all the existing filters. */
929 	for (i = 0; i < 4; i++)
930 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
931 
932 	/* Now program new ones. */
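	/*
	 * The chip hashes multicast addresses into a 128-bit table
	 * spread across four 32-bit MAR registers: the low 7 bits of
	 * the little-endian CRC32 of each address pick a bit, with
	 * bits 6:5 selecting the register and bits 4:0 the bit in it.
	 */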
933 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
934 		if (ifma->ifma_addr->sa_family != AF_LINK)
935 			continue;
936 		h = ether_crc32_le(
937 		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
938 		    ETHER_ADDR_LEN) & 0x7f;
939 		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
940 	}
941 
942 	for (i = 0; i < 4; i++)
943 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
944 }
945 
946 /*
947  * Do endian, PCI and DMA initialization. Also check the on-board ROM
948  * self-test results.
949  */
950 static int
951 bge_chipinit(struct bge_softc *sc)
952 {
953 	int i;
954 	uint32_t dma_rw_ctl;
955 
956 	/* Set endianness before we access any non-PCI registers. */
957 #if BYTE_ORDER == BIG_ENDIAN
958 	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
959 	    BGE_BIGENDIAN_INIT, 4);
960 #else
961 	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
962 	    BGE_LITTLEENDIAN_INIT, 4);
963 #endif
964 
965 	/*
966 	 * Check the 'ROM failed' bit on the RX CPU to see if
967 	 * self-tests passed.
968 	 */
969 	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
970 		if_printf(&sc->arpcom.ac_if,
971 			  "RX CPU self-diagnostics failed!\n");
972 		return(ENODEV);
973 	}
974 
975 	/* Clear the MAC control register */
976 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
977 
978 	/*
979 	 * Clear the MAC statistics block in the NIC's
980 	 * internal memory.
981 	 */
982 	for (i = BGE_STATS_BLOCK;
983 	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
984 		BGE_MEMWIN_WRITE(sc, i, 0);
985 
986 	for (i = BGE_STATUS_BLOCK;
987 	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
988 		BGE_MEMWIN_WRITE(sc, i, 0);
989 
990 	/* Set up the PCI DMA control register. */
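	/*
	 * The watermark fields control how much data the DMA engine
	 * buffers before requesting the bus, so the right values vary
	 * with bus type.  The settings below follow Broadcom's own
	 * driver for PCI Express, plain PCI and PCI-X respectively.
	 */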
991 	if (sc->bge_pcie) {
992 		/* PCI Express */
993 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
994 		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
995 		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
996 	} else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
997 		   BGE_PCISTATE_PCI_BUSMODE) {
998 		/* Conventional PCI bus */
999 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1000 		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1001 		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1002 		    (0x0F);
1003 	} else {
1004 		/* PCI-X bus */
1005 		/*
1006 		 * The 5704 uses a different encoding of read/write
1007 		 * watermarks.
1008 		 */
1009 		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1010 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1011 			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1012 			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1013 		else
1014 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1015 			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1016 			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1017 			    (0x0F);
1018 
1019 		/*
1020 		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1021 		 * for hardware bugs.
1022 		 */
1023 		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1024 		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1025 			uint32_t tmp;
1026 
1027 			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1028 			if (tmp == 0x6 || tmp == 0x7)
1029 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1030 		}
1031 	}
1032 
1033 	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1034 	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
1035 	    sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1036 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
1037 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1038 	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1039 
1040 	/*
1041 	 * Set up general mode register.
1042 	 */
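	/*
	 * The swap options make descriptor and frame data come across
	 * in host byte order on both big- and little-endian machines,
	 * and the NO_PHDR_CSUM bits tell the chip to compute TCP/UDP
	 * checksums without the pseudo-header.
	 */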
1043 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
1044 	    BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1045 	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1046 	    BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);
1047 
1048 	/*
1049 	 * Disable memory write invalidate.  Apparently it is not supported
1050 	 * properly by these devices.
1051 	 */
1052 	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1053 
1054 	/* Set the timer prescaler (always 66MHz) */
1055 	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1056 
1057 	return(0);
1058 }
1059 
1060 static int
1061 bge_blockinit(struct bge_softc *sc)
1062 {
1063 	struct bge_rcb *rcb;
1064 	volatile struct bge_rcb *vrcb;
1065 	int i;
1066 
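	/*
	 * A ring control block (RCB) tells the chip where a ring
	 * lives: its host DMA address, its maximum frame length and
	 * flags, and its address in NIC-local memory.  Host-resident
	 * RCBs (rcb) are programmed through CSRs; NIC-resident ones
	 * (vrcb) are poked directly through the memory window.
	 */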
1067 	/*
1068 	 * Initialize the memory window pointer register so that
1069 	 * we can access the first 32K of internal NIC RAM. This will
1070 	 * allow us to set up the TX send ring RCBs and the RX return
1071 	 * ring RCBs, plus other things which live in NIC memory.
1072 	 */
1073 	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1074 
1075 	/* Note: the BCM5704 has a smaller mbuf space than other chips. */
1076 
1077 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1078 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1079 		/* Configure mbuf memory pool */
1080 		if (sc->bge_extram) {
1081 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1082 			    BGE_EXT_SSRAM);
1083 			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1084 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1085 			else
1086 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1087 		} else {
1088 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1089 			    BGE_BUFFPOOL_1);
1090 			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1091 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1092 			else
1093 				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1094 		}
1095 
1096 		/* Configure DMA resource pool */
1097 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1098 		    BGE_DMA_DESCRIPTORS);
1099 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1100 	}
1101 
1102 	/* Configure mbuf pool watermarks */
1103 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1104 	    sc->bge_asicrev == BGE_ASICREV_BCM5750) {
1105 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1106 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1107 	} else {
1108 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1109 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1110 	}
1111 	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1112 
1113 	/* Configure DMA resource watermarks */
1114 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1115 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1116 
1117 	/* Enable buffer manager */
1118 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1119 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1120 		CSR_WRITE_4(sc, BGE_BMAN_MODE,
1121 		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1122 
1123 		/* Poll for buffer manager start indication */
1124 		for (i = 0; i < BGE_TIMEOUT; i++) {
1125 			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1126 				break;
1127 			DELAY(10);
1128 		}
1129 
1130 		if (i == BGE_TIMEOUT) {
1131 			if_printf(&sc->arpcom.ac_if,
1132 				  "buffer manager failed to start\n");
1133 			return(ENXIO);
1134 		}
1135 	}
1136 
1137 	/* Enable flow-through queues */
1138 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1139 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1140 
1141 	/* Wait until queue initialization is complete */
1142 	for (i = 0; i < BGE_TIMEOUT; i++) {
1143 		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1144 			break;
1145 		DELAY(10);
1146 	}
1147 
1148 	if (i == BGE_TIMEOUT) {
1149 		if_printf(&sc->arpcom.ac_if,
1150 			  "flow-through queue init failed\n");
1151 		return(ENXIO);
1152 	}
1153 
1154 	/* Initialize the standard RX ring control block */
1155 	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
1156 	BGE_HOSTADDR(rcb->bge_hostaddr,
1157 	    vtophys(&sc->bge_rdata->bge_rx_std_ring));
1158 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1159 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
1160 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1161 	else
1162 		rcb->bge_maxlen_flags =
1163 		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1164 	if (sc->bge_extram)
1165 		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1166 	else
1167 		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1168 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1169 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1170 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1171 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1172 
1173 	/*
1174 	 * Initialize the jumbo RX ring control block
1175 	 * We set the 'ring disabled' bit in the flags
1176 	 * field until we're actually ready to start
1177 	 * using this ring (i.e. once we set the MTU
1178 	 * high enough to require it).
1179 	 */
1180 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1181 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1182 		rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1183 		BGE_HOSTADDR(rcb->bge_hostaddr,
1184 		    vtophys(&sc->bge_rdata->bge_rx_jumbo_ring));
1185 		rcb->bge_maxlen_flags =
1186 		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
1187 		    BGE_RCB_FLAG_RING_DISABLED);
1188 		if (sc->bge_extram)
1189 			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1190 		else
1191 			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1192 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1193 		    rcb->bge_hostaddr.bge_addr_hi);
1194 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1195 		    rcb->bge_hostaddr.bge_addr_lo);
1196 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1197 		    rcb->bge_maxlen_flags);
1198 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1199 
1200 		/* Set up dummy disabled mini ring RCB */
1201 		rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
1202 		rcb->bge_maxlen_flags =
1203 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1204 		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1205 		    rcb->bge_maxlen_flags);
1206 	}
1207 
1208 	/*
1209 	 * Set the BD ring replenish thresholds. The recommended
1210 	 * values are 1/8th the number of descriptors allocated to
1211 	 * each ring.
1212 	 */
1213 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1214 	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1215 
1216 	/*
1217 	 * Disable all unused send rings by setting the 'ring disabled'
1218 	 * bit in the flags field of all the TX send ring control blocks.
1219 	 * These are located in NIC memory.
1220 	 */
1221 	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1222 	    BGE_SEND_RING_RCB);
1223 	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1224 		vrcb->bge_maxlen_flags =
1225 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1226 		vrcb->bge_nicaddr = 0;
1227 		vrcb++;
1228 	}
1229 
1230 	/* Configure TX RCB 0 (we use only the first ring) */
1231 	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1232 	    BGE_SEND_RING_RCB);
1233 	vrcb->bge_hostaddr.bge_addr_hi = 0;
1234 	BGE_HOSTADDR(vrcb->bge_hostaddr, vtophys(&sc->bge_rdata->bge_tx_ring));
1235 	vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
1236 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1237 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1238 		vrcb->bge_maxlen_flags =
1239 		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0);
1240 
1241 	/* Disable all unused RX return rings */
1242 	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1243 	    BGE_RX_RETURN_RING_RCB);
1244 	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1245 		vrcb->bge_hostaddr.bge_addr_hi = 0;
1246 		vrcb->bge_hostaddr.bge_addr_lo = 0;
1247 		vrcb->bge_maxlen_flags =
1248 		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1249 		    BGE_RCB_FLAG_RING_DISABLED);
1250 		vrcb->bge_nicaddr = 0;
1251 		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1252 		    (i * (sizeof(uint64_t))), 0);
1253 		vrcb++;
1254 	}
1255 
1256 	/* Initialize RX ring indexes */
1257 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1258 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1259 	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1260 
1261 	/*
1262 	 * Set up RX return ring 0
1263 	 * Note that the NIC address for RX return rings is 0x00000000.
1264 	 * The return rings live entirely within the host, so the
1265 	 * nicaddr field in the RCB isn't used.
1266 	 */
1267 	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1268 	    BGE_RX_RETURN_RING_RCB);
1269 	vrcb->bge_hostaddr.bge_addr_hi = 0;
1270 	BGE_HOSTADDR(vrcb->bge_hostaddr,
1271 	    vtophys(&sc->bge_rdata->bge_rx_return_ring));
1272 	vrcb->bge_nicaddr = 0x00000000;
1273 	vrcb->bge_maxlen_flags =
1274 	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0);
1275 
1276 	/* Set random backoff seed for TX */
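	/*
	 * Summing the station address bytes gives each NIC a distinct
	 * but stable seed, so stations that collide will tend to pick
	 * different backoff slots.
	 */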
1277 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1278 	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1279 	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1280 	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1281 	    BGE_TX_BACKOFF_SEED_MASK);
1282 
1283 	/* Set inter-packet gap */
1284 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1285 
1286 	/*
1287 	 * Specify which ring to use for packets that don't match
1288 	 * any RX rules.
1289 	 */
1290 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1291 
1292 	/*
1293 	 * Configure number of RX lists. One interrupt distribution
1294 	 * list, sixteen active lists, one bad frames class.
1295 	 */
1296 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1297 
1298 	/* Initialize RX list placement stats mask. */
1299 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1300 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1301 
1302 	/* Disable host coalescing until we get it set up */
1303 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1304 
1305 	/* Poll to make sure it's shut down. */
1306 	for (i = 0; i < BGE_TIMEOUT; i++) {
1307 		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1308 			break;
1309 		DELAY(10);
1310 	}
1311 
1312 	if (i == BGE_TIMEOUT) {
1313 		if_printf(&sc->arpcom.ac_if,
1314 			  "host coalescing engine failed to idle\n");
1315 		return(ENXIO);
1316 	}
1317 
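	/*
	 * Host coalescing batches completions: the chip defers its
	 * status block update (and interrupt) until either the tick
	 * timers expire or the max-BD counts are reached.  The values
	 * programmed here are the tunables set in bge_attach().
	 */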
1318 	/* Set up host coalescing defaults */
1319 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1320 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1321 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1322 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1323 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1324 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1325 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1326 		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1327 	}
1328 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1329 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1330 
1331 	/* Set up address of statistics block */
1332 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1333 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1334 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0);
1335 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1336 		    vtophys(&sc->bge_rdata->bge_info.bge_stats));
1337 
1338 		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1339 		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1340 		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1341 	}
1342 
1343 	/* Set up address of status block */
1344 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 0);
1345 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1346 	    vtophys(&sc->bge_rdata->bge_status_block));
1347 
1348 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
1349 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
1350 
1351 	/* Turn on host coalescing state machine */
1352 	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1353 
1354 	/* Turn on RX BD completion state machine and enable attentions */
1355 	CSR_WRITE_4(sc, BGE_RBDC_MODE,
1356 	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1357 
1358 	/* Turn on RX list placement state machine */
1359 	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1360 
1361 	/* Turn on RX list selector state machine. */
1362 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1363 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1364 		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1365 
1366 	/* Turn on DMA, clear stats */
1367 	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1368 	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1369 	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1370 	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1371 	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1372 
1373 	/* Set misc. local control, enable interrupts on attentions */
1374 	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1375 
1376 #ifdef notdef
1377 	/* Assert GPIO pins for PHY reset */
1378 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1379 	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1380 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1381 	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1382 #endif
1383 
1384 	/* Turn on DMA completion state machine */
1385 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1386 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1387 		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1388 
1389 	/* Turn on write DMA state machine */
1390 	CSR_WRITE_4(sc, BGE_WDMA_MODE,
1391 	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1392 
1393 	/* Turn on read DMA state machine */
1394 	CSR_WRITE_4(sc, BGE_RDMA_MODE,
1395 	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1396 
1397 	/* Turn on RX data completion state machine */
1398 	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1399 
1400 	/* Turn on RX BD initiator state machine */
1401 	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1402 
1403 	/* Turn on RX data and RX BD initiator state machine */
1404 	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1405 
1406 	/* Turn on Mbuf cluster free state machine */
1407 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1408 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1409 		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1410 
1411 	/* Turn on send BD completion state machine */
1412 	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1413 
1414 	/* Turn on send data completion state machine */
1415 	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1416 
1417 	/* Turn on send data initiator state machine */
1418 	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1419 
1420 	/* Turn on send BD initiator state machine */
1421 	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1422 
1423 	/* Turn on send BD selector state machine */
1424 	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1425 
1426 	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1427 	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1428 	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1429 
1430 	/* ack/clear link change events */
1431 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1432 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1433 	    BGE_MACSTAT_LINK_CHANGED);
1434 
1435 	/* Enable PHY auto polling (for MII/GMII only) */
1436 	if (sc->bge_tbi) {
1437 		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1438 	} else {
1439 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1440 		if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
1441 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1442 			    BGE_EVTENB_MI_INTERRUPT);
1443 	}
1444 
1445 	/* Enable link state change attentions. */
1446 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1447 
1448 	return(0);
1449 }
1450 
1451 /*
1452  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1453  * against our list and return its name if we find a match. Note
1454  * that since the Broadcom controller contains VPD support, we
1455  * can get the device name string from the controller itself instead
1456  * of the compiled-in string. This is a little slow, but it guarantees
1457  * we'll always announce the right product name.
1458  */
1459 static int
1460 bge_probe(device_t dev)
1461 {
1462 	struct bge_softc *sc;
1463 	struct bge_type *t;
1464 	char *descbuf;
1465 	uint16_t product, vendor;
1466 
1467 	product = pci_get_device(dev);
1468 	vendor = pci_get_vendor(dev);
1469 
1470 	for (t = bge_devs; t->bge_name != NULL; t++) {
1471 		if (vendor == t->bge_vid && product == t->bge_did)
1472 			break;
1473 	}
1474 
1475 	if (t->bge_name == NULL)
1476 		return(ENXIO);
1477 
1478 	sc = device_get_softc(dev);
1479 #ifdef notdef
1480 	sc->bge_dev = dev;
1481 
1482 	bge_vpd_read(sc);
1483 	device_set_desc(dev, sc->bge_vpd_prodname);
1484 #endif
1485 	descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_WAITOK);
1486 	snprintf(descbuf, BGE_DEVDESC_MAX, "%s, ASIC rev. %#04x", t->bge_name,
1487 	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
1488 	device_set_desc_copy(dev, descbuf);
1489 	if (pci_get_subvendor(dev) == PCI_VENDOR_DELL)
1490 		sc->bge_no_3_led = 1;
1491 	free(descbuf, M_TEMP);
1492 	return(0);
1493 }
1494 
1495 static int
1496 bge_attach(device_t dev)
1497 {
1498 	struct ifnet *ifp;
1499 	struct bge_softc *sc;
1500 	uint32_t hwcfg = 0;
1501 	uint32_t mac_addr = 0;
1502 	int error = 0, rid;
1503 	uint8_t ether_addr[ETHER_ADDR_LEN];
1504 
1505 	sc = device_get_softc(dev);
1506 	sc->bge_dev = dev;
1507 	callout_init(&sc->bge_stat_timer);
1508 
1509 	/*
1510 	 * Map control/status registers.
1511 	 */
1512 	pci_enable_busmaster(dev);
1513 
1514 	rid = BGE_PCI_BAR0;
1515 	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1516 	    RF_ACTIVE);
1517 
1518 	if (sc->bge_res == NULL) {
1519 		device_printf(dev, "couldn't map memory\n");
1520 		error = ENXIO;
1521 		return(error);
1522 	}
1523 
1524 	sc->bge_btag = rman_get_bustag(sc->bge_res);
1525 	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
1526 	sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res);
1527 
1528 	/* Allocate interrupt */
1529 	rid = 0;
1530 
1531 	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1532 	    RF_SHAREABLE | RF_ACTIVE);
1533 
1534 	if (sc->bge_irq == NULL) {
1535 		device_printf(dev, "couldn't map interrupt\n");
1536 		error = ENXIO;
1537 		goto fail;
1538 	}
1539 
1540 	/* Save ASIC rev. */
1541 	sc->bge_chipid =
1542 	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1543 	    BGE_PCIMISCCTL_ASICREV;
1544 	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
1545 	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
1546 
1547 	/*
1548 	 * Treat the 5714 like the 5750 until we have more info
1549 	 * on this chip.
1550 	 */
1551 	if (sc->bge_asicrev == BGE_ASICREV_BCM5714)
1552 		sc->bge_asicrev = BGE_ASICREV_BCM5750;
1553 
1554 	/*
1555 	 * XXX: Broadcom Linux driver.  Not in specs or errata.
1556 	 * PCI-Express?
1557 	 */
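	/*
	 * This seems to follow the MSI capability's next pointer: if
	 * it leads to the expected PCI Express capability, assume a
	 * PCIe part so bge_chipinit() programs the PCIe DMA
	 * watermarks.
	 */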
1558 	if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
1559 		uint32_t v;
1560 
1561 		v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4);
1562 		if (((v >> 8) & 0xff) == BGE_PCIE_MSI_CAPID) {
1563 			v = pci_read_config(dev, BGE_PCIE_MSI_CAPID, 4);
1564 			if ((v & 0xff) == BGE_PCIE_MSI_CAPID_VAL)
1565 				sc->bge_pcie = 1;
1566 		}
1567 	}
1568 
1569 	ifp = &sc->arpcom.ac_if;
1570 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1571 
1572 	/* Try to reset the chip. */
1573 	bge_reset(sc);
1574 
1575 	if (bge_chipinit(sc)) {
1576 		device_printf(dev, "chip initialization failed\n");
1577 		error = ENXIO;
1578 		goto fail;
1579 	}
1580 
1581 	/*
1582 	 * Get station address from the EEPROM.
1583 	 */
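	/*
	 * The bootcode apparently leaves a copy of the station address
	 * in NIC memory at 0x0c14, tagged with the ASCII signature
	 * "HK" (0x484b) in the upper 16 bits.  Prefer that copy and
	 * fall back to reading the EEPROM directly.
	 */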
1584 	mac_addr = bge_readmem_ind(sc, 0x0c14);
1585 	if ((mac_addr >> 16) == 0x484b) {
1586 		ether_addr[0] = (uint8_t)(mac_addr >> 8);
1587 		ether_addr[1] = (uint8_t)mac_addr;
1588 		mac_addr = bge_readmem_ind(sc, 0x0c18);
1589 		ether_addr[2] = (uint8_t)(mac_addr >> 24);
1590 		ether_addr[3] = (uint8_t)(mac_addr >> 16);
1591 		ether_addr[4] = (uint8_t)(mac_addr >> 8);
1592 		ether_addr[5] = (uint8_t)mac_addr;
1593 	} else if (bge_read_eeprom(sc, ether_addr,
1594 	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
1595 		device_printf(dev, "failed to read station address\n");
1596 		error = ENXIO;
1597 		goto fail;
1598 	}
1599 
1600 	/* Allocate the general information block and ring buffers. */
1601 	sc->bge_rdata = contigmalloc(sizeof(struct bge_ring_data), M_DEVBUF,
1602 	    M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0);
1603 
1604 	if (sc->bge_rdata == NULL) {
1605 		error = ENXIO;
1606 		device_printf(dev, "no memory for list buffers!\n");
1607 		goto fail;
1608 	}
1609 
1610 	bzero(sc->bge_rdata, sizeof(struct bge_ring_data));
1611 
1612 	/*
1613 	 * Try to allocate memory for jumbo buffers.
1614 	 * The 5705/5750 does not appear to support jumbo frames.
1615 	 */
1616 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1617 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
1618 		if (bge_alloc_jumbo_mem(sc)) {
1619 			device_printf(dev, "jumbo buffer allocation failed\n");
1620 			error = ENXIO;
1621 			goto fail;
1622 		}
1623 	}
1624 
1625 	/* Set default tuneable values. */
1626 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
1627 	sc->bge_rx_coal_ticks = 150;
1628 	sc->bge_tx_coal_ticks = 150;
1629 	sc->bge_rx_max_coal_bds = 64;
1630 	sc->bge_tx_max_coal_bds = 128;
1631 
1632 	/* 5705/5750 limits RX return ring to 512 entries. */
1633 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
1634 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
1635 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1636 	else
1637 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
1638 
1639 	/* Set up ifnet structure */
1640 	ifp->if_softc = sc;
1641 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1642 	ifp->if_ioctl = bge_ioctl;
1643 	ifp->if_start = bge_start;
1644 	ifp->if_watchdog = bge_watchdog;
1645 	ifp->if_init = bge_init;
1646 	ifp->if_mtu = ETHERMTU;
1647 	ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1648 	ifq_set_ready(&ifp->if_snd);
1649 	ifp->if_hwassist = BGE_CSUM_FEATURES;
1650 	ifp->if_capabilities = IFCAP_HWCSUM;
1651 	ifp->if_capenable = ifp->if_capabilities;
1652 
1653 	/*
1654 	 * Figure out what sort of media we have by checking the
1655 	 * hardware config word in the first 32k of NIC internal memory,
1656 	 * or fall back to examining the EEPROM if necessary.
1657 	 * Note: on some BCM5700 cards, this value appears to be unset.
1658 	 * If that's the case, we have to rely on identifying the NIC
1659 	 * by its PCI subsystem ID, as we do below for the SysKonnect
1660 	 * SK-9D41.
1661 	 */
1662 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
1663 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
1664 	else {
1665 		bge_read_eeprom(sc, (caddr_t)&hwcfg,
1666 				BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
1667 		hwcfg = ntohl(hwcfg);
1668 	}
1669 
1670 	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
1671 		sc->bge_tbi = 1;
1672 
1673 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
1674 	if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41)
1675 		sc->bge_tbi = 1;
1676 
1677 	if (sc->bge_tbi) {
1678 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
1679 		    bge_ifmedia_upd, bge_ifmedia_sts);
1680 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
1681 		ifmedia_add(&sc->bge_ifmedia,
1682 		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
1683 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1684 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
1685 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
1686 	} else {
1687 		/*
1688 		 * Do transceiver setup.
1689 		 */
1690 		if (mii_phy_probe(dev, &sc->bge_miibus,
1691 		    bge_ifmedia_upd, bge_ifmedia_sts)) {
1692 			device_printf(dev, "MII without any PHY!\n");
1693 			error = ENXIO;
1694 			goto fail;
1695 		}
1696 	}
1697 
1698 	/*
1699 	 * When using the BCM5701 in PCI-X mode, data corruption has
1700 	 * been observed in the first few bytes of some received packets.
1701 	 * Aligning the packet buffer in memory eliminates the corruption.
1702 	 * Unfortunately, this misaligns the packet payloads.  On platforms
1703 	 * which do not support unaligned accesses, we will realign the
1704 	 * payloads by copying the received packets.
1705 	 */
1706 	switch (sc->bge_chipid) {
1707 	case BGE_CHIPID_BCM5701_A0:
1708 	case BGE_CHIPID_BCM5701_B0:
1709 	case BGE_CHIPID_BCM5701_B2:
1710 	case BGE_CHIPID_BCM5701_B5:
1711 		/* If in PCI-X mode, work around the alignment bug. */
1712 		if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
1713 		    (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
1714 		    BGE_PCISTATE_PCI_BUSSPEED)
1715 			sc->bge_rx_alignment_bug = 1;
1716 		break;
1717 	}
1718 
1719 	/*
1720 	 * Call MI attach routine.
1721 	 */
1722 	ether_ifattach(ifp, ether_addr, NULL);
1723 
1724 	error = bus_setup_intr(dev, sc->bge_irq, INTR_NETSAFE,
1725 			       bge_intr, sc, &sc->bge_intrhand,
1726 			       ifp->if_serializer);
1727 	if (error) {
1728 		ether_ifdetach(ifp);
1729 		device_printf(dev, "couldn't set up irq\n");
1730 		goto fail;
1731 	}
1732 
1733 	return(0);
1734 
1735 fail:
1736 	bge_detach(dev);
1737 
1738 	return(error);
1739 }
1740 
1741 static int
1742 bge_detach(device_t dev)
1743 {
1744 	struct bge_softc *sc = device_get_softc(dev);
1745 	struct ifnet *ifp = &sc->arpcom.ac_if;
1746 
1747 	lwkt_serialize_enter(ifp->if_serializer);
1748 
1749 	if (device_is_attached(dev)) {
1750 		ether_ifdetach(ifp);
1751 		bge_stop(sc);
1752 		bge_reset(sc);
1753 	}
1754 
1755 	if (sc->bge_tbi)
1756 		ifmedia_removeall(&sc->bge_ifmedia);
1757 	if (sc->bge_miibus != NULL)
1758 		device_delete_child(dev, sc->bge_miibus);
1759 	bus_generic_detach(dev);
1760 
1761 	bge_release_resources(sc);
1762 
1763 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1764 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
1765 		bge_free_jumbo_mem(sc);
1766 
1767 	lwkt_serialize_exit(ifp->if_serializer);
1768 
1769 	return(0);
1770 }
1771 
1772 static void
1773 bge_release_resources(struct bge_softc *sc)
1774 {
1775 	device_t dev;
1776 
1777 	dev = sc->bge_dev;
1778 
1779 	if (sc->bge_vpd_prodname != NULL)
1780 		free(sc->bge_vpd_prodname, M_DEVBUF);
1781 
1782 	if (sc->bge_vpd_readonly != NULL)
1783 		free(sc->bge_vpd_readonly, M_DEVBUF);
1784 
1785 	if (sc->bge_intrhand != NULL)
1786 		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
1787 
1788 	if (sc->bge_irq != NULL)
1789 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
1790 
1791 	if (sc->bge_res != NULL)
1792 		bus_release_resource(dev, SYS_RES_MEMORY,
1793 		    BGE_PCI_BAR0, sc->bge_res);
1794 
1795 	if (sc->bge_rdata != NULL)
1796 		contigfree(sc->bge_rdata, sizeof(struct bge_ring_data),
1797 			   M_DEVBUF);
1798 
1799 	return;
1800 }
1801 
1802 static void
1803 bge_reset(struct bge_softc *sc)
1804 {
1805 	device_t dev;
1806 	uint32_t cachesize, command, pcistate, reset;
1807 	int i, val = 0;
1808 
1809 	dev = sc->bge_dev;
1810 
1811 	/* Save some important PCI state. */
1812 	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
1813 	command = pci_read_config(dev, BGE_PCI_CMD, 4);
1814 	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
1815 
1816 	pci_write_config(dev, BGE_PCI_MISC_CTL,
1817 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
1818 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
1819 
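	/*
	 * (65 << 1) apparently programs the 32-bit timer prescaler for
	 * a 66MHz core clock (0x41 == 65).
	 */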
1820 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
1821 
1822 	/* XXX: Broadcom Linux driver. */
1823 	if (sc->bge_pcie) {
1824 		if (CSR_READ_4(sc, 0x7e2c) == 0x60)	/* PCIE 1.0 */
1825 			CSR_WRITE_4(sc, 0x7e2c, 0x20);
1826 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
1827 			/* Prevent PCIE link training during global reset */
1828 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
1829 			reset |= (1<<29);
1830 		}
1831 	}
1832 
1833 	/* Issue global reset */
1834 	bge_writereg_ind(sc, BGE_MISC_CFG, reset);
1835 
1836 	DELAY(1000);
1837 
1838 	/* XXX: Broadcom Linux driver. */
1839 	if (sc->bge_pcie) {
1840 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
1841 			uint32_t v;
1842 
1843 			DELAY(500000); /* wait for link training to complete */
1844 			v = pci_read_config(dev, 0xc4, 4);
1845 			pci_write_config(dev, 0xc4, v | (1<<15), 4);
1846 		}
1847 		/* Set PCIE max payload size and clear error status. */
1848 		pci_write_config(dev, 0xd8, 0xf5000, 4);
1849 	}
1850 
1851 	/* Reset some of the PCI state that got zapped by reset */
1852 	pci_write_config(dev, BGE_PCI_MISC_CTL,
1853 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
1854 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
1855 	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
1856 	pci_write_config(dev, BGE_PCI_CMD, command, 4);
1857 	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
1858 
1859 	/* Enable memory arbiter. */
1860 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1861 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
1862 
1863 	/*
1864 	 * Prevent PXE restart: write a magic number to the
1865 	 * general communications memory at 0xB50.
1866 	 */
1867 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1868 	/*
1869 	 * Poll the value location we just wrote until
1870 	 * we see the 1's complement of the magic number.
1871 	 * This indicates that the firmware initialization
1872 	 * is complete.
1873 	 */
1874 	for (i = 0; i < BGE_TIMEOUT; i++) {
1875 		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
1876 		if (val == ~BGE_MAGIC_NUMBER)
1877 			break;
1878 		DELAY(10);
1879 	}
1880 
1881 	if (i == BGE_TIMEOUT) {
1882 		if_printf(&sc->arpcom.ac_if, "firmware handshake timed out\n");
1883 		return;
1884 	}
1885 
1886 	/*
1887 	 * XXX Wait for the value of the PCISTATE register to
1888 	 * return to its original pre-reset state. This is a
1889 	 * fairly good indicator of reset completion. If we don't
1890 	 * wait for the reset to fully complete, trying to read
1891 	 * from the device's non-PCI registers may yield garbage
1892 	 * results.
1893 	 */
1894 	for (i = 0; i < BGE_TIMEOUT; i++) {
1895 		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
1896 			break;
1897 		DELAY(10);
1898 	}
1899 
1900 	/* Fix up byte swapping */
1901 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
1902 	    BGE_MODECTL_BYTESWAP_DATA);
1903 
1904 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1905 
1906 	/*
1907 	 * The 5704 in TBI mode apparently needs some special
1908 	 * adjustment to ensure the SERDES drive level is set
1909 	 * to 1.2V.
1910 	 */
1911 	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
1912 		uint32_t serdescfg;
1913 
1914 		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
1915 		serdescfg = (serdescfg & ~0xFFF) | 0x880;
1916 		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
1917 	}
1918 
1919 	/* XXX: Broadcom Linux driver. */
1920 	if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
1921 		uint32_t v;
1922 
1923 		v = CSR_READ_4(sc, 0x7c00);
1924 		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
1925 	}
1926 
1927 	DELAY(10000);
1928 }
1929 
1930 /*
1931  * Frame reception handling. This is called if there's a frame
1932  * on the receive return list.
1933  *
1934  * Note: we have to be able to handle two possibilities here:
1935  * 1) the frame is from the jumbo receive ring
1936  * 2) the frame is from the standard receive ring
1937  */
1938 
1939 static void
1940 bge_rxeof(struct bge_softc *sc)
1941 {
1942 	struct ifnet *ifp;
1943 	int stdcnt = 0, jumbocnt = 0;
1944 
1945 	ifp = &sc->arpcom.ac_if;
1946 
1947 	while(sc->bge_rx_saved_considx !=
1948 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
1949 		struct bge_rx_bd	*cur_rx;
1950 		uint32_t		rxidx;
1951 		struct mbuf		*m = NULL;
1952 		uint16_t		vlan_tag = 0;
1953 		int			have_tag = 0;
1954 
1955 		cur_rx =
1956 		    &sc->bge_rdata->bge_rx_return_ring[sc->bge_rx_saved_considx];
1957 
1958 		rxidx = cur_rx->bge_idx;
1959 		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
1960 
1961 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
1962 			have_tag = 1;
1963 			vlan_tag = cur_rx->bge_vlan_tag;
1964 		}
1965 
1966 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
1967 			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1968 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
1969 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
1970 			jumbocnt++;
1971 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
1972 				ifp->if_ierrors++;
1973 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
1974 				continue;
1975 			}
1976 			if (bge_newbuf_jumbo(sc,
1977 			    sc->bge_jumbo, NULL) == ENOBUFS) {
1978 				ifp->if_ierrors++;
1979 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
1980 				continue;
1981 			}
1982 		} else {
1983 			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1984 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
1985 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
1986 			stdcnt++;
1987 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
1988 				ifp->if_ierrors++;
1989 				bge_newbuf_std(sc, sc->bge_std, m);
1990 				continue;
1991 			}
1992 			if (bge_newbuf_std(sc, sc->bge_std,
1993 			    NULL) == ENOBUFS) {
1994 				ifp->if_ierrors++;
1995 				bge_newbuf_std(sc, sc->bge_std, m);
1996 				continue;
1997 			}
1998 		}
1999 
2000 		ifp->if_ipackets++;
2001 #ifndef __i386__
2002 		/*
2003 		 * The i386 allows unaligned accesses, but for other
2004 		 * platforms we must make sure the payload is aligned.
2005 		 */
2006 		if (sc->bge_rx_alignment_bug) {
2007 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2008 			    cur_rx->bge_len);
2009 			m->m_data += ETHER_ALIGN;
2010 		}
2011 #endif
2012 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2013 		m->m_pkthdr.rcvif = ifp;
2014 
2015 #if 0 /* currently broken for some packets, possibly related to TCP options */
2016 		if (ifp->if_hwassist) {
2017 			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2018 			if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2019 				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2020 			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2021 				m->m_pkthdr.csum_data =
2022 				    cur_rx->bge_tcp_udp_csum;
2023 				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
2024 			}
2025 		}
2026 #endif
2027 
2028 		/*
2029 		 * If we received a packet with a vlan tag, pass it
2030 		 * to vlan_input() instead of ether_input().
2031 		 */
2032 		if (have_tag) {
2033 			VLAN_INPUT_TAG(m, vlan_tag);
2034 			have_tag = vlan_tag = 0;
2035 		} else {
2036 			ifp->if_input(ifp, m);
2037 		}
2038 	}
2039 
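	/*
	 * Sync up with the chip: report how far we have consumed the
	 * return ring and advertise any std/jumbo buffers we just
	 * replenished.
	 */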
2040 	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2041 	if (stdcnt)
2042 		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2043 	if (jumbocnt)
2044 		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2045 }
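
#ifdef notdef
/*
 * Illustrative sketch only: BGE_INC() (defined in if_bgereg.h)
 * advances a ring index with wrap-around, roughly equivalent to:
 */
static __inline uint32_t
bge_ring_inc(uint32_t idx, uint32_t cnt)
{
	return ((idx + 1) % cnt);
}
#endif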
2046 
2047 static void
2048 bge_txeof(struct bge_softc *sc)
2049 {
2050 	struct bge_tx_bd *cur_tx = NULL;
2051 	struct ifnet *ifp;
2052 
2053 	ifp = &sc->arpcom.ac_if;
2054 
2055 	/*
2056 	 * Go through our tx ring and free mbufs for those
2057 	 * frames that have been sent.
2058 	 */
2059 	while (sc->bge_tx_saved_considx !=
2060 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
2061 		uint32_t		idx = 0;
2062 
2063 		idx = sc->bge_tx_saved_considx;
2064 		cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2065 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2066 			ifp->if_opackets++;
2067 		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2068 			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2069 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2070 		}
2071 		sc->bge_txcnt--;
2072 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2073 		ifp->if_timer = 0;
2074 	}
2075 
2076 	if (cur_tx != NULL)
2077 		ifp->if_flags &= ~IFF_OACTIVE;
2078 }
2079 
2080 static void
2081 bge_intr(void *xsc)
2082 {
2083 	struct bge_softc *sc = xsc;
2084 	struct ifnet *ifp = &sc->arpcom.ac_if;
2085 	uint32_t status, statusword, mimode;
2086 
2087 	/* XXX: atomically fetch and clear the status block status word. */
2088 	statusword = loadandclear(&sc->bge_rdata->bge_status_block.bge_status);
2089 
2090 #ifdef notdef
2091 	/* Avoid this for now -- checking this register is expensive. */
2092 	/* Make sure this is really our interrupt. */
2093 	if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2094 		return;
2095 #endif
2096 	/* Ack interrupt and stop others from occurring. */
2097 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2098 
2099 	/*
2100 	 * Process link state changes.
2101 	 * Grrr. The link status word in the status block does
2102 	 * not work correctly on the BCM5700 rev AX and BX chips,
2103 	 * according to all available information. Hence, we have
2104 	 * to enable MII interrupts in order to properly obtain
2105 	 * async link changes. Unfortunately, this also means that
2106 	 * we have to read the MAC status register to detect link
2107 	 * changes, thereby adding an additional register access to
2108 	 * the interrupt handler.
2109 	 */
2110 
2111 	if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
2112 		status = CSR_READ_4(sc, BGE_MAC_STS);
2113 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
2114 			sc->bge_link = 0;
2115 			callout_stop(&sc->bge_stat_timer);
2116 			bge_tick_serialized(sc);
2117 			/* Clear the interrupt */
2118 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2119 			    BGE_EVTENB_MI_INTERRUPT);
2120 			bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
2121 			bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
2122 			    BRGPHY_INTRS);
2123 		}
2124 	} else {
2125 		if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) {
2126 			/*
2127 			 * Sometimes PCS encoding errors are detected in
2128 			 * TBI mode (on fiber NICs), and for some reason
2129 			 * the chip will signal them as link changes.
2130 			 * If we get a link change event, but the 'PCS
2131 			 * encoding error' bit in the MAC status register
2132 			 * is set, don't bother doing a link check.
2133 			 * This avoids spurious "gigabit link up" messages
2134 			 * that sometimes appear on fiber NICs during
2135 			 * periods of heavy traffic. (There should be no
2136 			 * effect on copper NICs.)
2137 			 *
2138 			 * If we do have a copper NIC (bge_tbi == 0) then
2139 			 * check that the AUTOPOLL bit is set before
2140 			 * processing the event as a real link change.
2141 			 * Turning AUTOPOLL on and off in the MII read/write
2142 			 * functions will often trigger a link status
2143 			 * interrupt for no reason.
2144 			 */
2145 			status = CSR_READ_4(sc, BGE_MAC_STS);
2146 			mimode = CSR_READ_4(sc, BGE_MI_MODE);
2147 			if ((sc->bge_tbi &&
2148 			     !(status & (BGE_MACSTAT_PORT_DECODE_ERROR |
2149 					 BGE_MACSTAT_MI_COMPLETE))) ||
2150 			    (!sc->bge_tbi &&
2151 			     (mimode & BGE_MIMODE_AUTOPOLL))) {
2152 				sc->bge_link = 0;
2153 				callout_stop(&sc->bge_stat_timer);
2154 				bge_tick_serialized(sc);
2155 			}
2156 
2157 			/* Clear the interrupt */
2158 			CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2159 			    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2160 			    BGE_MACSTAT_LINK_CHANGED);
2161 
2162 			/* Force flush the status block cached by PCI bridge */
2163 			CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
2164 		}
2165 	}
2166 
2167 	if (ifp->if_flags & IFF_RUNNING) {
2168 		/* Check RX return ring producer/consumer */
2169 		bge_rxeof(sc);
2170 
2171 		/* Check TX ring producer/consumer */
2172 		bge_txeof(sc);
2173 	}
2174 
2175 	bge_handle_events(sc);
2176 
2177 	/* Re-enable interrupts. */
2178 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2179 
2180 	if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd))
2181 		(*ifp->if_start)(ifp);
2182 }
2183 
2184 static void
2185 bge_tick(void *xsc)
2186 {
2187 	struct bge_softc *sc = xsc;
2188 	struct ifnet *ifp = &sc->arpcom.ac_if;
2189 
2190 	lwkt_serialize_enter(ifp->if_serializer);
2191 	bge_tick_serialized(xsc);
2192 	lwkt_serialize_exit(ifp->if_serializer);
2193 }
2194 
2195 static void
2196 bge_tick_serialized(void *xsc)
2197 {
2198 	struct bge_softc *sc = xsc;
2199 	struct ifnet *ifp = &sc->arpcom.ac_if;
2200 	struct mii_data *mii = NULL;
2201 	struct ifmedia *ifm = NULL;
2202 
2203 	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2204 	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
2205 		bge_stats_update_regs(sc);
2206 	else
2207 		bge_stats_update(sc);
2208 
2209 	callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2210 
2211 	if (sc->bge_link) {
2212 		return;
2213 	}
2214 
2215 	if (sc->bge_tbi) {
2216 		ifm = &sc->bge_ifmedia;
2217 		if (CSR_READ_4(sc, BGE_MAC_STS) &
2218 		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
2219 			sc->bge_link++;
2220 			if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
2221 				BGE_CLRBIT(sc, BGE_MAC_MODE,
2222 					   BGE_MACMODE_TBI_SEND_CFGS);
2223 			}
2224 			CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
2225 			if_printf(ifp, "gigabit link up\n");
2226 			if (!ifq_is_empty(&ifp->if_snd))
2227 				(*ifp->if_start)(ifp);
2228 		}
2229 		return;
2230 	}
2231 
2232 	mii = device_get_softc(sc->bge_miibus);
2233 	mii_tick(mii);
2234 
2235 	if (!sc->bge_link) {
2236 		mii_pollstat(mii);
2237 		if (mii->mii_media_status & IFM_ACTIVE &&
2238 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2239 			sc->bge_link++;
2240 			if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
2241 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
2242 				if_printf(ifp, "gigabit link up\n");
2243 			if (!ifq_is_empty(&ifp->if_snd))
2244 				(*ifp->if_start)(ifp);
2245 		}
2246 	}
2247 }
2248 
2249 static void
2250 bge_stats_update_regs(struct bge_softc *sc)
2251 {
2252 	struct ifnet *ifp = &sc->arpcom.ac_if;
2253 	struct bge_mac_stats_regs stats;
2254 	uint32_t *s;
2255 	int i;
2256 
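	/*
	 * The 5705/5750 lack the DMA'd statistics block, so read the
	 * MAC statistics registers directly.
	 */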
2257 	s = (uint32_t *)&stats;
2258 	for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2259 		*s = CSR_READ_4(sc, BGE_RX_STATS + i);
2260 		s++;
2261 	}
2262 
2263 	ifp->if_collisions +=
2264 	   (stats.dot3StatsSingleCollisionFrames +
2265 	   stats.dot3StatsMultipleCollisionFrames +
2266 	   stats.dot3StatsExcessiveCollisions +
2267 	   stats.dot3StatsLateCollisions) -
2268 	   ifp->if_collisions;
2269 }
2270 
2271 static void
2272 bge_stats_update(struct bge_softc *sc)
2273 {
2274 	struct ifnet *ifp = &sc->arpcom.ac_if;
2275 	struct bge_stats *stats;
2276 
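	/*
	 * Other chips maintain an on-board statistics block; read it
	 * through the fixed PCI memory window.
	 */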
2277 	stats = (struct bge_stats *)(sc->bge_vhandle +
2278 	    BGE_MEMWIN_START + BGE_STATS_BLOCK);
2279 
2280 	ifp->if_collisions +=
2281 	   (stats->txstats.dot3StatsSingleCollisionFrames.bge_addr_lo +
2282 	   stats->txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo +
2283 	   stats->txstats.dot3StatsExcessiveCollisions.bge_addr_lo +
2284 	   stats->txstats.dot3StatsLateCollisions.bge_addr_lo) -
2285 	   ifp->if_collisions;
2286 
2287 #ifdef notdef
2288 	ifp->if_collisions +=
2289 	   (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2290 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2291 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2292 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2293 	   ifp->if_collisions;
2294 #endif
2295 }
2296 
2297 /*
2298  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2299  * pointers to descriptors.
2300  */
2301 static int
2302 bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
2303 {
2304 	struct bge_tx_bd *f = NULL;
2305 	struct mbuf *m;
2306 	uint32_t frag, cur, cnt = 0;
2307 	uint16_t csum_flags = 0;
2308 	struct ifvlan *ifv = NULL;
2309 
2310 	if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
2311 	    m_head->m_pkthdr.rcvif != NULL &&
2312 	    m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
2313 		ifv = m_head->m_pkthdr.rcvif->if_softc;
2314 
2315 	m = m_head;
2316 	cur = frag = *txidx;
2317 
2318 	if (m_head->m_pkthdr.csum_flags) {
2319 		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2320 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2321 		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2322 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2323 		if (m_head->m_flags & M_LASTFRAG)
2324 			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2325 		else if (m_head->m_flags & M_FRAG)
2326 			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2327 	}
2328 	/*
2329 	 * Start packing the mbufs in this chain into
2330 	 * the fragment pointers. Stop when we run out
2331 	 * of fragments or hit the end of the mbuf chain.
2332 	 */
2333 	for (m = m_head; m != NULL; m = m->m_next) {
2334 		if (m->m_len != 0) {
2335 			f = &sc->bge_rdata->bge_tx_ring[frag];
2336 			if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
2337 				break;
2338 			BGE_HOSTADDR(f->bge_addr,
2339 			    vtophys(mtod(m, vm_offset_t)));
2340 			f->bge_len = m->m_len;
2341 			f->bge_flags = csum_flags;
2342 			if (ifv != NULL) {
2343 				f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2344 				f->bge_vlan_tag = ifv->ifv_tag;
2345 			} else {
2346 				f->bge_vlan_tag = 0;
2347 			}
2348 			/*
2349 			 * Sanity check: avoid coming within 16 descriptors
2350 			 * of the end of the ring.
2351 			 */
2352 			if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16)
2353 				return(ENOBUFS);
2354 			cur = frag;
2355 			BGE_INC(frag, BGE_TX_RING_CNT);
2356 			cnt++;
2357 		}
2358 	}
2359 
2360 	if (m != NULL)
2361 		return(ENOBUFS);
2362 
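	/* The ring is full if the producer index catches the consumer. */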
2363 	if (frag == sc->bge_tx_saved_considx)
2364 		return(ENOBUFS);
2365 
2366 	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
2367 	sc->bge_cdata.bge_tx_chain[cur] = m_head;
2368 	sc->bge_txcnt += cnt;
2369 
2370 	*txidx = frag;
2371 
2372 	return(0);
2373 }
2374 
2375 /*
2376  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2377  * to the mbuf data regions directly in the transmit descriptors.
2378  */
2379 static void
2380 bge_start(struct ifnet *ifp)
2381 {
2382 	struct bge_softc *sc;
2383 	struct mbuf *m_head = NULL;
2384 	uint32_t prodidx = 0;
2385 	int need_trans;
2386 
2387 	sc = ifp->if_softc;
2388 
2389 	if (!sc->bge_link)
2390 		return;
2391 
2392 	prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
2393 
2394 	need_trans = 0;
2395 	while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
2396 		m_head = ifq_poll(&ifp->if_snd);
2397 		if (m_head == NULL)
2398 			break;
2399 
2400 		/*
2401 		 * XXX
2402 		 * safety overkill.  If this is a fragmented packet chain
2403 		 * with delayed TCP/UDP checksums, then only encapsulate
2404 		 * it if we have enough descriptors to handle the entire
2405 		 * chain at once.
2406 		 * (paranoia -- may not actually be needed)
2407 		 */
2408 		if (m_head->m_flags & M_FIRSTFRAG &&
2409 		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
2410 			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2411 			    m_head->m_pkthdr.csum_data + 16) {
2412 				ifp->if_flags |= IFF_OACTIVE;
2413 				break;
2414 			}
2415 		}
2416 
2417 		/*
2418 		 * Pack the data into the transmit ring. If we
2419 		 * don't have room, set the OACTIVE flag and wait
2420 		 * for the NIC to drain the ring.
2421 		 */
2422 		if (bge_encap(sc, m_head, &prodidx)) {
2423 			ifp->if_flags |= IFF_OACTIVE;
2424 			break;
2425 		}
2426 		ifq_dequeue(&ifp->if_snd, m_head);
2427 		need_trans = 1;
2428 
2429 		BPF_MTAP(ifp, m_head);
2430 	}
2431 
2432 	if (!need_trans)
2433 		return;
2434 
2435 	/* Transmit */
2436 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2437 	/* 5700 b2 errata */
2438 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
2439 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2440 
2441 	/*
2442 	 * Set a timeout in case the chip goes out to lunch.
2443 	 */
2444 	ifp->if_timer = 5;
2445 }
2446 
2447 static void
2448 bge_init(void *xsc)
2449 {
2450 	struct bge_softc *sc = xsc;
2451 	struct ifnet *ifp = &sc->arpcom.ac_if;
2452 	uint16_t *m;
2453 
2454 	if (ifp->if_flags & IFF_RUNNING) {
2455 		return;
2456 	}
2457 
2458 	/* Cancel pending I/O and flush buffers. */
2459 	bge_stop(sc);
2460 	bge_reset(sc);
2461 	bge_chipinit(sc);
2462 
2463 	/*
2464 	 * Init the various state machines, ring
2465 	 * control blocks and firmware.
2466 	 */
2467 	if (bge_blockinit(sc)) {
2468 		if_printf(ifp, "initialization failure\n");
2469 		return;
2470 	}
2471 
2472 	/* Specify MTU. */
2473 	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2474 	    ETHER_HDR_LEN + ETHER_CRC_LEN);
2475 
2476 	/* Load our MAC address. */
2477 	m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
2478 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2479 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
2480 
2481 	/* Enable or disable promiscuous mode as needed. */
2482 	if (ifp->if_flags & IFF_PROMISC) {
2483 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
2484 	} else {
2485 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
2486 	}
2487 
2488 	/* Program multicast filter. */
2489 	bge_setmulti(sc);
2490 
2491 	/* Init RX ring. */
2492 	bge_init_rx_ring_std(sc);
2493 
2494 	/*
2495 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
2496 	 * memory to ensure that the chip has in fact read the first
2497 	 * entry of the ring.
2498 	 */
2499 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
2500 		uint32_t		v, i;
2501 		for (i = 0; i < 10; i++) {
2502 			DELAY(20);
2503 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
2504 			if (v == (MCLBYTES - ETHER_ALIGN))
2505 				break;
2506 		}
2507 		if (i == 10)
2508 			if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
2509 	}
2510 
2511 	/* Init jumbo RX ring. */
2512 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2513 		bge_init_rx_ring_jumbo(sc);
2514 
2515 	/* Init our RX return ring index */
2516 	sc->bge_rx_saved_considx = 0;
2517 
2518 	/* Init TX ring. */
2519 	bge_init_tx_ring(sc);
2520 
2521 	/* Turn on transmitter */
2522 	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
2523 
2524 	/* Turn on receiver */
2525 	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2526 
2527 	/* Tell firmware we're alive. */
2528 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2529 
2530 	/* Enable host interrupts. */
2531 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
2532 	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2533 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2534 
2535 	bge_ifmedia_upd(ifp);
2536 
2537 	ifp->if_flags |= IFF_RUNNING;
2538 	ifp->if_flags &= ~IFF_OACTIVE;
2539 
2540 	callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2541 }
2542 
2543 /*
2544  * Set media options.
2545  */
2546 static int
2547 bge_ifmedia_upd(struct ifnet *ifp)
2548 {
2549 	struct bge_softc *sc = ifp->if_softc;
2550 	struct ifmedia *ifm = &sc->bge_ifmedia;
2551 	struct mii_data *mii;
2552 
2553 	/* If this is a 1000baseX NIC, enable the TBI port. */
2554 	if (sc->bge_tbi) {
2555 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2556 			return(EINVAL);
2557 		switch(IFM_SUBTYPE(ifm->ifm_media)) {
2558 		case IFM_AUTO:
2559 #ifndef BGE_FAKE_AUTONEG
2560 			/*
2561 			 * The BCM5704 ASIC appears to have a special
2562 			 * mechanism for programming the autoneg
2563 			 * advertisement registers in TBI mode.
2564 			 */
2565 			if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
2566 				uint32_t sgdig;
2567 
2568 				CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
2569 				sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
2570 				sgdig |= BGE_SGDIGCFG_AUTO |
2571 					 BGE_SGDIGCFG_PAUSE_CAP |
2572 					 BGE_SGDIGCFG_ASYM_PAUSE;
2573 				CSR_WRITE_4(sc, BGE_SGDIG_CFG,
2574 					    sgdig | BGE_SGDIGCFG_SEND);
2575 				DELAY(5);
2576 				CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
2577 			}
2578 #endif	/* !BGE_FAKE_AUTONEG */
2579 			break;
2580 		case IFM_1000_SX:
2581 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
2582 				BGE_CLRBIT(sc, BGE_MAC_MODE,
2583 				    BGE_MACMODE_HALF_DUPLEX);
2584 			} else {
2585 				BGE_SETBIT(sc, BGE_MAC_MODE,
2586 				    BGE_MACMODE_HALF_DUPLEX);
2587 			}
2588 			break;
2589 		default:
2590 			return(EINVAL);
2591 		}
2592 		return(0);
2593 	}
2594 
2595 	mii = device_get_softc(sc->bge_miibus);
2596 	sc->bge_link = 0;
2597 	if (mii->mii_instance) {
2598 		struct mii_softc *miisc;
2599 		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
2600 		    miisc = LIST_NEXT(miisc, mii_list))
2601 			mii_phy_reset(miisc);
2602 	}
2603 	mii_mediachg(mii);
2604 
2605 	return(0);
2606 }
2607 
2608 /*
2609  * Report current media status.
2610  */
2611 static void
2612 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2613 {
2614 	struct bge_softc *sc = ifp->if_softc;
2615 	struct mii_data *mii;
2616 
2617 	if (sc->bge_tbi) {
2618 		ifmr->ifm_status = IFM_AVALID;
2619 		ifmr->ifm_active = IFM_ETHER;
2620 		if (CSR_READ_4(sc, BGE_MAC_STS) &
2621 		    BGE_MACSTAT_TBI_PCS_SYNCHED)
2622 			ifmr->ifm_status |= IFM_ACTIVE;
2623 		ifmr->ifm_active |= IFM_1000_SX;
2624 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
2625 			ifmr->ifm_active |= IFM_HDX;
2626 		else
2627 			ifmr->ifm_active |= IFM_FDX;
2628 		return;
2629 	}
2630 
2631 	mii = device_get_softc(sc->bge_miibus);
2632 	mii_pollstat(mii);
2633 	ifmr->ifm_active = mii->mii_media_active;
2634 	ifmr->ifm_status = mii->mii_media_status;
2635 }
2636 
2637 static int
2638 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
2639 {
2640 	struct bge_softc *sc = ifp->if_softc;
2641 	struct ifreq *ifr = (struct ifreq *) data;
2642 	int mask, error = 0;
2643 	struct mii_data *mii;
2644 
2645 	switch(command) {
2646 	case SIOCSIFMTU:
2647 		/* Disallow jumbo frames on 5705/5750. */
2648 		if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
2649 		      sc->bge_asicrev == BGE_ASICREV_BCM5750) &&
2650 		     ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
2651 			error = EINVAL;
2652 		else {
2653 			ifp->if_mtu = ifr->ifr_mtu;
2654 			ifp->if_flags &= ~IFF_RUNNING;
2655 			bge_init(sc);
2656 		}
2657 		break;
2658 	case SIOCSIFFLAGS:
2659 		if (ifp->if_flags & IFF_UP) {
2660 			/*
2661 			 * If only the state of the PROMISC flag changed,
2662 			 * then just use the 'set promisc mode' command
2663 			 * instead of reinitializing the entire NIC. Doing
2664 			 * a full re-init means reloading the firmware and
2665 			 * waiting for it to start up, which may take a
2666 			 * second or two.
2667 			 */
2668 			if (ifp->if_flags & IFF_RUNNING &&
2669 			    ifp->if_flags & IFF_PROMISC &&
2670 			    !(sc->bge_if_flags & IFF_PROMISC)) {
2671 				BGE_SETBIT(sc, BGE_RX_MODE,
2672 				    BGE_RXMODE_RX_PROMISC);
2673 			} else if (ifp->if_flags & IFF_RUNNING &&
2674 			    !(ifp->if_flags & IFF_PROMISC) &&
2675 			    sc->bge_if_flags & IFF_PROMISC) {
2676 				BGE_CLRBIT(sc, BGE_RX_MODE,
2677 				    BGE_RXMODE_RX_PROMISC);
2678 			} else
2679 				bge_init(sc);
2680 		} else {
2681 			if (ifp->if_flags & IFF_RUNNING) {
2682 				bge_stop(sc);
2683 			}
2684 		}
2685 		sc->bge_if_flags = ifp->if_flags;
2686 		error = 0;
2687 		break;
2688 	case SIOCADDMULTI:
2689 	case SIOCDELMULTI:
2690 		if (ifp->if_flags & IFF_RUNNING) {
2691 			bge_setmulti(sc);
2692 			error = 0;
2693 		}
2694 		break;
2695 	case SIOCSIFMEDIA:
2696 	case SIOCGIFMEDIA:
2697 		if (sc->bge_tbi) {
2698 			error = ifmedia_ioctl(ifp, ifr,
2699 			    &sc->bge_ifmedia, command);
2700 		} else {
2701 			mii = device_get_softc(sc->bge_miibus);
2702 			error = ifmedia_ioctl(ifp, ifr,
2703 			    &mii->mii_media, command);
2704 		}
2705 		break;
2706 	case SIOCSIFCAP:
2707 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2708 		if (mask & IFCAP_HWCSUM) {
2709 			if (IFCAP_HWCSUM & ifp->if_capenable)
2710 				ifp->if_capenable &= ~IFCAP_HWCSUM;
2711 			else
2712 				ifp->if_capenable |= IFCAP_HWCSUM;
2713 		}
2714 		error = 0;
2715 		break;
2716 	default:
2717 		error = ether_ioctl(ifp, command, data);
2718 		break;
2719 	}
2720 	return(error);
2721 }
2722 
2723 static void
2724 bge_watchdog(struct ifnet *ifp)
2725 {
2726 	struct bge_softc *sc = ifp->if_softc;
2727 
2728 	if_printf(ifp, "watchdog timeout -- resetting\n");
2729 
2730 	ifp->if_flags &= ~IFF_RUNNING;
2731 	bge_init(sc);
2732 
2733 	ifp->if_oerrors++;
2734 
2735 	if (!ifq_is_empty(&ifp->if_snd))
2736 		ifp->if_start(ifp);
2737 }
2738 
2739 /*
2740  * Stop the adapter and free any mbufs allocated to the
2741  * RX and TX lists.
2742  */
2743 static void
2744 bge_stop(struct bge_softc *sc)
2745 {
2746 	struct ifnet *ifp = &sc->arpcom.ac_if;
2747 	struct ifmedia_entry *ifm;
2748 	struct mii_data *mii = NULL;
2749 	int mtmp, itmp;
2750 
2751 	if (!sc->bge_tbi)
2752 		mii = device_get_softc(sc->bge_miibus);
2753 
2754 	callout_stop(&sc->bge_stat_timer);
2755 
2756 	/*
2757 	 * Disable all of the receiver blocks
2758 	 */
2759 	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2760 	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2761 	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2762 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2763 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
2764 		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2765 	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
2766 	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2767 	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
2768 
2769 	/*
2770 	 * Disable all of the transmit blocks
2771 	 */
2772 	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2773 	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2774 	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2775 	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
2776 	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
2777 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2778 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
2779 		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2780 	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2781 
2782 	/*
2783 	 * Shut down all of the memory managers and related
2784 	 * state machines.
2785 	 */
2786 	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
2787 	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
2788 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2789 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
2790 		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2791 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
2792 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
2793 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2794 	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
2795 		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
2796 		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2797 	}
2798 
2799 	/* Disable host interrupts. */
2800 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2801 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2802 
2803 	/*
2804 	 * Tell firmware we're shutting down.
2805 	 */
2806 	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2807 
2808 	/* Free the RX lists. */
2809 	bge_free_rx_ring_std(sc);
2810 
2811 	/* Free jumbo RX list. */
2812 	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
2813 	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
2814 		bge_free_rx_ring_jumbo(sc);
2815 
2816 	/* Free TX buffers. */
2817 	bge_free_tx_ring(sc);
2818 
2819 	/*
2820 	 * Isolate/power down the PHY, but leave the media selection
2821 	 * unchanged so that things will be put back to normal when
2822 	 * we bring the interface back up.
2823 	 */
2824 	if (!sc->bge_tbi) {
2825 		itmp = ifp->if_flags;
2826 		ifp->if_flags |= IFF_UP;
2827 		ifm = mii->mii_media.ifm_cur;
2828 		mtmp = ifm->ifm_media;
2829 		ifm->ifm_media = IFM_ETHER|IFM_NONE;
2830 		mii_mediachg(mii);
2831 		ifm->ifm_media = mtmp;
2832 		ifp->if_flags = itmp;
2833 	}
2834 
2835 	sc->bge_link = 0;
2836 
2837 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
2838 
2839 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2840 }
2841 
2842 /*
2843  * Stop all chip I/O so that the kernel's probe routines don't
2844  * get confused by errant DMAs when rebooting.
2845  */
2846 static void
2847 bge_shutdown(device_t dev)
2848 {
2849 	struct bge_softc *sc = device_get_softc(dev);
2850 
2851 	bge_stop(sc);
2852 	bge_reset(sc);
2853 }
2854