1 /*	$OpenBSD: if_bge.c,v 1.261 2009/01/27 09:17:51 dlg Exp $	*/
2 
3 /*
4  * Copyright (c) 2001 Wind River Systems
5  * Copyright (c) 1997, 1998, 1999, 2001
6  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by Bill Paul.
19  * 4. Neither the name of the author nor the names of any co-contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
27  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33  * THE POSSIBILITY OF SUCH DAMAGE.
34  *
35  * $FreeBSD: if_bge.c,v 1.25 2002/11/14 23:54:49 sam Exp $
36  */
37 
38 /*
39  * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
40  *
41  * Written by Bill Paul <wpaul@windriver.com>
42  * Senior Engineer, Wind River Systems
43  */
44 
45 /*
46  * The Broadcom BCM5700 is based on technology originally developed by
47  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
48  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
49  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
50  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, Jumbo
51  * frames, highly configurable RX filtering, and 16 RX and TX queues
52  * (which, along with RX filter rules, can be used for QOS applications).
53  * Other features, such as TCP segmentation, may be available as part
54  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
55  * firmware images can be stored in hardware and need not be compiled
56  * into the driver.
57  *
58  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
59  * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
60  *
61  * The BCM5701 is a single-chip solution incorporating both the BCM5700
62  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
63  * does not support external SSRAM.
64  *
65  * Broadcom also produces a variation of the BCM5700 under the "Altima"
66  * brand name, which is functionally similar but lacks PCI-X support.
67  *
68  * Without external SSRAM, you can only have at most 4 TX rings,
69  * and the use of the mini RX ring is disabled. This seems to imply
70  * that these features are simply not available on the BCM5701. As a
71  * result, this driver does not implement any support for the mini RX
72  * ring.
73  */
74 
75 #include "bpfilter.h"
76 #include "vlan.h"
77 
78 #include <sys/param.h>
79 #include <sys/systm.h>
80 #include <sys/sockio.h>
81 #include <sys/mbuf.h>
82 #include <sys/malloc.h>
83 #include <sys/kernel.h>
84 #include <sys/device.h>
85 #include <sys/timeout.h>
86 #include <sys/socket.h>
87 
88 #include <net/if.h>
89 #include <net/if_dl.h>
90 #include <net/if_media.h>
91 
92 #ifdef INET
93 #include <netinet/in.h>
94 #include <netinet/in_systm.h>
95 #include <netinet/in_var.h>
96 #include <netinet/ip.h>
97 #include <netinet/if_ether.h>
98 #endif
99 
100 #if NVLAN > 0
101 #include <net/if_types.h>
102 #include <net/if_vlan_var.h>
103 #endif
104 
105 #if NBPFILTER > 0
106 #include <net/bpf.h>
107 #endif
108 
109 #ifdef __sparc64__
110 #include <sparc64/autoconf.h>
111 #include <dev/ofw/openfirm.h>
112 #endif
113 
114 #include <dev/pci/pcireg.h>
115 #include <dev/pci/pcivar.h>
116 #include <dev/pci/pcidevs.h>
117 
118 #include <dev/mii/mii.h>
119 #include <dev/mii/miivar.h>
120 #include <dev/mii/miidevs.h>
121 #include <dev/mii/brgphyreg.h>
122 
123 #include <dev/pci/if_bgereg.h>
124 
125 const struct bge_revision * bge_lookup_rev(u_int32_t);
126 int bge_probe(struct device *, void *, void *);
127 void bge_attach(struct device *, struct device *, void *);
128 
129 struct cfattach bge_ca = {
130 	sizeof(struct bge_softc), bge_probe, bge_attach
131 };
132 
133 struct cfdriver bge_cd = {
134 	0, "bge", DV_IFNET
135 };
136 
137 void bge_txeof(struct bge_softc *);
138 void bge_rxeof(struct bge_softc *);
139 
140 void bge_tick(void *);
141 void bge_stats_update(struct bge_softc *);
142 void bge_stats_update_regs(struct bge_softc *);
143 int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *);
144 int bge_compact_dma_runt(struct mbuf *pkt);
145 
146 int bge_intr(void *);
147 void bge_start(struct ifnet *);
148 int bge_ioctl(struct ifnet *, u_long, caddr_t);
149 void bge_init(void *);
150 void bge_power(int, void *);
151 void bge_stop_block(struct bge_softc *, bus_size_t, u_int32_t);
152 void bge_stop(struct bge_softc *);
153 void bge_watchdog(struct ifnet *);
154 void bge_shutdown(void *);
155 int bge_ifmedia_upd(struct ifnet *);
156 void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
157 
158 u_int8_t bge_nvram_getbyte(struct bge_softc *, int, u_int8_t *);
159 int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
160 u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *);
161 int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
162 
163 void bge_iff(struct bge_softc *);
164 
165 int bge_alloc_jumbo_mem(struct bge_softc *);
166 void *bge_jalloc(struct bge_softc *);
167 void bge_jfree(caddr_t, u_int, void *);
168 int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
169 int bge_init_rx_ring_jumbo(struct bge_softc *);
170 void bge_free_rx_ring_jumbo(struct bge_softc *);
171 
172 int bge_newbuf(struct bge_softc *, int);
173 int bge_init_rx_ring_std(struct bge_softc *);
174 void bge_rxtick(void *);
175 void bge_fill_rx_ring_std(struct bge_softc *);
176 void bge_free_rx_ring_std(struct bge_softc *);
177 
178 void bge_free_tx_ring(struct bge_softc *);
179 int bge_init_tx_ring(struct bge_softc *);
180 
181 void bge_chipinit(struct bge_softc *);
182 int bge_blockinit(struct bge_softc *);
183 
184 u_int32_t bge_readmem_ind(struct bge_softc *, int);
185 void bge_writemem_ind(struct bge_softc *, int, int);
186 void bge_writereg_ind(struct bge_softc *, int, int);
187 void bge_writembx(struct bge_softc *, int, int);
188 
189 int bge_miibus_readreg(struct device *, int, int);
190 void bge_miibus_writereg(struct device *, int, int, int);
191 void bge_miibus_statchg(struct device *);
192 
193 void bge_reset(struct bge_softc *);
194 void bge_link_upd(struct bge_softc *);
195 
196 #ifdef BGE_DEBUG
197 #define DPRINTF(x)	do { if (bgedebug) printf x; } while (0)
198 #define DPRINTFN(n,x)	do { if (bgedebug >= (n)) printf x; } while (0)
199 int	bgedebug = 0;
200 #else
201 #define DPRINTF(x)
202 #define DPRINTFN(n,x)
203 #endif
204 
205 /*
206  * Various supported device vendors/types and their names. Note: the
207  * spec seems to indicate that the hardware still has Alteon's vendor
208  * ID burned into it, though it will always be overridden by the vendor
209  * ID in the EEPROM. Just to be safe, we cover all possibilities.
210  */
211 const struct pci_matchid bge_devices[] = {
212 	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700 },
213 	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701 },
214 
215 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000 },
216 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001 },
217 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1003 },
218 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100 },
219 
220 	{ PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701 },
221 
222 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700 },
223 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701 },
224 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702 },
225 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT },
226 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X },
227 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703 },
228 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703_ALT },
229 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X },
230 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C },
231 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S },
232 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT },
233 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705 },
234 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F },
235 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K },
236 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M },
237 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT },
238 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714 },
239 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S },
240 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715 },
241 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S },
242 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720 },
243 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721 },
244 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722 },
245 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750 },
246 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M },
247 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751 },
248 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F },
249 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M },
250 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752 },
251 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M },
252 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753 },
253 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F },
254 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M },
255 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754 },
256 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M },
257 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755 },
258 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M },
259 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756 },
260 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780 },
261 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S },
262 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781 },
263 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782 },
264 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786 },
265 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787 },
266 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F },
267 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M },
268 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788 },
269 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789 },
270 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901 },
271 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2 },
272 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M },
273 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906 },
274 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M },
275 
276 	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE4 },
277 	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE5 },
278 	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PP250_450_LAN },
279 
280 	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9D21 },
281 
282 	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996 }
283 };
284 
285 #define BGE_IS_5705_OR_BEYOND(sc)  \
286 	(BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705    || \
287 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750    || \
288 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 || \
289 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780    || \
290 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714    || \
291 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752    || \
292 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755    || \
293 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787    || \
294 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
295 
296 #define BGE_IS_575X_PLUS(sc)  \
297 	(BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750    || \
298 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 || \
299 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780    || \
300 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714    || \
301 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752    || \
302 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755    || \
303 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787    || \
304 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
305 
306 #define BGE_IS_5714_FAMILY(sc)  \
307 	(BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 || \
308 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780    || \
309 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714)
310 
311 #define BGE_IS_JUMBO_CAPABLE(sc)  \
312 	(BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700    || \
313 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701    || \
314 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703    || \
315 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
316 
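/*
 * Editor's sketch (not in the original source): the predicates above
 * gate feature setup throughout the driver.  Their typical use mirrors
 * what bge_chipinit() and bge_blockinit() do later in this file:
 *
 *	if (BGE_IS_5714_FAMILY(sc))
 *		dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2);
 *	if (!BGE_IS_5705_OR_BEYOND(sc))
 *		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
 */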
317 
318 static const struct bge_revision {
319 	u_int32_t		br_chipid;
320 	const char		*br_name;
321 } bge_revisions[] = {
322 	{ BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
323 	{ BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
324 	{ BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
325 	{ BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
326 	{ BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
327 	{ BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
328 	{ BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
329 	{ BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
330 	{ BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
331 	{ BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
332 	{ BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
333 	{ BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
334 	{ BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
335 	{ BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
336 	{ BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
337 	{ BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
338 	{ BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
339 	{ BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
340 	{ BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
341 	{ BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
342 	{ BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
343 	{ BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
344 	{ BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
345 	{ BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
346 	{ BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
347 	{ BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
348 	{ BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
349 	{ BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
350 	{ BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
351 	{ BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
352 	{ BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
353 	{ BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
354 	{ BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
355 	{ BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
356 	{ BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
357 	{ BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
358 	{ BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
359 	{ BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
360 	{ BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
361 	{ BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
362 	{ BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
363 	{ BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
364 	{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
365 	{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
366 	{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
367 	{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
368 	{ BGE_CHIPID_BCM5755_C0, "BCM5755 C0" },
369 	/* the 5754 and 5787 share the same ASIC ID */
370 	{ BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
371 	{ BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
372 	{ BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
373 	{ BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
374 	{ BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
375 
376 	{ 0, NULL }
377 };
378 
379 /*
380  * Some defaults for major revisions, so that newer steppings
381  * that we don't know about have a shot at working.
382  */
383 static const struct bge_revision bge_majorrevs[] = {
384 	{ BGE_ASICREV_BCM5700, "unknown BCM5700" },
385 	{ BGE_ASICREV_BCM5701, "unknown BCM5701" },
386 	/* 5702 and 5703 share the same ASIC ID */
387 	{ BGE_ASICREV_BCM5703, "unknown BCM5703" },
388 	{ BGE_ASICREV_BCM5704, "unknown BCM5704" },
389 	{ BGE_ASICREV_BCM5705, "unknown BCM5705" },
390 	{ BGE_ASICREV_BCM5750, "unknown BCM5750" },
391 	{ BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
392 	{ BGE_ASICREV_BCM5752, "unknown BCM5752" },
393 	{ BGE_ASICREV_BCM5780, "unknown BCM5780" },
394 	{ BGE_ASICREV_BCM5714, "unknown BCM5714" },
395 	{ BGE_ASICREV_BCM5755, "unknown BCM5755" },
396 	/* 5754 and 5787 share the same ASIC ID */
397 	{ BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
398 	{ BGE_ASICREV_BCM5906, "unknown BCM5906" },
399 
400 	{ 0, NULL }
401 };
402 
403 u_int32_t
404 bge_readmem_ind(struct bge_softc *sc, int off)
405 {
406 	struct pci_attach_args	*pa = &(sc->bge_pa);
407 
408 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
409 	return (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA));
410 }
411 
412 void
413 bge_writemem_ind(struct bge_softc *sc, int off, int val)
414 {
415 	struct pci_attach_args	*pa = &(sc->bge_pa);
416 
417 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
418 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val);
419 }
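
/*
 * Editor's sketch: these window accessors let the host reach NIC-internal
 * memory one 32-bit word at a time.  Clearing a block, as bge_chipinit()
 * does through the BGE_MEMWIN_WRITE macro, follows the same pattern:
 *
 *	int off;
 *
 *	for (off = BGE_STATUS_BLOCK; off < BGE_STATUS_BLOCK_END + 1;
 *	    off += sizeof(u_int32_t))
 *		bge_writemem_ind(sc, off, 0);
 */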
420 
421 void
422 bge_writereg_ind(struct bge_softc *sc, int off, int val)
423 {
424 	struct pci_attach_args	*pa = &(sc->bge_pa);
425 
426 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
427 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val);
428 }
429 
430 void
431 bge_writembx(struct bge_softc *sc, int off, int val)
432 {
433 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
434 		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
435 
436 	CSR_WRITE_4(sc, off, val);
437 }
438 
439 u_int8_t
440 bge_nvram_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
441 {
442 	u_int32_t access, byte = 0;
443 	int i;
444 
445 	/* Lock. */
446 	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
447 	for (i = 0; i < 8000; i++) {
448 		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
449 			break;
450 		DELAY(20);
451 	}
452 	if (i == 8000)
453 		return (1);
454 
455 	/* Enable access. */
456 	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
457 	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
458 
459 	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
460 	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
461 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
462 		DELAY(10);
463 		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
464 			DELAY(10);
465 			break;
466 		}
467 	}
468 
469 	if (i == BGE_TIMEOUT * 10) {
470 		printf("%s: nvram read timed out\n", sc->bge_dev.dv_xname);
471 		return (1);
472 	}
473 
474 	/* Get result. */
475 	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
476 
477 	*dest = (swap32(byte) >> ((addr % 4) * 8)) & 0xFF;
478 
479 	/* Disable access. */
480 	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
481 
482 	/* Unlock. */
483 	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
484 	CSR_READ_4(sc, BGE_NVRAM_SWARB);
485 
486 	return (0);
487 }
488 
489 /*
490  * Read a sequence of bytes from NVRAM.
491  */
492 
493 int
494 bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
495 {
496 	int err = 0, i;
497 	u_int8_t byte = 0;
498 
499 	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
500 		return (1);
501 
502 	for (i = 0; i < cnt; i++) {
503 		err = bge_nvram_getbyte(sc, off + i, &byte);
504 		if (err)
505 			break;
506 		*(dest + i) = byte;
507 	}
508 
509 	return (err ? 1 : 0);
510 }
511 
512 /*
513  * Read a byte of data stored in the EEPROM at address 'addr.' The
514  * BCM570x supports both the traditional bitbang interface and an
515  * auto access interface for reading the EEPROM. We use the auto
516  * access method.
517  */
518 u_int8_t
519 bge_eeprom_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
520 {
521 	int i;
522 	u_int32_t byte = 0;
523 
524 	/*
525 	 * Enable use of auto EEPROM access so we can avoid
526 	 * having to use the bitbang method.
527 	 */
528 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
529 
530 	/* Reset the EEPROM, load the clock period. */
531 	CSR_WRITE_4(sc, BGE_EE_ADDR,
532 	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
533 	DELAY(20);
534 
535 	/* Issue the read EEPROM command. */
536 	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
537 
538 	/* Wait for completion */
539 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
540 		DELAY(10);
541 		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
542 			break;
543 	}
544 
545 	if (i == BGE_TIMEOUT * 10) {
546 		printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
547 		return (1);
548 	}
549 
550 	/* Get result. */
551 	byte = CSR_READ_4(sc, BGE_EE_DATA);
552 
553 	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;
554 
555 	return (0);
556 }
557 
558 /*
559  * Read a sequence of bytes from the EEPROM.
560  */
561 int
562 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
563 {
564 	int err = 0, i;
565 	u_int8_t byte = 0;
566 
567 	for (i = 0; i < cnt; i++) {
568 		err = bge_eeprom_getbyte(sc, off + i, &byte);
569 		if (err)
570 			break;
571 		*(dest + i) = byte;
572 	}
573 
574 	return (err ? 1 : 0);
575 }
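
/*
 * Usage sketch (editor's note): callers pass a plain byte buffer and an
 * EEPROM offset.  This mirrors how attach code typically fetches the
 * station address; BGE_EE_MAC_OFFSET is assumed from if_bgereg.h:
 *
 *	u_int8_t eaddr[ETHER_ADDR_LEN];
 *
 *	if (bge_read_eeprom(sc, (caddr_t)eaddr, BGE_EE_MAC_OFFSET + 2,
 *	    ETHER_ADDR_LEN) == 0)
 *		;	(eaddr now holds the station address)
 */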
576 
577 int
578 bge_miibus_readreg(struct device *dev, int phy, int reg)
579 {
580 	struct bge_softc *sc = (struct bge_softc *)dev;
581 	u_int32_t val, autopoll;
582 	int i;
583 
584 	/*
585 	 * Broadcom's own driver always assumes the internal
586 	 * PHY is at GMII address 1. On some chips, the PHY responds
587 	 * to accesses at all addresses, which could cause us to
588  * bogusly attach the PHY 32 times at probe time. Always
589  * restricting the lookup to address 1 is simpler than
590  * trying to figure out which chip revisions should be
591 	 * special-cased.
592 	 */
593 	if (phy != 1)
594 		return (0);
595 
596 	/* Reading with autopolling on may trigger PCI errors */
597 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
598 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
599 		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
600 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
601 		DELAY(40);
602 	}
603 
604 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
605 	    BGE_MIPHY(phy)|BGE_MIREG(reg));
606 
607 	for (i = 0; i < 200; i++) {
608 		delay(1);
609 		val = CSR_READ_4(sc, BGE_MI_COMM);
610 		if (!(val & BGE_MICOMM_BUSY))
611 			break;
612 		delay(10);
613 	}
614 
615 	if (i == 200) {
616 		printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
617 		val = 0;
618 		goto done;
619 	}
620 
621 	val = CSR_READ_4(sc, BGE_MI_COMM);
622 
623 done:
624 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
625 		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
626 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
627 		DELAY(40);
628 	}
629 
630 	if (val & BGE_MICOMM_READFAIL)
631 		return (0);
632 
633 	return (val & 0xFFFF);
634 }
635 
636 void
637 bge_miibus_writereg(struct device *dev, int phy, int reg, int val)
638 {
639 	struct bge_softc *sc = (struct bge_softc *)dev;
640 	u_int32_t autopoll;
641 	int i;
642 
643 	/* Writing with autopolling on may trigger PCI errors */
644 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
645 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
646 		DELAY(40);
647 		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
648 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
649 		DELAY(10); /* 40 usec is supposed to be adequate */
650 	}
651 
652 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
653 	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
654 
655 	for (i = 0; i < 200; i++) {
656 		delay(1);
657 		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
658 			break;
659 		delay(10);
660 	}
661 
662 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
663 		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
664 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
665 		DELAY(40);
666 	}
667 
668 	if (i == 200) {
669 		printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
670 	}
671 }
672 
673 void
674 bge_miibus_statchg(struct device *dev)
675 {
676 	struct bge_softc *sc = (struct bge_softc *)dev;
677 	struct mii_data *mii = &sc->bge_mii;
678 
679 	/*
680 	 * Get flow control negotiation result.
681 	 */
682 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
683 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) {
684 		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
685 		mii->mii_media_active &= ~IFM_ETH_FMASK;
686 	}
687 
688 	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
689 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
690 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
691 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
692 	else
693 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
694 
695 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
696 		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
697 	else
698 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
699 
700 	/*
701 	 * 802.3x flow control
702 	 */
703 	if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
704 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
705 	else
706 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
707 
708 	if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
709 		BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
710 	else
711 		BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
712 }
713 
714 /*
715  * Memory management for Jumbo frames.
716  */
717 
718 int
719 bge_alloc_jumbo_mem(struct bge_softc *sc)
720 {
721 	caddr_t			ptr, kva;
722 	bus_dma_segment_t	seg;
723 	int		i, rseg, state, error;
724 	struct bge_jpool_entry   *entry;
725 
726 	state = error = 0;
727 
728 	/* Grab a big chunk o' storage. */
729 	if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
730 			     &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
731 		printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname);
732 		return (ENOBUFS);
733 	}
734 
735 	state = 1;
736 	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva,
737 			   BUS_DMA_NOWAIT)) {
738 		printf("%s: can't map dma buffers (%d bytes)\n",
739 		    sc->bge_dev.dv_xname, BGE_JMEM);
740 		error = ENOBUFS;
741 		goto out;
742 	}
743 
744 	state = 2;
745 	if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
746 	    BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
747 		printf("%s: can't create dma map\n", sc->bge_dev.dv_xname);
748 		error = ENOBUFS;
749 		goto out;
750 	}
751 
752 	state = 3;
753 	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
754 			    kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
755 		printf("%s: can't load dma map\n", sc->bge_dev.dv_xname);
756 		error = ENOBUFS;
757 		goto out;
758 	}
759 
760 	state = 4;
761 	sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva;
762 	DPRINTFN(1,("bge_jumbo_buf = 0x%08X\n", sc->bge_cdata.bge_jumbo_buf));
763 
764 	SLIST_INIT(&sc->bge_jfree_listhead);
765 	SLIST_INIT(&sc->bge_jinuse_listhead);
766 
767 	/*
768 	 * Now divide it up into 9K pieces and save the addresses
769 	 * in an array.
770 	 */
771 	ptr = sc->bge_cdata.bge_jumbo_buf;
772 	for (i = 0; i < BGE_JSLOTS; i++) {
773 		sc->bge_cdata.bge_jslots[i] = ptr;
774 		ptr += BGE_JLEN;
775 		entry = malloc(sizeof(struct bge_jpool_entry),
776 		    M_DEVBUF, M_NOWAIT);
777 		if (entry == NULL) {
778 			printf("%s: no memory for jumbo buffer queue!\n",
779 			    sc->bge_dev.dv_xname);
780 			error = ENOBUFS;
781 			goto out;
782 		}
783 		entry->slot = i;
784 		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
785 				 entry, jpool_entries);
786 	}
787 out:
788 	if (error != 0) {
789 		switch (state) {
790 		case 4:
791 			bus_dmamap_unload(sc->bge_dmatag,
792 			    sc->bge_cdata.bge_rx_jumbo_map);
793 		case 3:
794 			bus_dmamap_destroy(sc->bge_dmatag,
795 			    sc->bge_cdata.bge_rx_jumbo_map);
796 		case 2:
797 			bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
798 		case 1:
799 			bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
800 			break;
801 		default:
802 			break;
803 		}
804 	}
805 
806 	return (error);
807 }
808 
809 /*
810  * Allocate a Jumbo buffer.
811  */
812 void *
813 bge_jalloc(struct bge_softc *sc)
814 {
815 	struct bge_jpool_entry   *entry;
816 
817 	entry = SLIST_FIRST(&sc->bge_jfree_listhead);
818 
819 	if (entry == NULL)
820 		return (NULL);
821 
822 	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
823 	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
824 	return (sc->bge_cdata.bge_jslots[entry->slot]);
825 }
826 
827 /*
828  * Release a Jumbo buffer.
829  */
830 void
831 bge_jfree(caddr_t buf, u_int size, void *arg)
832 {
833 	struct bge_jpool_entry *entry;
834 	struct bge_softc *sc;
835 	int i;
836 
837 	/* Extract the softc struct pointer. */
838 	sc = (struct bge_softc *)arg;
839 
840 	if (sc == NULL)
841 		panic("bge_jfree: can't find softc pointer!");
842 
843 	/* calculate the slot this buffer belongs to */
844 
845 	i = ((vaddr_t)buf
846 	     - (vaddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;
847 
848 	if ((i < 0) || (i >= BGE_JSLOTS))
849 		panic("bge_jfree: asked to free buffer that we don't manage!");
850 
851 	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
852 	if (entry == NULL)
853 		panic("bge_jfree: buffer not in use!");
854 	entry->slot = i;
855 	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
856 	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
857 }
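
/*
 * Lifecycle sketch (editor's note): a jumbo buffer moves from
 * bge_jalloc() into an mbuf via MEXTADD(), and m_freem() later returns
 * it through the bge_jfree() callback registered there:
 *
 *	caddr_t buf = bge_jalloc(sc);
 *	MEXTADD(m, buf, BGE_JUMBO_FRAMELEN, 0, bge_jfree, sc);
 *	...
 *	m_freem(m);	(ends up calling bge_jfree(buf, size, sc))
 */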
858 
859 
860 /*
861  * Initialize a standard receive ring descriptor.
862  */
863 int
864 bge_newbuf(struct bge_softc *sc, int i)
865 {
866 	bus_dmamap_t		dmap = sc->bge_cdata.bge_rx_std_map[i];
867 	struct bge_rx_bd	*r = &sc->bge_rdata->bge_rx_std_ring[i];
868 	struct mbuf		*m;
869 	int			error;
870 
871 	MGETHDR(m, M_DONTWAIT, MT_DATA);
872 	if (m == NULL)
873 		return (ENOBUFS);
874 
875 	MCLGETI(m, M_DONTWAIT, &sc->arpcom.ac_if, MCLBYTES);
876 	if (!(m->m_flags & M_EXT)) {
877 		m_freem(m);
878 		return (ENOBUFS);
879 	}
880 	m->m_len = m->m_pkthdr.len = MCLBYTES;
881 	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
882 	    m_adj(m, ETHER_ALIGN);
883 
884 	error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmap, m,
885 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
886 	if (error) {
887 		m_freem(m);
888 		return (ENOBUFS);
889 	}
890 
891 	bus_dmamap_sync(sc->bge_dmatag, dmap, 0, dmap->dm_mapsize,
892 	    BUS_DMASYNC_PREREAD);
893 	sc->bge_cdata.bge_rx_std_chain[i] = m;
894 
895 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
896 	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
897 		i * sizeof (struct bge_rx_bd),
898 	    sizeof (struct bge_rx_bd),
899 	    BUS_DMASYNC_POSTWRITE);
900 
901 	BGE_HOSTADDR(r->bge_addr, dmap->dm_segs[0].ds_addr);
902 	r->bge_flags = BGE_RXBDFLAG_END;
903 	r->bge_len = m->m_len;
904 	r->bge_idx = i;
905 
906 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
907 	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
908 		i * sizeof (struct bge_rx_bd),
909 	    sizeof (struct bge_rx_bd),
910 	    BUS_DMASYNC_PREWRITE);
911 
912 	sc->bge_std_cnt++;
913 
914 	return (0);
915 }
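
/*
 * Editor's note on bge_newbuf() above: the descriptor update is
 * bracketed by a POSTWRITE sync before the CPU writes the bge_rx_bd
 * and a PREWRITE sync afterwards, so the chip never observes a
 * half-written descriptor.
 */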
916 
917 /*
918  * Initialize a Jumbo receive ring descriptor. This allocates
919  * a Jumbo buffer from the pool managed internally by the driver.
920  */
921 int
922 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
923 {
924 	struct mbuf *m_new = NULL;
925 	struct bge_rx_bd *r;
926 
927 	if (m == NULL) {
928 		caddr_t			buf = NULL;
929 
930 		/* Allocate the mbuf. */
931 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
932 		if (m_new == NULL)
933 			return (ENOBUFS);
934 
935 		/* Allocate the Jumbo buffer */
936 		buf = bge_jalloc(sc);
937 		if (buf == NULL) {
938 			m_freem(m_new);
939 			return (ENOBUFS);
940 		}
941 
942 		/* Attach the buffer to the mbuf. */
943 		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
944 		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, 0, bge_jfree, sc);
945 	} else {
946 		/*
947 		 * We're re-using a previously allocated mbuf;
948 		 * be sure to re-init pointers and lengths to
949 		 * default values.
950 		 */
951 		m_new = m;
952 		m_new->m_data = m_new->m_ext.ext_buf;
953 		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
954 	}
955 
956 	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
957 		m_adj(m_new, ETHER_ALIGN);
958 	/* Set up the descriptor. */
959 	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
960 	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
961 	BGE_HOSTADDR(r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
962 	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
963 	r->bge_len = m_new->m_len;
964 	r->bge_idx = i;
965 
966 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
967 	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
968 		i * sizeof (struct bge_rx_bd),
969 	    sizeof (struct bge_rx_bd),
970 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
971 
972 	return (0);
973 }
974 
975 /*
976  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
977  * that's 1MB of memory, which is a lot. For now, we fill only the first
978  * 256 ring entries and hope that our CPU is fast enough to keep up with
979  * the NIC.
980  */
981 int
982 bge_init_rx_ring_std(struct bge_softc *sc)
983 {
984 	int i;
985 
986 	if (ISSET(sc->bge_flags, BGE_RXRING_VALID))
987 		return (0);
988 
989 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
990 		if (bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1, MCLBYTES, 0,
991 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
992 		    &sc->bge_cdata.bge_rx_std_map[i]) != 0) {
993 			printf("%s: unable to create dmamap for slot %d\n",
994 			    sc->bge_dev.dv_xname, i);
995 			goto uncreate;
996 		}
997 		bzero((char *)&sc->bge_rdata->bge_rx_std_ring[i],
998 		    sizeof(struct bge_rx_bd));
999 	}
1000 
1001 	sc->bge_std = BGE_STD_RX_RING_CNT - 1;
1002 	sc->bge_std_cnt = 0;
1003 	bge_fill_rx_ring_std(sc);
1004 
1005 	SET(sc->bge_flags, BGE_RXRING_VALID);
1006 
1007 	return (0);
1008 
1009 uncreate:
1010 	while (--i >= 0) {
1011 		bus_dmamap_destroy(sc->bge_dmatag,
1012 		    sc->bge_cdata.bge_rx_std_map[i]);
1013 	}
1014 	return (1);
1015 }
1016 
1017 void
1018 bge_rxtick(void *arg)
1019 {
1020 	struct bge_softc *sc = arg;
1021 	int s;
1022 
1023 	s = splnet();
1024 	bge_fill_rx_ring_std(sc);
1025 	splx(s);
1026 }
1027 
1028 void
1029 bge_fill_rx_ring_std(struct bge_softc *sc)
1030 {
1031 	int i;
1032 	int post = 0;
1033 
1034 	i = sc->bge_std;
1035 	while (sc->bge_std_cnt < BGE_STD_RX_RING_CNT) {
1036 		BGE_INC(i, BGE_STD_RX_RING_CNT);
1037 
1038 		if (bge_newbuf(sc, i) != 0)
1039 			break;
1040 
1041 		sc->bge_std = i;
1042 		post = 1;
1043 	}
1044 
1045 	if (post)
1046 		bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1047 
1048 	/*
1049 	 * The bge hardware always needs more than 8 packets on the ring;
1050 	 * if we can't post that many now, try again later.
1051 	 */
1052 	if (sc->bge_std_cnt <= 8)
1053 		timeout_add(&sc->bge_rxtimeout, 1);
1054 }
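
/*
 * Editor's note: the refill above is opportunistic.  If fewer than
 * eight buffers are on the ring, bge_rxtick() fires on the next tick
 * and calls bge_fill_rx_ring_std() again, so a transient mbuf shortage
 * degrades receive performance instead of wedging it.
 */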
1055 
1056 void
1057 bge_free_rx_ring_std(struct bge_softc *sc)
1058 {
1059 	bus_dmamap_t dmap;
1060 	struct mbuf *m;
1061 	int i;
1062 
1063 	if (!ISSET(sc->bge_flags, BGE_RXRING_VALID))
1064 		return;
1065 
1066 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1067 		dmap = sc->bge_cdata.bge_rx_std_map[i];
1068 		m = sc->bge_cdata.bge_rx_std_chain[i];
1069 		if (m != NULL) {
1070 			bus_dmamap_sync(sc->bge_dmatag, dmap, 0,
1071 			    dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1072 			bus_dmamap_unload(sc->bge_dmatag, dmap);
1073 			m_freem(m);
1074 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1075 		}
1076 		bus_dmamap_destroy(sc->bge_dmatag, dmap);
1077 		sc->bge_cdata.bge_rx_std_map[i] = NULL;
1078 		bzero((char *)&sc->bge_rdata->bge_rx_std_ring[i],
1079 		    sizeof(struct bge_rx_bd));
1080 	}
1081 
1082 	CLR(sc->bge_flags, BGE_RXRING_VALID);
1083 }
1084 
1085 int
1086 bge_init_rx_ring_jumbo(struct bge_softc *sc)
1087 {
1088 	int i;
1089 	volatile struct bge_rcb *rcb;
1090 
1091 	if (sc->bge_flags & BGE_JUMBO_RXRING_VALID)
1092 		return (0);
1093 
1094 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1095 		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
1096 			return (ENOBUFS);
1097 	}
1098 
1099 	sc->bge_jumbo = i - 1;
1100 	sc->bge_flags |= BGE_JUMBO_RXRING_VALID;
1101 
1102 	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1103 	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
1104 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1105 
1106 	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1107 
1108 	return (0);
1109 }
1110 
1111 void
1112 bge_free_rx_ring_jumbo(struct bge_softc *sc)
1113 {
1114 	int i;
1115 
1116 	if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
1117 		return;
1118 
1119 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1120 		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1121 			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1122 			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1123 		}
1124 		bzero((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i],
1125 		    sizeof(struct bge_rx_bd));
1126 	}
1127 
1128 	sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
1129 }
1130 
1131 void
1132 bge_free_tx_ring(struct bge_softc *sc)
1133 {
1134 	int i;
1135 	struct txdmamap_pool_entry *dma;
1136 
1137 	if (!(sc->bge_flags & BGE_TXRING_VALID))
1138 		return;
1139 
1140 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1141 		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1142 			m_freem(sc->bge_cdata.bge_tx_chain[i]);
1143 			sc->bge_cdata.bge_tx_chain[i] = NULL;
1144 			SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
1145 					    link);
1146 			sc->txdma[i] = 0;
1147 		}
1148 		bzero((char *)&sc->bge_rdata->bge_tx_ring[i],
1149 		    sizeof(struct bge_tx_bd));
1150 	}
1151 
1152 	while ((dma = SLIST_FIRST(&sc->txdma_list))) {
1153 		SLIST_REMOVE_HEAD(&sc->txdma_list, link);
1154 		bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
1155 		free(dma, M_DEVBUF);
1156 	}
1157 
1158 	sc->bge_flags &= ~BGE_TXRING_VALID;
1159 }
1160 
1161 int
1162 bge_init_tx_ring(struct bge_softc *sc)
1163 {
1164 	int i;
1165 	bus_dmamap_t dmamap;
1166 	struct txdmamap_pool_entry *dma;
1167 
1168 	if (sc->bge_flags & BGE_TXRING_VALID)
1169 		return (0);
1170 
1171 	sc->bge_txcnt = 0;
1172 	sc->bge_tx_saved_considx = 0;
1173 
1174 	/* Initialize transmit producer index for host-memory send ring. */
1175 	sc->bge_tx_prodidx = 0;
1176 	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1177 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
1178 		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1179 
1180 	/* NIC-memory send ring not used; initialize to zero. */
1181 	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1182 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
1183 		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1184 
1185 	SLIST_INIT(&sc->txdma_list);
1186 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1187 		if (bus_dmamap_create(sc->bge_dmatag, BGE_JLEN,
1188 		    BGE_NTXSEG, BGE_JLEN, 0, BUS_DMA_NOWAIT,
1189 		    &dmamap))
1190 			return (ENOBUFS);
1191 		if (dmamap == NULL)
1192 			panic("dmamap NULL in bge_init_tx_ring");
1193 		dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
1194 		if (dma == NULL) {
1195 			printf("%s: can't alloc txdmamap_pool_entry\n",
1196 			    sc->bge_dev.dv_xname);
1197 			bus_dmamap_destroy(sc->bge_dmatag, dmamap);
1198 			return (ENOMEM);
1199 		}
1200 		dma->dmamap = dmamap;
1201 		SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
1202 	}
1203 
1204 	sc->bge_flags |= BGE_TXRING_VALID;
1205 
1206 	return (0);
1207 }
1208 
1209 void
1210 bge_iff(struct bge_softc *sc)
1211 {
1212 	struct arpcom		*ac = &sc->arpcom;
1213 	struct ifnet		*ifp = &ac->ac_if;
1214 	struct ether_multi	*enm;
1215 	struct ether_multistep  step;
1216 	u_int8_t		hashes[16];
1217 	u_int32_t		h, rxmode;
1218 
1219 	/* First, zot all the existing filters. */
1220 	rxmode = CSR_READ_4(sc, BGE_RX_MODE) & ~BGE_RXMODE_RX_PROMISC;
1221 	ifp->if_flags &= ~IFF_ALLMULTI;
1222 	memset(hashes, 0x00, sizeof(hashes));
1223 
1224 	if (ifp->if_flags & IFF_PROMISC)
1225 		rxmode |= BGE_RXMODE_RX_PROMISC;
1226 	else if (ac->ac_multirangecnt > 0) {
1227 		ifp->if_flags |= IFF_ALLMULTI;
1228 		memset(hashes, 0xff, sizeof(hashes));
1229 	} else {
1230 		ETHER_FIRST_MULTI(step, ac, enm);
1231 		while (enm != NULL) {
1232 			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
1233 			setbit(hashes, h & 0x7F);
1234 			ETHER_NEXT_MULTI(step, enm);
1235 		}
1236 	}
1237 
1238 	bus_space_write_raw_region_4(sc->bge_btag, sc->bge_bhandle, BGE_MAR0,
1239 	    hashes, sizeof(hashes));
1240 
1241 	CSR_WRITE_4(sc, BGE_RX_MODE, rxmode);
1242 }
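
/*
 * Worked example (editor's sketch): each multicast address is folded
 * into a 7-bit index over the 128-bit hash table written to BGE_MAR0
 * above.  Expanded by hand, the per-address step is roughly:
 *
 *	u_int32_t h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
 *	int bit = h & 0x7F;			(0..127)
 *	hashes[bit / 8] |= 1 << (bit % 8);	(what setbit() expands to)
 */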
1243 
1244 /*
1245  * Do endian, PCI and DMA initialization.
1246  */
1247 void
1248 bge_chipinit(struct bge_softc *sc)
1249 {
1250 	struct pci_attach_args	*pa = &(sc->bge_pa);
1251 	u_int32_t dma_rw_ctl;
1252 	int i;
1253 
1254 	/* Set endianness before we access any non-PCI registers. */
1255 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
1256 	    BGE_INIT);
1257 
1258 	/* Clear the MAC control register */
1259 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1260 
1261 	/*
1262 	 * Clear the MAC statistics block in the NIC's
1263 	 * internal memory.
1264 	 */
1265 	for (i = BGE_STATS_BLOCK;
1266 	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1267 		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
1268 
1269 	for (i = BGE_STATUS_BLOCK;
1270 	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1271 		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
1272 
1273 	/*
1274 	 * Set up the PCI DMA control register.
1275 	 */
1276 	dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1277 	    BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1278 
1279 	if (sc->bge_flags & BGE_PCIE) {
1280 		/* Read watermark not used, 128 bytes for write. */
1281 		dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1282 	} else if (sc->bge_flags & BGE_PCIX) {
1283 		/* PCI-X bus */
1284 		if (BGE_IS_5714_FAMILY(sc)) {
1285 			/* 256 bytes for read and write. */
1286 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1287 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1288 
1289 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
1290 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1291 			else
1292 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1293 		} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
1294 			/* 1536 bytes for read, 384 bytes for write. */
1295 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1296 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1297 		} else {
1298 			/* 384 bytes for read and write. */
1299 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1300 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1301 			    (0x0F);
1302 		}
1303 
1304 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
1305 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
1306 			u_int32_t tmp;
1307 
1308 			/* Set ONEDMA_ATONCE for hardware workaround. */
1309 			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1310 			if (tmp == 6 || tmp == 7)
1311 				dma_rw_ctl |=
1312 				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1313 
1314 			/* Set PCI-X DMA write workaround. */
1315 			dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1316 		}
1317 	} else {
1318 		/* Conventional PCI bus: 256 bytes for read and write. */
1319 		dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1320 		    BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1321 
1322 		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
1323 		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
1324 			dma_rw_ctl |= 0x0F;
1325 	}
1326 
1327 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
1328 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
1329 		dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1330 		    BGE_PCIDMARWCTL_ASRT_ALL_BE;
1331 
1332 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
1333 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
1334 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1335 
1336 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);
1337 
1338 	/*
1339 	 * Set up general mode register.
1340 	 */
1341 #ifndef BGE_CHECKSUM
1342 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1343 		    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1344 		    BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);
1345 #else
1346 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1347 		    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS);
1348 #endif
1349 
1350 	/*
1351 	 * Disable memory write invalidate.  Apparently it is not supported
1352 	 * properly by these devices.
1353 	 */
1354 	PCI_CLRBIT(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
1355 	    PCI_COMMAND_INVALIDATE_ENABLE);
1356 
1357 #ifdef __brokenalpha__
1358 	/*
1359 	 * Must ensure that we do not cross an 8K (byte) boundary
1360 	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
1361 	 * restriction on some ALPHA platforms with early revision
1362 	 * 21174 PCI chipsets, such as the AlphaPC 164lx.
1363 	 */
1364 	PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1365 	    BGE_PCI_READ_BNDRY_1024);
1366 #endif
1367 
1368 	/* Set the timer prescaler (always 66MHz) */
1369 	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1370 
1371 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1372 		DELAY(40);	/* XXX */
1373 
1374 		/* Put PHY into ready state */
1375 		BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1376 		CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1377 		DELAY(40);
1378 	}
1379 }
1380 
1381 int
1382 bge_blockinit(struct bge_softc *sc)
1383 {
1384 	volatile struct bge_rcb		*rcb;
1385 	vaddr_t			rcb_addr;
1386 	int			i;
1387 	bge_hostaddr		taddr;
1388 	u_int32_t		val;
1389 
1390 	/*
1391 	 * Initialize the memory window pointer register so that
1392 	 * we can access the first 32K of internal NIC RAM. This will
1393 	 * allow us to set up the TX send ring RCBs and the RX return
1394 	 * ring RCBs, plus other things which live in NIC memory.
1395 	 */
1396 	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1397 
1398 	/* Configure mbuf memory pool */
1399 	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1400 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1401 		    BGE_BUFFPOOL_1);
1402 
1403 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
1404 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1405 		else
1406 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1407 
1408 		/* Configure DMA resource pool */
1409 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1410 		    BGE_DMA_DESCRIPTORS);
1411 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1412 	}
1413 
1414 	/* Configure mbuf pool watermarks */
1415 	/* new Broadcom docs strongly recommend these: */
1416 	if (BGE_IS_5705_OR_BEYOND(sc)) {
1417 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1418 
1419 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1420 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1421 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1422 		} else {
1423 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1424 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1425 		}
1426 	} else {
1427 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1428 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1429 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1430 	}
1431 
1432 	/* Configure DMA resource watermarks */
1433 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1434 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1435 
1436 	/* Enable buffer manager */
1437 	CSR_WRITE_4(sc, BGE_BMAN_MODE,
1438 	    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1439 
1440 	/* Poll for buffer manager start indication */
1441 	for (i = 0; i < 2000; i++) {
1442 		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1443 			break;
1444 		DELAY(10);
1445 	}
1446 
1447 	if (i == 2000) {
1448 		printf("%s: buffer manager failed to start\n",
1449 		    sc->bge_dev.dv_xname);
1450 		return (ENXIO);
1451 	}
1452 
1453 	/* Enable flow-through queues */
1454 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1455 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1456 
1457 	/* Wait until queue initialization is complete */
1458 	for (i = 0; i < 2000; i++) {
1459 		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1460 			break;
1461 		DELAY(10);
1462 	}
1463 
1464 	if (i == 2000) {
1465 		printf("%s: flow-through queue init failed\n",
1466 		    sc->bge_dev.dv_xname);
1467 		return (ENXIO);
1468 	}
1469 
1470 	/* Initialize the standard RX ring control block */
1471 	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
1472 	BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
1473 	if (BGE_IS_5705_OR_BEYOND(sc))
1474 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1475 	else
1476 		rcb->bge_maxlen_flags =
1477 		    BGE_RCB_MAXLEN_FLAGS(ETHER_MAX_DIX_LEN, 0);
1478 	rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1479 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1480 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1481 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1482 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1483 
1484 	/*
1485 	 * Initialize the Jumbo RX ring control block
1486 	 * We set the 'ring disabled' bit in the flags
1487 	 * field until we're actually ready to start
1488 	 * using this ring (i.e. once we set the MTU
1489 	 * high enough to require it).
1490 	 */
1491 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
1492 		rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1493 		BGE_HOSTADDR(rcb->bge_hostaddr,
1494 		    BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
1495 		rcb->bge_maxlen_flags =
1496 		    BGE_RCB_MAXLEN_FLAGS(BGE_JUMBO_FRAMELEN,
1497 		        BGE_RCB_FLAG_RING_DISABLED);
1498 		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1499 
1500 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1501 		    rcb->bge_hostaddr.bge_addr_hi);
1502 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1503 		    rcb->bge_hostaddr.bge_addr_lo);
1504 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1505 		    rcb->bge_maxlen_flags);
1506 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR,
1507 		    rcb->bge_nicaddr);
1508 
1509 		/* Set up dummy disabled mini ring RCB */
1510 		rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
1511 		rcb->bge_maxlen_flags =
1512 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1513 		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1514 		    rcb->bge_maxlen_flags);
1515 
1516 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1517 		    offsetof(struct bge_ring_data, bge_info),
1518 		    sizeof (struct bge_gib),
1519 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1520 	}
1521 
1522 #if 0
1523 	/*
1524 	 * Set the BD ring replenish thresholds. The recommended
1525 	 * values are 1/8th the number of descriptors allocated to
1526 	 * each ring.
1527 	 */
1528 	i = BGE_STD_RX_RING_CNT / 8;
1529 
1530 	/*
1531 	 * Use a value of 8 for the following chips to workaround HW errata.
1532 	 * Some of these chips have been added based on empirical
1533 	 * evidence (they don't work unless this is done).
1534 	 */
1535 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 ||
1536 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
1537 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
1538 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787 ||
1539 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
1540 #endif
1541 		i = 8;
1542 
1543 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, i);
1544 	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT / 8);
1545 
1546 	/*
1547 	 * Disable all unused send rings by setting the 'ring disabled'
1548 	 * bit in the flags field of all the TX send ring control blocks.
1549 	 * These are located in NIC memory.
1550 	 */
1551 	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1552 	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1553 		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1554 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1555 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
1556 		rcb_addr += sizeof(struct bge_rcb);
1557 	}
1558 
1559 	/* Configure TX RCB 0 (we use only the first ring) */
1560 	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1561 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
1562 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1563 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1564 	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
1565 		    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1566 	if (!(BGE_IS_5705_OR_BEYOND(sc)))
1567 		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1568 		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1569 
1570 	/* Disable all unused RX return rings */
1571 	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1572 	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1573 		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
1574 		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
1575 		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1576 		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1577 			BGE_RCB_FLAG_RING_DISABLED));
1578 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
1579 		bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1580 		    (i * (sizeof(u_int64_t))), 0);
1581 		rcb_addr += sizeof(struct bge_rcb);
1582 	}
1583 
1584 	/* Initialize RX ring indexes */
1585 	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1586 	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1587 	bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1588 
1589 	/*
1590 	 * Set up RX return ring 0
1591 	 * Note that the NIC address for RX return rings is 0x00000000.
1592 	 * The return rings live entirely within the host, so the
1593 	 * nicaddr field in the RCB isn't used.
1594 	 */
1595 	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1596 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
1597 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1598 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1599 	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
1600 	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1601 	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1602 
1603 	/* Set random backoff seed for TX */
1604 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1605 	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1606 	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1607 	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1608 	    BGE_TX_BACKOFF_SEED_MASK);
1609 
1610 	/* Set inter-packet gap */
1611 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1612 
1613 	/*
1614 	 * Specify which ring to use for packets that don't match
1615 	 * any RX rules.
1616 	 */
1617 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1618 
1619 	/*
1620 	 * Configure number of RX lists. One interrupt distribution
1621 	 * list, sixteen active lists, one bad frames class.
1622 	 */
1623 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1624 
1625 	/* Initialize RX list placement stats mask. */
1626 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1627 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1628 
1629 	/* Disable host coalescing until we get it set up */
1630 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1631 
1632 	/* Poll to make sure it's shut down. */
1633 	for (i = 0; i < 2000; i++) {
1634 		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1635 			break;
1636 		DELAY(10);
1637 	}
1638 
1639 	if (i == 2000) {
1640 		printf("%s: host coalescing engine failed to idle\n",
1641 		    sc->bge_dev.dv_xname);
1642 		return (ENXIO);
1643 	}
1644 
1645 	/* Set up host coalescing defaults */
1646 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1647 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1648 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1649 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1650 	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1651 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1652 		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1653 	}
1654 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1655 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1656 
1657 	/* Set up address of statistics block */
1658 	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1659 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0);
1660 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1661 			    BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
1662 
1663 		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1664 		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1665 		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1666 	}
1667 
1668 	/* Set up address of status block */
1669 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
1670 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
1671 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
1672 
1673 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
1674 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
1675 
1676 	/* Turn on host coalescing state machine */
1677 	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1678 
1679 	/* Turn on RX BD completion state machine and enable attentions */
1680 	CSR_WRITE_4(sc, BGE_RBDC_MODE,
1681 	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1682 
1683 	/* Turn on RX list placement state machine */
1684 	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1685 
1686 	/* Turn on RX list selector state machine. */
1687 	if (!(BGE_IS_5705_OR_BEYOND(sc)))
1688 		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1689 
1690 	val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1691 	    BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1692 	    BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1693 	    BGE_MACMODE_FRMHDR_DMA_ENB;
1694 
1695 	if (sc->bge_flags & BGE_PHY_FIBER_TBI)
1696 	    val |= BGE_PORTMODE_TBI;
1697 	else if (sc->bge_flags & BGE_PHY_FIBER_MII)
1698 	    val |= BGE_PORTMODE_GMII;
1699 	else
1700 	    val |= BGE_PORTMODE_MII;
1701 
1702 	/* Turn on DMA, clear stats */
1703 	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1704 
1705 	/* Set misc. local control, enable interrupts on attentions */
1706 	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1707 
1708 #ifdef notdef
1709 	/* Assert GPIO pins for PHY reset */
1710 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1711 	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1712 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1713 	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1714 #endif
1715 
1716 	/* Turn on DMA completion state machine */
1717 	if (!(BGE_IS_5705_OR_BEYOND(sc)))
1718 		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1719 
1720 	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
1721 
1722 	/* Enable host coalescing bug fix. */
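	/*
	 * Bit 29 of the write DMA mode register apparently corresponds
	 * to the status tag fix that later FreeBSD sources name
	 * BGE_WDMAMODE_STATUS_TAG_FIX.
	 */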
1723 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
1724 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787)
1725 		val |= (1 << 29);
1726 
1727 	/* Turn on write DMA state machine */
1728 	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1729 
1730 	val = BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS;
1731 
1732 	if (sc->bge_flags & BGE_PCIE)
1733 		val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1734 
1735 	/* Turn on read DMA state machine */
1736 	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1737 
1738 	/* Turn on RX data completion state machine */
1739 	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1740 
1741 	/* Turn on RX BD initiator state machine */
1742 	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1743 
1744 	/* Turn on RX data and RX BD initiator state machine */
1745 	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1746 
1747 	/* Turn on Mbuf cluster free state machine */
1748 	if (!(BGE_IS_5705_OR_BEYOND(sc)))
1749 		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1750 
1751 	/* Turn on send BD completion state machine */
1752 	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1753 
1754 	/* Turn on send data completion state machine */
1755 	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1756 
1757 	/* Turn on send data initiator state machine */
1758 	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1759 
1760 	/* Turn on send BD initiator state machine */
1761 	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1762 
1763 	/* Turn on send BD selector state machine */
1764 	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1765 
1766 	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1767 	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1768 	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1769 
1770 	/* ack/clear link change events */
1771 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1772 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1773 	    BGE_MACSTAT_LINK_CHANGED);
1774 
1775 	/* Enable PHY auto polling (for MII/GMII only) */
1776 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
1777 		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1778 	} else {
1779 		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
1780 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1781 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700)
1782 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1783 			    BGE_EVTENB_MI_INTERRUPT);
1784 	}
1785 
1786 	/*
1787 	 * Clear any pending link state attention.
1788 	 * Otherwise some link state change events may be lost until attention
1789 	 * is cleared by the bge_intr() -> bge_link_upd() sequence.
1790 	 * It's not necessary on newer BCM chips - perhaps enabling link
1791 	 * state change attentions implies clearing pending attention.
1792 	 */
1793 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1794 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1795 	    BGE_MACSTAT_LINK_CHANGED);
1796 
1797 	/* Enable link state change attentions. */
1798 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1799 
1800 	return (0);
1801 }
1802 
1803 const struct bge_revision *
1804 bge_lookup_rev(u_int32_t chipid)
1805 {
1806 	const struct bge_revision *br;
1807 
1808 	for (br = bge_revisions; br->br_name != NULL; br++) {
1809 		if (br->br_chipid == chipid)
1810 			return (br);
1811 	}
1812 
1813 	for (br = bge_majorrevs; br->br_name != NULL; br++) {
1814 		if (br->br_chipid == BGE_ASICREV(chipid))
1815 			return (br);
1816 	}
1817 
1818 	return (NULL);
1819 }
1820 
1821 /*
1822  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1823  * against our list and return a match. Note that although the
1824  * Broadcom controller contains VPD support, and the product name
1825  * could in principle be read from the device itself rather than
1826  * from a compiled-in string, this probe simply matches against
1827  * the compiled-in ID table.
1828  */
1829 int
1830 bge_probe(struct device *parent, void *match, void *aux)
1831 {
1832 	return (pci_matchbyid(aux, bge_devices, nitems(bge_devices)));
1833 }
1834 
1835 void
1836 bge_attach(struct device *parent, struct device *self, void *aux)
1837 {
1838 	struct bge_softc	*sc = (struct bge_softc *)self;
1839 	struct pci_attach_args	*pa = aux;
1840 	pci_chipset_tag_t	pc = pa->pa_pc;
1841 	const struct bge_revision *br;
1842 	pcireg_t		pm_ctl, memtype, subid;
1843 	pci_intr_handle_t	ih;
1844 	const char		*intrstr = NULL;
1845 	bus_size_t		size;
1846 	bus_dma_segment_t	seg;
1847 	int			rseg, gotenaddr = 0;
1848 	u_int32_t		hwcfg = 0;
1849 	u_int32_t		mac_addr = 0;
1850 	u_int32_t		misccfg;
1851 	struct ifnet		*ifp;
1852 	caddr_t			kva;
1853 #ifdef __sparc64__
1854 	char			name[32];
1855 #endif
1856 
1857 	sc->bge_pa = *pa;
1858 
1859 	subid = pci_conf_read(pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
1860 
1861 	/*
1862 	 * Map control/status registers.
1863 	 */
1864 	DPRINTFN(5, ("Map control/status regs\n"));
1865 
1866 	DPRINTFN(5, ("pci_mapreg_map\n"));
1867 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0);
1868 	if (pci_mapreg_map(pa, BGE_PCI_BAR0, memtype, 0, &sc->bge_btag,
1869 	    &sc->bge_bhandle, NULL, &size, 0)) {
1870 		printf(": can't find mem space\n");
1871 		return;
1872 	}
1873 
1874 	DPRINTFN(5, ("pci_intr_map\n"));
1875 	if (pci_intr_map(pa, &ih)) {
1876 		printf(": couldn't map interrupt\n");
1877 		goto fail_1;
1878 	}
1879 
1880 	DPRINTFN(5, ("pci_intr_string\n"));
1881 	intrstr = pci_intr_string(pc, ih);
1882 
1883 	/*
1884 	 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
1885 	 * can clobber the chip's PCI config-space power control registers,
1886 	 * leaving the card in D3 powersave state.
1887 	 * We do not have memory-mapped registers in this state,
1888 	 * so force the device into D0 state before starting initialization.
1889 	 */
1890 	pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
1891 	pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
1892 	pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */
1893 	pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
1894 	DELAY(1000);	/* 27 usec is allegedly sufficient */
1895 
1896 	/*
1897 	 * Save ASIC rev.
1898 	 */
1899 
1900 	sc->bge_chipid =
1901             pci_conf_read(pc, pa->pa_tag, BGE_PCI_MISC_CTL) &
1902             BGE_PCIMISCCTL_ASICREV;
1903 
1904 	printf(", ");
1905 	br = bge_lookup_rev(sc->bge_chipid);
1906 	if (br == NULL)
1907 		printf("unknown ASIC (0x%04x)", sc->bge_chipid >> 16);
1908 	else
1909 		printf("%s (0x%04x)", br->br_name, sc->bge_chipid >> 16);
1910 
1911 	/*
1912 	 * PCI Express check.
1913 	 */
1914 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
1915 	    NULL, NULL) != 0)
1916 		sc->bge_flags |= BGE_PCIE;
1917 
1918 	/*
1919 	 * PCI-X check.
1920 	 */
1921 	if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
1922 	    BGE_PCISTATE_PCI_BUSMODE) == 0)
1923 		sc->bge_flags |= BGE_PCIX;
1924 
1925 	/*
1926 	 * SEEPROM check.
1927 	 */
1928 #ifdef __sparc64__
1929 	/*
1930 	 * Onboard interfaces on UltraSPARC systems generally don't
1931 	 * have a SEEPROM fitted.  These interfaces, and cards that
1932 	 * have FCode, are named "network" by the PROM, whereas cards
1933 	 * without FCode show up as "ethernet".  Since we don't really
1934 	 * need the information from the SEEPROM on cards that have
1935 	 * FCode it's fine to pretend they don't have one.
1936 	 */
1937 	if (OF_getprop(PCITAG_NODE(pa->pa_tag), "name", name,
1938 	    sizeof(name)) > 0 && strcmp(name, "network") == 0)
1939 		sc->bge_flags |= BGE_NO_EEPROM;
1940 #endif
1941 
1942 	/*
1943 	 * When using the BCM5701 in PCI-X mode, data corruption has
1944 	 * been observed in the first few bytes of some received packets.
1945 	 * Aligning the packet buffer in memory eliminates the corruption.
1946 	 * Unfortunately, this misaligns the packet payloads.  On platforms
1947 	 * which do not support unaligned accesses, we will realign the
1948 	 * payloads by copying the received packets.
1949 	 */
1950 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
1951 	    sc->bge_flags & BGE_PCIX)
1952 		sc->bge_flags |= BGE_RX_ALIGNBUG;
1953 
1954 	if (BGE_IS_JUMBO_CAPABLE(sc))
1955 		sc->bge_flags |= BGE_JUMBO_CAP;
1956 
1957 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
1958 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) &&
1959 	    PCI_VENDOR(subid) == DELL_VENDORID)
1960 		sc->bge_flags |= BGE_NO_3LED;
1961 
1962 	misccfg = CSR_READ_4(sc, BGE_MISC_CFG);
1963 	misccfg &= BGE_MISCCFG_BOARD_ID_MASK;
1964 
1965 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
1966 	    (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
1967 	     misccfg == BGE_MISCCFG_BOARD_ID_5788M))
1968 		sc->bge_flags |= BGE_IS_5788;
1969 
1970 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 &&
1971 	     (misccfg == 0x4000 || misccfg == 0x8000)) ||
1972 	    (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
1973 	     PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
1974 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 ||
1975 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
1976 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
1977 	    (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
1978 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F ||
1979 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F ||
1980 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
1981 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
1982 		sc->bge_flags |= BGE_10_100_ONLY;
1983 
1984 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
1985 	    (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
1986 	     (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
1987 	      sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
1988 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
1989 		sc->bge_flags |= BGE_NO_ETH_WIRE_SPEED;
1990 
1991 	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
1992 	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
1993 		sc->bge_flags |= BGE_PHY_CRC_BUG;
1994 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX ||
1995 	    BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX)
1996 		sc->bge_flags |= BGE_PHY_ADC_BUG;
1997 	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
1998 		sc->bge_flags |= BGE_PHY_5704_A0_BUG;
1999 
2000 	if (BGE_IS_5705_OR_BEYOND(sc)) {
2001 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2002 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) {
2003 			if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 &&
2004 			    PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756)
2005 				sc->bge_flags |= BGE_PHY_JITTER_BUG;
2006 			if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M)
2007 				sc->bge_flags |= BGE_PHY_ADJUST_TRIM;
2008 		} else if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
2009 			sc->bge_flags |= BGE_PHY_BER_BUG;
2010 	}
2011 
2012 	/* Try to reset the chip. */
2013 	DPRINTFN(5, ("bge_reset\n"));
2014 	bge_reset(sc);
2015 
2016 	bge_chipinit(sc);
2017 
2018 #ifdef __sparc64__
2019 	if (!gotenaddr) {
2020 		if (OF_getprop(PCITAG_NODE(pa->pa_tag), "local-mac-address",
2021 		    sc->arpcom.ac_enaddr, ETHER_ADDR_LEN) == ETHER_ADDR_LEN)
2022 			gotenaddr = 1;
2023 	}
2024 #endif
2025 
2026 	/*
2027 	 * Get station address from the EEPROM.
2028 	 */
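	/*
	 * 0x484b is ASCII "HK", apparently left at offset 0x0c14 by the
	 * bootstrap firmware to flag a valid station address in NIC
	 * memory.
	 */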
2029 	if (!gotenaddr) {
2030 		mac_addr = bge_readmem_ind(sc, 0x0c14);
2031 		if ((mac_addr >> 16) == 0x484b) {
2032 			sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
2033 			sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
2034 			mac_addr = bge_readmem_ind(sc, 0x0c18);
2035 			sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
2036 			sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
2037 			sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
2038 			sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
2039 			gotenaddr = 1;
2040 		}
2041 	}
2042 	if (!gotenaddr) {
2043 		int mac_offset = BGE_EE_MAC_OFFSET;
2044 
2045 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2046 			mac_offset = BGE_EE_MAC_OFFSET_5906;
2047 
2048 		if (bge_read_nvram(sc, (caddr_t)&sc->arpcom.ac_enaddr,
2049 		    mac_offset + 2, ETHER_ADDR_LEN) == 0)
2050 			gotenaddr = 1;
2051 	}
2052 	if (!gotenaddr && (!(sc->bge_flags & BGE_NO_EEPROM))) {
2053 		if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
2054 		    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN) == 0)
2055 			gotenaddr = 1;
2056 	}
2057 
2058 #ifdef __sparc64__
2059 	if (!gotenaddr) {
2060 		extern void myetheraddr(u_char *);
2061 
2062 		myetheraddr(sc->arpcom.ac_enaddr);
2063 		gotenaddr = 1;
2064 	}
2065 #endif
2066 
2067 	if (!gotenaddr) {
2068 		printf(": failed to read station address\n");
2069 		goto fail_1;
2070 	}
2071 
2072 	/* Allocate the general information block and ring buffers. */
2073 	sc->bge_dmatag = pa->pa_dmat;
2074 	DPRINTFN(5, ("bus_dmamem_alloc\n"));
2075 	if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
2076 			     PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
2077 		printf(": can't alloc rx buffers\n");
2078 		goto fail_1;
2079 	}
2080 	DPRINTFN(5, ("bus_dmamem_map\n"));
2081 	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg,
2082 			   sizeof(struct bge_ring_data), &kva,
2083 			   BUS_DMA_NOWAIT)) {
2084 		printf(": can't map dma buffers (%zu bytes)\n",
2085 		    sizeof(struct bge_ring_data));
2086 		goto fail_2;
2087 	}
2088 	DPRINTFN(5, ("bus_dmamap_create\n"));
2089 	if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
2090 	    sizeof(struct bge_ring_data), 0,
2091 	    BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
2092 		printf(": can't create dma map\n");
2093 		goto fail_3;
2094 	}
2095 	DPRINTFN(5, ("bus_dmamap_load\n"));
2096 	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
2097 			    sizeof(struct bge_ring_data), NULL,
2098 			    BUS_DMA_NOWAIT)) {
2099 		goto fail_4;
2100 	}
2101 
2102 	DPRINTFN(5, ("bzero\n"));
2103 	sc->bge_rdata = (struct bge_ring_data *)kva;
2104 
2105 	bzero(sc->bge_rdata, sizeof(struct bge_ring_data));
2106 
2107 	/*
2108 	 * Try to allocate memory for Jumbo buffers.
2109 	 */
2110 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
2111 		if (bge_alloc_jumbo_mem(sc)) {
2112 			printf(": jumbo buffer allocation failed\n");
2113 			goto fail_5;
2114 		}
2115 	}
2116 
2117 	/* Set default tuneable values. */
2118 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2119 	sc->bge_rx_coal_ticks = 150;
2120 	sc->bge_rx_max_coal_bds = 64;
2121 	sc->bge_tx_coal_ticks = 300;
2122 	sc->bge_tx_max_coal_bds = 400;
2123 
2124 	/* 5705 limits RX return ring to 512 entries. */
2125 	if (BGE_IS_5705_OR_BEYOND(sc))
2126 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2127 	else
2128 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2129 
2130 	/* Set up ifnet structure */
2131 	ifp = &sc->arpcom.ac_if;
2132 	ifp->if_softc = sc;
2133 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2134 	ifp->if_ioctl = bge_ioctl;
2135 	ifp->if_start = bge_start;
2136 	ifp->if_watchdog = bge_watchdog;
2137 	IFQ_SET_MAXLEN(&ifp->if_snd, BGE_TX_RING_CNT - 1);
2138 	IFQ_SET_READY(&ifp->if_snd);
2139 
2140 	/* The low water mark (lwm) must stay above the RX replenish threshold */
2141 	m_clsetwms(ifp, MCLBYTES, 17, BGE_STD_RX_RING_CNT);
2142 
2143 	DPRINTFN(5, ("bcopy\n"));
2144 	bcopy(sc->bge_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
2145 
2146 	ifp->if_capabilities = IFCAP_VLAN_MTU;
2147 
2148 #if NVLAN > 0
2149 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
2150 #endif
2151 
2152 	if (BGE_IS_JUMBO_CAPABLE(sc))
2153 		ifp->if_hardmtu = BGE_JUMBO_MTU;
2154 
2155 	/*
2156 	 * Do MII setup.
2157 	 */
2158 	DPRINTFN(5, ("mii setup\n"));
2159 	sc->bge_mii.mii_ifp = ifp;
2160 	sc->bge_mii.mii_readreg = bge_miibus_readreg;
2161 	sc->bge_mii.mii_writereg = bge_miibus_writereg;
2162 	sc->bge_mii.mii_statchg = bge_miibus_statchg;
2163 
2164 	/*
2165 	 * Figure out what sort of media we have by checking the hardware
2166 	 * config word in the first 32K of internal NIC memory, or fall back to
2167 	 * examining the EEPROM if necessary.  Note: on some BCM5700 cards,
2168 	 * this value seems to be unset. If that's the case, we have to rely on
2169 	 * identifying the NIC by its PCI subsystem ID, as we do below for the
2170 	 * SysKonnect SK-9D41.
2171 	 */
2172 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2173 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2174 	else if (!(sc->bge_flags & BGE_NO_EEPROM)) {
2175 		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2176 		    sizeof(hwcfg))) {
2177 			printf(": failed to read media type\n");
2178 			goto fail_5;
2179 		}
2180 		hwcfg = ntohl(hwcfg);
2181 	}
2182 
2183 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
2184 	if (PCI_PRODUCT(subid) == SK_SUBSYSID_9D41 ||
2185 	    (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
2186 		if (BGE_IS_5714_FAMILY(sc))
2187 		    sc->bge_flags |= BGE_PHY_FIBER_MII;
2188 		else
2189 		    sc->bge_flags |= BGE_PHY_FIBER_TBI;
2190 	}
2191 
2192 	/* Hookup IRQ last. */
2193 	DPRINTFN(5, ("pci_intr_establish\n"));
2194 	sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc,
2195 	    sc->bge_dev.dv_xname);
2196 	if (sc->bge_intrhand == NULL) {
2197 		printf(": couldn't establish interrupt");
2198 		if (intrstr != NULL)
2199 			printf(" at %s", intrstr);
2200 		printf("\n");
2201 		goto fail_5;
2202 	}
2203 
2204 	/*
2205 	 * A Broadcom chip was detected. Inform the world.
2206 	 */
2207 	printf(": %s, address %s\n", intrstr,
2208 	    ether_sprintf(sc->arpcom.ac_enaddr));
2209 
2210 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
2211 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2212 		    bge_ifmedia_sts);
2213 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2214 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
2215 			    0, NULL);
2216 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2217 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2218 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2219 	} else {
2220 		int mii_flags;
2221 
2222 		/*
2223 		 * Do transceiver setup.
2224 		 */
2225 		ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
2226 			     bge_ifmedia_sts);
2227 		mii_flags = MIIF_DOPAUSE;
2228 		if (sc->bge_flags & BGE_PHY_FIBER_MII)
2229 			mii_flags |= MIIF_HAVEFIBER;
2230 		mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff,
2231 			   MII_PHY_ANY, MII_OFFSET_ANY, mii_flags);
2232 
2233 		if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) {
2234 			printf("%s: no PHY found!\n", sc->bge_dev.dv_xname);
2235 			ifmedia_add(&sc->bge_mii.mii_media,
2236 				    IFM_ETHER|IFM_MANUAL, 0, NULL);
2237 			ifmedia_set(&sc->bge_mii.mii_media,
2238 				    IFM_ETHER|IFM_MANUAL);
2239 		} else
2240 			ifmedia_set(&sc->bge_mii.mii_media,
2241 				    IFM_ETHER|IFM_AUTO);
2242 	}
2243 
2244 	/*
2245 	 * Call MI attach routine.
2246 	 */
2247 	if_attach(ifp);
2248 	ether_ifattach(ifp);
2249 
2250 	sc->sc_shutdownhook = shutdownhook_establish(bge_shutdown, sc);
2251 	sc->sc_powerhook = powerhook_establish(bge_power, sc);
2252 
2253 	timeout_set(&sc->bge_timeout, bge_tick, sc);
2254 	timeout_set(&sc->bge_rxtimeout, bge_rxtick, sc);
2255 	return;
2256 
2257 fail_5:
2258 	bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map);
2259 
2260 fail_4:
2261 	bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
2262 
2263 fail_3:
2264 	bus_dmamem_unmap(sc->bge_dmatag, kva,
2265 	    sizeof(struct bge_ring_data));
2266 
2267 fail_2:
2268 	bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2269 
2270 fail_1:
2271 	bus_space_unmap(sc->bge_btag, sc->bge_bhandle, size);
2272 }
2273 
2274 void
2275 bge_reset(struct bge_softc *sc)
2276 {
2277 	struct pci_attach_args *pa = &sc->bge_pa;
2278 	pcireg_t cachesize, command, pcistate, new_pcistate;
2279 	u_int32_t reset;
2280 	int i, val = 0;
2281 
2282 	/* Save some important PCI state. */
2283 	cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
2284 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD);
2285 	pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
2286 
2287 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
2288 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2289 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW);
2290 
2291 	/* Disable fastboot on controllers that support it. */
2292 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
2293 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2294 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787)
2295 		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0);
2296 
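	/*
	 * (65 << 1) below apparently programs the 32-bit timer prescaler
	 * for a 66MHz core clock; later FreeBSD sources name this value
	 * BGE_32BITTIME_66MHZ (0x41 << 1).
	 */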
2297 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2298 
2299 	if (sc->bge_flags & BGE_PCIE) {
2300 		if (CSR_READ_4(sc, 0x7e2c) == 0x60) {
2301 			/* PCI Express 1.0 system */
2302 			CSR_WRITE_4(sc, 0x7e2c, 0x20);
2303 		}
2304 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2305 			/*
2306 			 * Prevent PCI Express link training
2307 			 * during global reset.
2308 			 */
2309 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2310 			reset |= (1<<29);
2311 		}
2312 	}
2313 
2314 	/*
2315 	 * Set GPHY Power Down Override to leave GPHY
2316 	 * powered up in the D0 uninitialized (D0u) state.
2317 	 */
2318 	if (BGE_IS_5705_OR_BEYOND(sc))
2319 		reset |= BGE_MISCCFG_KEEP_GPHY_POWER;
2320 
2321 	/* Issue global reset */
2322 	bge_writereg_ind(sc, BGE_MISC_CFG, reset);
2323 
2324 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2325 		u_int32_t status, ctrl;
2326 
2327 		status = CSR_READ_4(sc, BGE_VCPU_STATUS);
2328 		CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2329 		    status | BGE_VCPU_STATUS_DRV_RESET);
2330 		ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2331 		CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2332 		    ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2333 
2334 		sc->bge_flags |= BGE_NO_EEPROM;
2335 	}
2336 
2337 	DELAY(1000);
2338 
2339 	if (sc->bge_flags & BGE_PCIE) {
2340 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2341 			pcireg_t v;
2342 
2343 			DELAY(500000); /* wait for link training to complete */
2344 			v = pci_conf_read(pa->pa_pc, pa->pa_tag, 0xc4);
2345 			pci_conf_write(pa->pa_pc, pa->pa_tag, 0xc4, v | (1<<15));
2346 		}
2347 
2348 		/*
2349 		 * Set PCI Express max payload size to 128 bytes
2350 		 * and clear error status.
2351 		 */
2352 		pci_conf_write(pa->pa_pc, pa->pa_tag,
2353 		    BGE_PCI_CONF_DEV_CTRL, 0xf5000);
2354 	}
2355 
2356 	/* Reset some of the PCI state that got zapped by reset */
2357 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
2358 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2359 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW);
2360 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
2361 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);
2362 	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2363 
2364 	/* Enable memory arbiter. */
2365 	if (BGE_IS_5714_FAMILY(sc)) {
2366 		u_int32_t val;
2367 
2368 		val = CSR_READ_4(sc, BGE_MARB_MODE);
2369 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2370 	} else
2371 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2372 
2373 	/*
2374 	 * Prevent PXE restart: write a magic number to the
2375 	 * general communications memory at 0xB50.
2376 	 */
2377 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2378 
2379 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2380 		for (i = 0; i < BGE_TIMEOUT; i++) {
2381 			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2382 			if (val & BGE_VCPU_STATUS_INIT_DONE)
2383 				break;
2384 			DELAY(100);
2385 		}
2386 
2387 		if (i >= BGE_TIMEOUT)
2388 			printf("%s: reset timed out\n", sc->bge_dev.dv_xname);
2389 	} else {
2390 		/*
2391 		 * Poll until we see the 1's complement of the magic number.
2392 		 * This indicates that the firmware initialization
2393 		 * is complete.  We expect this to fail if no SEEPROM
2394 		 * is fitted.
2395 		 */
2396 		for (i = 0; i < BGE_TIMEOUT; i++) {
2397 			val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2398 			if (val == ~BGE_MAGIC_NUMBER)
2399 				break;
2400 			DELAY(10);
2401 		}
2402 
2403 		if (i >= BGE_TIMEOUT && (!(sc->bge_flags & BGE_NO_EEPROM)))
2404 			printf("%s: firmware handshake timed out\n",
2405 			   sc->bge_dev.dv_xname);
2406 	}
2407 
2408 	/*
2409 	 * XXX Wait for the value of the PCISTATE register to
2410 	 * return to its original pre-reset state. This is a
2411 	 * fairly good indicator of reset completion. If we don't
2412 	 * wait for the reset to fully complete, trying to read
2413 	 * from the device's non-PCI registers may yield garbage
2414 	 * results.
2415 	 */
2416 	for (i = 0; i < BGE_TIMEOUT; i++) {
2417 		new_pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag,
2418 		    BGE_PCI_PCISTATE);
2419 		if ((new_pcistate & ~BGE_PCISTATE_RESERVED) ==
2420 		    (pcistate & ~BGE_PCISTATE_RESERVED))
2421 			break;
2422 		DELAY(10);
2423 	}
2424 	if ((new_pcistate & ~BGE_PCISTATE_RESERVED) !=
2425 	    (pcistate & ~BGE_PCISTATE_RESERVED)) {
2426 		DPRINTFN(5, ("%s: pcistate failed to revert\n",
2427 		    sc->bge_dev.dv_xname));
2428 	}
2429 
2430 	/* Fix up byte swapping */
2431 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
2432 
2433 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2434 
2435 	/*
2436 	 * The 5704 in TBI mode apparently needs some special
2437 	 * adjustment to ensure the SERDES drive level is set
2438 	 * to 1.2V.
2439 	 */
2440 	if (sc->bge_flags & BGE_PHY_FIBER_TBI &&
2441 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
2442 		u_int32_t serdescfg;
2443 
2444 		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2445 		serdescfg = (serdescfg & ~0xFFF) | 0x880;
2446 		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2447 	}
2448 
2449 	if (sc->bge_flags & BGE_PCIE &&
2450 	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2451 		u_int32_t v;
2452 
2453 		/* Enable PCI Express bug fix */
2454 		v = CSR_READ_4(sc, 0x7c00);
2455 		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2456 	}
2457 	DELAY(10000);
2458 }
2459 
2460 /*
2461  * Frame reception handling. This is called if there's a frame
2462  * on the receive return list.
2463  *
2464  * Note: we have to be able to handle two possibilities here:
2465  * 1) the frame is from the Jumbo receive ring
2466  * 2) the frame is from the standard receive ring
2467  */
2468 
2469 void
2470 bge_rxeof(struct bge_softc *sc)
2471 {
2472 	struct ifnet *ifp;
2473 	int stdcnt = 0, jumbocnt = 0;
2474 	bus_dmamap_t dmamap;
2475 	bus_addr_t offset, toff;
2476 	bus_size_t tlen;
2477 	int tosync;
2478 
2479 	/* Nothing to do */
2480 	if (sc->bge_rx_saved_considx ==
2481 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx)
2482 		return;
2483 
2484 	ifp = &sc->arpcom.ac_if;
2485 
2486 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2487 	    offsetof(struct bge_ring_data, bge_status_block),
2488 	    sizeof (struct bge_status_block),
2489 	    BUS_DMASYNC_POSTREAD);
2490 
2491 	offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
2492 	tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx -
2493 	    sc->bge_rx_saved_considx;
2494 
2495 	toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd));
2496 
2497 	if (tosync < 0) {
2498 		tlen = (sc->bge_return_ring_cnt - sc->bge_rx_saved_considx) *
2499 		    sizeof (struct bge_rx_bd);
2500 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2501 		    toff, tlen, BUS_DMASYNC_POSTREAD);
2502 		tosync = -tosync;
2503 	}
2504 
2505 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2506 	    offset, tosync * sizeof (struct bge_rx_bd),
2507 	    BUS_DMASYNC_POSTREAD);
2508 
2509 	while (sc->bge_rx_saved_considx !=
2510 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
2511 		struct bge_rx_bd	*cur_rx;
2512 		u_int32_t		rxidx;
2513 		struct mbuf		*m = NULL;
2514 #ifdef BGE_CHECKSUM
2515 		u_int16_t		sumflags = 0;
2516 #endif
2517 
2518 		cur_rx = &sc->bge_rdata->
2519 			bge_rx_return_ring[sc->bge_rx_saved_considx];
2520 
2521 		rxidx = cur_rx->bge_idx;
2522 		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2523 
2524 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2525 			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2526 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2527 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2528 			jumbocnt++;
2529 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2530 				ifp->if_ierrors++;
2531 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2532 				continue;
2533 			}
2534 			if (bge_newbuf_jumbo(sc, sc->bge_jumbo, NULL)
2535 			    == ENOBUFS) {
2536 				struct mbuf             *m0;
2537 				m0 = m_devget(mtod(m, char *),
2538 				    cur_rx->bge_len - ETHER_CRC_LEN,
2539 				    ETHER_ALIGN, ifp, NULL);
2540 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2541 				if (m0 == NULL) {
2542 					ifp->if_ierrors++;
2543 					continue;
2544 				}
2545 				m = m0;
2546 			}
2547 		} else {
2548 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2549 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2550 
2551 			stdcnt++;
2552 			sc->bge_std_cnt--;
2553 
2554 			dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
2555 			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
2556 			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2557 			bus_dmamap_unload(sc->bge_dmatag, dmamap);
2558 
2559 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2560 				m_freem(m);
2561 				ifp->if_ierrors++;
2562 				continue;
2563 			}
2564 		}
2565 
2566 		ifp->if_ipackets++;
2567 #ifdef __STRICT_ALIGNMENT
2568 		/*
2569 		 * The i386 allows unaligned accesses, but for other
2570 		 * platforms we must make sure the payload is aligned.
2571 		 */
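		/*
		 * The two-byte shift below is an overlapping copy, which
		 * bcopy() handles safely; it re-aligns the IP header at
		 * the cost of copying the whole frame.
		 */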
2572 		if (sc->bge_flags & BGE_RX_ALIGNBUG) {
2573 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2574 			    cur_rx->bge_len);
2575 			m->m_data += ETHER_ALIGN;
2576 		}
2577 #endif
2578 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2579 		m->m_pkthdr.rcvif = ifp;
2580 
2581 #if NVLAN > 0
2582 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2583 			m->m_pkthdr.ether_vtag = cur_rx->bge_vlan_tag;
2584 			m->m_flags |= M_VLANTAG;
2585 		}
2586 #endif
2587 
2588 #if NBPFILTER > 0
2589 		/*
2590 		 * Handle BPF listeners. Let the BPF user see the packet.
2591 		 */
2592 		if (ifp->if_bpf)
2593 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
2594 #endif
2595 
2596 #ifdef BGE_CHECKSUM
2597 		if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2598 			sumflags |= M_IPV4_CSUM_IN_OK;
2599 		else
2600 			sumflags |= M_IPV4_CSUM_IN_BAD;
2601 
2602 		if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2603 			m->m_pkthdr.csum_data =
2604 				cur_rx->bge_tcp_udp_csum;
2605 			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
2606 		}
2607 
2608 		m->m_pkthdr.csum_flags = sumflags;
2609 		sumflags = 0;
2610 #endif
2611 		ether_input_mbuf(ifp, m);
2612 	}
2613 
2614 	bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2615 	if (stdcnt)
2616 		bge_fill_rx_ring_std(sc);
2617 	if (jumbocnt)
2618 		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2619 }
2620 
2621 void
2622 bge_txeof(struct bge_softc *sc)
2623 {
2624 	struct bge_tx_bd *cur_tx = NULL;
2625 	struct ifnet *ifp;
2626 	struct txdmamap_pool_entry *dma;
2627 	bus_addr_t offset, toff;
2628 	bus_size_t tlen;
2629 	int tosync;
2630 	struct mbuf *m;
2631 
2632 	/* Nothing to do */
2633 	if (sc->bge_tx_saved_considx ==
2634 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx)
2635 		return;
2636 
2637 	ifp = &sc->arpcom.ac_if;
2638 
2639 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2640 	    offsetof(struct bge_ring_data, bge_status_block),
2641 	    sizeof (struct bge_status_block),
2642 	    BUS_DMASYNC_POSTREAD);
2643 
2644 	offset = offsetof(struct bge_ring_data, bge_tx_ring);
2645 	tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
2646 	    sc->bge_tx_saved_considx;
2647 
2648 	toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
2649 
2650 	if (tosync < 0) {
2651 		tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
2652 		    sizeof (struct bge_tx_bd);
2653 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2654 		    toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2655 		tosync = -tosync;
2656 	}
2657 
2658 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2659 	    offset, tosync * sizeof (struct bge_tx_bd),
2660 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2661 
2662 	/*
2663 	 * Go through our tx ring and free mbufs for those
2664 	 * frames that have been sent.
2665 	 */
2666 	while (sc->bge_tx_saved_considx !=
2667 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
2668 		u_int32_t		idx = 0;
2669 
2670 		idx = sc->bge_tx_saved_considx;
2671 		cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2672 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2673 			ifp->if_opackets++;
2674 		m = sc->bge_cdata.bge_tx_chain[idx];
2675 		if (m != NULL) {
2676 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2677 			dma = sc->txdma[idx];
2678 			bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
2679 			    dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2680 			bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
2681 			SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
2682 			sc->txdma[idx] = NULL;
2683 
2684 			m_freem(m);
2685 		}
2686 		sc->bge_txcnt--;
2687 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2688 	}
2689 
2690 	if (sc->bge_txcnt < BGE_TX_RING_CNT - 16)
2691 		ifp->if_flags &= ~IFF_OACTIVE;
2692 	if (sc->bge_txcnt == 0)
2693 		ifp->if_timer = 0;
2694 }
2695 
2696 int
2697 bge_intr(void *xsc)
2698 {
2699 	struct bge_softc *sc;
2700 	struct ifnet *ifp;
2701 	u_int32_t statusword;
2702 
2703 	sc = xsc;
2704 	ifp = &sc->arpcom.ac_if;
2705 
2706 	/* It is possible for the interrupt to arrive before
2707 	 * the status block has been updated by the chip.
2708 	 * Reading the PCI State register will confirm whether the
2709 	 * interrupt is ours and will flush the status block.
2710 	 */
2711 
2712 	/* read status word from status block */
2713 	statusword = sc->bge_rdata->bge_status_block.bge_status;
2714 
2715 	if ((statusword & BGE_STATFLAG_UPDATED) ||
2716 	    (!(CSR_READ_4(sc, BGE_PCI_PCISTATE) & BGE_PCISTATE_INTR_NOT_ACTIVE))) {
2717 
2718 		/* Ack interrupt and stop others from occurring. */
2719 		bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
2720 
2721 		/* clear status word */
2722 		sc->bge_rdata->bge_status_block.bge_status = 0;
2723 
2724 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2725 		    statusword & BGE_STATFLAG_LINKSTATE_CHANGED ||
2726 		    BGE_STS_BIT(sc, BGE_STS_LINK_EVT))
2727 			bge_link_upd(sc);
2728 
2729 		if (ifp->if_flags & IFF_RUNNING) {
2730 			/* Check RX return ring producer/consumer */
2731 			bge_rxeof(sc);
2732 
2733 			/* Check TX ring producer/consumer */
2734 			bge_txeof(sc);
2735 		}
2736 
2737 		/* Re-enable interrupts. */
2738 		bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
2739 
2740 		bge_start(ifp);
2741 
2742 		return (1);
2743 	} else
2744 		return (0);
2745 }
2746 
2747 void
2748 bge_tick(void *xsc)
2749 {
2750 	struct bge_softc *sc = xsc;
2751 	struct mii_data *mii = &sc->bge_mii;
2752 	int s;
2753 
2754 	s = splnet();
2755 
2756 	if (BGE_IS_5705_OR_BEYOND(sc))
2757 		bge_stats_update_regs(sc);
2758 	else
2759 		bge_stats_update(sc);
2760 
2761 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
2762 		/*
2763 		 * Since auto-polling can't be used in TBI mode, we poll the
2764 		 * link status manually. Here we register a pending link event
2765 		 * and trigger an interrupt.
2766 		 */
2767 		BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
2768 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
2769 	} else {
2770 		/*
2771 		 * Do not touch the PHY if we have link up. This could break
2772 		 * IPMI/ASF mode or produce extra input errors
2773 		 * (extra input errors were reported for bcm5701 & bcm5704).
2774 		 */
2775 		if (!BGE_STS_BIT(sc, BGE_STS_LINK))
2776 			mii_tick(mii);
2777 	}
2778 
2779 	timeout_add_sec(&sc->bge_timeout, 1);
2780 
2781 	splx(s);
2782 }
2783 
2784 void
2785 bge_stats_update_regs(struct bge_softc *sc)
2786 {
2787 	struct ifnet *ifp = &sc->arpcom.ac_if;
2788 
2789 	ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
2790 	    offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
2791 
2792 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
2793 
2794 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
2795 
2796 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
2797 }
2798 
2799 void
2800 bge_stats_update(struct bge_softc *sc)
2801 {
2802 	struct ifnet *ifp = &sc->arpcom.ac_if;
2803 	bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2804 	u_int32_t cnt;
2805 
2806 #define READ_STAT(sc, stats, stat) \
2807 	  CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
2808 
2809 	cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
2810 	ifp->if_collisions += (u_int32_t)(cnt - sc->bge_tx_collisions);
2811 	sc->bge_tx_collisions = cnt;
2812 
2813 	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
2814 	ifp->if_ierrors += (u_int32_t)(cnt - sc->bge_rx_discards);
2815 	sc->bge_rx_discards = cnt;
2816 
2817 	cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo);
2818 	ifp->if_ierrors += (u_int32_t)(cnt - sc->bge_rx_inerrors);
2819 	sc->bge_rx_inerrors = cnt;
2820 
2821 	cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo);
2822 	ifp->if_ierrors += (u_int32_t)(cnt - sc->bge_rx_overruns);
2823 	sc->bge_rx_overruns = cnt;
2824 
2825 	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
2826 	ifp->if_oerrors += (u_int32_t)(cnt - sc->bge_tx_discards);
2827 	sc->bge_tx_discards = cnt;
2828 
2829 #undef READ_STAT
2830 }
2831 
2832 /*
2833  * Compact outbound packets to avoid a bug with DMA segments under 8 bytes.
2834  */
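/*
 * In outline: an undersized mbuf is first merged into trailing space in
 * the previous mbuf; failing that, enough bytes are pulled up from the
 * next mbuf to reach the 8-byte minimum; the final branch (made
 * unconditional by its "|| 1") allocates a fresh mbuf holding the runt
 * plus bytes borrowed from the tail of the previous mbuf.
 */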
2835 int
2836 bge_compact_dma_runt(struct mbuf *pkt)
2837 {
2838 	struct mbuf	*m, *prev, *n = NULL;
2839 	int 		totlen, prevlen, newprevlen;
2840 
2841 	prev = NULL;
2842 	totlen = 0;
2843 	prevlen = -1;
2844 
2845 	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
2846 		int mlen = m->m_len;
2847 		int shortfall = 8 - mlen;
2848 
2849 		totlen += mlen;
2850 		if (mlen == 0)
2851 			continue;
2852 		if (mlen >= 8)
2853 			continue;
2854 
2855 		/* If we get here, the mbuf data is too small for the DMA engine.
2856 		 * Try to fix by shuffling data to prev or next in chain.
2857 		 * If that fails, do a compacting deep-copy of the whole chain.
2858 		 */
2859 
2860 		/* Internal frag. If it fits in prev, copy it there. */
2861 		if (prev && M_TRAILINGSPACE(prev) >= m->m_len) {
2862 			bcopy(m->m_data,
2863 			      prev->m_data+prev->m_len,
2864 			      mlen);
2865 			prev->m_len += mlen;
2866 			m->m_len = 0;
2867 			/* XXX stitch chain */
2868 			prev->m_next = m_free(m);
2869 			m = prev;
2870 			continue;
2871 		} else if (m->m_next != NULL &&
2872 			   M_TRAILINGSPACE(m) >= shortfall &&
2873 			   m->m_next->m_len >= (8 + shortfall)) {
2874 			/* m is writable and the next mbuf has enough data; pull up. */
2875 
2876 			bcopy(m->m_next->m_data,
2877 			      m->m_data+m->m_len,
2878 			      shortfall);
2879 			m->m_len += shortfall;
2880 			m->m_next->m_len -= shortfall;
2881 			m->m_next->m_data += shortfall;
2882 		} else if (m->m_next == NULL || 1) {
2883 			/* Got a runt at the very end of the packet.
2884 			 * Borrow data from the tail of the preceding mbuf and
2885 			 * update its length in-place. (The original data is still
2886 			 * valid, so we can do this even if prev is not writable.)
2887 			 */
2888 
2889 			/* If we'd make prev a runt, just move all of its data. */
2890 #ifdef DEBUG
2891 			KASSERT(prev != NULL /*, ("runt but null PREV")*/);
2892 			KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
2893 #endif
2894 			if ((prev->m_len - shortfall) < 8)
2895 				shortfall = prev->m_len;
2896 
2897 			newprevlen = prev->m_len - shortfall;
2898 
2899 			MGET(n, M_NOWAIT, MT_DATA);
2900 			if (n == NULL)
2901 				return (ENOBUFS);
2902 			KASSERT(m->m_len + shortfall < MLEN
2903 				/*,
2904 				  ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
2905 
2906 			/* first copy the data we're stealing from prev */
2907 			bcopy(prev->m_data + newprevlen, n->m_data, shortfall);
2908 
2909 			/* update prev->m_len accordingly */
2910 			prev->m_len -= shortfall;
2911 
2912 			/* copy data from runt m */
2913 			bcopy(m->m_data, n->m_data + shortfall, m->m_len);
2914 
2915 			/* n holds what we stole from prev, plus m */
2916 			n->m_len = shortfall + m->m_len;
2917 
2918 			/* stitch n into chain and free m */
2919 			n->m_next = m->m_next;
2920 			prev->m_next = n;
2921 			/* KASSERT(m->m_next == NULL); */
2922 			m->m_next = NULL;
2923 			m_free(m);
2924 			m = n;	/* for continuing loop */
2925 		}
2926 		prevlen = m->m_len;
2927 	}
2928 	return (0);
2929 }
2930 
2931 /*
2932  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2933  * pointers to descriptors.
2934  */
2935 int
2936 bge_encap(struct bge_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
2937 {
2938 	struct bge_tx_bd	*f = NULL;
2939 	u_int32_t		frag, cur;
2940 	u_int16_t		csum_flags = 0;
2941 	struct txdmamap_pool_entry *dma;
2942 	bus_dmamap_t dmamap;
2943 	int			i = 0;
2944 
2945 	cur = frag = *txidx;
2946 
2947 #ifdef BGE_CHECKSUM
2948 	if (m_head->m_pkthdr.csum_flags) {
2949 		if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
2950 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2951 		if (m_head->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT |
2952 					     M_UDPV4_CSUM_OUT))
2953 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2954 #ifdef fake
2955 		if (m_head->m_flags & M_LASTFRAG)
2956 			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2957 		else if (m_head->m_flags & M_FRAG)
2958 			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2959 #endif
2960 	}
2961 #endif
2962 	if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX))
2963 		goto doit;
2964 
2965 	/*
2966 	 * bcm5700 Revision B silicon cannot handle DMA descriptors of
2967 	 * fewer than eight bytes.  If we encounter a teeny mbuf
2968 	 * at the end of a chain, we can pad.  Otherwise, copy.
2969 	 */
2970 	if (bge_compact_dma_runt(m_head) != 0)
2971 		return (ENOBUFS);
2972 
2973 doit:
2974 	dma = SLIST_FIRST(&sc->txdma_list);
2975 	if (dma == NULL)
2976 		return (ENOBUFS);
2977 	dmamap = dma->dmamap;
2978 
2979 	/*
2980 	 * Start packing the mbufs in this chain into
2981 	 * the fragment pointers. Stop when we run out
2982 	 * of fragments or hit the end of the mbuf chain.
2983 	 */
2984 	if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
2985 	    BUS_DMA_NOWAIT))
2986 		return (ENOBUFS);
2987 
2988 	/*
2989 	 * Sanity check: avoid coming within 16 descriptors
2990 	 * of the end of the ring.
2991 	 */
2992 	if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16))
2993 		goto fail_unload;
2994 
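	/*
	 * One descriptor is filled in per DMA segment.  A non-NULL
	 * bge_tx_chain[] entry means bge_txeof() has not yet reclaimed
	 * that slot, so hitting one is treated as a full ring.
	 */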
2995 	for (i = 0; i < dmamap->dm_nsegs; i++) {
2996 		f = &sc->bge_rdata->bge_tx_ring[frag];
2997 		if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
2998 			break;
2999 		BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
3000 		f->bge_len = dmamap->dm_segs[i].ds_len;
3001 		f->bge_flags = csum_flags;
3002 		f->bge_vlan_tag = 0;
3003 #if NVLAN > 0
3004 		if (m_head->m_flags & M_VLANTAG) {
3005 			f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3006 			f->bge_vlan_tag = m_head->m_pkthdr.ether_vtag;
3007 		}
3008 #endif
3009 		cur = frag;
3010 		BGE_INC(frag, BGE_TX_RING_CNT);
3011 	}
3012 
3013 	if (i < dmamap->dm_nsegs)
3014 		goto fail_unload;
3015 
3016 	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
3017 	    BUS_DMASYNC_PREWRITE);
3018 
3019 	if (frag == sc->bge_tx_saved_considx)
3020 		goto fail_unload;
3021 
3022 	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
3023 	sc->bge_cdata.bge_tx_chain[cur] = m_head;
3024 	SLIST_REMOVE_HEAD(&sc->txdma_list, link);
3025 	sc->txdma[cur] = dma;
3026 	sc->bge_txcnt += dmamap->dm_nsegs;
3027 
3028 	*txidx = frag;
3029 
3030 	return (0);
3031 
3032 fail_unload:
3033 	bus_dmamap_unload(sc->bge_dmatag, dmamap);
3034 
3035 	return (ENOBUFS);
3036 }
3037 
3038 /*
3039  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3040  * to the mbuf data regions directly in the transmit descriptors.
3041  */
3042 void
3043 bge_start(struct ifnet *ifp)
3044 {
3045 	struct bge_softc *sc;
3046 	struct mbuf *m_head = NULL;
3047 	u_int32_t prodidx;
3048 	int pkts = 0;
3049 
3050 	sc = ifp->if_softc;
3051 
3052 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
3053 		return;
3054 	if (!BGE_STS_BIT(sc, BGE_STS_LINK))
3055 		return;
3056 	if (IFQ_IS_EMPTY(&ifp->if_snd))
3057 		return;
3058 
3059 	prodidx = sc->bge_tx_prodidx;
3060 
3061 	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3062 		IFQ_POLL(&ifp->if_snd, m_head);
3063 		if (m_head == NULL)
3064 			break;
3065 
3066 		/*
3067 		 * Pack the data into the transmit ring. If we
3068 		 * don't have room, set the OACTIVE flag and wait
3069 		 * for the NIC to drain the ring.
3070 		 */
3071 		if (bge_encap(sc, m_head, &prodidx)) {
3072 			ifp->if_flags |= IFF_OACTIVE;
3073 			break;
3074 		}
3075 
3076 		/* now we are committed to transmit the packet */
3077 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
3078 		pkts++;
3079 
3080 #if NBPFILTER > 0
3081 		/*
3082 		 * If there's a BPF listener, bounce a copy of this frame
3083 		 * to him.
3084 		 */
3085 		if (ifp->if_bpf)
3086 			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
3087 #endif
3088 	}
3089 	if (pkts == 0)
3090 		return;
3091 
3092 	/* Transmit */
3093 	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3094 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
3095 		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
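	/*
	 * The duplicated write above appears to work around a 5700 B-step
	 * erratum in which a single producer index update can be dropped.
	 */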
3096 
3097 	sc->bge_tx_prodidx = prodidx;
3098 
3099 	/*
3100 	 * Set a timeout in case the chip goes out to lunch.
3101 	 */
3102 	ifp->if_timer = 5;
3103 }
3104 
3105 void
3106 bge_init(void *xsc)
3107 {
3108 	struct bge_softc *sc = xsc;
3109 	struct ifnet *ifp;
3110 	u_int16_t *m;
3111 	int s;
3112 
3113 	s = splnet();
3114 
3115 	ifp = &sc->arpcom.ac_if;
3116 
3117 	/* Cancel pending I/O and flush buffers. */
3118 	bge_stop(sc);
3119 	bge_reset(sc);
3120 	bge_chipinit(sc);
3121 
3122 	/*
3123 	 * Init the various state machines, ring
3124 	 * control blocks and firmware.
3125 	 */
3126 	if (bge_blockinit(sc)) {
3127 		printf("%s: initialization failure\n", sc->bge_dev.dv_xname);
3128 		splx(s);
3129 		return;
3130 	}
3131 
3132 	ifp = &sc->arpcom.ac_if;
3133 
3134 	/* Specify MRU. */
3135 	if (BGE_IS_JUMBO_CAPABLE(sc))
3136 		CSR_WRITE_4(sc, BGE_RX_MTU,
3137 			BGE_JUMBO_FRAMELEN + ETHER_VLAN_ENCAP_LEN);
3138 	else
3139 		CSR_WRITE_4(sc, BGE_RX_MTU,
3140 			ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
3141 
3142 	/* Load our MAC address. */
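	/*
	 * The address is written as two words: the first two bytes of the
	 * station address go into BGE_MAC_ADDR1_LO and the remaining four
	 * into BGE_MAC_ADDR1_HI, converted 16 bits at a time to network
	 * byte order.
	 */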
3143 	m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
3144 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3145 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3146 
3147 	if (!(ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)) {
3148 		/* Disable hardware decapsulation of VLAN frames. */
3149 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
3150 	}
3151 
3152 	/* Program promiscuous mode and multicast filters. */
3153 	bge_iff(sc);
3154 
3155 	/* Init RX ring. */
3156 	bge_init_rx_ring_std(sc);
3157 
3158 	/*
3159 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3160 	 * memory to ensure that the chip has in fact read the first
3161 	 * entry of the ring.
3162 	 */
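	/*
	 * The readback below apparently fetches the length word of the
	 * first standard-ring descriptor from NIC-local memory; seeing
	 * the expected cluster size there shows the chip has pulled the
	 * entry in.
	 */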
3163 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3164 		u_int32_t		v, i;
3165 		for (i = 0; i < 10; i++) {
3166 			DELAY(20);
3167 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3168 			if (v == (MCLBYTES - ETHER_ALIGN))
3169 				break;
3170 		}
3171 		if (i == 10)
3172 			printf("%s: 5705 A0 chip failed to load RX ring\n",
3173 			    sc->bge_dev.dv_xname);
3174 	}
3175 
3176 	/* Init Jumbo RX ring. */
3177 	if (BGE_IS_JUMBO_CAPABLE(sc))
3178 		bge_init_rx_ring_jumbo(sc);
3179 
3180 	/* Init our RX return ring index */
3181 	sc->bge_rx_saved_considx = 0;
3182 
3183 	/* Init our RX/TX stat counters. */
3184 	sc->bge_tx_collisions = 0;
3185 	sc->bge_rx_discards = 0;
3186 	sc->bge_rx_inerrors = 0;
3187 	sc->bge_rx_overruns = 0;
3188 	sc->bge_tx_discards = 0;
3189 
3190 	/* Init TX ring. */
3191 	bge_init_tx_ring(sc);
3192 
3193 	/* Turn on transmitter */
3194 	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3195 
3196 	/* Turn on receiver */
3197 	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3198 
3199 	CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3200 
3201 	/* Tell firmware we're alive. */
3202 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3203 
3204 	/* Enable host interrupts. */
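	/*
	 * Three steps: clear any latched INTA state, unmask the PCI
	 * interrupt, then write 0 to the IRQ mailbox to re-arm the chip.
	 */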
3205 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3206 	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3207 	bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3208 
3209 	bge_ifmedia_upd(ifp);
3210 
3211 	ifp->if_flags |= IFF_RUNNING;
3212 	ifp->if_flags &= ~IFF_OACTIVE;
3213 
3214 	splx(s);
3215 
3216 	timeout_add_sec(&sc->bge_timeout, 1);
3217 }
3218 
3219 /*
3220  * Set media options.
3221  */
3222 int
3223 bge_ifmedia_upd(struct ifnet *ifp)
3224 {
3225 	struct bge_softc *sc = ifp->if_softc;
3226 	struct mii_data *mii = &sc->bge_mii;
3227 	struct ifmedia *ifm = &sc->bge_ifmedia;
3228 
3229 	/* If this is a 1000baseX NIC, enable the TBI port. */
3230 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3231 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3232 			return (EINVAL);
3233 		switch (IFM_SUBTYPE(ifm->ifm_media)) {
3234 		case IFM_AUTO:
3235 			/*
3236 			 * The BCM5704 ASIC appears to have a special
3237 			 * mechanism for programming the autoneg
3238 			 * advertisement registers in TBI mode.
3239 			 */
3240 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
3241 				u_int32_t sgdig;
3242 				sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
3243 				if (sgdig & BGE_SGDIGSTS_DONE) {
3244 					CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3245 					sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3246 					sgdig |= BGE_SGDIGCFG_AUTO |
3247 					    BGE_SGDIGCFG_PAUSE_CAP |
3248 					    BGE_SGDIGCFG_ASYM_PAUSE;
3249 					CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3250 					    sgdig | BGE_SGDIGCFG_SEND);
3251 					DELAY(5);
3252 					CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3253 				}
3254 			}
3255 			break;
3256 		case IFM_1000_SX:
3257 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3258 				BGE_CLRBIT(sc, BGE_MAC_MODE,
3259 				    BGE_MACMODE_HALF_DUPLEX);
3260 			} else {
3261 				BGE_SETBIT(sc, BGE_MAC_MODE,
3262 				    BGE_MACMODE_HALF_DUPLEX);
3263 			}
3264 			break;
3265 		default:
3266 			return (EINVAL);
3267 		}
3268 		/* XXX 802.3x flow control for 1000BASE-SX */
3269 		return (0);
3270 	}
3271 
3272 	BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
3273 	if (mii->mii_instance) {
3274 		struct mii_softc *miisc;
3275 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3276 			mii_phy_reset(miisc);
3277 	}
3278 	mii_mediachg(mii);
3279 
3280 	/*
3281 	 * Force an interrupt so that we will call bge_link_upd
3282 	 * if needed and clear any pending link state attention.
3283 	 * Without this we would get no further interrupts for link
3284 	 * state changes, would never bring the link up, and would be
3285 	 * unable to send in bge_start. The only other way to get
3286 	 * things working was to receive a packet and take an RX interrupt.
3287 	 */
3288 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
3289 	    sc->bge_flags & BGE_IS_5788)
3290 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3291 	else
3292 		BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3293 
3294 	return (0);
3295 }
3296 
3297 /*
3298  * Report current media status.
3299  */
3300 void
3301 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3302 {
3303 	struct bge_softc *sc = ifp->if_softc;
3304 	struct mii_data *mii = &sc->bge_mii;
3305 
3306 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3307 		ifmr->ifm_status = IFM_AVALID;
3308 		ifmr->ifm_active = IFM_ETHER;
3309 		if (CSR_READ_4(sc, BGE_MAC_STS) &
3310 		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
3311 			ifmr->ifm_status |= IFM_ACTIVE;
3312 		} else {
3313 			ifmr->ifm_active |= IFM_NONE;
3314 			return;
3315 		}
3316 		ifmr->ifm_active |= IFM_1000_SX;
3317 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3318 			ifmr->ifm_active |= IFM_HDX;
3319 		else
3320 			ifmr->ifm_active |= IFM_FDX;
3321 		return;
3322 	}
3323 
3324 	mii_pollstat(mii);
3325 	ifmr->ifm_status = mii->mii_media_status;
3326 	ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
3327 	    sc->bge_flowflags;
3328 }
3329 
3330 int
3331 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3332 {
3333 	struct bge_softc *sc = ifp->if_softc;
3334 	struct ifaddr *ifa = (struct ifaddr *) data;
3335 	struct ifreq *ifr = (struct ifreq *) data;
3336 	int s, error = 0;
3337 	struct mii_data *mii;
3338 
3339 	s = splnet();
3340 
3341 	switch (command) {
3342 	case SIOCSIFADDR:
3343 		ifp->if_flags |= IFF_UP;
3344 		if (!(ifp->if_flags & IFF_RUNNING))
3345 			bge_init(sc);
3346 #ifdef INET
3347 		if (ifa->ifa_addr->sa_family == AF_INET)
3348 			arp_ifinit(&sc->arpcom, ifa);
3349 #endif /* INET */
3350 		break;
3351 
3352 	case SIOCSIFFLAGS:
3353 		if (ifp->if_flags & IFF_UP) {
3354 			if (ifp->if_flags & IFF_RUNNING)
3355 				bge_iff(sc);
3356 			else
3357 				bge_init(sc);
3358 		} else {
3359 			if (ifp->if_flags & IFF_RUNNING)
3360 				bge_stop(sc);
3361 		}
3362 		sc->bge_if_flags = ifp->if_flags;
3363 		break;
3364 
3365 	case SIOCSIFMEDIA:
3366 		/* XXX Flow control is not supported for 1000BASE-SX */
3367 		if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3368 			ifr->ifr_media &= ~IFM_ETH_FMASK;
3369 			sc->bge_flowflags = 0;
3370 		}
3371 
3372 		/* Flow control requires full-duplex mode. */
3373 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3374 		    (ifr->ifr_media & IFM_FDX) == 0) {
3375 			ifr->ifr_media &= ~IFM_ETH_FMASK;
3376 		}
3377 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3378 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3379 				/* We can do both TXPAUSE and RXPAUSE. */
3380 				ifr->ifr_media |=
3381 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3382 			}
3383 			sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3384 		}
3385 		/* FALLTHROUGH */
3386 	case SIOCGIFMEDIA:
3387 		if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3388 			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
3389 			    command);
3390 		} else {
3391 			mii = &sc->bge_mii;
3392 			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
3393 			    command);
3394 		}
3395 		break;
3396 
3397 	default:
3398 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
3399 	}
3400 
3401 	if (error == ENETRESET) {
3402 		if (ifp->if_flags & IFF_RUNNING)
3403 			bge_iff(sc);
3404 		error = 0;
3405 	}
3406 
3407 	splx(s);
3408 	return (error);
3409 }
3410 
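/*
 * A transmit did not complete within the watchdog period; report the
 * timeout and reinitialize the chip to recover.
 */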
3411 void
3412 bge_watchdog(struct ifnet *ifp)
3413 {
3414 	struct bge_softc *sc;
3415 
3416 	sc = ifp->if_softc;
3417 
3418 	printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);
3419 
3420 	bge_init(sc);
3421 
3422 	ifp->if_oerrors++;
3423 }
3424 
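/*
 * Clear the enable bit in a block's mode register and busy-wait until
 * the hardware reports that the state machine has actually stopped.
 */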
3425 void
3426 bge_stop_block(struct bge_softc *sc, bus_size_t reg, u_int32_t bit)
3427 {
3428 	int i;
3429 
3430 	BGE_CLRBIT(sc, reg, bit);
3431 
3432 	for (i = 0; i < BGE_TIMEOUT; i++) {
3433 		if ((CSR_READ_4(sc, reg) & bit) == 0)
3434 			return;
3435 		delay(100);
3436 	}
3437 
3438 	DPRINTFN(5, ("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
3439 	    sc->bge_dev.dv_xname, (u_long) reg, bit));
3440 }
3441 
3442 /*
3443  * Stop the adapter and free any mbufs allocated to the
3444  * RX and TX lists.
3445  */
3446 void
3447 bge_stop(struct bge_softc *sc)
3448 {
3449 	struct ifnet *ifp = &sc->arpcom.ac_if;
3450 	struct ifmedia_entry *ifm;
3451 	struct mii_data *mii;
3452 	int mtmp, itmp;
3453 
3454 	timeout_del(&sc->bge_timeout);
3455 	timeout_del(&sc->bge_rxtimeout);
3456 
3457 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3458 
3459 	/*
3460 	 * Disable all of the receiver blocks
3461 	 */
3462 	bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3463 	bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3464 	bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3465 	if (!(BGE_IS_5705_OR_BEYOND(sc)))
3466 		bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3467 	bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3468 	bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3469 	bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3470 
3471 	/*
3472 	 * Disable all of the transmit blocks
3473 	 */
3474 	bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3475 	bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3476 	bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3477 	bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3478 	bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3479 	if (!(BGE_IS_5705_OR_BEYOND(sc)))
3480 		bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3481 	bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3482 
3483 	/*
3484 	 * Shut down all of the memory managers and related
3485 	 * state machines.
3486 	 */
3487 	bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3488 	bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3489 	if (!(BGE_IS_5705_OR_BEYOND(sc)))
3490 		bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3491 
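	/* Pulse a reset through all of the FTQs (flow-through queues). */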
3492 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3493 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3494 
3495 	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
3496 		bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3497 		bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3498 	}
3499 
3500 	/* Disable host interrupts. */
3501 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3502 	bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3503 
3504 	/*
3505 	 * Tell firmware we're shutting down.
3506 	 */
3507 	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3508 
3509 	/* Free the RX lists. */
3510 	bge_free_rx_ring_std(sc);
3511 
3512 	/* Free jumbo RX list. */
3513 	if (BGE_IS_JUMBO_CAPABLE(sc))
3514 		bge_free_rx_ring_jumbo(sc);
3515 
3516 	/* Free TX buffers. */
3517 	bge_free_tx_ring(sc);
3518 
3519 	/*
3520 	 * Isolate/power down the PHY, but leave the media selection
3521 	 * unchanged so that things will be put back to normal when
3522 	 * we bring the interface back up.
3523 	 */
3524 	if (!(sc->bge_flags & BGE_PHY_FIBER_TBI)) {
3525 		mii = &sc->bge_mii;
3526 		itmp = ifp->if_flags;
3527 		ifp->if_flags |= IFF_UP;
3528 		ifm = mii->mii_media.ifm_cur;
3529 		mtmp = ifm->ifm_media;
3530 		ifm->ifm_media = IFM_ETHER|IFM_NONE;
3531 		mii_mediachg(mii);
3532 		ifm->ifm_media = mtmp;
3533 		ifp->if_flags = itmp;
3534 	}
3535 
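	/* The saved TX consumer index is stale once the chip is stopped. */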
3536 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3537 
3538 	/* Clear MAC's link state (PHY may still have link UP). */
3539 	BGE_STS_CLRBIT(sc, BGE_STS_LINK);
3540 }
3541 
3542 /*
3543  * Stop all chip I/O so that the kernel's probe routines don't
3544  * get confused by errant DMAs when rebooting.
3545  */
3546 void
3547 bge_shutdown(void *xsc)
3548 {
3549 	struct bge_softc *sc = (struct bge_softc *)xsc;
3550 
3551 	bge_stop(sc);
3552 	bge_reset(sc);
3553 }
3554 
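/*
 * Track link state changes; called from the interrupt handler when a
 * link change attention is pending.
 */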
3555 void
3556 bge_link_upd(struct bge_softc *sc)
3557 {
3558 	struct ifnet *ifp = &sc->arpcom.ac_if;
3559 	struct mii_data *mii = &sc->bge_mii;
3560 	u_int32_t status;
3561 	int link;
3562 
3563 	/* Clear 'pending link event' flag */
3564 	BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT);
3565 
3566 	/*
3567 	 * Process link state changes.
3568 	 * Grrr. The link status word in the status block does
3569 	 * not work correctly on the BCM5700 rev AX and BX chips,
3570 	 * according to all available information. Hence, we have
3571 	 * to enable MII interrupts in order to properly obtain
3572 	 * async link changes. Unfortunately, this also means that
3573 	 * we have to read the MAC status register to detect link
3574 	 * changes, thereby adding an additional register access to
3575 	 * the interrupt handler.
3576 	 *
3577 	 */
3578 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) {
3579 		status = CSR_READ_4(sc, BGE_MAC_STS);
3580 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
3581 			mii_pollstat(mii);
3582 
3583 			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
3584 			    mii->mii_media_status & IFM_ACTIVE &&
3585 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
3586 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
3587 			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
3588 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
3589 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
3590 				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
3591 
3592 			/* Clear the interrupt */
3593 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
3594 			    BGE_EVTENB_MI_INTERRUPT);
3595 			bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR);
3596 			bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR,
3597 			    BRGPHY_INTRS);
3598 		}
3599 		return;
3600 	}
3601 
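	/*
	 * For TBI (fiber) interfaces, the PCS sync bit in the MAC
	 * status register indicates whether we have link.
	 */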
3602 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3603 		status = CSR_READ_4(sc, BGE_MAC_STS);
3604 		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3605 			if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
3606 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
3607 				if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
3608 					BGE_CLRBIT(sc, BGE_MAC_MODE,
3609 					    BGE_MACMODE_TBI_SEND_CFGS);
3610 				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3611 				status = CSR_READ_4(sc, BGE_MAC_MODE);
3612 				ifp->if_link_state =
3613 				    (status & BGE_MACMODE_HALF_DUPLEX) ?
3614 				    LINK_STATE_HALF_DUPLEX :
3615 				    LINK_STATE_FULL_DUPLEX;
3616 				if_link_state_change(ifp);
3617 				ifp->if_baudrate = IF_Gbps(1);
3618 			}
3619 		} else if (BGE_STS_BIT(sc, BGE_STS_LINK)) {
3620 			BGE_STS_CLRBIT(sc, BGE_STS_LINK);
3621 			ifp->if_link_state = LINK_STATE_DOWN;
3622 			if_link_state_change(ifp);
3623 			ifp->if_baudrate = 0;
3624 		}
3625 	/*
3626 	 * Discard link events for MII/GMII cards if MI auto-polling is
3627 	 * disabled. This should not happen since mii callouts are locked
3628 	 * now, but we keep this check for debugging.
3629 	 */
3630 	} else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) {
3631 		/*
3632 		 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit
3633 		 * in status word always set. Workaround this bug by reading
3634 		 * PHY link status directly.
3635 		 */
3636 		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ?
3637 		    BGE_STS_LINK : 0;
3638 
3639 		if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) {
3640 			mii_pollstat(mii);
3641 
3642 			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
3643 			    mii->mii_media_status & IFM_ACTIVE &&
3644 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
3645 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
3646 			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
3647 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
3648 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
3649 				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
3650 		}
3651 	}
3652 
3653 	/* Clear the attention */
3654 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
3655 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
3656 	    BGE_MACSTAT_LINK_CHANGED);
3657 }
3658 
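/*
 * Power management hook: if the interface was up, reinitialize the
 * chip on resume and kick the transmit queue.
 */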
3659 void
3660 bge_power(int why, void *xsc)
3661 {
3662 	struct bge_softc *sc = (struct bge_softc *)xsc;
3663 	struct ifnet *ifp;
3664 
3665 	if (why == PWR_RESUME) {
3666 		ifp = &sc->arpcom.ac_if;
3667 		if (ifp->if_flags & IFF_UP) {
3668 			bge_init(xsc);
3669 			bge_start(ifp);
3670 		}
3671 	}
3672 }
3673