1 /*	$OpenBSD: if_bge.c,v 1.245 2008/09/23 00:27:18 brad Exp $	*/
2 
3 /*
4  * Copyright (c) 2001 Wind River Systems
5  * Copyright (c) 1997, 1998, 1999, 2001
6  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by Bill Paul.
19  * 4. Neither the name of the author nor the names of any co-contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
27  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33  * THE POSSIBILITY OF SUCH DAMAGE.
34  *
35  * $FreeBSD: if_bge.c,v 1.25 2002/11/14 23:54:49 sam Exp $
36  */
37 
38 /*
39  * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
40  *
41  * Written by Bill Paul <wpaul@windriver.com>
42  * Senior Engineer, Wind River Systems
43  */
44 
45 /*
46  * The Broadcom BCM5700 is based on technology originally developed by
47  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
49  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
50  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, Jumbo
51  * frames, highly configurable RX filtering, and 16 RX and TX queues
52  * (which, along with RX filter rules, can be used for QOS applications).
53  * Other features, such as TCP segmentation, may be available as part
54  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
55  * firmware images can be stored in hardware and need not be compiled
56  * into the driver.
57  *
58  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function on a 32-bit/64-bit 33/66 MHz bus, or a 64-bit/133 MHz bus.
60  *
61  * The BCM5701 is a single-chip solution incorporating both the BCM5700
62  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
63  * does not support external SSRAM.
64  *
65  * Broadcom also produces a variation of the BCM5700 under the "Altima"
66  * brand name, which is functionally similar but lacks PCI-X support.
67  *
68  * Without external SSRAM, you can only have at most 4 TX rings,
69  * and the use of the mini RX ring is disabled. This seems to imply
70  * that these features are simply not available on the BCM5701. As a
71  * result, this driver does not implement any support for the mini RX
72  * ring.
73  */
74 
75 #include "bpfilter.h"
76 #include "vlan.h"
77 
78 #include <sys/param.h>
79 #include <sys/systm.h>
80 #include <sys/sockio.h>
81 #include <sys/mbuf.h>
82 #include <sys/malloc.h>
83 #include <sys/kernel.h>
84 #include <sys/device.h>
85 #include <sys/timeout.h>
86 #include <sys/socket.h>
87 
88 #include <net/if.h>
89 #include <net/if_dl.h>
90 #include <net/if_media.h>
91 
92 #ifdef INET
93 #include <netinet/in.h>
94 #include <netinet/in_systm.h>
95 #include <netinet/in_var.h>
96 #include <netinet/ip.h>
97 #include <netinet/if_ether.h>
98 #endif
99 
100 #if NVLAN > 0
101 #include <net/if_types.h>
102 #include <net/if_vlan_var.h>
103 #endif
104 
105 #if NBPFILTER > 0
106 #include <net/bpf.h>
107 #endif
108 
109 #ifdef __sparc64__
110 #include <sparc64/autoconf.h>
111 #include <dev/ofw/openfirm.h>
112 #endif
113 
114 #include <dev/pci/pcireg.h>
115 #include <dev/pci/pcivar.h>
116 #include <dev/pci/pcidevs.h>
117 
118 #include <dev/mii/mii.h>
119 #include <dev/mii/miivar.h>
120 #include <dev/mii/miidevs.h>
121 #include <dev/mii/brgphyreg.h>
122 
123 #include <dev/pci/if_bgereg.h>
124 
125 const struct bge_revision * bge_lookup_rev(u_int32_t);
126 int bge_probe(struct device *, void *, void *);
127 void bge_attach(struct device *, struct device *, void *);
128 
129 struct cfattach bge_ca = {
130 	sizeof(struct bge_softc), bge_probe, bge_attach
131 };
132 
133 struct cfdriver bge_cd = {
134 	0, "bge", DV_IFNET
135 };
136 
137 void bge_txeof(struct bge_softc *);
138 void bge_rxeof(struct bge_softc *);
139 
140 void bge_tick(void *);
141 void bge_stats_update(struct bge_softc *);
142 void bge_stats_update_regs(struct bge_softc *);
143 int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *);
144 int bge_compact_dma_runt(struct mbuf *pkt);
145 
146 int bge_intr(void *);
147 void bge_start(struct ifnet *);
148 int bge_ioctl(struct ifnet *, u_long, caddr_t);
149 void bge_init(void *);
150 void bge_power(int, void *);
151 void bge_stop_block(struct bge_softc *, bus_size_t, u_int32_t);
152 void bge_stop(struct bge_softc *);
153 void bge_watchdog(struct ifnet *);
154 void bge_shutdown(void *);
155 int bge_ifmedia_upd(struct ifnet *);
156 void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
157 
158 u_int8_t bge_nvram_getbyte(struct bge_softc *, int, u_int8_t *);
159 int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
160 u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *);
161 int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
162 
163 void bge_iff(struct bge_softc *);
164 
165 int bge_alloc_jumbo_mem(struct bge_softc *);
166 void *bge_jalloc(struct bge_softc *);
167 void bge_jfree(caddr_t, u_int, void *);
168 int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, bus_dmamap_t);
169 int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
170 int bge_init_rx_ring_std(struct bge_softc *);
171 void bge_free_rx_ring_std(struct bge_softc *);
172 int bge_init_rx_ring_jumbo(struct bge_softc *);
173 void bge_free_rx_ring_jumbo(struct bge_softc *);
174 void bge_free_tx_ring(struct bge_softc *);
175 int bge_init_tx_ring(struct bge_softc *);
176 
177 void bge_chipinit(struct bge_softc *);
178 int bge_blockinit(struct bge_softc *);
179 
180 u_int32_t bge_readmem_ind(struct bge_softc *, int);
181 void bge_writemem_ind(struct bge_softc *, int, int);
182 void bge_writereg_ind(struct bge_softc *, int, int);
183 void bge_writembx(struct bge_softc *, int, int);
184 
185 int bge_miibus_readreg(struct device *, int, int);
186 void bge_miibus_writereg(struct device *, int, int, int);
187 void bge_miibus_statchg(struct device *);
188 
189 void bge_reset(struct bge_softc *);
190 void bge_link_upd(struct bge_softc *);
191 
192 #ifdef BGE_DEBUG
193 #define DPRINTF(x)	do { if (bgedebug) printf x; } while (0)
194 #define DPRINTFN(n,x)	do { if (bgedebug >= (n)) printf x; } while (0)
195 int	bgedebug = 0;
196 #else
197 #define DPRINTF(x)
198 #define DPRINTFN(n,x)
199 #endif
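
/*
 * Example (editor's note): both macros take a parenthesized printf()
 * argument list, so they can expand to nothing when BGE_DEBUG is not
 * defined, e.g.
 *
 *	DPRINTFN(1, ("%s: jumbo buf at %p\n",
 *	    sc->bge_dev.dv_xname, sc->bge_cdata.bge_jumbo_buf));
 */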
200 
201 /*
202  * Various supported device vendors/types and their names. Note: the
203  * spec seems to indicate that the hardware still has Alteon's vendor
204  * ID burned into it, though it will always be overridden by the vendor
205  * ID in the EEPROM. Just to be safe, we cover all possibilities.
206  */
207 const struct pci_matchid bge_devices[] = {
208 	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700 },
209 	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701 },
210 
211 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000 },
212 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001 },
213 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1003 },
214 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100 },
215 
216 	{ PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701 },
217 
218 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700 },
219 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701 },
220 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702 },
221 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT },
222 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X },
223 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703 },
224 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703_ALT },
225 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X },
226 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C },
227 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S },
228 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT },
229 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705 },
230 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F },
231 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K },
232 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M },
233 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT },
234 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714 },
235 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S },
236 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715 },
237 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S },
238 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720 },
239 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721 },
240 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722 },
241 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750 },
242 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M },
243 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751 },
244 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F },
245 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M },
246 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752 },
247 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M },
248 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753 },
249 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F },
250 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M },
251 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754 },
252 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M },
253 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755 },
254 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M },
255 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756 },
256 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780 },
257 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S },
258 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781 },
259 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782 },
260 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786 },
261 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787 },
262 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F },
263 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M },
264 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788 },
265 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789 },
266 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901 },
267 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2 },
268 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M },
269 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906 },
270 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M },
271 
272 	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE4 },
273 	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE5 },
274 	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PP250_450_LAN },
275 
276 	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9D21 },
277 
278 	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996 }
279 };
280 
281 #define BGE_IS_5705_OR_BEYOND(sc)  \
282 	(BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705    || \
283 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750    || \
284 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 || \
285 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780    || \
286 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714    || \
287 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752    || \
288 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755    || \
289 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787    || \
290 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
291 
292 #define BGE_IS_575X_PLUS(sc)  \
293 	(BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750    || \
294 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 || \
295 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780    || \
296 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714    || \
297 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752    || \
298 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755    || \
299 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787    || \
300 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
301 
302 #define BGE_IS_5714_FAMILY(sc)  \
303 	(BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 || \
304 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780    || \
305 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714)
306 
307 #define BGE_IS_JUMBO_CAPABLE(sc)  \
308 	(BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700    || \
309 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701    || \
310 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703    || \
311 	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
312 
313 
314 static const struct bge_revision {
315 	u_int32_t		br_chipid;
316 	const char		*br_name;
317 } bge_revisions[] = {
318 	{ BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
319 	{ BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
320 	{ BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
321 	{ BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
322 	{ BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
323 	{ BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
324 	{ BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
325 	{ BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
326 	{ BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
327 	{ BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
328 	{ BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
329 	{ BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
330 	{ BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
331 	{ BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
332 	{ BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
333 	{ BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
334 	{ BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
335 	{ BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
336 	{ BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
337 	{ BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
338 	{ BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
339 	{ BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
340 	{ BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
341 	{ BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
342 	{ BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
343 	{ BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
344 	{ BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
345 	{ BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
346 	{ BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
347 	{ BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
348 	{ BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
349 	{ BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
350 	{ BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
351 	{ BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
352 	{ BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
353 	{ BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
354 	{ BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
355 	{ BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
356 	{ BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
357 	{ BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
358 	{ BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
359 	{ BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
360 	{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
361 	{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
362 	{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
363 	{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
364 	{ BGE_CHIPID_BCM5755_C0, "BCM5755 C0" },
365 	/* the 5754 and 5787 share the same ASIC ID */
366 	{ BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
367 	{ BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
368 	{ BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
369 	{ BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
370 	{ BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
371 
372 	{ 0, NULL }
373 };
374 
375 /*
376  * Some defaults for major revisions, so that newer steppings
377  * that we don't know about have a shot at working.
378  */
379 static const struct bge_revision bge_majorrevs[] = {
380 	{ BGE_ASICREV_BCM5700, "unknown BCM5700" },
381 	{ BGE_ASICREV_BCM5701, "unknown BCM5701" },
382 	/* 5702 and 5703 share the same ASIC ID */
383 	{ BGE_ASICREV_BCM5703, "unknown BCM5703" },
384 	{ BGE_ASICREV_BCM5704, "unknown BCM5704" },
385 	{ BGE_ASICREV_BCM5705, "unknown BCM5705" },
386 	{ BGE_ASICREV_BCM5750, "unknown BCM5750" },
387 	{ BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
388 	{ BGE_ASICREV_BCM5752, "unknown BCM5752" },
389 	{ BGE_ASICREV_BCM5780, "unknown BCM5780" },
390 	{ BGE_ASICREV_BCM5714, "unknown BCM5714" },
391 	{ BGE_ASICREV_BCM5755, "unknown BCM5755" },
392 	/* 5754 and 5787 share the same ASIC ID */
393 	{ BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
394 	{ BGE_ASICREV_BCM5906, "unknown BCM5906" },
395 
396 	{ 0, NULL }
397 };
398 
399 u_int32_t
400 bge_readmem_ind(struct bge_softc *sc, int off)
401 {
402 	struct pci_attach_args	*pa = &(sc->bge_pa);
403 
404 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
405 	return (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA));
406 }
407 
408 void
409 bge_writemem_ind(struct bge_softc *sc, int off, int val)
410 {
411 	struct pci_attach_args	*pa = &(sc->bge_pa);
412 
413 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
414 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val);
415 }
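
/*
 * Example (editor's sketch, not part of the driver): the MEMWIN
 * registers turn accesses to NIC-internal RAM into indirect PCI
 * config cycles, so a write/read round trip through a scratch word
 * would look like the following. The function name and the offset
 * used here are purely illustrative.
 */
#ifdef notdef
static int
bge_memwin_selftest(struct bge_softc *sc)
{
	int off = BGE_MEMWIN_START;	/* illustrative scratch offset */

	bge_writemem_ind(sc, off, 0xdeadbeef);
	return (bge_readmem_ind(sc, off) == 0xdeadbeef ? 0 : EIO);
}
#endif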
416 
417 void
418 bge_writereg_ind(struct bge_softc *sc, int off, int val)
419 {
420 	struct pci_attach_args	*pa = &(sc->bge_pa);
421 
422 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
423 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val);
424 }
425 
426 void
427 bge_writembx(struct bge_softc *sc, int off, int val)
428 {
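	/*
	 * The 5906 wants mailbox writes remapped into its
	 * low-priority mailbox window (BGE_LPMBX_*).
	 */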
429 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
430 		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
431 
432 	CSR_WRITE_4(sc, off, val);
433 }
434 
435 u_int8_t
436 bge_nvram_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
437 {
438 	u_int32_t access, byte = 0;
439 	int i;
440 
441 	/* Lock. */
442 	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
443 	for (i = 0; i < 8000; i++) {
444 		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
445 			break;
446 		DELAY(20);
447 	}
448 	if (i == 8000)
449 		return (1);
450 
451 	/* Enable access. */
452 	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
453 	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
454 
455 	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
456 	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
457 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
458 		DELAY(10);
459 		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
460 			DELAY(10);
461 			break;
462 		}
463 	}
464 
465 	if (i == BGE_TIMEOUT * 10) {
466 		printf("%s: nvram read timed out\n", sc->bge_dev.dv_xname);
467 		return (1);
468 	}
469 
470 	/* Get result. */
471 	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
472 
473 	*dest = (swap32(byte) >> ((addr % 4) * 8)) & 0xFF;
474 
475 	/* Disable access. */
476 	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
477 
478 	/* Unlock. */
479 	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
480 	CSR_READ_4(sc, BGE_NVRAM_SWARB);
481 
482 	return (0);
483 }
484 
485 /*
486  * Read a sequence of bytes from NVRAM.
487  */
488 
489 int
490 bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
491 {
492 	int err = 0, i;
493 	u_int8_t byte = 0;
494 
495 	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
496 		return (1);
497 
498 	for (i = 0; i < cnt; i++) {
499 		err = bge_nvram_getbyte(sc, off + i, &byte);
500 		if (err)
501 			break;
502 		*(dest + i) = byte;
503 	}
504 
505 	return (err ? 1 : 0);
506 }
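
/*
 * Example (editor's sketch): on the BCM5906 the station address lives
 * in NVRAM rather than EEPROM, so attach-time code can fetch it along
 * these lines. The helper name is made up and the offset macro is an
 * assumption here.
 */
#ifdef notdef
static void
bge_get_eaddr_nvram(struct bge_softc *sc)
{
	u_int8_t enaddr[ETHER_ADDR_LEN];

	if (bge_read_nvram(sc, (caddr_t)enaddr,
	    BGE_EE_NVRAM_MAC_OFFSET * 2, sizeof(enaddr)) != 0)
		printf("%s: can't read station address from NVRAM\n",
		    sc->bge_dev.dv_xname);
}
#endif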
507 
508 /*
509  * Read a byte of data stored in the EEPROM at address 'addr.' The
510  * BCM570x supports both the traditional bitbang interface and an
511  * auto access interface for reading the EEPROM. We use the auto
512  * access method.
513  */
514 u_int8_t
515 bge_eeprom_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
516 {
517 	int i;
518 	u_int32_t byte = 0;
519 
520 	/*
521 	 * Enable use of auto EEPROM access so we can avoid
522 	 * having to use the bitbang method.
523 	 */
524 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
525 
526 	/* Reset the EEPROM, load the clock period. */
527 	CSR_WRITE_4(sc, BGE_EE_ADDR,
528 	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
529 	DELAY(20);
530 
531 	/* Issue the read EEPROM command. */
532 	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
533 
534 	/* Wait for completion */
535 	for(i = 0; i < BGE_TIMEOUT * 10; i++) {
536 		DELAY(10);
537 		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
538 			break;
539 	}
540 
541 	if (i == BGE_TIMEOUT * 10) {
542 		printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
543 		return (1);
544 	}
545 
546 	/* Get result. */
547 	byte = CSR_READ_4(sc, BGE_EE_DATA);
548 
549 	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;
550 
551 	return (0);
552 }
553 
554 /*
555  * Read a sequence of bytes from the EEPROM.
556  */
557 int
558 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
559 {
560 	int err = 0, i;
561 	u_int8_t byte = 0;
562 
563 	for (i = 0; i < cnt; i++) {
564 		err = bge_eeprom_getbyte(sc, off + i, &byte);
565 		if (err)
566 			break;
567 		*(dest + i) = byte;
568 	}
569 
570 	return (err ? 1 : 0);
571 }
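
/*
 * Example (editor's sketch): the usual consumer of this helper is the
 * attach path, which pulls the six-byte station address out of the
 * EEPROM. The wrapper function name below is made up; BGE_EE_MAC_OFFSET
 * comes from if_bgereg.h.
 */
#ifdef notdef
static void
bge_get_eaddr_eeprom(struct bge_softc *sc)
{
	if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN))
		printf("%s: failed to read station address\n",
		    sc->bge_dev.dv_xname);
}
#endif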
572 
573 int
574 bge_miibus_readreg(struct device *dev, int phy, int reg)
575 {
576 	struct bge_softc *sc = (struct bge_softc *)dev;
577 	u_int32_t val, autopoll;
578 	int i;
579 
580 	/*
581 	 * Broadcom's own driver always assumes the internal
582 	 * PHY is at GMII address 1. On some chips, the PHY responds
583 	 * to accesses at all addresses, which could cause us to
 * bogusly attach the PHY 32 times at probe time. Always
 * restricting the lookup to address 1 is simpler than
 * trying to figure out which chip revisions should be
587 	 * special-cased.
588 	 */
589 	if (phy != 1)
590 		return (0);
591 
592 	/* Reading with autopolling on may trigger PCI errors */
593 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
594 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
595 		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
596 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
597 		DELAY(40);
598 	}
599 
600 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
601 	    BGE_MIPHY(phy)|BGE_MIREG(reg));
602 
603 	for (i = 0; i < 200; i++) {
604 		delay(1);
605 		val = CSR_READ_4(sc, BGE_MI_COMM);
606 		if (!(val & BGE_MICOMM_BUSY))
607 			break;
608 		delay(10);
609 	}
610 
611 	if (i == 200) {
612 		printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
613 		val = 0;
614 		goto done;
615 	}
616 
617 	val = CSR_READ_4(sc, BGE_MI_COMM);
618 
619 done:
620 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
621 		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
622 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
623 		DELAY(40);
624 	}
625 
626 	if (val & BGE_MICOMM_READFAIL)
627 		return (0);
628 
629 	return (val & 0xFFFF);
630 }
631 
632 void
633 bge_miibus_writereg(struct device *dev, int phy, int reg, int val)
634 {
635 	struct bge_softc *sc = (struct bge_softc *)dev;
636 	u_int32_t autopoll;
637 	int i;
638 
	/* Writing with autopolling on may trigger PCI errors */
640 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
641 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
642 		DELAY(40);
643 		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
644 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
645 		DELAY(10); /* 40 usec is supposed to be adequate */
646 	}
647 
648 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
649 	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
650 
651 	for (i = 0; i < 200; i++) {
652 		delay(1);
653 		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
654 			break;
655 		delay(10);
656 	}
657 
658 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
659 		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
660 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
661 		DELAY(40);
662 	}
663 
664 	if (i == 200) {
		printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
666 	}
667 }
668 
669 void
670 bge_miibus_statchg(struct device *dev)
671 {
672 	struct bge_softc *sc = (struct bge_softc *)dev;
673 	struct mii_data *mii = &sc->bge_mii;
674 
675 	/*
676 	 * Get flow control negotiation result.
677 	 */
678 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
679 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) {
680 		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
681 		mii->mii_media_active &= ~IFM_ETH_FMASK;
682 	}
683 
684 	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
685 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
686 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
687 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
688 	else
689 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
690 
691 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
692 		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
693 	else
694 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
695 
696 	/*
697 	 * 802.3x flow control
698 	 */
699 	if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
700 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
701 	else
702 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
703 
704 	if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
705 		BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
706 	else
707 		BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
708 }
709 
710 /*
711  * Memory management for Jumbo frames.
712  */
713 
714 int
715 bge_alloc_jumbo_mem(struct bge_softc *sc)
716 {
717 	caddr_t			ptr, kva;
718 	bus_dma_segment_t	seg;
719 	int		i, rseg, state, error;
720 	struct bge_jpool_entry   *entry;
721 
722 	state = error = 0;
723 
724 	/* Grab a big chunk o' storage. */
725 	if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
726 			     &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
727 		printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname);
728 		return (ENOBUFS);
729 	}
730 
731 	state = 1;
732 	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva,
733 			   BUS_DMA_NOWAIT)) {
734 		printf("%s: can't map dma buffers (%d bytes)\n",
735 		    sc->bge_dev.dv_xname, BGE_JMEM);
736 		error = ENOBUFS;
737 		goto out;
738 	}
739 
740 	state = 2;
741 	if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
742 	    BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
743 		printf("%s: can't create dma map\n", sc->bge_dev.dv_xname);
744 		error = ENOBUFS;
745 		goto out;
746 	}
747 
748 	state = 3;
749 	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
750 			    kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
751 		printf("%s: can't load dma map\n", sc->bge_dev.dv_xname);
752 		error = ENOBUFS;
753 		goto out;
754 	}
755 
756 	state = 4;
757 	sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva;
758 	DPRINTFN(1,("bge_jumbo_buf = 0x%08X\n", sc->bge_cdata.bge_jumbo_buf));
759 
760 	SLIST_INIT(&sc->bge_jfree_listhead);
761 	SLIST_INIT(&sc->bge_jinuse_listhead);
762 
763 	/*
764 	 * Now divide it up into 9K pieces and save the addresses
765 	 * in an array.
766 	 */
767 	ptr = sc->bge_cdata.bge_jumbo_buf;
768 	for (i = 0; i < BGE_JSLOTS; i++) {
769 		sc->bge_cdata.bge_jslots[i] = ptr;
770 		ptr += BGE_JLEN;
771 		entry = malloc(sizeof(struct bge_jpool_entry),
772 		    M_DEVBUF, M_NOWAIT);
773 		if (entry == NULL) {
774 			printf("%s: no memory for jumbo buffer queue!\n",
775 			    sc->bge_dev.dv_xname);
776 			error = ENOBUFS;
777 			goto out;
778 		}
779 		entry->slot = i;
780 		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
781 				 entry, jpool_entries);
782 	}
783 out:
784 	if (error != 0) {
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
			/* FALLTHROUGH */
		case 3:
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
			/* FALLTHROUGH */
		case 2:
			bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
			/* FALLTHROUGH */
		case 1:
			bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
			break;
		default:
			break;
		}
800 	}
801 
802 	return (error);
803 }
804 
805 /*
806  * Allocate a Jumbo buffer.
807  */
808 void *
809 bge_jalloc(struct bge_softc *sc)
810 {
811 	struct bge_jpool_entry   *entry;
812 
813 	entry = SLIST_FIRST(&sc->bge_jfree_listhead);
814 
815 	if (entry == NULL)
816 		return (NULL);
817 
818 	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
819 	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
820 	return (sc->bge_cdata.bge_jslots[entry->slot]);
821 }
822 
823 /*
824  * Release a Jumbo buffer.
825  */
826 void
827 bge_jfree(caddr_t buf, u_int size, void *arg)
828 {
829 	struct bge_jpool_entry *entry;
830 	struct bge_softc *sc;
831 	int i;
832 
833 	/* Extract the softc struct pointer. */
834 	sc = (struct bge_softc *)arg;
835 
836 	if (sc == NULL)
837 		panic("bge_jfree: can't find softc pointer!");
838 
839 	/* calculate the slot this buffer belongs to */
840 
841 	i = ((vaddr_t)buf
842 	     - (vaddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;
843 
844 	if ((i < 0) || (i >= BGE_JSLOTS))
845 		panic("bge_jfree: asked to free buffer that we don't manage!");
846 
847 	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
848 	if (entry == NULL)
849 		panic("bge_jfree: buffer not in use!");
850 	entry->slot = i;
851 	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
852 	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
853 }
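
/*
 * Example (editor's sketch): buffers borrowed from the pool must come
 * back through bge_jfree(), which also serves as the MEXTADD() free
 * routine in bge_newbuf_jumbo() below. The function name here is made
 * up for illustration.
 */
#ifdef notdef
static void
bge_jpool_example(struct bge_softc *sc)
{
	caddr_t buf = bge_jalloc(sc);

	if (buf != NULL)
		bge_jfree(buf, BGE_JLEN, sc);	/* slot returns to free list */
}
#endif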
854 
855 
856 /*
 * Initialize a standard receive ring descriptor.
858  */
859 int
860 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m,
861     bus_dmamap_t dmamap)
862 {
863 	struct mbuf		*m_new = NULL;
864 	struct bge_rx_bd	*r;
865 	int			error;
866 
867 	if (dmamap == NULL) {
868 		error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
869 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
870 		if (error != 0)
871 			return (error);
872 	}
873 
874 	sc->bge_cdata.bge_rx_std_map[i] = dmamap;
875 
876 	if (m == NULL) {
877 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
878 		if (m_new == NULL)
879 			return (ENOBUFS);
880 
881 		MCLGET(m_new, M_DONTWAIT);
882 		if (!(m_new->m_flags & M_EXT)) {
883 			m_freem(m_new);
884 			return (ENOBUFS);
885 		}
886 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
887 	} else {
888 		/*
889 		 * We're re-using a previously allocated mbuf;
890 		 * be sure to re-init pointers and lengths to
891 		 * default values.
892 		 */
893 		m_new = m;
894 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
895 		m_new->m_data = m_new->m_ext.ext_buf;
896 	}
897 
898 	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
899 	    m_adj(m_new, ETHER_ALIGN);
900 
901 	error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
902 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
903 	if (error) {
904 		if (m == NULL) {
905 			m_freem(m_new);
906 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
907 		}
908 		return (ENOBUFS);
909 	}
910 
911 	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
912 	r = &sc->bge_rdata->bge_rx_std_ring[i];
913 	BGE_HOSTADDR(r->bge_addr, dmamap->dm_segs[0].ds_addr);
914 	r->bge_flags = BGE_RXBDFLAG_END;
915 	r->bge_len = m_new->m_len;
916 	r->bge_idx = i;
917 
918 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
919 	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
920 		i * sizeof (struct bge_rx_bd),
921 	    sizeof (struct bge_rx_bd),
922 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
923 
924 	return (0);
925 }
926 
927 /*
928  * Initialize a Jumbo receive ring descriptor. This allocates
929  * a Jumbo buffer from the pool managed internally by the driver.
930  */
931 int
932 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
933 {
934 	struct mbuf *m_new = NULL;
935 	struct bge_rx_bd *r;
936 
937 	if (m == NULL) {
938 		caddr_t			buf = NULL;
939 
940 		/* Allocate the mbuf. */
941 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
942 		if (m_new == NULL)
943 			return (ENOBUFS);
944 
945 		/* Allocate the Jumbo buffer */
946 		buf = bge_jalloc(sc);
947 		if (buf == NULL) {
948 			m_freem(m_new);
949 			return (ENOBUFS);
950 		}
951 
952 		/* Attach the buffer to the mbuf. */
953 		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
954 		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, 0, bge_jfree, sc);
955 	} else {
956 		/*
957 		 * We're re-using a previously allocated mbuf;
958 		 * be sure to re-init pointers and lengths to
959 		 * default values.
960 		 */
961 		m_new = m;
962 		m_new->m_data = m_new->m_ext.ext_buf;
963 		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
964 	}
965 
966 	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
967 		m_adj(m_new, ETHER_ALIGN);
968 	/* Set up the descriptor. */
969 	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
970 	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
971 	BGE_HOSTADDR(r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
972 	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
973 	r->bge_len = m_new->m_len;
974 	r->bge_idx = i;
975 
976 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
977 	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
978 		i * sizeof (struct bge_rx_bd),
979 	    sizeof (struct bge_rx_bd),
980 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
981 
982 	return (0);
983 }
984 
985 /*
986  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
988  * 256 ring entries and hope that our CPU is fast enough to keep up with
989  * the NIC.
990  */
991 int
992 bge_init_rx_ring_std(struct bge_softc *sc)
993 {
994 	int i;
995 
996 	if (sc->bge_flags & BGE_RXRING_VALID)
997 		return (0);
998 
999 	for (i = 0; i < BGE_SSLOTS; i++) {
1000 		if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
1001 			return (ENOBUFS);
1002 	}
1003 
1004 	sc->bge_std = i - 1;
1005 	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1006 
1007 	sc->bge_flags |= BGE_RXRING_VALID;
1008 
1009 	return (0);
1010 }
1011 
1012 void
1013 bge_free_rx_ring_std(struct bge_softc *sc)
1014 {
1015 	int i;
1016 
1017 	if (!(sc->bge_flags & BGE_RXRING_VALID))
1018 		return;
1019 
1020 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1021 		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1022 			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1023 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1024 			bus_dmamap_destroy(sc->bge_dmatag,
1025 			    sc->bge_cdata.bge_rx_std_map[i]);
1026 		}
1027 		bzero((char *)&sc->bge_rdata->bge_rx_std_ring[i],
1028 		    sizeof(struct bge_rx_bd));
1029 	}
1030 
1031 	sc->bge_flags &= ~BGE_RXRING_VALID;
1032 }
1033 
1034 int
1035 bge_init_rx_ring_jumbo(struct bge_softc *sc)
1036 {
1037 	int i;
1038 	volatile struct bge_rcb *rcb;
1039 
1040 	if (sc->bge_flags & BGE_JUMBO_RXRING_VALID)
1041 		return (0);
1042 
1043 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1044 		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
1045 			return (ENOBUFS);
	}
1047 
1048 	sc->bge_jumbo = i - 1;
1049 	sc->bge_flags |= BGE_JUMBO_RXRING_VALID;
1050 
1051 	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1052 	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
1053 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1054 
1055 	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1056 
1057 	return (0);
1058 }
1059 
1060 void
1061 bge_free_rx_ring_jumbo(struct bge_softc *sc)
1062 {
1063 	int i;
1064 
1065 	if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
1066 		return;
1067 
1068 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1069 		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1070 			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1071 			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1072 		}
1073 		bzero((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i],
1074 		    sizeof(struct bge_rx_bd));
1075 	}
1076 
1077 	sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
1078 }
1079 
1080 void
1081 bge_free_tx_ring(struct bge_softc *sc)
1082 {
1083 	int i;
1084 	struct txdmamap_pool_entry *dma;
1085 
1086 	if (!(sc->bge_flags & BGE_TXRING_VALID))
1087 		return;
1088 
1089 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1090 		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1091 			m_freem(sc->bge_cdata.bge_tx_chain[i]);
1092 			sc->bge_cdata.bge_tx_chain[i] = NULL;
1093 			SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
1094 					    link);
1095 			sc->txdma[i] = 0;
1096 		}
1097 		bzero((char *)&sc->bge_rdata->bge_tx_ring[i],
1098 		    sizeof(struct bge_tx_bd));
1099 	}
1100 
1101 	while ((dma = SLIST_FIRST(&sc->txdma_list))) {
1102 		SLIST_REMOVE_HEAD(&sc->txdma_list, link);
1103 		bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
1104 		free(dma, M_DEVBUF);
1105 	}
1106 
1107 	sc->bge_flags &= ~BGE_TXRING_VALID;
1108 }
1109 
1110 int
1111 bge_init_tx_ring(struct bge_softc *sc)
1112 {
1113 	int i;
1114 	bus_dmamap_t dmamap;
1115 	struct txdmamap_pool_entry *dma;
1116 
1117 	if (sc->bge_flags & BGE_TXRING_VALID)
1118 		return (0);
1119 
1120 	sc->bge_txcnt = 0;
1121 	sc->bge_tx_saved_considx = 0;
1122 
1123 	/* Initialize transmit producer index for host-memory send ring. */
1124 	sc->bge_tx_prodidx = 0;
1125 	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1126 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
1127 		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1128 
1129 	/* NIC-memory send ring not used; initialize to zero. */
1130 	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1131 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
1132 		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1133 
1134 	SLIST_INIT(&sc->txdma_list);
1135 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1136 		if (bus_dmamap_create(sc->bge_dmatag, BGE_JLEN,
1137 		    BGE_NTXSEG, BGE_JLEN, 0, BUS_DMA_NOWAIT,
1138 		    &dmamap))
1139 			return (ENOBUFS);
1140 		if (dmamap == NULL)
1141 			panic("dmamap NULL in bge_init_tx_ring");
1142 		dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
1143 		if (dma == NULL) {
1144 			printf("%s: can't alloc txdmamap_pool_entry\n",
1145 			    sc->bge_dev.dv_xname);
1146 			bus_dmamap_destroy(sc->bge_dmatag, dmamap);
1147 			return (ENOMEM);
1148 		}
1149 		dma->dmamap = dmamap;
1150 		SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
1151 	}
1152 
1153 	sc->bge_flags |= BGE_TXRING_VALID;
1154 
1155 	return (0);
1156 }
1157 
1158 void
1159 bge_iff(struct bge_softc *sc)
1160 {
1161 	struct arpcom		*ac = &sc->arpcom;
1162 	struct ifnet		*ifp = &ac->ac_if;
1163 	struct ether_multi	*enm;
1164 	struct ether_multistep  step;
1165 	u_int8_t		hashes[16];
1166 	u_int32_t		h, rxmode;
1167 
1168 	/* First, zot all the existing filters. */
1169 	rxmode = CSR_READ_4(sc, BGE_RX_MODE) & ~BGE_RXMODE_RX_PROMISC;
1170 	ifp->if_flags &= ~IFF_ALLMULTI;
1171 	memset(hashes, 0x00, sizeof(hashes));
1172 
1173 	if (ifp->if_flags & IFF_PROMISC)
1174 		rxmode |= BGE_RXMODE_RX_PROMISC;
1175 	else if (ac->ac_multirangecnt > 0) {
1176 		ifp->if_flags |= IFF_ALLMULTI;
1177 		memset(hashes, 0xff, sizeof(hashes));
1178 	} else {
1179 		ETHER_FIRST_MULTI(step, ac, enm);
1180 		while (enm != NULL) {
1181 			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
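			/*
			 * The multicast filter is 128 bits wide; the
			 * low 7 bits of the little-endian CRC select
			 * which bit of hashes[] to set.
			 */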
1182 			setbit(hashes, h & 0x7F);
1183 			ETHER_NEXT_MULTI(step, enm);
1184 		}
1185 	}
1186 
1187 	bus_space_write_raw_region_4(sc->bge_btag, sc->bge_bhandle, BGE_MAR0,
1188 	    hashes, sizeof(hashes));
1189 
1190 	CSR_WRITE_4(sc, BGE_RX_MODE, rxmode);
1191 }
1192 
1193 /*
1194  * Do endian, PCI and DMA initialization.
1195  */
1196 void
1197 bge_chipinit(struct bge_softc *sc)
1198 {
1199 	struct pci_attach_args	*pa = &(sc->bge_pa);
1200 	u_int32_t dma_rw_ctl;
1201 	int i;
1202 
1203 	/* Set endianness before we access any non-PCI registers. */
1204 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
1205 	    BGE_INIT);
1206 
1207 	/* Clear the MAC control register */
1208 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1209 
1210 	/*
1211 	 * Clear the MAC statistics block in the NIC's
1212 	 * internal memory.
1213 	 */
1214 	for (i = BGE_STATS_BLOCK;
1215 	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1216 		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
1217 
1218 	for (i = BGE_STATUS_BLOCK;
1219 	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1220 		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
1221 
1222 	/*
1223 	 * Set up the PCI DMA control register.
1224 	 */
1225 	dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1226 	    BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1227 
1228 	if (sc->bge_flags & BGE_PCIE) {
1229 		/* Read watermark not used, 128 bytes for write. */
1230 		dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1231 	} else if (sc->bge_flags & BGE_PCIX) {
1232 		/* PCI-X bus */
1233 		if (BGE_IS_5714_FAMILY(sc)) {
1234 			/* 256 bytes for read and write. */
1235 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1236 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1237 
1238 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
1239 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1240 			else
1241 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1242 		} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
1243 			/* 1536 bytes for read, 384 bytes for write. */
1244 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1245 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1246 		} else {
1247 			/* 384 bytes for read and write. */
1248 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1249 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1250 			    (0x0F);
1251 		}
1252 
1253 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
1254 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
1255 			u_int32_t tmp;
1256 
1257 			/* Set ONEDMA_ATONCE for hardware workaround. */
1258 			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1259 			if (tmp == 6 || tmp == 7)
1260 				dma_rw_ctl |=
1261 				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1262 
1263 			/* Set PCI-X DMA write workaround. */
1264 			dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1265 		}
1266 	} else {
1267 		/* Conventional PCI bus: 256 bytes for read and write. */
1268 		dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1269 		    BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1270 
1271 		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
1272 		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
1273 			dma_rw_ctl |= 0x0F;
1274 	}
1275 
1276 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
1277 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
1278 		dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1279 		    BGE_PCIDMARWCTL_ASRT_ALL_BE;
1280 
1281 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
1282 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
1283 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1284 
1285 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);
1286 
1287 	/*
1288 	 * Set up general mode register.
1289 	 */
1290 #ifndef BGE_CHECKSUM
1291 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1292 		    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1293 		    BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);
1294 #else
1295 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1296 		    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS);
1297 #endif
1298 
1299 	/*
1300 	 * Disable memory write invalidate.  Apparently it is not supported
1301 	 * properly by these devices.
1302 	 */
1303 	PCI_CLRBIT(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
1304 	    PCI_COMMAND_INVALIDATE_ENABLE);
1305 
1306 #ifdef __brokenalpha__
1307 	/*
	 * Must ensure that we do not cross an 8K (bytes) boundary
1309 	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
1310 	 * restriction on some ALPHA platforms with early revision
1311 	 * 21174 PCI chipsets, such as the AlphaPC 164lx
1312 	 */
1313 	PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1314 	    BGE_PCI_READ_BNDRY_1024);
1315 #endif
1316 
1317 	/* Set the timer prescaler (always 66MHz) */
1318 	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1319 }
1320 
1321 int
1322 bge_blockinit(struct bge_softc *sc)
1323 {
1324 	volatile struct bge_rcb		*rcb;
1325 	vaddr_t			rcb_addr;
1326 	int			i;
1327 	bge_hostaddr		taddr;
1328 	u_int32_t		val;
1329 
1330 	/*
1331 	 * Initialize the memory window pointer register so that
1332 	 * we can access the first 32K of internal NIC RAM. This will
1333 	 * allow us to set up the TX send ring RCBs and the RX return
1334 	 * ring RCBs, plus other things which live in NIC memory.
1335 	 */
1336 	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1337 
1338 	/* Configure mbuf memory pool */
1339 	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1340 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1341 		    BGE_BUFFPOOL_1);
1342 
1343 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
1344 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1345 		else
1346 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1347 
1348 		/* Configure DMA resource pool */
1349 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1350 		    BGE_DMA_DESCRIPTORS);
1351 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1352 	}
1353 
1354 	/* Configure mbuf pool watermarks */
1355 	/* new Broadcom docs strongly recommend these: */
1356 	if (BGE_IS_5705_OR_BEYOND(sc)) {
1357 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1358 
1359 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1360 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1361 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1362 		} else {
1363 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1364 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1365 		}
1366 	} else {
1367 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1368 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1369 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1370 	}
1371 
1372 	/* Configure DMA resource watermarks */
1373 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1374 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1375 
1376 	/* Enable buffer manager */
1377 	CSR_WRITE_4(sc, BGE_BMAN_MODE,
1378 	    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1379 
1380 	/* Poll for buffer manager start indication */
1381 	for (i = 0; i < 2000; i++) {
1382 		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1383 			break;
1384 		DELAY(10);
1385 	}
1386 
1387 	if (i == 2000) {
1388 		printf("%s: buffer manager failed to start\n",
1389 		    sc->bge_dev.dv_xname);
1390 		return (ENXIO);
1391 	}
1392 
1393 	/* Enable flow-through queues */
1394 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1395 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1396 
1397 	/* Wait until queue initialization is complete */
1398 	for (i = 0; i < 2000; i++) {
1399 		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1400 			break;
1401 		DELAY(10);
1402 	}
1403 
1404 	if (i == 2000) {
1405 		printf("%s: flow-through queue init failed\n",
1406 		    sc->bge_dev.dv_xname);
1407 		return (ENXIO);
1408 	}
1409 
1410 	/* Initialize the standard RX ring control block */
1411 	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
1412 	BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
1413 	if (BGE_IS_5705_OR_BEYOND(sc))
1414 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1415 	else
1416 		rcb->bge_maxlen_flags =
1417 		    BGE_RCB_MAXLEN_FLAGS(ETHER_MAX_DIX_LEN, 0);
1418 	rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1419 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1420 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1421 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1422 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1423 
1424 	/*
1425 	 * Initialize the Jumbo RX ring control block
1426 	 * We set the 'ring disabled' bit in the flags
1427 	 * field until we're actually ready to start
1428 	 * using this ring (i.e. once we set the MTU
1429 	 * high enough to require it).
1430 	 */
1431 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
1432 		rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1433 		BGE_HOSTADDR(rcb->bge_hostaddr,
1434 		    BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
1435 		rcb->bge_maxlen_flags =
1436 		    BGE_RCB_MAXLEN_FLAGS(BGE_JUMBO_FRAMELEN,
1437 		        BGE_RCB_FLAG_RING_DISABLED);
1438 		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1439 
1440 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1441 		    rcb->bge_hostaddr.bge_addr_hi);
1442 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1443 		    rcb->bge_hostaddr.bge_addr_lo);
1444 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1445 		    rcb->bge_maxlen_flags);
1446 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR,
1447 		    rcb->bge_nicaddr);
1448 
1449 		/* Set up dummy disabled mini ring RCB */
1450 		rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
1451 		rcb->bge_maxlen_flags =
1452 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1453 		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1454 		    rcb->bge_maxlen_flags);
1455 
1456 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1457 		    offsetof(struct bge_ring_data, bge_info),
1458 		    sizeof (struct bge_gib),
1459 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1460 	}
1461 
1462 	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring (e.g. 512 / 8 == 64 for the standard RX ring).
1466 	 */
1467 	i = BGE_STD_RX_RING_CNT / 8;
1468 
1469 	/*
1470 	 * Use a value of 8 for the following chips to workaround HW errata.
1471 	 * Some of these chips have been added based on empirical
1472 	 * evidence (they don't work unless this is done).
1473 	 */
1474 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 ||
1475 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
1476 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
1477 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787 ||
1478 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
1479 		i = 8;
1480 
1481 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, i);
1482 	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT / 8);
1483 
1484 	/*
1485 	 * Disable all unused send rings by setting the 'ring disabled'
1486 	 * bit in the flags field of all the TX send ring control blocks.
1487 	 * These are located in NIC memory.
1488 	 */
1489 	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1490 	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1491 		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1492 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1493 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
1494 		rcb_addr += sizeof(struct bge_rcb);
1495 	}
1496 
1497 	/* Configure TX RCB 0 (we use only the first ring) */
1498 	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1499 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
1500 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1501 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1502 	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
1503 		    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1504 	if (!(BGE_IS_5705_OR_BEYOND(sc)))
1505 		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1506 		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1507 
1508 	/* Disable all unused RX return rings */
1509 	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1510 	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1511 		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
1512 		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
1513 		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1514 		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1515 			BGE_RCB_FLAG_RING_DISABLED));
1516 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
1517 		bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1518 		    (i * (sizeof(u_int64_t))), 0);
1519 		rcb_addr += sizeof(struct bge_rcb);
1520 	}
1521 
1522 	/* Initialize RX ring indexes */
1523 	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1524 	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1525 	bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1526 
1527 	/*
1528 	 * Set up RX return ring 0
1529 	 * Note that the NIC address for RX return rings is 0x00000000.
1530 	 * The return rings live entirely within the host, so the
1531 	 * nicaddr field in the RCB isn't used.
1532 	 */
1533 	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1534 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
1535 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1536 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1537 	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
1538 	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1539 	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1540 
1541 	/* Set random backoff seed for TX */
1542 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1543 	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1544 	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1545 	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1546 	    BGE_TX_BACKOFF_SEED_MASK);
1547 
1548 	/* Set inter-packet gap */
1549 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1550 
1551 	/*
1552 	 * Specify which ring to use for packets that don't match
1553 	 * any RX rules.
1554 	 */
1555 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1556 
1557 	/*
1558 	 * Configure number of RX lists. One interrupt distribution
1559 	 * list, sixteen active lists, one bad frames class.
1560 	 */
1561 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1562 
	/* Initialize RX list placement stats mask. */
1564 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1565 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1566 
1567 	/* Disable host coalescing until we get it set up */
1568 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1569 
1570 	/* Poll to make sure it's shut down. */
1571 	for (i = 0; i < 2000; i++) {
1572 		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1573 			break;
1574 		DELAY(10);
1575 	}
1576 
1577 	if (i == 2000) {
1578 		printf("%s: host coalescing engine failed to idle\n",
1579 		    sc->bge_dev.dv_xname);
1580 		return (ENXIO);
1581 	}
1582 
1583 	/* Set up host coalescing defaults */
1584 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1585 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1586 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1587 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1588 	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1589 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1590 		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1591 	}
1592 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1593 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1594 
1595 	/* Set up address of statistics block */
1596 	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1597 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0);
1598 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1599 			    BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
1600 
1601 		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1602 		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1603 		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1604 	}
1605 
1606 	/* Set up address of status block */
1607 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
1608 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
1609 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
1610 
1611 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
1612 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
1613 
1614 	/* Turn on host coalescing state machine */
1615 	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1616 
1617 	/* Turn on RX BD completion state machine and enable attentions */
1618 	CSR_WRITE_4(sc, BGE_RBDC_MODE,
1619 	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1620 
1621 	/* Turn on RX list placement state machine */
1622 	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1623 
1624 	/* Turn on RX list selector state machine. */
1625 	if (!(BGE_IS_5705_OR_BEYOND(sc)))
1626 		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1627 
1628 	val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1629 	    BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1630 	    BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1631 	    BGE_MACMODE_FRMHDR_DMA_ENB;
1632 
1633 	if (sc->bge_flags & BGE_PHY_FIBER_TBI)
1634 	    val |= BGE_PORTMODE_TBI;
1635 	else if (sc->bge_flags & BGE_PHY_FIBER_MII)
1636 	    val |= BGE_PORTMODE_GMII;
1637 	else
1638 	    val |= BGE_PORTMODE_MII;
1639 
1640 	/* Turn on DMA, clear stats */
1641 	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1642 
1643 	/* Set misc. local control, enable interrupts on attentions */
1644 	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1645 
1646 #ifdef notdef
1647 	/* Assert GPIO pins for PHY reset */
1648 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1649 	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1650 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1651 	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1652 #endif
1653 
1654 	/* Turn on DMA completion state machine */
1655 	if (!(BGE_IS_5705_OR_BEYOND(sc)))
1656 		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1657 
1658 	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
1659 
1660 	/* Enable host coalescing bug fix. */
1661 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
1662 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787)
1663 		val |= (1 << 29);
1664 
1665 	/* Turn on write DMA state machine */
1666 	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1667 
1668 	val = BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS;
1669 
1670 	if (sc->bge_flags & BGE_PCIE)
1671 		val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1672 
1673 	/* Turn on read DMA state machine */
1674 	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1675 
1676 	/* Turn on RX data completion state machine */
1677 	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1678 
1679 	/* Turn on RX BD initiator state machine */
1680 	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1681 
1682 	/* Turn on RX data and RX BD initiator state machine */
1683 	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1684 
1685 	/* Turn on Mbuf cluster free state machine */
1686 	if (!(BGE_IS_5705_OR_BEYOND(sc)))
1687 		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1688 
1689 	/* Turn on send BD completion state machine */
1690 	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1691 
1692 	/* Turn on send data completion state machine */
1693 	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1694 
1695 	/* Turn on send data initiator state machine */
1696 	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1697 
1698 	/* Turn on send BD initiator state machine */
1699 	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1700 
1701 	/* Turn on send BD selector state machine */
1702 	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1703 
1704 	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1705 	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1706 	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1707 
1708 	/* ack/clear link change events */
1709 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1710 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1711 	    BGE_MACSTAT_LINK_CHANGED);
1712 
1713 	/* Enable PHY auto polling (for MII/GMII only) */
1714 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
1715 		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1716 	} else {
1717 		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
1718 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1719 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700)
1720 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1721 			    BGE_EVTENB_MI_INTERRUPT);
1722 	}
1723 
1724 	/*
1725 	 * Clear any pending link state attention.
1726 	 * Otherwise some link state change events may be lost until attention
1727 	 * is cleared by bge_intr() -> bge_link_upd() sequence.
1728 	 * It's not necessary on newer BCM chips - perhaps enabling link
1729 	 * state change attentions implies clearing pending attention.
1730 	 */
1731 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1732 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1733 	    BGE_MACSTAT_LINK_CHANGED);
1734 
1735 	/* Enable link state change attentions. */
1736 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1737 
1738 	return (0);
1739 }
1740 
1741 const struct bge_revision *
1742 bge_lookup_rev(u_int32_t chipid)
1743 {
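	/*
	 * Prefer an exact stepping match; fall back to the major ASIC
	 * revision so unknown steppings still get a family name.
	 */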
1744 	const struct bge_revision *br;
1745 
1746 	for (br = bge_revisions; br->br_name != NULL; br++) {
1747 		if (br->br_chipid == chipid)
1748 			return (br);
1749 	}
1750 
1751 	for (br = bge_majorrevs; br->br_name != NULL; br++) {
1752 		if (br->br_chipid == BGE_ASICREV(chipid))
1753 			return (br);
1754 	}
1755 
1756 	return (NULL);
1757 }
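
/*
 * Illustrative use (a sketch; the br_name printing mirrors what
 * bge_attach() does below):
 *
 *	const struct bge_revision *br = bge_lookup_rev(sc->bge_chipid);
 *	printf("%s", br != NULL ? br->br_name : "unknown ASIC");
 */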
1758 
1759 /*
1760  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1761  * against our list and return a match.  Note: although the
1762  * Broadcom controller has VPD support and could in principle
1763  * supply its own product name string, this probe simply matches
1764  * against the compiled-in device table; bge_attach() prints the
1765  * ASIC revision name instead.
1766  */
1767 int
1768 bge_probe(struct device *parent, void *match, void *aux)
1769 {
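	/*
	 * sizeof(bge_devices)/sizeof(bge_devices[0]) is the usual
	 * array element-count idiom (often spelled nitems()).
	 */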
1770 	return (pci_matchbyid((struct pci_attach_args *)aux, bge_devices,
1771 	    sizeof(bge_devices)/sizeof(bge_devices[0])));
1772 }
1773 
1774 void
1775 bge_attach(struct device *parent, struct device *self, void *aux)
1776 {
1777 	struct bge_softc	*sc = (struct bge_softc *)self;
1778 	struct pci_attach_args	*pa = aux;
1779 	pci_chipset_tag_t	pc = pa->pa_pc;
1780 	const struct bge_revision *br;
1781 	pcireg_t		pm_ctl, memtype, subid;
1782 	pci_intr_handle_t	ih;
1783 	const char		*intrstr = NULL;
1784 	bus_size_t		size;
1785 	bus_dma_segment_t	seg;
1786 	int			rseg, gotenaddr = 0;
1787 	u_int32_t		hwcfg = 0;
1788 	u_int32_t		mac_addr = 0;
1789 	u_int32_t		misccfg;
1790 	struct ifnet		*ifp;
1791 	caddr_t			kva;
1792 #ifdef __sparc64__
1793 	char			name[32];
1794 #endif
1795 
1796 	sc->bge_pa = *pa;
1797 
1798 	subid = pci_conf_read(pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
1799 
1800 	/*
1801 	 * Map control/status registers.
1802 	 */
1803 	DPRINTFN(5, ("Map control/status regs\n"));
1804 
1805 	DPRINTFN(5, ("pci_mapreg_map\n"));
1806 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0);
1807 	if (pci_mapreg_map(pa, BGE_PCI_BAR0, memtype, 0, &sc->bge_btag,
1808 	    &sc->bge_bhandle, NULL, &size, 0)) {
1809 		printf(": can't find mem space\n");
1810 		return;
1811 	}
1812 
1813 	DPRINTFN(5, ("pci_intr_map\n"));
1814 	if (pci_intr_map(pa, &ih)) {
1815 		printf(": couldn't map interrupt\n");
1816 		goto fail_1;
1817 	}
1818 
1819 	DPRINTFN(5, ("pci_intr_string\n"));
1820 	intrstr = pci_intr_string(pc, ih);
1821 
1822 	/*
1823 	 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
1824 	 * can clobber the chip's PCI config-space power control registers,
1825 	 * leaving the card in D3 powersave state.
1826 	 * We do not have memory-mapped registers in this state,
1827 	 * so force device into D0 state before starting initialization.
1828 	 */
1829 	pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
1830 	pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
1831 	pm_ctl |= (1 << 8) | PCI_PWR_D0; /* D0 state */
1832 	pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
1833 	DELAY(1000);	/* 27 usec is allegedly sufficient */
1834 
1835 	/*
1836 	 * Save ASIC rev.
1837 	 */
1838 
1839 	sc->bge_chipid =
1840 	    pci_conf_read(pc, pa->pa_tag, BGE_PCI_MISC_CTL) &
1841 	    BGE_PCIMISCCTL_ASICREV;
1842 
1843 	printf(", ");
1844 	br = bge_lookup_rev(sc->bge_chipid);
1845 	if (br == NULL)
1846 		printf("unknown ASIC (0x%04x)", sc->bge_chipid >> 16);
1847 	else
1848 		printf("%s (0x%04x)", br->br_name, sc->bge_chipid >> 16);
1849 
1850 	/*
1851 	 * PCI Express check.
1852 	 */
1853 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
1854 	    NULL, NULL) != 0)
1855 		sc->bge_flags |= BGE_PCIE;
1856 
1857 	/*
1858 	 * PCI-X check.
1859 	 */
1860 	if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
1861 	    BGE_PCISTATE_PCI_BUSMODE) == 0)
1862 		sc->bge_flags |= BGE_PCIX;
1863 
1864 	/*
1865 	 * SEEPROM check.
1866 	 */
1867 #ifdef __sparc64__
1868 	/*
1869 	 * Onboard interfaces on UltraSPARC systems generally don't
1870 	 * have a SEEPROM fitted.  These interfaces, and cards that
1871 	 * have FCode, are named "network" by the PROM, whereas cards
1872 	 * without FCode show up as "ethernet".  Since we don't really
1873 	 * need the information from the SEEPROM on cards that have
1874 	 * FCode it's fine to pretend they don't have one.
1875 	 */
1876 	if (OF_getprop(PCITAG_NODE(pa->pa_tag), "name", name,
1877 	    sizeof(name)) > 0 && strcmp(name, "network") == 0)
1878 		sc->bge_flags |= BGE_NO_EEPROM;
1879 #endif
1880 
1881 	/*
1882 	 * When using the BCM5701 in PCI-X mode, data corruption has
1883 	 * been observed in the first few bytes of some received packets.
1884 	 * Aligning the packet buffer in memory eliminates the corruption.
1885 	 * Unfortunately, this misaligns the packet payloads.  On platforms
1886 	 * which do not support unaligned accesses, we will realign the
1887 	 * payloads by copying the received packets.
1888 	 */
1889 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
1890 	    sc->bge_flags & BGE_PCIX)
1891 		sc->bge_flags |= BGE_RX_ALIGNBUG;
1892 
1893 	if (BGE_IS_JUMBO_CAPABLE(sc))
1894 		sc->bge_flags |= BGE_JUMBO_CAP;
1895 
1896 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
1897 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) &&
1898 	    PCI_VENDOR(subid) == DELL_VENDORID)
1899 		sc->bge_flags |= BGE_NO_3LED;
1900 
1901 	misccfg = CSR_READ_4(sc, BGE_MISC_CFG);
1902 	misccfg &= BGE_MISCCFG_BOARD_ID_MASK;
1903 
1904 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
1905 	    (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
1906 	     misccfg == BGE_MISCCFG_BOARD_ID_5788M))
1907 		sc->bge_flags |= BGE_IS_5788;
1908 
1909 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 &&
1910 	     (misccfg == 0x4000 || misccfg == 0x8000)) ||
1911 	    (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
1912 	     PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
1913 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 ||
1914 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
1915 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
1916 	    (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
1917 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F ||
1918 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F ||
1919 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
1920 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
1921 		sc->bge_flags |= BGE_10_100_ONLY;
1922 
1923 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
1924 	    (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
1925 	     (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
1926 	      sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
1927 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
1928 		sc->bge_flags |= BGE_NO_ETH_WIRE_SPEED;
1929 
1930 	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
1931 	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
1932 		sc->bge_flags |= BGE_PHY_CRC_BUG;
1933 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX ||
1934 	    BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX)
1935 		sc->bge_flags |= BGE_PHY_ADC_BUG;
1936 	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
1937 		sc->bge_flags |= BGE_PHY_5704_A0_BUG;
1938 
1939 	if (BGE_IS_5705_OR_BEYOND(sc)) {
1940 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
1941 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) {
1942 			if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 &&
1943 			    PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756)
1944 				sc->bge_flags |= BGE_PHY_JITTER_BUG;
1945 			if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M)
1946 				sc->bge_flags |= BGE_PHY_ADJUST_TRIM;
1947 		} else if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
1948 			sc->bge_flags |= BGE_PHY_BER_BUG;
1949 	}
1950 
1951 	/* Try to reset the chip. */
1952 	DPRINTFN(5, ("bge_reset\n"));
1953 	bge_reset(sc);
1954 
1955 	bge_chipinit(sc);
1956 
1957 #ifdef __sparc64__
1958 	if (!gotenaddr) {
1959 		if (OF_getprop(PCITAG_NODE(pa->pa_tag), "local-mac-address",
1960 		    sc->arpcom.ac_enaddr, ETHER_ADDR_LEN) == ETHER_ADDR_LEN)
1961 			gotenaddr = 1;
1962 	}
1963 #endif
1964 
1965 	/*
1966 	 * Get the station address from NIC memory, NVRAM or the EEPROM.
1967 	 */
1968 	if (!gotenaddr) {
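		/*
		 * Firmware stores the ASCII signature 'H','K' (0x484b)
		 * in front of a valid station address in NIC memory.
		 */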
1969 		mac_addr = bge_readmem_ind(sc, 0x0c14);
1970 		if ((mac_addr >> 16) == 0x484b) {
1971 			sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
1972 			sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
1973 			mac_addr = bge_readmem_ind(sc, 0x0c18);
1974 			sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
1975 			sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
1976 			sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
1977 			sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
1978 			gotenaddr = 1;
1979 		}
1980 	}
1981 	if (!gotenaddr) {
1982 		int mac_offset = BGE_EE_MAC_OFFSET;
1983 
1984 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
1985 			mac_offset = BGE_EE_MAC_OFFSET_5906;
1986 
1987 		if (bge_read_nvram(sc, (caddr_t)&sc->arpcom.ac_enaddr,
1988 		    mac_offset + 2, ETHER_ADDR_LEN) == 0)
1989 			gotenaddr = 1;
1990 	}
1991 	if (!gotenaddr && (!(sc->bge_flags & BGE_NO_EEPROM))) {
1992 		if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
1993 		    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN) == 0)
1994 			gotenaddr = 1;
1995 	}
1996 
1997 #ifdef __sparc64__
1998 	if (!gotenaddr) {
1999 		extern void myetheraddr(u_char *);
2000 
2001 		myetheraddr(sc->arpcom.ac_enaddr);
2002 		gotenaddr = 1;
2003 	}
2004 #endif
2005 
2006 	if (!gotenaddr) {
2007 		printf(": failed to read station address\n");
2008 		goto fail_1;
2009 	}
2010 
2011 	/* Allocate the general information block and ring buffers. */
2012 	sc->bge_dmatag = pa->pa_dmat;
2013 	DPRINTFN(5, ("bus_dmamem_alloc\n"));
2014 	if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
2015 			     PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
2016 		printf(": can't alloc rx buffers\n");
2017 		goto fail_1;
2018 	}
2019 	DPRINTFN(5, ("bus_dmamem_map\n"));
2020 	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg,
2021 			   sizeof(struct bge_ring_data), &kva,
2022 			   BUS_DMA_NOWAIT)) {
2023 		printf(": can't map dma buffers (%zu bytes)\n",
2024 		    sizeof(struct bge_ring_data));
2025 		goto fail_2;
2026 	}
2027 	DPRINTFN(5, ("bus_dmamap_create\n"));
2028 	if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
2029 	    sizeof(struct bge_ring_data), 0,
2030 	    BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
2031 		printf(": can't create dma map\n");
2032 		goto fail_3;
2033 	}
2034 	DPRINTFN(5, ("bus_dmamap_load\n"));
2035 	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
2036 			    sizeof(struct bge_ring_data), NULL,
2037 			    BUS_DMA_NOWAIT)) {
2038 		goto fail_4;
2039 	}
2040 
2041 	DPRINTFN(5, ("bzero\n"));
2042 	sc->bge_rdata = (struct bge_ring_data *)kva;
2043 
2044 	bzero(sc->bge_rdata, sizeof(struct bge_ring_data));
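
	/*
	 * From here on, failure paths unwind through fail_5..fail_1,
	 * releasing the DMA resources in the reverse order of the
	 * alloc/map/create/load sequence above.
	 */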
2045 
2046 	/*
2047 	 * Try to allocate memory for Jumbo buffers.
2048 	 */
2049 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
2050 		if (bge_alloc_jumbo_mem(sc)) {
2051 			printf(": jumbo buffer allocation failed\n");
2052 			goto fail_5;
2053 		}
2054 	}
2055 
2056 	/* Set default tuneable values. */
2057 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2058 	sc->bge_rx_coal_ticks = 150;
2059 	sc->bge_rx_max_coal_bds = 64;
2060 	sc->bge_tx_coal_ticks = 300;
2061 	sc->bge_tx_max_coal_bds = 400;
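
	/*
	 * These defaults are pushed to the chip by the BGE_HCC_*
	 * programming in bge_blockinit(); the coalescing engine then
	 * interrupts once either the tick or the descriptor-count
	 * threshold is reached, whichever happens first.
	 */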
2062 
2063 	/* 5705 limits RX return ring to 512 entries. */
2064 	if (BGE_IS_5705_OR_BEYOND(sc))
2065 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2066 	else
2067 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2068 
2069 	/* Set up ifnet structure */
2070 	ifp = &sc->arpcom.ac_if;
2071 	ifp->if_softc = sc;
2072 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2073 	ifp->if_ioctl = bge_ioctl;
2074 	ifp->if_start = bge_start;
2075 	ifp->if_watchdog = bge_watchdog;
2076 	IFQ_SET_MAXLEN(&ifp->if_snd, BGE_TX_RING_CNT - 1);
2077 	IFQ_SET_READY(&ifp->if_snd);
2078 	DPRINTFN(5, ("bcopy\n"));
2079 	bcopy(sc->bge_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
2080 
2081 	ifp->if_capabilities = IFCAP_VLAN_MTU;
2082 
2083 #if NVLAN > 0
2084 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
2085 #endif
2086 
2087 	if (BGE_IS_JUMBO_CAPABLE(sc))
2088 		ifp->if_hardmtu = BGE_JUMBO_MTU;
2089 
2090 	/*
2091 	 * Do MII setup.
2092 	 */
2093 	DPRINTFN(5, ("mii setup\n"));
2094 	sc->bge_mii.mii_ifp = ifp;
2095 	sc->bge_mii.mii_readreg = bge_miibus_readreg;
2096 	sc->bge_mii.mii_writereg = bge_miibus_writereg;
2097 	sc->bge_mii.mii_statchg = bge_miibus_statchg;
2098 
2099 	/*
2100 	 * Figure out what sort of media we have by checking the hardware
2101 	 * config word in the first 32K of internal NIC memory, or fall back to
2102 	 * examining the EEPROM if necessary.  Note: on some BCM5700 cards,
2103 	 * this value seems to be unset. If that's the case, we have to rely on
2104 	 * identifying the NIC by its PCI subsystem ID, as we do below for the
2105 	 * SysKonnect SK-9D41.
2106 	 */
2107 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2108 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2109 	else if (!(sc->bge_flags & BGE_NO_EEPROM)) {
2110 		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2111 		    sizeof(hwcfg))) {
2112 			printf(": failed to read media type\n");
2113 			goto fail_5;
2114 		}
2115 		hwcfg = ntohl(hwcfg);
2116 	}
2117 
2118 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
2119 	if (PCI_PRODUCT(subid) == SK_SUBSYSID_9D41 ||
2120 	    (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
2121 		if (BGE_IS_5714_FAMILY(sc))
2122 		    sc->bge_flags |= BGE_PHY_FIBER_MII;
2123 		else
2124 		    sc->bge_flags |= BGE_PHY_FIBER_TBI;
2125 	}
2126 
2127 	/* Hookup IRQ last. */
2128 	DPRINTFN(5, ("pci_intr_establish\n"));
2129 	sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc,
2130 	    sc->bge_dev.dv_xname);
2131 	if (sc->bge_intrhand == NULL) {
2132 		printf(": couldn't establish interrupt");
2133 		if (intrstr != NULL)
2134 			printf(" at %s", intrstr);
2135 		printf("\n");
2136 		goto fail_5;
2137 	}
2138 
2139 	/*
2140 	 * A Broadcom chip was detected. Inform the world.
2141 	 */
2142 	printf(": %s, address %s\n", intrstr,
2143 	    ether_sprintf(sc->arpcom.ac_enaddr));
2144 
2145 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
2146 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2147 		    bge_ifmedia_sts);
2148 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2149 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
2150 			    0, NULL);
2151 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2152 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2153 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2154 	} else {
2155 		int mii_flags;
2156 
2157 		/*
2158 		 * Do transceiver setup.
2159 		 */
2160 		ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
2161 			     bge_ifmedia_sts);
2162 		mii_flags = MIIF_DOPAUSE;
2163 		if (sc->bge_flags & BGE_PHY_FIBER_MII)
2164 			mii_flags |= MIIF_HAVEFIBER;
2165 		mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff,
2166 			   MII_PHY_ANY, MII_OFFSET_ANY, mii_flags);
2167 
2168 		if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) {
2169 			printf("%s: no PHY found!\n", sc->bge_dev.dv_xname);
2170 			ifmedia_add(&sc->bge_mii.mii_media,
2171 				    IFM_ETHER|IFM_MANUAL, 0, NULL);
2172 			ifmedia_set(&sc->bge_mii.mii_media,
2173 				    IFM_ETHER|IFM_MANUAL);
2174 		} else
2175 			ifmedia_set(&sc->bge_mii.mii_media,
2176 				    IFM_ETHER|IFM_AUTO);
2177 	}
2178 
2179 	/*
2180 	 * Call MI attach routine.
2181 	 */
2182 	if_attach(ifp);
2183 	ether_ifattach(ifp);
2184 
2185 	sc->sc_shutdownhook = shutdownhook_establish(bge_shutdown, sc);
2186 	sc->sc_powerhook = powerhook_establish(bge_power, sc);
2187 
2188 	timeout_set(&sc->bge_timeout, bge_tick, sc);
2189 	return;
2190 
2191 fail_5:
2192 	bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map);
2193 
2194 fail_4:
2195 	bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
2196 
2197 fail_3:
2198 	bus_dmamem_unmap(sc->bge_dmatag, kva,
2199 	    sizeof(struct bge_ring_data));
2200 
2201 fail_2:
2202 	bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2203 
2204 fail_1:
2205 	bus_space_unmap(sc->bge_btag, sc->bge_bhandle, size);
2206 }
2207 
2208 void
2209 bge_reset(struct bge_softc *sc)
2210 {
2211 	struct pci_attach_args *pa = &sc->bge_pa;
2212 	pcireg_t cachesize, command, pcistate, new_pcistate;
2213 	u_int32_t reset;
2214 	int i, val = 0;
2215 
2216 	/* Save some important PCI state. */
2217 	cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
2218 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD);
2219 	pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
2220 
2221 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
2222 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2223 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW);
2224 
2225 	/* Disable fastboot on controllers that support it. */
2226 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
2227 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2228 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787)
2229 		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0);
2230 
2231 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2232 
2233 	if (sc->bge_flags & BGE_PCIE) {
2234 		if (CSR_READ_4(sc, 0x7e2c) == 0x60) {
2235 			/* PCI Express 1.0 system */
2236 			CSR_WRITE_4(sc, 0x7e2c, 0x20);
2237 		}
2238 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2239 			/*
2240 			 * Prevent PCI Express link training
2241 			 * during global reset.
2242 			 */
2243 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2244 			reset |= (1<<29);
2245 		}
2246 	}
2247 
2248 	/*
2249 	 * Set GPHY Power Down Override to leave GPHY
2250 	 * powered up in D0 uninitialized.
2251 	 */
2252 	if (BGE_IS_5705_OR_BEYOND(sc))
2253 		reset |= BGE_MISCCFG_KEEP_GPHY_POWER;
2254 
2255 	/* Issue global reset */
2256 	bge_writereg_ind(sc, BGE_MISC_CFG, reset);
2257 
2258 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2259 		u_int32_t status, ctrl;
2260 
2261 		status = CSR_READ_4(sc, BGE_VCPU_STATUS);
2262 		CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2263 		    status | BGE_VCPU_STATUS_DRV_RESET);
2264 		ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2265 		CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2266 		    ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2267 
2268 		sc->bge_flags |= BGE_NO_EEPROM;
2269 	}
2270 
2271 	DELAY(1000);
2272 
2273 	if (sc->bge_flags & BGE_PCIE) {
2274 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2275 			pcireg_t v;
2276 
2277 			DELAY(500000); /* wait for link training to complete */
2278 			v = pci_conf_read(pa->pa_pc, pa->pa_tag, 0xc4);
2279 			pci_conf_write(pa->pa_pc, pa->pa_tag, 0xc4, v | (1<<15));
2280 		}
2281 
2282 		/*
2283 		 * Set PCI Express max payload size to 128 bytes
2284 		 * and clear error status.
2285 		 */
2286 		pci_conf_write(pa->pa_pc, pa->pa_tag,
2287 		    BGE_PCI_CONF_DEV_CTRL, 0xf5000);
2288 	}
2289 
2290 	/* Reset some of the PCI state that got zapped by reset */
2291 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
2292 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2293 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW);
2294 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
2295 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);
2296 	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2297 
2298 	/* Enable memory arbiter. */
2299 	if (BGE_IS_5714_FAMILY(sc)) {
2300 		u_int32_t val;
2301 
2302 		val = CSR_READ_4(sc, BGE_MARB_MODE);
2303 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2304 	} else
2305 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2306 
2307 	/*
2308 	 * Prevent PXE restart: write a magic number to the
2309 	 * general communications memory at 0xB50.
2310 	 */
2311 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2312 
2313 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2314 		for (i = 0; i < BGE_TIMEOUT; i++) {
2315 			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2316 			if (val & BGE_VCPU_STATUS_INIT_DONE)
2317 				break;
2318 			DELAY(100);
2319 		}
2320 
2321 		if (i >= BGE_TIMEOUT)
2322 			printf("%s: reset timed out\n", sc->bge_dev.dv_xname);
2323 	} else {
2324 		/*
2325 		 * Poll until we see 1's complement of the magic number.
2326 		 * This indicates that the firmware initialization
2327 		 * is complete.  We expect this to fail if no SEEPROM
2328 		 * is fitted.
2329 		 */
2330 		for (i = 0; i < BGE_TIMEOUT; i++) {
2331 			val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2332 			if (val == ~BGE_MAGIC_NUMBER)
2333 				break;
2334 			DELAY(10);
2335 		}
2336 
2337 		if (i >= BGE_TIMEOUT && (!(sc->bge_flags & BGE_NO_EEPROM)))
2338 			printf("%s: firmware handshake timed out\n",
2339 			   sc->bge_dev.dv_xname);
2340 	}
2341 
2342 	/*
2343 	 * XXX Wait for the value of the PCISTATE register to
2344 	 * return to its original pre-reset state. This is a
2345 	 * fairly good indicator of reset completion. If we don't
2346 	 * wait for the reset to fully complete, trying to read
2347 	 * from the device's non-PCI registers may yield garbage
2348 	 * results.
2349 	 */
2350 	for (i = 0; i < BGE_TIMEOUT; i++) {
2351 		new_pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag,
2352 		    BGE_PCI_PCISTATE);
2353 		if ((new_pcistate & ~BGE_PCISTATE_RESERVED) ==
2354 		    (pcistate & ~BGE_PCISTATE_RESERVED))
2355 			break;
2356 		DELAY(10);
2357 	}
2358 	if ((new_pcistate & ~BGE_PCISTATE_RESERVED) !=
2359 	    (pcistate & ~BGE_PCISTATE_RESERVED)) {
2360 		DPRINTFN(5, ("%s: pcistate failed to revert\n",
2361 		    sc->bge_dev.dv_xname));
2362 	}
2363 
2364 	/* Fix up byte swapping */
2365 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
2366 
2367 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2368 
2369 	/*
2370 	 * The 5704 in TBI mode apparently needs some special
2371 	 * adjustment to ensure the SERDES drive level is set
2372 	 * to 1.2V.
2373 	 */
2374 	if (sc->bge_flags & BGE_PHY_FIBER_TBI &&
2375 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
2376 		u_int32_t serdescfg;
2377 
2378 		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2379 		serdescfg = (serdescfg & ~0xFFF) | 0x880;
2380 		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2381 	}
2382 
2383 	if (sc->bge_flags & BGE_PCIE &&
2384 	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2385 		u_int32_t v;
2386 
2387 		/* Enable PCI Express bug fix */
2388 		v = CSR_READ_4(sc, 0x7c00);
2389 		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2390 	}
2391 	DELAY(10000);
2392 }
2393 
2394 /*
2395  * Frame reception handling. This is called if there's a frame
2396  * on the receive return list.
2397  *
2398  * Note: we have to be able to handle two possibilities here:
2399  * 1) the frame is from the Jumbo receive ring
2400  * 2) the frame is from the standard receive ring
2401  */
2402 
2403 void
2404 bge_rxeof(struct bge_softc *sc)
2405 {
2406 	struct ifnet *ifp;
2407 	int stdcnt = 0, jumbocnt = 0;
2408 	bus_dmamap_t dmamap;
2409 	bus_addr_t offset, toff;
2410 	bus_size_t tlen;
2411 	int tosync;
2412 
2413 	/* Nothing to do */
2414 	if (sc->bge_rx_saved_considx ==
2415 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx)
2416 		return;
2417 
2418 	ifp = &sc->arpcom.ac_if;
2419 
2420 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2421 	    offsetof(struct bge_ring_data, bge_status_block),
2422 	    sizeof (struct bge_status_block),
2423 	    BUS_DMASYNC_POSTREAD);
2424 
2425 	offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
2426 	tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx -
2427 	    sc->bge_rx_saved_considx;
2428 
2429 	toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd));
2430 
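	/*
	 * A negative count means the producer index has wrapped past
	 * the end of the return ring: sync the tail of the ring first,
	 * then fall through and sync from the start of the ring.
	 */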
2431 	if (tosync < 0) {
2432 		tlen = (sc->bge_return_ring_cnt - sc->bge_rx_saved_considx) *
2433 		    sizeof (struct bge_rx_bd);
2434 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2435 		    toff, tlen, BUS_DMASYNC_POSTREAD);
2436 		tosync = -tosync;
2437 	}
2438 
2439 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2440 	    offset, tosync * sizeof (struct bge_rx_bd),
2441 	    BUS_DMASYNC_POSTREAD);
2442 
2443 	while (sc->bge_rx_saved_considx !=
2444 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
2445 		struct bge_rx_bd	*cur_rx;
2446 		u_int32_t		rxidx;
2447 		struct mbuf		*m = NULL;
2448 #ifdef BGE_CHECKSUM
2449 		u_int16_t		sumflags = 0;
2450 #endif
2451 
2452 		cur_rx = &sc->bge_rdata->
2453 			bge_rx_return_ring[sc->bge_rx_saved_considx];
2454 
2455 		rxidx = cur_rx->bge_idx;
2456 		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2457 
2458 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2459 			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2460 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2461 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2462 			jumbocnt++;
2463 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2464 				ifp->if_ierrors++;
2465 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2466 				continue;
2467 			}
2468 			if (bge_newbuf_jumbo(sc, sc->bge_jumbo, NULL)
2469 			    == ENOBUFS) {
2470 				struct mbuf             *m0;
2471 				m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
2472 				    cur_rx->bge_len - ETHER_CRC_LEN +
2473 				    ETHER_ALIGN, 0, ifp, NULL);
2474 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2475 				if (m0 == NULL) {
2476 					ifp->if_ierrors++;
2477 					continue;
2478 				}
2479 				m_adj(m0, ETHER_ALIGN);
2480 				m = m0;
2481 			}
2482 		} else {
2483 			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2484 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2485 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2486 			stdcnt++;
2487 			dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
2488 			sc->bge_cdata.bge_rx_std_map[rxidx] = 0;
2489 			bus_dmamap_unload(sc->bge_dmatag, dmamap);
2490 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2491 				ifp->if_ierrors++;
2492 				bge_newbuf_std(sc, sc->bge_std, m, dmamap);
2493 				continue;
2494 			}
2495 			if (bge_newbuf_std(sc, sc->bge_std,
2496 			    NULL, dmamap) == ENOBUFS) {
2497 				ifp->if_ierrors++;
2498 				bge_newbuf_std(sc, sc->bge_std, m, dmamap);
2499 				continue;
2500 			}
2501 		}
2502 
2503 		ifp->if_ipackets++;
2504 #ifdef __STRICT_ALIGNMENT
2505 		/*
2506 		 * The i386 allows unaligned accesses, but for other
2507 		 * platforms we must make sure the payload is aligned.
2508 		 */
2509 		if (sc->bge_flags & BGE_RX_ALIGNBUG) {
2510 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2511 			    cur_rx->bge_len);
2512 			m->m_data += ETHER_ALIGN;
2513 		}
2514 #endif
2515 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2516 		m->m_pkthdr.rcvif = ifp;
2517 
2518 #if NBPFILTER > 0
2519 		/*
2520 		 * Handle BPF listeners. Let the BPF user see the packet.
2521 		 */
2522 		if (ifp->if_bpf)
2523 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
2524 #endif
2525 
2526 #ifdef BGE_CHECKSUM
2527 		if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2528 			sumflags |= M_IPV4_CSUM_IN_OK;
2529 		else
2530 			sumflags |= M_IPV4_CSUM_IN_BAD;
2531 
2532 		if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2533 			m->m_pkthdr.csum_data =
2534 				cur_rx->bge_tcp_udp_csum;
2535 			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
2536 		}
2537 
2538 		m->m_pkthdr.csum_flags = sumflags;
2539 		sumflags = 0;
2540 #endif
2541 		ether_input_mbuf(ifp, m);
2542 	}
2543 
2544 	bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2545 	if (stdcnt)
2546 		bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2547 	if (jumbocnt)
2548 		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2549 }
2550 
2551 void
2552 bge_txeof(struct bge_softc *sc)
2553 {
2554 	struct bge_tx_bd *cur_tx = NULL;
2555 	struct ifnet *ifp;
2556 	struct txdmamap_pool_entry *dma;
2557 	bus_addr_t offset, toff;
2558 	bus_size_t tlen;
2559 	int tosync;
2560 	struct mbuf *m;
2561 
2562 	/* Nothing to do */
2563 	if (sc->bge_tx_saved_considx ==
2564 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx)
2565 		return;
2566 
2567 	ifp = &sc->arpcom.ac_if;
2568 
2569 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2570 	    offsetof(struct bge_ring_data, bge_status_block),
2571 	    sizeof (struct bge_status_block),
2572 	    BUS_DMASYNC_POSTREAD);
2573 
2574 	offset = offsetof(struct bge_ring_data, bge_tx_ring);
2575 	tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
2576 	    sc->bge_tx_saved_considx;
2577 
2578 	toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
2579 
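	/* As in bge_rxeof(), a negative count means the producer index
	 * wrapped; sync the tail segment first, then the head below. */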
2580 	if (tosync < 0) {
2581 		tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
2582 		    sizeof (struct bge_tx_bd);
2583 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2584 		    toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2585 		tosync = -tosync;
2586 	}
2587 
2588 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2589 	    offset, tosync * sizeof (struct bge_tx_bd),
2590 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2591 
2592 	/*
2593 	 * Go through our tx ring and free mbufs for those
2594 	 * frames that have been sent.
2595 	 */
2596 	while (sc->bge_tx_saved_considx !=
2597 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
2598 		u_int32_t		idx = 0;
2599 
2600 		idx = sc->bge_tx_saved_considx;
2601 		cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2602 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2603 			ifp->if_opackets++;
2604 		m = sc->bge_cdata.bge_tx_chain[idx];
2605 		if (m != NULL) {
2606 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2607 			dma = sc->txdma[idx];
2608 			bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
2609 			    dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2610 			bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
2611 			SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
2612 			sc->txdma[idx] = NULL;
2613 
2614 			m_freem(m);
2615 		}
2616 		sc->bge_txcnt--;
2617 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2618 	}
2619 
2620 	if (sc->bge_txcnt < BGE_TX_RING_CNT - 16)
2621 		ifp->if_flags &= ~IFF_OACTIVE;
2622 	if (sc->bge_txcnt == 0)
2623 		ifp->if_timer = 0;
2624 }
2625 
2626 int
2627 bge_intr(void *xsc)
2628 {
2629 	struct bge_softc *sc;
2630 	struct ifnet *ifp;
2631 	u_int32_t statusword;
2632 
2633 	sc = xsc;
2634 	ifp = &sc->arpcom.ac_if;
2635 
2636 	/* It is possible for the interrupt to arrive before
2637 	 * the status block update has been posted to host memory.
2638 	 * Reading the PCI state register will confirm whether the
2639 	 * interrupt is ours and will flush the status block.
2640 	 */
2641 
2642 	/* read status word from status block */
2643 	statusword = sc->bge_rdata->bge_status_block.bge_status;
2644 
2645 	if ((statusword & BGE_STATFLAG_UPDATED) ||
2646 	    (!(CSR_READ_4(sc, BGE_PCI_PCISTATE) & BGE_PCISTATE_INTR_NOT_ACTIVE))) {
2647 
2648 		/* Ack interrupt and stop others from occurring. */
2649 		bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
2650 
2651 		/* clear status word */
2652 		sc->bge_rdata->bge_status_block.bge_status = 0;
2653 
2654 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2655 		    statusword & BGE_STATFLAG_LINKSTATE_CHANGED ||
2656 		    BGE_STS_BIT(sc, BGE_STS_LINK_EVT))
2657 			bge_link_upd(sc);
2658 
2659 		if (ifp->if_flags & IFF_RUNNING) {
2660 			/* Check RX return ring producer/consumer */
2661 			bge_rxeof(sc);
2662 
2663 			/* Check TX ring producer/consumer */
2664 			bge_txeof(sc);
2665 		}
2666 
2667 		/* Re-enable interrupts. */
2668 		bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
2669 
2670 		bge_start(ifp);
2671 
2672 		return (1);
2673 	} else
2674 		return (0);
2675 }
2676 
2677 void
2678 bge_tick(void *xsc)
2679 {
2680 	struct bge_softc *sc = xsc;
2681 	struct mii_data *mii = &sc->bge_mii;
2682 	int s;
2683 
2684 	s = splnet();
2685 
2686 	if (BGE_IS_5705_OR_BEYOND(sc))
2687 		bge_stats_update_regs(sc);
2688 	else
2689 		bge_stats_update(sc);
2690 
2691 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
2692 		/*
2693 		 * Auto-polling can't be used in TBI mode, so poll the link
2694 		 * status manually: register a pending link event and trigger
2695 		 * an interrupt.
2696 		 */
2697 		BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
2698 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
2699 	} else {
2700 		/*
2701 		 * Do not touch the PHY if we have link up. This could break
2702 		 * IPMI/ASF mode or produce extra input errors
2703 		 * (extra input errors were reported for bcm5701 & bcm5704).
2704 		 */
2705 		if (!BGE_STS_BIT(sc, BGE_STS_LINK))
2706 			mii_tick(mii);
2707 	}
2708 
2709 	timeout_add_sec(&sc->bge_timeout, 1);
2710 
2711 	splx(s);
2712 }
2713 
2714 void
2715 bge_stats_update_regs(struct bge_softc *sc)
2716 {
2717 	struct ifnet *ifp = &sc->arpcom.ac_if;
2718 
2719 	ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
2720 	    offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
2721 
2722 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
2723 
2724 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
2725 
2726 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
2727 }
2728 
2729 void
2730 bge_stats_update(struct bge_softc *sc)
2731 {
2732 	struct ifnet *ifp = &sc->arpcom.ac_if;
2733 	bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2734 	u_int32_t cnt;
2735 
2736 #define READ_STAT(sc, stats, stat) \
2737 	  CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
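
	/*
	 * Statistics live in NIC memory as 64-bit counters; only the
	 * low word (bge_addr_lo) is read, and unsigned 32-bit delta
	 * arithmetic keeps the accumulation correct across wraps.
	 */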
2738 
2739 	cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
2740 	ifp->if_collisions += (u_int32_t)(cnt - sc->bge_tx_collisions);
2741 	sc->bge_tx_collisions = cnt;
2742 
2743 	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
2744 	ifp->if_ierrors += (u_int32_t)(cnt - sc->bge_rx_discards);
2745 	sc->bge_rx_discards = cnt;
2746 
2747 	cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo);
2748 	ifp->if_ierrors += (u_int32_t)(cnt - sc->bge_rx_inerrors);
2749 	sc->bge_rx_inerrors = cnt;
2750 
2751 	cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo);
2752 	ifp->if_ierrors += (u_int32_t)(cnt - sc->bge_rx_overruns);
2753 	sc->bge_rx_overruns = cnt;
2754 
2755 	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
2756 	ifp->if_oerrors += (u_int32_t)(cnt - sc->bge_tx_discards);
2757 	sc->bge_tx_discards = cnt;
2758 
2759 #undef READ_STAT
2760 }
2761 
2762 /*
2763  * Compact outbound packets to avoid bug with DMA segments less than 8 bytes.
2764  */
2765 int
2766 bge_compact_dma_runt(struct mbuf *pkt)
2767 {
2768 	struct mbuf	*m, *prev, *n = NULL;
2769 	int 		totlen, prevlen, newprevlen;
2770 
2771 	prev = NULL;
2772 	totlen = 0;
2773 	prevlen = -1;
2774 
2775 	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
2776 		int mlen = m->m_len;
2777 		int shortfall = 8 - mlen;
2778 
2779 		totlen += mlen;
2780 		if (mlen == 0)
2781 			continue;
2782 		if (mlen >= 8)
2783 			continue;
2784 
2785 		/* If we get here, the mbuf data is too small for the DMA engine.
2786 		 * Try to fix by shuffling data to prev or next in chain.
2787 		 * If that fails, do a compacting deep-copy of the whole chain.
2788 		 */
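
		/*
		 * Three strategies, tried in order:
		 *  1. prev has trailing space: append the runt to prev.
		 *  2. m is writable and m_next is long enough: pull data
		 *     forward from m_next until m is at least 8 bytes.
		 *  3. Otherwise allocate a fresh mbuf, steal enough tail
		 *     bytes from prev, append the runt's data, and splice
		 *     the new mbuf into the chain in m's place.
		 */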
2789 
2790 		/* Internal frag. If fits in prev, copy it there. */
2791 		if (prev && M_TRAILINGSPACE(prev) >= m->m_len) {
2792 			bcopy(m->m_data,
2793 			      prev->m_data+prev->m_len,
2794 			      mlen);
2795 			prev->m_len += mlen;
2796 			m->m_len = 0;
2797 			/* XXX stitch chain */
2798 			prev->m_next = m_free(m);
2799 			m = prev;
2800 			continue;
2801 		} else if (m->m_next != NULL &&
2802 			   M_TRAILINGSPACE(m) >= shortfall &&
2803 			   m->m_next->m_len >= (8 + shortfall)) {
2804 			/* m is writable and next has enough data; pull it up. */
2805 
2806 			bcopy(m->m_next->m_data,
2807 			      m->m_data+m->m_len,
2808 			      shortfall);
2809 			m->m_len += shortfall;
2810 			m->m_next->m_len -= shortfall;
2811 			m->m_next->m_data += shortfall;
2812 		} else if (m->m_next == NULL || 1) {
2813 			/* Got a runt at the very end of the packet.
2814 			 * Borrow data from the tail of the preceding mbuf and
2815 			 * update its length in-place. (The original data is still
2816 			 * valid, so we can do this even if prev is not writable.)
2817 			 */
2818 
2819 			/* if we'd make prev a runt, just move all of its data. */
2820 			/* If we'd make prev a runt, just move all of its data. */
2821 			KASSERT(prev != NULL /*, ("runt but null PREV")*/);
2822 			KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
2823 #endif
2824 			if ((prev->m_len - shortfall) < 8)
2825 				shortfall = prev->m_len;
2826 
2827 			newprevlen = prev->m_len - shortfall;
2828 
2829 			MGET(n, M_NOWAIT, MT_DATA);
2830 			if (n == NULL)
2831 				return (ENOBUFS);
2832 			KASSERT(m->m_len + shortfall < MLEN
2833 				/*,
2834 				  ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
2835 
2836 			/* first copy the data we're stealing from prev */
2837 			bcopy(prev->m_data + newprevlen, n->m_data, shortfall);
2838 
2839 			/* update prev->m_len accordingly */
2840 			prev->m_len -= shortfall;
2841 
2842 			/* copy data from runt m */
2843 			bcopy(m->m_data, n->m_data + shortfall, m->m_len);
2844 
2845 			/* n holds what we stole from prev, plus m */
2846 			n->m_len = shortfall + m->m_len;
2847 
2848 			/* stitch n into chain and free m */
2849 			n->m_next = m->m_next;
2850 			prev->m_next = n;
2851 			/* KASSERT(m->m_next == NULL); */
2852 			m->m_next = NULL;
2853 			m_free(m);
2854 			m = n;	/* for continuing loop */
2855 		}
2856 		prevlen = m->m_len;
2857 	}
2858 	return (0);
2859 }
2860 
2861 /*
2862  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2863  * pointers to descriptors.
2864  */
2865 int
2866 bge_encap(struct bge_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
2867 {
2868 	struct bge_tx_bd	*f = NULL;
2869 	u_int32_t		frag, cur;
2870 	u_int16_t		csum_flags = 0;
2871 	struct txdmamap_pool_entry *dma;
2872 	bus_dmamap_t dmamap;
2873 	int			i = 0;
2874 #if NVLAN > 0
2875 	struct ifvlan		*ifv = NULL;
2876 
2877 	if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
2878 	    m_head->m_pkthdr.rcvif != NULL)
2879 		ifv = m_head->m_pkthdr.rcvif->if_softc;
2880 #endif
2881 
2882 	cur = frag = *txidx;
2883 
2884 #ifdef BGE_CHECKSUM
2885 	if (m_head->m_pkthdr.csum_flags) {
2886 		if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
2887 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2888 		if (m_head->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT |
2889 					     M_UDPV4_CSUM_OUT))
2890 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2891 #ifdef fake
2892 		if (m_head->m_flags & M_LASTFRAG)
2893 			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2894 		else if (m_head->m_flags & M_FRAG)
2895 			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2896 #endif
2897 	}
2898 #endif
2899 	if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5700_BX)
2900 		goto doit;
2901 
2902 	/*
2903 	 * bcm5700 Revision B silicon cannot handle DMA descriptors with
2904 	 * less than eight bytes.  If we encounter a teeny mbuf
2905 	 * at the end of a chain, we can pad.  Otherwise, copy.
2906 	 */
2907 	if (bge_compact_dma_runt(m_head) != 0)
2908 		return (ENOBUFS);
2909 
2910 doit:
2911 	dma = SLIST_FIRST(&sc->txdma_list);
2912 	if (dma == NULL)
2913 		return (ENOBUFS);
2914 	dmamap = dma->dmamap;
2915 
2916 	/*
2917 	 * Start packing the mbufs in this chain into
2918 	 * the fragment pointers. Stop when we run out
2919 	 * of fragments or hit the end of the mbuf chain.
2920 	 */
2921 	if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
2922 	    BUS_DMA_NOWAIT))
2923 		return (ENOBUFS);
2924 
2925 	/*
2926 	 * Sanity check: avoid coming within 16 descriptors
2927 	 * of the end of the ring.
2928 	 */
2929 	if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16))
2930 		goto fail_unload;
2931 
2932 	for (i = 0; i < dmamap->dm_nsegs; i++) {
2933 		f = &sc->bge_rdata->bge_tx_ring[frag];
2934 		if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
2935 			break;
2936 		BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
2937 		f->bge_len = dmamap->dm_segs[i].ds_len;
2938 		f->bge_flags = csum_flags;
2939 #if NVLAN > 0
2940 		if (ifv != NULL) {
2941 			f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2942 			f->bge_vlan_tag = ifv->ifv_tag;
2943 		} else {
2944 			f->bge_vlan_tag = 0;
2945 		}
2946 #endif
2947 		cur = frag;
2948 		BGE_INC(frag, BGE_TX_RING_CNT);
2949 	}
2950 
2951 	if (i < dmamap->dm_nsegs)
2952 		goto fail_unload;
2953 
2954 	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
2955 	    BUS_DMASYNC_PREWRITE);
2956 
2957 	if (frag == sc->bge_tx_saved_considx)
2958 		goto fail_unload;
2959 
2960 	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
2961 	sc->bge_cdata.bge_tx_chain[cur] = m_head;
2962 	SLIST_REMOVE_HEAD(&sc->txdma_list, link);
2963 	sc->txdma[cur] = dma;
2964 	sc->bge_txcnt += dmamap->dm_nsegs;
2965 
2966 	*txidx = frag;
2967 
2968 	return (0);
2969 
2970 fail_unload:
2971 	bus_dmamap_unload(sc->bge_dmatag, dmamap);
2972 
2973 	return (ENOBUFS);
2974 }
2975 
2976 /*
2977  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2978  * to the mbuf data regions directly in the transmit descriptors.
2979  */
2980 void
2981 bge_start(struct ifnet *ifp)
2982 {
2983 	struct bge_softc *sc;
2984 	struct mbuf *m_head = NULL;
2985 	u_int32_t prodidx;
2986 	int pkts = 0;
2987 
2988 	sc = ifp->if_softc;
2989 
2990 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
2991 		return;
2992 	if (!BGE_STS_BIT(sc, BGE_STS_LINK))
2993 		return;
2994 	if (IFQ_IS_EMPTY(&ifp->if_snd))
2995 		return;
2996 
2997 	prodidx = sc->bge_tx_prodidx;
2998 
2999 	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3000 		IFQ_POLL(&ifp->if_snd, m_head);
3001 		if (m_head == NULL)
3002 			break;
3003 
3004 		/*
3005 		 * Pack the data into the transmit ring. If we
3006 		 * don't have room, set the OACTIVE flag and wait
3007 		 * for the NIC to drain the ring.
3008 		 */
3009 		if (bge_encap(sc, m_head, &prodidx)) {
3010 			ifp->if_flags |= IFF_OACTIVE;
3011 			break;
3012 		}
3013 
3014 		/* now we are committed to transmit the packet */
3015 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
3016 		pkts++;
3017 
3018 #if NBPFILTER > 0
3019 		/*
3020 		 * If there's a BPF listener, bounce a copy of this frame
3021 		 * to him.
3022 		 */
3023 		if (ifp->if_bpf)
3024 			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
3025 #endif
3026 	}
3027 	if (pkts == 0)
3028 		return;
3029 
3030 	/* Transmit */
3031 	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
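	/* 5700 Bx errata: the producer index must be written twice. */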
3032 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
3033 		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3034 
3035 	sc->bge_tx_prodidx = prodidx;
3036 
3037 	/*
3038 	 * Set a timeout in case the chip goes out to lunch.
3039 	 */
3040 	ifp->if_timer = 5;
3041 }
3042 
3043 void
3044 bge_init(void *xsc)
3045 {
3046 	struct bge_softc *sc = xsc;
3047 	struct ifnet *ifp;
3048 	u_int16_t *m;
3049 	int s;
3050 
3051 	s = splnet();
3052 
3053 	ifp = &sc->arpcom.ac_if;
3054 
3055 	/* Cancel pending I/O and flush buffers. */
3056 	bge_stop(sc);
3057 	bge_reset(sc);
3058 	bge_chipinit(sc);
3059 
3060 	/*
3061 	 * Init the various state machines, ring
3062 	 * control blocks and firmware.
3063 	 */
3064 	if (bge_blockinit(sc)) {
3065 		printf("%s: initialization failure\n", sc->bge_dev.dv_xname);
3066 		splx(s);
3067 		return;
3068 	}
3069 
3070 	ifp = &sc->arpcom.ac_if;
3071 
3072 	/* Specify MRU. */
3073 	if (BGE_IS_JUMBO_CAPABLE(sc))
3074 		CSR_WRITE_4(sc, BGE_RX_MTU,
3075 			BGE_JUMBO_FRAMELEN + ETHER_VLAN_ENCAP_LEN);
3076 	else
3077 		CSR_WRITE_4(sc, BGE_RX_MTU,
3078 			ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
3079 
3080 	/* Load our MAC address. */
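	/*
	 * The six octets go out as one 16-bit and one 32-bit register
	 * write, byte-swapped with htons() into the chip's layout.
	 */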
3081 	m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
3082 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3083 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3084 
3085 	/* Disable hardware decapsulation of vlan frames. */
3086 	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
3087 
3088 	/* Program promiscuous mode and multicast filters. */
3089 	bge_iff(sc);
3090 
3091 	/* Init RX ring. */
3092 	bge_init_rx_ring_std(sc);
3093 
3094 	/*
3095 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3096 	 * memory to ensure that the chip has in fact read the first
3097 	 * entry of the ring.
3098 	 */
3099 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3100 		u_int32_t		v, i;
3101 		for (i = 0; i < 10; i++) {
3102 			DELAY(20);
3103 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3104 			if (v == (MCLBYTES - ETHER_ALIGN))
3105 				break;
3106 		}
3107 		if (i == 10)
3108 			printf("%s: 5705 A0 chip failed to load RX ring\n",
3109 			    sc->bge_dev.dv_xname);
3110 	}
3111 
3112 	/* Init Jumbo RX ring. */
3113 	if (BGE_IS_JUMBO_CAPABLE(sc))
3114 		bge_init_rx_ring_jumbo(sc);
3115 
3116 	/* Init our RX return ring index */
3117 	sc->bge_rx_saved_considx = 0;
3118 
3119 	/* Init our RX/TX stat counters. */
3120 	sc->bge_tx_collisions = 0;
3121 	sc->bge_rx_discards = 0;
3122 	sc->bge_rx_inerrors = 0;
3123 	sc->bge_rx_overruns = 0;
3124 	sc->bge_tx_discards = 0;
3125 
3126 	/* Init TX ring. */
3127 	bge_init_tx_ring(sc);
3128 
3129 	/* Turn on transmitter */
3130 	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3131 
3132 	/* Turn on receiver */
3133 	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3134 
3135 	CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3136 
3137 	/* Tell firmware we're alive. */
3138 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3139 
3140 	/* Enable host interrupts. */
3141 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3142 	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3143 	bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3144 
3145 	bge_ifmedia_upd(ifp);
3146 
3147 	ifp->if_flags |= IFF_RUNNING;
3148 	ifp->if_flags &= ~IFF_OACTIVE;
3149 
3150 	splx(s);
3151 
3152 	timeout_add_sec(&sc->bge_timeout, 1);
3153 }
3154 
3155 /*
3156  * Set media options.
3157  */
3158 int
3159 bge_ifmedia_upd(struct ifnet *ifp)
3160 {
3161 	struct bge_softc *sc = ifp->if_softc;
3162 	struct mii_data *mii = &sc->bge_mii;
3163 	struct ifmedia *ifm = &sc->bge_ifmedia;
3164 
3165 	/* If this is a 1000baseX NIC, enable the TBI port. */
3166 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3167 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3168 			return (EINVAL);
3169 		switch (IFM_SUBTYPE(ifm->ifm_media)) {
3170 		case IFM_AUTO:
3171 			/*
3172 			 * The BCM5704 ASIC appears to have a special
3173 			 * mechanism for programming the autoneg
3174 			 * advertisement registers in TBI mode.
3175 			 */
3176 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
3177 				u_int32_t sgdig;
3178 				sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
3179 				if (sgdig & BGE_SGDIGSTS_DONE) {
3180 					CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3181 					sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3182 					sgdig |= BGE_SGDIGCFG_AUTO |
3183 					    BGE_SGDIGCFG_PAUSE_CAP |
3184 					    BGE_SGDIGCFG_ASYM_PAUSE;
3185 					CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3186 					    sgdig | BGE_SGDIGCFG_SEND);
3187 					DELAY(5);
3188 					CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3189 				}
3190 			}
3191 			break;
3192 		case IFM_1000_SX:
3193 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3194 				BGE_CLRBIT(sc, BGE_MAC_MODE,
3195 				    BGE_MACMODE_HALF_DUPLEX);
3196 			} else {
3197 				BGE_SETBIT(sc, BGE_MAC_MODE,
3198 				    BGE_MACMODE_HALF_DUPLEX);
3199 			}
3200 			break;
3201 		default:
3202 			return (EINVAL);
3203 		}
3204 		/* XXX 802.3x flow control for 1000BASE-SX */
3205 		return (0);
3206 	}
3207 
3208 	BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
3209 	if (mii->mii_instance) {
3210 		struct mii_softc *miisc;
3211 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3212 			mii_phy_reset(miisc);
3213 	}
3214 	mii_mediachg(mii);
3215 
3216 	/*
3217 	 * Force an interrupt so that we will call bge_link_upd
3218 	 * if needed and clear any pending link state attention.
3219 	 * Without this we would get no further link state interrupts,
3220 	 * the link would never come up, and bge_start() could not
3221 	 * send; the only way to recover was to receive a packet and
3222 	 * take an RX interrupt.
3223 	 */
3224 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
3225 	    sc->bge_flags & BGE_IS_5788)
3226 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3227 	else
3228 		BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3229 
3230 	return (0);
3231 }
3232 
3233 /*
3234  * Report current media status.
3235  */
3236 void
3237 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3238 {
3239 	struct bge_softc *sc = ifp->if_softc;
3240 	struct mii_data *mii = &sc->bge_mii;
3241 
3242 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3243 		ifmr->ifm_status = IFM_AVALID;
3244 		ifmr->ifm_active = IFM_ETHER;
3245 		if (CSR_READ_4(sc, BGE_MAC_STS) &
3246 		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
3247 			ifmr->ifm_status |= IFM_ACTIVE;
3248 		} else {
3249 			ifmr->ifm_active |= IFM_NONE;
3250 			return;
3251 		}
3252 		ifmr->ifm_active |= IFM_1000_SX;
3253 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3254 			ifmr->ifm_active |= IFM_HDX;
3255 		else
3256 			ifmr->ifm_active |= IFM_FDX;
3257 		return;
3258 	}
3259 
3260 	mii_pollstat(mii);
3261 	ifmr->ifm_status = mii->mii_media_status;
3262 	ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
3263 	    sc->bge_flowflags;
3264 }
3265 
3266 int
3267 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3268 {
3269 	struct bge_softc *sc = ifp->if_softc;
3270 	struct ifreq *ifr = (struct ifreq *) data;
3271 	struct ifaddr *ifa = (struct ifaddr *)data;
3272 	int s, error = 0;
3273 	struct mii_data *mii;
3274 
3275 	s = splnet();
3276 
3277 	if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) {
3278 		splx(s);
3279 		return (error);
3280 	}
3281 
3282 	switch (command) {
3283 	case SIOCSIFADDR:
3284 		ifp->if_flags |= IFF_UP;
3285 		if (!(ifp->if_flags & IFF_RUNNING))
3286 			bge_init(sc);
3287 #ifdef INET
3288 		if (ifa->ifa_addr->sa_family == AF_INET)
3289 			arp_ifinit(&sc->arpcom, ifa);
3290 #endif /* INET */
3291 		break;
3292 	case SIOCSIFMTU:
3293 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
3294 			error = EINVAL;
3295 		else if (ifp->if_mtu != ifr->ifr_mtu)
3296 			ifp->if_mtu = ifr->ifr_mtu;
3297 		break;
3298 	case SIOCSIFFLAGS:
3299 		if (ifp->if_flags & IFF_UP) {
3300 			if (ifp->if_flags & IFF_RUNNING)
3301 				bge_iff(sc);
3302 			else
3303 				bge_init(sc);
3304 		} else {
3305 			if (ifp->if_flags & IFF_RUNNING)
3306 				bge_stop(sc);
3307 		}
3308 		sc->bge_if_flags = ifp->if_flags;
3309 		break;
3310 	case SIOCADDMULTI:
3311 	case SIOCDELMULTI:
3312 		error = (command == SIOCADDMULTI)
3313 			? ether_addmulti(ifr, &sc->arpcom)
3314 			: ether_delmulti(ifr, &sc->arpcom);
3315 
3316 		if (error == ENETRESET) {
3317 			if (ifp->if_flags & IFF_RUNNING)
3318 				bge_iff(sc);
3319 			error = 0;
3320 		}
3321 		break;
3322 	case SIOCSIFMEDIA:
3323 		/* XXX Flow control is not supported for 1000BASE-SX */
3324 		if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3325 			ifr->ifr_media &= ~IFM_ETH_FMASK;
3326 			sc->bge_flowflags = 0;
3327 		}
3328 
3329 		/* Flow control requires full-duplex mode. */
3330 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3331 		    (ifr->ifr_media & IFM_FDX) == 0) {
3332 		    	ifr->ifr_media &= ~IFM_ETH_FMASK;
3333 		}
3334 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3335 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3336 				/* We can do both TXPAUSE and RXPAUSE. */
3337 				ifr->ifr_media |=
3338 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3339 			}
3340 			sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3341 		}
3342 		/* FALLTHROUGH */
3343 	case SIOCGIFMEDIA:
3344 		if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3345 			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
3346 			    command);
3347 		} else {
3348 			mii = &sc->bge_mii;
3349 			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
3350 			    command);
3351 		}
3352 		break;
3353 	default:
3354 		error = ENOTTY;
3355 		break;
3356 	}
3357 
3358 	splx(s);
3359 
3360 	return (error);
3361 }
3362 
3363 void
3364 bge_watchdog(struct ifnet *ifp)
3365 {
3366 	struct bge_softc *sc;
3367 
3368 	sc = ifp->if_softc;
3369 
3370 	printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);
3371 
3372 	bge_init(sc);
3373 
3374 	ifp->if_oerrors++;
3375 }
3376 
3377 void
3378 bge_stop_block(struct bge_softc *sc, bus_size_t reg, u_int32_t bit)
3379 {
3380 	int i;
3381 
3382 	BGE_CLRBIT(sc, reg, bit);
3383 
3384 	for (i = 0; i < BGE_TIMEOUT; i++) {
3385 		if ((CSR_READ_4(sc, reg) & bit) == 0)
3386 			return;
3387 		delay(100);
3388 	}
3389 
3390 	DPRINTFN(5, ("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
3391 	    sc->bge_dev.dv_xname, (u_long) reg, bit));
3392 }
3393 
3394 /*
3395  * Stop the adapter and free any mbufs allocated to the
3396  * RX and TX lists.
3397  */
3398 void
3399 bge_stop(struct bge_softc *sc)
3400 {
3401 	struct ifnet *ifp = &sc->arpcom.ac_if;
3402 	struct ifmedia_entry *ifm;
3403 	struct mii_data *mii;
3404 	int mtmp, itmp;
3405 
3406 	timeout_del(&sc->bge_timeout);
3407 
3408 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3409 
3410 	/*
3411 	 * Disable all of the receiver blocks
3412 	 */
3413 	bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3414 	bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3415 	bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3416 	if (!(BGE_IS_5705_OR_BEYOND(sc)))
3417 		bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3418 	bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3419 	bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3420 	bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3421 
3422 	/*
3423 	 * Disable all of the transmit blocks
3424 	 */
3425 	bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3426 	bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3427 	bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3428 	bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3429 	bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3430 	if (!(BGE_IS_5705_OR_BEYOND(sc)))
3431 		bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3432 	bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3433 
3434 	/*
3435 	 * Shut down all of the memory managers and related
3436 	 * state machines.
3437 	 */
3438 	bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3439 	bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3440 	if (!(BGE_IS_5705_OR_BEYOND(sc)))
3441 		bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3442 
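	/* Reset all of the chip's flow-through queues (FTQs). */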
3443 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3444 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3445 
3446 	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
3447 		bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3448 		bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3449 	}
3450 
3451 	/* Disable host interrupts. */
3452 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3453 	bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3454 
3455 	/*
3456 	 * Tell firmware we're shutting down.
3457 	 */
3458 	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3459 
3460 	/* Free the RX lists. */
3461 	bge_free_rx_ring_std(sc);
3462 
3463 	/* Free jumbo RX list. */
3464 	if (BGE_IS_JUMBO_CAPABLE(sc))
3465 		bge_free_rx_ring_jumbo(sc);
3466 
3467 	/* Free TX buffers. */
3468 	bge_free_tx_ring(sc);
3469 
3470 	/*
3471 	 * Isolate/power down the PHY, but leave the media selection
3472 	 * unchanged so that things will be put back to normal when
3473 	 * we bring the interface back up.
3474 	 */
3475 	if (!(sc->bge_flags & BGE_PHY_FIBER_TBI)) {
3476 		mii = &sc->bge_mii;
3477 		itmp = ifp->if_flags;
3478 		ifp->if_flags |= IFF_UP;
3479 		ifm = mii->mii_media.ifm_cur;
3480 		mtmp = ifm->ifm_media;
3481 		ifm->ifm_media = IFM_ETHER|IFM_NONE;
3482 		mii_mediachg(mii);
3483 		ifm->ifm_media = mtmp;
3484 		ifp->if_flags = itmp;
3485 	}
3486 
3487 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3488 
3489 	/* Clear MAC's link state (PHY may still have link UP). */
3490 	BGE_STS_CLRBIT(sc, BGE_STS_LINK);
3491 }
3492 
3493 /*
3494  * Stop all chip I/O so that the kernel's probe routines don't
3495  * get confused by errant DMAs when rebooting.
3496  */
3497 void
3498 bge_shutdown(void *xsc)
3499 {
3500 	struct bge_softc *sc = (struct bge_softc *)xsc;
3501 
3502 	bge_stop(sc);
3503 	bge_reset(sc);
3504 }
3505 
3506 void
3507 bge_link_upd(struct bge_softc *sc)
3508 {
3509 	struct ifnet *ifp = &sc->arpcom.ac_if;
3510 	struct mii_data *mii = &sc->bge_mii;
3511 	u_int32_t status;
3512 	int link;
3513 
3514 	/* Clear 'pending link event' flag */
3515 	BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT);
3516 
3517 	/*
3518 	 * Process link state changes.
3519 	 * Grrr. The link status word in the status block does
3520 	 * not work correctly on the BCM5700 rev AX and BX chips,
3521 	 * according to all available information. Hence, we have
3522 	 * to enable MII interrupts in order to properly detect
3523 	 * asynchronous link changes. Unfortunately, this also
3524 	 * means that we have to read the MAC status register to
3525 	 * detect link changes, thereby adding an additional
3526 	 * register access to every invocation of the interrupt
3527 	 * handler.
3528 	 */
3529 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) {
3530 		status = CSR_READ_4(sc, BGE_MAC_STS);
3531 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
3532 			mii_pollstat(mii);
3533 
3534 			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
3535 			    mii->mii_media_status & IFM_ACTIVE &&
3536 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
3537 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
3538 			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
3539 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
3540 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
3541 				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
3542 
3543 			/* Clear the interrupt */
3544 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
3545 			    BGE_EVTENB_MI_INTERRUPT);
3546 			bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR);
3547 			bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR,
3548 			    BRGPHY_INTRS);
3549 		}
3550 		return;
3551 	}
3552 
3553 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3554 		status = CSR_READ_4(sc, BGE_MAC_STS);
3555 		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3556 			if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
3557 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
3558 				if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
3559 					BGE_CLRBIT(sc, BGE_MAC_MODE,
3560 					    BGE_MACMODE_TBI_SEND_CFGS);
3561 				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3562 				status = CSR_READ_4(sc, BGE_MAC_MODE);
3563 				ifp->if_link_state =
3564 				    (status & BGE_MACMODE_HALF_DUPLEX) ?
3565 				    LINK_STATE_HALF_DUPLEX :
3566 				    LINK_STATE_FULL_DUPLEX;
3567 				if_link_state_change(ifp);
3568 				ifp->if_baudrate = IF_Gbps(1);
3569 			}
3570 		} else if (BGE_STS_BIT(sc, BGE_STS_LINK)) {
3571 			BGE_STS_CLRBIT(sc, BGE_STS_LINK);
3572 			ifp->if_link_state = LINK_STATE_DOWN;
3573 			if_link_state_change(ifp);
3574 			ifp->if_baudrate = 0;
3575 		}
3576 	/*
3577 	 * Discard link events for MII/GMII cards if MI auto-polling is
3578 	 * disabled.  This should not happen since mii callouts are locked
3579 	 * now, but we keep this check for debugging.
3580 	 */
3581 	} else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) {
3582 		/*
3583 		 * Some broken BCM chips always have the
3584 		 * BGE_STATFLAG_LINKSTATE_CHANGED bit set in the status word.
3585 		 * Work around the bug by reading the PHY link status directly.
3586 		 */
3587 		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ?
3588 		    BGE_STS_LINK : 0;
3589 
3590 		if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) {
3591 			mii_pollstat(mii);
3592 
3593 			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
3594 			    mii->mii_media_status & IFM_ACTIVE &&
3595 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
3596 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
3597 			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
3598 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
3599 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
3600 				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
3601 		}
3602 	}
3603 
3604 	/* Clear the attention */
3605 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
3606 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
3607 	    BGE_MACSTAT_LINK_CHANGED);
3608 }
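
/*
 * The MII link test above appears verbatim in both the BCM5700 and
 * the auto-polling paths of bge_link_upd().  A minimal sketch
 * (hypothetical, compiled out; not part of the driver) of how it
 * could be folded into a single helper:
 */
#ifdef notdef
static void
bge_mii_link_sync(struct bge_softc *sc)
{
	struct mii_data *mii = &sc->bge_mii;

	mii_pollstat(mii);

	if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
	    mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
		BGE_STS_SETBIT(sc, BGE_STS_LINK);
	else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
	    (!(mii->mii_media_status & IFM_ACTIVE) ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
		BGE_STS_CLRBIT(sc, BGE_STS_LINK);
}
#endif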
3609 
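/*
 * Power management hook: on resume, reinitialize the chip and restart
 * transmission if the interface was up at the time of suspend.
 */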
3610 void
3611 bge_power(int why, void *xsc)
3612 {
3613 	struct bge_softc *sc = (struct bge_softc *)xsc;
3614 	struct ifnet *ifp;
3615 
3616 	if (why == PWR_RESUME) {
3617 		ifp = &sc->arpcom.ac_if;
3618 		if (ifp->if_flags & IFF_UP) {
3619 			bge_init(sc);
3620 			bge_start(ifp);
3621 		}
3622 	}
3623 }
3624