/*	$OpenBSD: if_bge.c,v 1.262 2009/04/23 19:15:07 kettenis Exp $	*/

/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_bge.c,v 1.25 2002/11/14 23:54:49 sam Exp $
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, Jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#ifdef __sparc64__
#include <sparc64/autoconf.h>
#include <dev/ofw/openfirm.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bgereg.h>

const struct bge_revision * bge_lookup_rev(u_int32_t);
int bge_probe(struct device *, void *, void *);
void bge_attach(struct device *, struct device *, void *);

struct cfattach bge_ca = {
	sizeof(struct bge_softc), bge_probe, bge_attach
};

struct cfdriver bge_cd = {
	NULL, "bge", DV_IFNET
};

void bge_txeof(struct bge_softc *);
void bge_rxeof(struct bge_softc *);

void bge_tick(void *);
void bge_stats_update(struct bge_softc *);
void bge_stats_update_regs(struct bge_softc *);
int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *);
int bge_compact_dma_runt(struct mbuf *pkt);

int bge_intr(void *);
void bge_start(struct ifnet *);
int bge_ioctl(struct ifnet *, u_long, caddr_t);
void bge_init(void *);
void bge_power(int, void *);
void bge_stop_block(struct bge_softc *, bus_size_t, u_int32_t);
void bge_stop(struct bge_softc *);
void bge_watchdog(struct ifnet *);
void bge_shutdown(void *);
int bge_ifmedia_upd(struct ifnet *);
void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

u_int8_t bge_nvram_getbyte(struct bge_softc *, int, u_int8_t *);
int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *);
int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);

void bge_iff(struct bge_softc *);

int bge_newbuf_jumbo(struct bge_softc *, int);
int bge_init_rx_ring_jumbo(struct bge_softc *);
void bge_fill_rx_ring_jumbo(struct bge_softc *);
void bge_free_rx_ring_jumbo(struct bge_softc *);

int bge_newbuf(struct bge_softc *, int);
int bge_init_rx_ring_std(struct bge_softc *);
void bge_rxtick(void *);
void bge_fill_rx_ring_std(struct bge_softc *);
void bge_free_rx_ring_std(struct bge_softc *);

void bge_free_tx_ring(struct bge_softc *);
int bge_init_tx_ring(struct bge_softc *);

void bge_chipinit(struct bge_softc *);
int bge_blockinit(struct bge_softc *);

u_int32_t bge_readmem_ind(struct bge_softc *, int);
void bge_writemem_ind(struct bge_softc *, int, int);
void bge_writereg_ind(struct bge_softc *, int, int);
void bge_writembx(struct bge_softc *, int, int);

int bge_miibus_readreg(struct device *, int, int);
void bge_miibus_writereg(struct device *, int, int, int);
void bge_miibus_statchg(struct device *);

void bge_reset(struct bge_softc *);
void bge_link_upd(struct bge_softc *);

#ifdef BGE_DEBUG
#define DPRINTF(x)	do { if (bgedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (bgedebug >= (n)) printf x; } while (0)
int	bgedebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
const struct pci_matchid bge_devices[] = {
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700 },
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701 },

	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000 },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001 },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1003 },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100 },

	{ PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701 },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703_ALT },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M },

	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE4 },
	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE5 },
	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PP250_450_LAN },

	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9D21 },

	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996 }
};

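/*
 * Convenience macros for classifying chips by ASIC revision.  The
 * revision is extracted from sc->bge_chipid by BGE_ASICREV() (see
 * if_bgereg.h), so the feature tests below read as a single macro
 * check instead of a long chain of comparisons.
 */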
#define BGE_IS_5705_OR_BEYOND(sc)  \
	(BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705    || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750    || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780    || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714    || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752    || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755    || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787    || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)

#define BGE_IS_575X_PLUS(sc)  \
	(BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750    || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780    || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714    || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752    || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755    || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787    || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)

#define BGE_IS_5714_FAMILY(sc)  \
	(BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780    || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714)

#define BGE_IS_JUMBO_CAPABLE(sc)  \
	(BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700    || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701    || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703    || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)


static const struct bge_revision {
	u_int32_t		br_chipid;
	const char		*br_name;
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
	{ BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
	{ BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
	{ BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
	{ BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
	{ BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
	{ BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
	{ BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
	{ BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
	{ BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
	{ BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
	{ BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
	{ BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
	{ BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
	{ BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
	{ BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
	{ BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
	{ BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
	{ BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
	{ BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
	{ BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
	{ BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
	{ BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
	{ BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
	{ BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
	{ BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
	{ BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
	{ BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
	{ BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
	{ BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
	{ BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
	{ BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
	{ BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
	{ BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
	{ BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
	{ BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
	{ BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
	{ BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
	{ BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
	{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
	{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
	{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
	{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
	{ BGE_CHIPID_BCM5755_C0, "BCM5755 C0" },
	/* the 5754 and 5787 share the same ASIC ID */
	{ BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
	{ BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
	{ BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
	{ BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
	{ BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },

	{ 0, NULL }
};

/*
 * Some defaults for major revisions, so that newer steppings
 * that we don't know about have a shot at working.
 */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700, "unknown BCM5700" },
	{ BGE_ASICREV_BCM5701, "unknown BCM5701" },
	/* 5702 and 5703 share the same ASIC ID */
	{ BGE_ASICREV_BCM5703, "unknown BCM5703" },
	{ BGE_ASICREV_BCM5704, "unknown BCM5704" },
	{ BGE_ASICREV_BCM5705, "unknown BCM5705" },
	{ BGE_ASICREV_BCM5750, "unknown BCM5750" },
	{ BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5752, "unknown BCM5752" },
	{ BGE_ASICREV_BCM5780, "unknown BCM5780" },
	{ BGE_ASICREV_BCM5714, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5755, "unknown BCM5755" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
	{ BGE_ASICREV_BCM5906, "unknown BCM5906" },

	{ 0, NULL }
};

u_int32_t
bge_readmem_ind(struct bge_softc *sc, int off)
{
	struct pci_attach_args	*pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
	return (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA));
}

void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{
	struct pci_attach_args	*pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val);
}
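
#ifdef notdef
/*
 * Illustrative sketch only (never compiled): the memory-window pair
 * above can peek at NIC-internal RAM through PCI config space, e.g.
 * the first word of the status block, without going through the BAR0
 * mapping.  The helper name is made up for the example.
 */
static u_int32_t
bge_peek_status_word(struct bge_softc *sc)
{
	return (bge_readmem_ind(sc, BGE_STATUS_BLOCK));
}
#endif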

void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
	struct pci_attach_args	*pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val);
}

void
bge_writembx(struct bge_softc *sc, int off, int val)
{
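	/*
	 * The BCM5906 exposes its interrupt mailboxes at the low-priority
	 * mailbox offsets, so rebase the register offset before writing.
	 */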
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
}

u_int8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
{
	u_int32_t access, byte = 0;
	int i;

	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return (1);

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("%s: nvram read timed out\n", sc->bge_dev.dv_xname);
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

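	/*
	 * The controller returns a whole 32-bit word; shift the requested
	 * byte down into the low 8 bits.  For example, addr = 6 reads the
	 * word at offset 4 above, and (6 % 4) * 8 = 16 moves the wanted
	 * byte into place after the byte-swap.
	 */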
	*dest = (swap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return (0);
}

/*
 * Read a sequence of bytes from NVRAM.
 */

int
bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	u_int8_t byte = 0;

	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
		return (1);

	for (i = 0; i < cnt; i++) {
		err = bge_nvram_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
u_int8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
{
	int i;
	u_int32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for(i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

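	/* As in the NVRAM path, pick the requested byte out of the word. */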
	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return (0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	u_int8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}
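
#ifdef notdef
/*
 * Illustrative sketch only (never compiled): typical use of
 * bge_read_eeprom(), fetching the station address from
 * BGE_EE_MAC_OFFSET + 2 the way attach code does.  The helper name
 * is made up for the example.
 */
static int
bge_fetch_eaddr(struct bge_softc *sc, u_int8_t eaddr[ETHER_ADDR_LEN])
{
	return (bge_read_eeprom(sc, (caddr_t)eaddr,
	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN));
}
#endif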

int
bge_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	u_int32_t val, autopoll;
	int i;

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1. On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time. Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return (0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < 200; i++) {
		delay(1);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (i == 200) {
		printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return (0);

	return (val & 0xFFFF);
}

void
bge_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	u_int32_t autopoll;
	int i;

	/* Writing with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		DELAY(40);
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(10); /* 40 usec is supposed to be adequate */
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < 200; i++) {
		delay(1);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (i == 200) {
		printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
	}
}

void
bge_miibus_statchg(struct device *dev)
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	struct mii_data *mii = &sc->bge_mii;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) {
		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);

	/*
	 * 802.3x flow control
	 */
	if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
	else
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);

	if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
		BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
	else
		BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
}

/*
 * Initialize a standard receive ring descriptor.
 */
int
bge_newbuf(struct bge_softc *sc, int i)
{
	bus_dmamap_t		dmap = sc->bge_cdata.bge_rx_std_map[i];
	struct bge_rx_bd	*r = &sc->bge_rdata->bge_rx_std_ring[i];
	struct mbuf		*m;
	int			error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGETI(m, M_DONTWAIT, &sc->arpcom.ac_if, MCLBYTES);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
	    m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_dmatag, dmap, 0, dmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	sc->bge_cdata.bge_rx_std_chain[i] = m;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_POSTWRITE);

	BGE_HOSTADDR(r->bge_addr, dmap->dm_segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE);

	sc->bge_std_cnt++;

	return (0);
}

/*
 * Initialize a Jumbo receive ring descriptor.
 */
int
bge_newbuf_jumbo(struct bge_softc *sc, int i)
{
	bus_dmamap_t		dmap = sc->bge_cdata.bge_rx_jumbo_map[i];
	struct bge_ext_rx_bd	*r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	struct mbuf		*m;
	int			error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGETI(m, M_DONTWAIT, &sc->arpcom.ac_if, BGE_JLEN);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
	    m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_dmatag, dmap, 0, dmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
		i * sizeof (struct bge_ext_rx_bd),
	    sizeof (struct bge_ext_rx_bd),
	    BUS_DMASYNC_POSTWRITE);

	/*
	 * Fill in the extended RX buffer descriptor.
	 */
	r->bge_bd.bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
	r->bge_bd.bge_idx = i;
	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
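	/*
	 * A jumbo cluster may have been loaded into up to four DMA
	 * segments; fill in however many the load produced, falling
	 * through from the highest segment down to the first.
	 */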
	switch (dmap->dm_nsegs) {
	case 4:
		BGE_HOSTADDR(r->bge_addr3, dmap->dm_segs[3].ds_addr);
		r->bge_len3 = dmap->dm_segs[3].ds_len;
		/* FALLTHROUGH */
	case 3:
		BGE_HOSTADDR(r->bge_addr2, dmap->dm_segs[2].ds_addr);
		r->bge_len2 = dmap->dm_segs[2].ds_len;
		/* FALLTHROUGH */
	case 2:
		BGE_HOSTADDR(r->bge_addr1, dmap->dm_segs[1].ds_addr);
		r->bge_len1 = dmap->dm_segs[1].ds_len;
		/* FALLTHROUGH */
	case 1:
		BGE_HOSTADDR(r->bge_bd.bge_addr, dmap->dm_segs[0].ds_addr);
		r->bge_bd.bge_len = dmap->dm_segs[0].ds_len;
		break;
	default:
		panic("%s: %d segments\n", __func__, dmap->dm_nsegs);
	}

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
		i * sizeof (struct bge_ext_rx_bd),
	    sizeof (struct bge_ext_rx_bd),
	    BUS_DMASYNC_PREWRITE);

	sc->bge_jumbo_cnt++;

	return (0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i;

	if (ISSET(sc->bge_flags, BGE_RXRING_VALID))
		return (0);

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1, MCLBYTES, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
		    &sc->bge_cdata.bge_rx_std_map[i]) != 0) {
			printf("%s: unable to create dmamap for slot %d\n",
			    sc->bge_dev.dv_xname, i);
			goto uncreate;
		}
		bzero((char *)&sc->bge_rdata->bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_std = BGE_STD_RX_RING_CNT - 1;
	sc->bge_std_cnt = 0;
	bge_fill_rx_ring_std(sc);

	SET(sc->bge_flags, BGE_RXRING_VALID);

	return (0);

uncreate:
	/* Destroy the maps created so far, including slot 0. */
	while (--i >= 0) {
		bus_dmamap_destroy(sc->bge_dmatag,
		    sc->bge_cdata.bge_rx_std_map[i]);
	}
	return (1);
}

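/*
 * Timeout handler armed by the fill routines whenever a receive ring
 * could not be brought above the 8-buffer floor; retry the refill.
 */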
void
bge_rxtick(void *arg)
{
	struct bge_softc *sc = arg;
	int s;

	s = splnet();
	if (ISSET(sc->bge_flags, BGE_RXRING_VALID) &&
	    sc->bge_std_cnt <= 8)
		bge_fill_rx_ring_std(sc);
	if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID) &&
	    sc->bge_jumbo_cnt <= 8)
		bge_fill_rx_ring_jumbo(sc);
	splx(s);
}

void
bge_fill_rx_ring_std(struct bge_softc *sc)
{
	int i;
	int post = 0;

	i = sc->bge_std;
	while (sc->bge_std_cnt < BGE_STD_RX_RING_CNT) {
		BGE_INC(i, BGE_STD_RX_RING_CNT);

		if (bge_newbuf(sc, i) != 0)
			break;

		sc->bge_std = i;
		post = 1;
	}

	if (post)
		bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	/*
	 * The bge chip always needs more than 8 packets on the ring;
	 * if we can't do that now, then try again later.
	 */
	if (sc->bge_std_cnt <= 8)
		timeout_add(&sc->bge_rxtimeout, 1);
}

void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	bus_dmamap_t dmap;
	struct mbuf *m;
	int i;

	if (!ISSET(sc->bge_flags, BGE_RXRING_VALID))
		return;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		dmap = sc->bge_cdata.bge_rx_std_map[i];
		m = sc->bge_cdata.bge_rx_std_chain[i];
		if (m != NULL) {
			bus_dmamap_sync(sc->bge_dmatag, dmap, 0,
			    dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_dmatag, dmap);
			m_freem(m);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
		}
		bus_dmamap_destroy(sc->bge_dmatag, dmap);
		sc->bge_cdata.bge_rx_std_map[i] = NULL;
		bzero((char *)&sc->bge_rdata->bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}

	CLR(sc->bge_flags, BGE_RXRING_VALID);
}

int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	volatile struct bge_rcb *rcb;
	int i;

	if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID))
		return (0);

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bus_dmamap_create(sc->bge_dmatag, BGE_JLEN, 4, BGE_JLEN, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
		    &sc->bge_cdata.bge_rx_jumbo_map[i]) != 0) {
			printf("%s: unable to create dmamap for slot %d\n",
			    sc->bge_dev.dv_xname, i);
			goto uncreate;
		}
		bzero((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i],
		    sizeof(struct bge_ext_rx_bd));
	}

	sc->bge_jumbo = BGE_JUMBO_RX_RING_CNT - 1;
	sc->bge_jumbo_cnt = 0;
	bge_fill_rx_ring_jumbo(sc);

	SET(sc->bge_flags, BGE_JUMBO_RXRING_VALID);

	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags =
	    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	return (0);

uncreate:
	/* Destroy the maps created so far, including slot 0. */
	while (--i >= 0) {
		bus_dmamap_destroy(sc->bge_dmatag,
		    sc->bge_cdata.bge_rx_jumbo_map[i]);
	}
	return (1);
}

void
bge_fill_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;
	int post = 0;

	i = sc->bge_jumbo;
	while (sc->bge_jumbo_cnt < BGE_JUMBO_RX_RING_CNT) {
		BGE_INC(i, BGE_JUMBO_RX_RING_CNT);

		if (bge_newbuf_jumbo(sc, i) != 0)
			break;

		sc->bge_jumbo = i;
		post = 1;
	}

	if (post)
		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	/*
	 * The bge chip always needs more than 8 packets on the ring;
	 * if we can't do that now, then try again later.
	 */
	if (sc->bge_jumbo_cnt <= 8)
		timeout_add(&sc->bge_rxtimeout, 1);
}

void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	bus_dmamap_t dmap;
	struct mbuf *m;
	int i;

	if (!ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID))
		return;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		dmap = sc->bge_cdata.bge_rx_jumbo_map[i];
		m = sc->bge_cdata.bge_rx_jumbo_chain[i];
		if (m != NULL) {
			bus_dmamap_sync(sc->bge_dmatag, dmap, 0,
			    dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_dmatag, dmap);
			m_freem(m);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		bus_dmamap_destroy(sc->bge_dmatag, dmap);
		sc->bge_cdata.bge_rx_jumbo_map[i] = NULL;
		bzero((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i],
		    sizeof(struct bge_ext_rx_bd));
	}

	CLR(sc->bge_flags, BGE_JUMBO_RXRING_VALID);
}

void
bge_free_tx_ring(struct bge_softc *sc)
{
	int i;
	struct txdmamap_pool_entry *dma;

	if (!(sc->bge_flags & BGE_TXRING_VALID))
		return;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
			SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
					    link);
			sc->txdma[i] = 0;
		}
		bzero((char *)&sc->bge_rdata->bge_tx_ring[i],
		    sizeof(struct bge_tx_bd));
	}

	while ((dma = SLIST_FIRST(&sc->txdma_list))) {
		SLIST_REMOVE_HEAD(&sc->txdma_list, link);
		bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
		free(dma, M_DEVBUF);
	}

	sc->bge_flags &= ~BGE_TXRING_VALID;
}

int
bge_init_tx_ring(struct bge_softc *sc)
{
	int i;
	bus_dmamap_t dmamap;
	struct txdmamap_pool_entry *dma;

	if (sc->bge_flags & BGE_TXRING_VALID)
		return (0);

	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	sc->bge_tx_prodidx = 0;
	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
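	/* 5700 BX parts apparently want mailbox writes issued twice. */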
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* NIC-memory send ring not used; initialize to zero. */
	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	SLIST_INIT(&sc->txdma_list);
	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (bus_dmamap_create(sc->bge_dmatag, BGE_JLEN,
		    BGE_NTXSEG, BGE_JLEN, 0, BUS_DMA_NOWAIT,
		    &dmamap))
			return (ENOBUFS);
		if (dmamap == NULL)
			panic("dmamap NULL in bge_init_tx_ring");
		dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
		if (dma == NULL) {
			printf("%s: can't alloc txdmamap_pool_entry\n",
			    sc->bge_dev.dv_xname);
			bus_dmamap_destroy(sc->bge_dmatag, dmamap);
			return (ENOMEM);
		}
		dma->dmamap = dmamap;
		SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
	}

	sc->bge_flags |= BGE_TXRING_VALID;

	return (0);
}

void
bge_iff(struct bge_softc *sc)
{
	struct arpcom		*ac = &sc->arpcom;
	struct ifnet		*ifp = &ac->ac_if;
	struct ether_multi	*enm;
	struct ether_multistep  step;
	u_int8_t		hashes[16];
	u_int32_t		h, rxmode;

	/* First, zot all the existing filters. */
	rxmode = CSR_READ_4(sc, BGE_RX_MODE) & ~BGE_RXMODE_RX_PROMISC;
	ifp->if_flags &= ~IFF_ALLMULTI;
	memset(hashes, 0x00, sizeof(hashes));

	if (ifp->if_flags & IFF_PROMISC)
		rxmode |= BGE_RXMODE_RX_PROMISC;
	else if (ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		memset(hashes, 0xff, sizeof(hashes));
	} else {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
			setbit(hashes, h & 0x7F);
			ETHER_NEXT_MULTI(step, enm);
		}
	}

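	/*
	 * The chip applies the same hash on receive: the low 7 bits of
	 * the little-endian CRC32 of the destination address select one
	 * of the 128 bits in the 16-byte table written to the BGE_MAR0
	 * registers below.
	 */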
	bus_space_write_raw_region_4(sc->bge_btag, sc->bge_bhandle, BGE_MAR0,
	    hashes, sizeof(hashes));

	CSR_WRITE_4(sc, BGE_RX_MODE, rxmode);
}

/*
 * Do endian, PCI and DMA initialization.
 */
void
bge_chipinit(struct bge_softc *sc)
{
	struct pci_attach_args	*pa = &(sc->bge_pa);
	u_int32_t dma_rw_ctl;
	int i;

	/* Set endianness before we access any non-PCI registers. */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
	    BGE_INIT);

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);

	/*
	 * Set up the PCI DMA control register.
	 */
	dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
	    BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);

	if (sc->bge_flags & BGE_PCIE) {
		/* Read watermark not used, 128 bytes for write. */
		dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
	} else if (sc->bge_flags & BGE_PCIX) {
		/* PCI-X bus */
		if (BGE_IS_5714_FAMILY(sc)) {
			/* 256 bytes for read and write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);

			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
			else
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
		} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
			/* 1536 bytes for read, 384 bytes for write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
		} else {
			/* 384 bytes for read and write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
			    (0x0F);
		}

		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
			u_int32_t tmp;

			/* Set ONEDMA_ATONCE for hardware workaround. */
			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 6 || tmp == 7)
				dma_rw_ctl |=
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;

			/* Set PCI-X DMA write workaround. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
		}
	} else {
		/* Conventional PCI bus: 256 bytes for read and write. */
		dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
		    BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);

		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
			dma_rw_ctl |= 0x0F;
	}

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
		dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
		    BGE_PCIDMARWCTL_ASRT_ALL_BE;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);

	/*
	 * Set up general mode register.
	 */
#ifndef BGE_CHECKSUM
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
		    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
		    BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);
#else
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
		    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS);
#endif

	/*
	 * Disable memory write invalidate.  Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    PCI_COMMAND_INVALIDATE_ENABLE);

#ifdef __brokenalpha__
	/*
	 * Must ensure that we do not cross an 8K (bytes) boundary
	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx
	 */
	PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
	    BGE_PCI_READ_BNDRY_1024);
#endif

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
		DELAY(40);	/* XXX */

		/* Put PHY into ready state */
		BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
		CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
		DELAY(40);
	}
}

int
bge_blockinit(struct bge_softc *sc)
{
	volatile struct bge_rcb		*rcb;
	vaddr_t			rcb_addr;
	int			i;
	bge_hostaddr		taddr;
	u_int32_t		val;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Configure mbuf memory pool */
	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
		    BGE_BUFFPOOL_1);

		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
		else
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* Configure mbuf pool watermarks */
	/* new Broadcom docs strongly recommend these: */
	if (BGE_IS_5705_OR_BEYOND(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);

		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
		}
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	}

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	CSR_WRITE_4(sc, BGE_BMAN_MODE,
	    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

	/* Poll for buffer manager start indication */
	for (i = 0; i < 2000; i++) {
		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
			break;
		DELAY(10);
	}

	if (i == 2000) {
		printf("%s: buffer manager failed to start\n",
		    sc->bge_dev.dv_xname);
		return (ENXIO);
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < 2000; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == 2000) {
		printf("%s: flow-through queue init failed\n",
		    sc->bge_dev.dv_xname);
		return (ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
	BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
	if (BGE_IS_5705_OR_BEYOND(sc))
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	else
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(ETHER_MAX_DIX_LEN, 0);
	rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/*
	 * Initialize the Jumbo RX ring control block
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
		BGE_HOSTADDR(rcb->bge_hostaddr,
		    BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
		    BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;

		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR,
		    rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);

		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
		    offsetof(struct bge_ring_data, bge_info),
		    sizeof (struct bge_gib),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring, but since we try to avoid filling the entire
	 * ring we set these to the minimal value of 8.  This needs to
	 * be done on several of the supported chip revisions anyway,
	 * to work around HW bugs.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, 8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
		rcb_addr += sizeof(struct bge_rcb);
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
		    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	if (!(BGE_IS_5705_OR_BEYOND(sc)))
		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));

	/* Disable all unused RX return rings */
	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
			BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
		bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(u_int64_t))), 0);
		rcb_addr += sizeof(struct bge_rcb);
	}

	/* Initialize RX ring indexes */
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < 2000; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == 2000) {
		printf("%s: host coalescing engine failed to idle\n",
		    sc->bge_dev.dv_xname);
		return (ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);

	/* Set up address of statistics block */
	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0);
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
			    BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));

		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
	}

	/* Set up address of status block */
	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);

	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if (!(BGE_IS_5705_OR_BEYOND(sc)))
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
	    BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
	    BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
	    BGE_MACMODE_FRMHDR_DMA_ENB;

	if (sc->bge_flags & BGE_PHY_FIBER_TBI)
	    val |= BGE_PORTMODE_TBI;
	else if (sc->bge_flags & BGE_PHY_FIBER_MII)
	    val |= BGE_PORTMODE_GMII;
	else
	    val |= BGE_PORTMODE_MII;

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, val);

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	if (!(BGE_IS_5705_OR_BEYOND(sc)))
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;

	/* Enable host coalescing bug fix. */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787)
		val |= (1 << 29);

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);

	val = BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS;

	if (sc->bge_flags & BGE_PCIE)
		val |= BGE_RDMAMODE_FIFO_LONG_BURST;

	/* Turn on read DMA state machine */
	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if (!(BGE_IS_5705_OR_BEYOND(sc)))
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bge_intr() -> bge_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
	 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return (0);
}

const struct bge_revision *
bge_lookup_rev(u_int32_t chipid)
{
	const struct bge_revision *br;
1724 
1725 	for (br = bge_revisions; br->br_name != NULL; br++) {
1726 		if (br->br_chipid == chipid)
1727 			return (br);
1728 	}
1729 
1730 	for (br = bge_majorrevs; br->br_name != NULL; br++) {
1731 		if (br->br_chipid == BGE_ASICREV(chipid))
1732 			return (br);
1733 	}
1734 
1735 	return (NULL);
1736 }
1737 
1738 /*
1739  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1740  * against our list and return its name if we find a match. Note
1741  * that since the Broadcom controller contains VPD support, we
1742  * can get the device name string from the controller itself instead
1743  * of the compiled-in string. This is a little slow, but it guarantees
1744  * we'll always announce the right product name.
1745  */
1746 int
1747 bge_probe(struct device *parent, void *match, void *aux)
1748 {
1749 	return (pci_matchbyid(aux, bge_devices, nitems(bge_devices)));
1750 }
1751 
1752 void
1753 bge_attach(struct device *parent, struct device *self, void *aux)
1754 {
1755 	struct bge_softc	*sc = (struct bge_softc *)self;
1756 	struct pci_attach_args	*pa = aux;
1757 	pci_chipset_tag_t	pc = pa->pa_pc;
1758 	const struct bge_revision *br;
1759 	pcireg_t		pm_ctl, memtype, subid;
1760 	pci_intr_handle_t	ih;
1761 	const char		*intrstr = NULL;
1762 	bus_size_t		size;
1763 	bus_dma_segment_t	seg;
1764 	int			rseg, gotenaddr = 0;
1765 	u_int32_t		hwcfg = 0;
1766 	u_int32_t		mac_addr = 0;
1767 	u_int32_t		misccfg;
1768 	struct ifnet		*ifp;
1769 	caddr_t			kva;
1770 #ifdef __sparc64__
1771 	char			name[32];
1772 #endif
1773 
1774 	sc->bge_pa = *pa;
1775 
1776 	subid = pci_conf_read(pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
1777 
1778 	/*
1779 	 * Map control/status registers.
1780 	 */
1781 	DPRINTFN(5, ("Map control/status regs\n"));
1782 
1783 	DPRINTFN(5, ("pci_mapreg_map\n"));
1784 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0);
1785 	if (pci_mapreg_map(pa, BGE_PCI_BAR0, memtype, 0, &sc->bge_btag,
1786 	    &sc->bge_bhandle, NULL, &size, 0)) {
1787 		printf(": can't find mem space\n");
1788 		return;
1789 	}
1790 
1791 	DPRINTFN(5, ("pci_intr_map\n"));
1792 	if (pci_intr_map(pa, &ih)) {
1793 		printf(": couldn't map interrupt\n");
1794 		goto fail_1;
1795 	}
1796 
1797 	DPRINTFN(5, ("pci_intr_string\n"));
1798 	intrstr = pci_intr_string(pc, ih);
1799 
1800 	/*
1801 	 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
1802 	 * can clobber the chip's PCI config-space power control registers,
1803 	 * leaving the card in D3 powersave state.
1804 	 * We do not have memory-mapped registers in this state,
1805 	 * so force the device into D0 state before starting initialization.
1806 	 */
1807 	pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
1808 	pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
1809 	pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */
1810 	pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
1811 	DELAY(1000);	/* 27 usec is allegedly sufficient */
1812 
1813 	/*
1814 	 * Save ASIC rev.
1815 	 */
1816 
1817 	sc->bge_chipid =
1818             pci_conf_read(pc, pa->pa_tag, BGE_PCI_MISC_CTL) &
1819             BGE_PCIMISCCTL_ASICREV;
1820 
1821 	printf(", ");
1822 	br = bge_lookup_rev(sc->bge_chipid);
1823 	if (br == NULL)
1824 		printf("unknown ASIC (0x%04x)", sc->bge_chipid >> 16);
1825 	else
1826 		printf("%s (0x%04x)", br->br_name, sc->bge_chipid >> 16);
1827 
1828 	/*
1829 	 * PCI Express check.
1830 	 */
1831 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
1832 	    NULL, NULL) != 0)
1833 		sc->bge_flags |= BGE_PCIE;
1834 
1835 	/*
1836 	 * PCI-X check.
1837 	 */
1838 	if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
1839 	    BGE_PCISTATE_PCI_BUSMODE) == 0)
1840 		sc->bge_flags |= BGE_PCIX;
1841 
1842 	/*
1843 	 * SEEPROM check.
1844 	 */
1845 #ifdef __sparc64__
1846 	/*
1847 	 * Onboard interfaces on UltraSPARC systems generally don't
1848 	 * have a SEEPROM fitted.  These interfaces, and cards that
1849 	 * have FCode, are named "network" by the PROM, whereas cards
1850 	 * without FCode show up as "ethernet".  Since we don't really
1851 	 * need the information from the SEEPROM on cards that have
1852 	 * FCode it's fine to pretend they don't have one.
1853 	 */
1854 	if (OF_getprop(PCITAG_NODE(pa->pa_tag), "name", name,
1855 	    sizeof(name)) > 0 && strcmp(name, "network") == 0)
1856 		sc->bge_flags |= BGE_NO_EEPROM;
1857 #endif
1858 
1859 	/*
1860 	 * When using the BCM5701 in PCI-X mode, data corruption has
1861 	 * been observed in the first few bytes of some received packets.
1862 	 * Aligning the packet buffer in memory eliminates the corruption.
1863 	 * Unfortunately, this misaligns the packet payloads.  On platforms
1864 	 * which do not support unaligned accesses, we will realign the
1865 	 * payloads by copying the received packets.
1866 	 */
1867 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
1868 	    sc->bge_flags & BGE_PCIX)
1869 		sc->bge_flags |= BGE_RX_ALIGNBUG;
1870 
1871 	if (BGE_IS_JUMBO_CAPABLE(sc))
1872 		sc->bge_flags |= BGE_JUMBO_CAP;
1873 
1874 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
1875 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) &&
1876 	    PCI_VENDOR(subid) == DELL_VENDORID)
1877 		sc->bge_flags |= BGE_NO_3LED;
1878 
1879 	misccfg = CSR_READ_4(sc, BGE_MISC_CFG);
1880 	misccfg &= BGE_MISCCFG_BOARD_ID_MASK;
1881 
1882 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
1883 	    (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
1884 	     misccfg == BGE_MISCCFG_BOARD_ID_5788M))
1885 		sc->bge_flags |= BGE_IS_5788;
1886 
1887 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 &&
1888 	     (misccfg == 0x4000 || misccfg == 0x8000)) ||
1889 	    (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
1890 	     PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
1891 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 ||
1892 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
1893 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
1894 	    (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
1895 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F ||
1896 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F ||
1897 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
1898 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
1899 		sc->bge_flags |= BGE_10_100_ONLY;
1900 
1901 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
1902 	    (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
1903 	     (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
1904 	      sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
1905 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
1906 		sc->bge_flags |= BGE_NO_ETH_WIRE_SPEED;
1907 
1908 	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
1909 	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
1910 		sc->bge_flags |= BGE_PHY_CRC_BUG;
1911 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX ||
1912 	    BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX)
1913 		sc->bge_flags |= BGE_PHY_ADC_BUG;
1914 	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
1915 		sc->bge_flags |= BGE_PHY_5704_A0_BUG;
1916 
1917 	if (BGE_IS_5705_OR_BEYOND(sc)) {
1918 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
1919 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) {
1920 			if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 &&
1921 			    PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756)
1922 				sc->bge_flags |= BGE_PHY_JITTER_BUG;
1923 			if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M)
1924 				sc->bge_flags |= BGE_PHY_ADJUST_TRIM;
1925 		} else if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
1926 			sc->bge_flags |= BGE_PHY_BER_BUG;
1927 	}
1928 
1929 	/* Try to reset the chip. */
1930 	DPRINTFN(5, ("bge_reset\n"));
1931 	bge_reset(sc);
1932 
1933 	bge_chipinit(sc);
1934 
1935 #ifdef __sparc64__
1936 	if (!gotenaddr) {
1937 		if (OF_getprop(PCITAG_NODE(pa->pa_tag), "local-mac-address",
1938 		    sc->arpcom.ac_enaddr, ETHER_ADDR_LEN) == ETHER_ADDR_LEN)
1939 			gotenaddr = 1;
1940 	}
1941 #endif
1942 
1943 	/*
1944 	 * Get station address from the EEPROM.
1945 	 */
1946 	if (!gotenaddr) {
1947 		mac_addr = bge_readmem_ind(sc, 0x0c14);
1948 		if ((mac_addr >> 16) == 0x484b) {
1949 			sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
1950 			sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
1951 			mac_addr = bge_readmem_ind(sc, 0x0c18);
1952 			sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
1953 			sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
1954 			sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
1955 			sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
1956 			gotenaddr = 1;
1957 		}
1958 	}
1959 	if (!gotenaddr) {
1960 		int mac_offset = BGE_EE_MAC_OFFSET;
1961 
1962 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
1963 			mac_offset = BGE_EE_MAC_OFFSET_5906;
1964 
1965 		if (bge_read_nvram(sc, (caddr_t)&sc->arpcom.ac_enaddr,
1966 		    mac_offset + 2, ETHER_ADDR_LEN) == 0)
1967 			gotenaddr = 1;
1968 	}
1969 	if (!gotenaddr && (!(sc->bge_flags & BGE_NO_EEPROM))) {
1970 		if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
1971 		    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN) == 0)
1972 			gotenaddr = 1;
1973 	}
1974 
1975 #ifdef __sparc64__
1976 	if (!gotenaddr) {
1977 		extern void myetheraddr(u_char *);
1978 
1979 		myetheraddr(sc->arpcom.ac_enaddr);
1980 		gotenaddr = 1;
1981 	}
1982 #endif
1983 
1984 	if (!gotenaddr) {
1985 		printf(": failed to read station address\n");
1986 		goto fail_1;
1987 	}
1988 
1989 	/* Allocate the general information block and ring buffers. */
1990 	sc->bge_dmatag = pa->pa_dmat;
1991 	DPRINTFN(5, ("bus_dmamem_alloc\n"));
1992 	if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
1993 			     PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
1994 		printf(": can't alloc rx buffers\n");
1995 		goto fail_1;
1996 	}
1997 	DPRINTFN(5, ("bus_dmamem_map\n"));
1998 	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg,
1999 			   sizeof(struct bge_ring_data), &kva,
2000 			   BUS_DMA_NOWAIT)) {
2001 		printf(": can't map dma buffers (%zu bytes)\n",
2002 		    sizeof(struct bge_ring_data));
2003 		goto fail_2;
2004 	}
2005 	DPRINTFN(5, ("bus_dmamap_create\n"));
2006 	if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
2007 	    sizeof(struct bge_ring_data), 0,
2008 	    BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
2009 		printf(": can't create dma map\n");
2010 		goto fail_3;
2011 	}
2012 	DPRINTFN(5, ("bus_dmamap_load\n"));
2013 	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
2014 			    sizeof(struct bge_ring_data), NULL,
2015 			    BUS_DMA_NOWAIT)) {
2016 		goto fail_4;
2017 	}
2018 
2019 	DPRINTFN(5, ("bzero\n"));
2020 	sc->bge_rdata = (struct bge_ring_data *)kva;
2021 
2022 	bzero(sc->bge_rdata, sizeof(struct bge_ring_data));
2023 
2024 	/* Set default tuneable values. */
2025 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2026 	sc->bge_rx_coal_ticks = 150;
2027 	sc->bge_rx_max_coal_bds = 64;
2028 	sc->bge_tx_coal_ticks = 300;
2029 	sc->bge_tx_max_coal_bds = 400;
2030 
2031 	/* 5705 limits RX return ring to 512 entries. */
2032 	if (BGE_IS_5705_OR_BEYOND(sc))
2033 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2034 	else
2035 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2036 
2037 	/* Set up ifnet structure */
2038 	ifp = &sc->arpcom.ac_if;
2039 	ifp->if_softc = sc;
2040 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2041 	ifp->if_ioctl = bge_ioctl;
2042 	ifp->if_start = bge_start;
2043 	ifp->if_watchdog = bge_watchdog;
2044 	IFQ_SET_MAXLEN(&ifp->if_snd, BGE_TX_RING_CNT - 1);
2045 	IFQ_SET_READY(&ifp->if_snd);
2046 
2047 	/* The low watermark (lwm) must be greater than the replenish threshold. */
2048 	m_clsetwms(ifp, MCLBYTES, 17, BGE_STD_RX_RING_CNT);
2049 	m_clsetwms(ifp, BGE_JLEN, 17, BGE_JUMBO_RX_RING_CNT);
2050 
2051 	DPRINTFN(5, ("bcopy\n"));
2052 	bcopy(sc->bge_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
2053 
2054 	ifp->if_capabilities = IFCAP_VLAN_MTU;
2055 
2056 #if NVLAN > 0
2057 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
2058 #endif
2059 
2060 	if (BGE_IS_JUMBO_CAPABLE(sc))
2061 		ifp->if_hardmtu = BGE_JUMBO_MTU;
2062 
2063 	/*
2064 	 * Do MII setup.
2065 	 */
2066 	DPRINTFN(5, ("mii setup\n"));
2067 	sc->bge_mii.mii_ifp = ifp;
2068 	sc->bge_mii.mii_readreg = bge_miibus_readreg;
2069 	sc->bge_mii.mii_writereg = bge_miibus_writereg;
2070 	sc->bge_mii.mii_statchg = bge_miibus_statchg;
2071 
2072 	/*
2073 	 * Figure out what sort of media we have by checking the hardware
2074 	 * config word in the first 32K of internal NIC memory, or fall back to
2075 	 * examining the EEPROM if necessary.  Note: on some BCM5700 cards,
2076 	 * this value seems to be unset. If that's the case, we have to rely on
2077 	 * identifying the NIC by its PCI subsystem ID, as we do below for the
2078 	 * SysKonnect SK-9D41.
2079 	 */
2080 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2081 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2082 	else if (!(sc->bge_flags & BGE_NO_EEPROM)) {
2083 		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2084 		    sizeof(hwcfg))) {
2085 			printf(": failed to read media type\n");
2086 			goto fail_5;
2087 		}
2088 		hwcfg = ntohl(hwcfg);
2089 	}
2090 
2091 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
2092 	if (PCI_PRODUCT(subid) == SK_SUBSYSID_9D41 ||
2093 	    (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
2094 		if (BGE_IS_5714_FAMILY(sc))
2095 		    sc->bge_flags |= BGE_PHY_FIBER_MII;
2096 		else
2097 		    sc->bge_flags |= BGE_PHY_FIBER_TBI;
2098 	}
2099 
2100 	/* Hookup IRQ last. */
2101 	DPRINTFN(5, ("pci_intr_establish\n"));
2102 	sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc,
2103 	    sc->bge_dev.dv_xname);
2104 	if (sc->bge_intrhand == NULL) {
2105 		printf(": couldn't establish interrupt");
2106 		if (intrstr != NULL)
2107 			printf(" at %s", intrstr);
2108 		printf("\n");
2109 		goto fail_5;
2110 	}
2111 
2112 	/*
2113 	 * A Broadcom chip was detected. Inform the world.
2114 	 */
2115 	printf(": %s, address %s\n", intrstr,
2116 	    ether_sprintf(sc->arpcom.ac_enaddr));
2117 
2118 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
2119 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2120 		    bge_ifmedia_sts);
2121 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2122 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
2123 			    0, NULL);
2124 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2125 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2126 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2127 	} else {
2128 		int mii_flags;
2129 
2130 		/*
2131 		 * Do transceiver setup.
2132 		 */
2133 		ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
2134 			     bge_ifmedia_sts);
2135 		mii_flags = MIIF_DOPAUSE;
2136 		if (sc->bge_flags & BGE_PHY_FIBER_MII)
2137 			mii_flags |= MIIF_HAVEFIBER;
2138 		mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff,
2139 			   MII_PHY_ANY, MII_OFFSET_ANY, mii_flags);
2140 
2141 		if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) {
2142 			printf("%s: no PHY found!\n", sc->bge_dev.dv_xname);
2143 			ifmedia_add(&sc->bge_mii.mii_media,
2144 				    IFM_ETHER|IFM_MANUAL, 0, NULL);
2145 			ifmedia_set(&sc->bge_mii.mii_media,
2146 				    IFM_ETHER|IFM_MANUAL);
2147 		} else
2148 			ifmedia_set(&sc->bge_mii.mii_media,
2149 				    IFM_ETHER|IFM_AUTO);
2150 	}
2151 
2152 	/*
2153 	 * Call MI attach routine.
2154 	 */
2155 	if_attach(ifp);
2156 	ether_ifattach(ifp);
2157 
2158 	sc->sc_shutdownhook = shutdownhook_establish(bge_shutdown, sc);
2159 	sc->sc_powerhook = powerhook_establish(bge_power, sc);
2160 
2161 	timeout_set(&sc->bge_timeout, bge_tick, sc);
2162 	timeout_set(&sc->bge_rxtimeout, bge_rxtick, sc);
2163 	return;
2164 
2165 fail_5:
2166 	bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map);
2167 
2168 fail_4:
2169 	bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
2170 
2171 fail_3:
2172 	bus_dmamem_unmap(sc->bge_dmatag, kva,
2173 	    sizeof(struct bge_ring_data));
2174 
2175 fail_2:
2176 	bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2177 
2178 fail_1:
2179 	bus_space_unmap(sc->bge_btag, sc->bge_bhandle, size);
2180 }
2181 
2182 void
2183 bge_reset(struct bge_softc *sc)
2184 {
2185 	struct pci_attach_args *pa = &sc->bge_pa;
2186 	pcireg_t cachesize, command, pcistate, new_pcistate;
2187 	u_int32_t reset;
2188 	int i, val = 0;
2189 
2190 	/* Save some important PCI state. */
2191 	cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
2192 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD);
2193 	pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
2194 
2195 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
2196 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2197 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW);
2198 
2199 	/* Disable fastboot on controllers that support it. */
2200 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
2201 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2202 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787)
2203 		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0);
2204 
2205 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
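	/*
	 * The (65<<1) above apparently programs the core-clock delay
	 * timer field of BGE_MISC_CFG (the 66MHz 32-bit time constant
	 * that other drivers call BGE_32BITTIME_66MHZ); only the
	 * RESET_CORE_CLOCKS bit triggers the reset itself.
	 */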
2206 
2207 	if (sc->bge_flags & BGE_PCIE) {
2208 		if (CSR_READ_4(sc, 0x7e2c) == 0x60) {
2209 			/* PCI Express 1.0 system */
2210 			CSR_WRITE_4(sc, 0x7e2c, 0x20);
2211 		}
2212 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2213 			/*
2214 			 * Prevent PCI Express link training
2215 			 * during global reset.
2216 			 */
2217 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2218 			reset |= (1<<29);
2219 		}
2220 	}
2221 
2222 	/*
2223 	 * Set GPHY Power Down Override to leave GPHY
2224 	 * powered up in D0 uninitialized.
2225 	 */
2226 	if (BGE_IS_5705_OR_BEYOND(sc))
2227 		reset |= BGE_MISCCFG_KEEP_GPHY_POWER;
2228 
2229 	/* Issue global reset */
2230 	bge_writereg_ind(sc, BGE_MISC_CFG, reset);
2231 
2232 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2233 		u_int32_t status, ctrl;
2234 
2235 		status = CSR_READ_4(sc, BGE_VCPU_STATUS);
2236 		CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2237 		    status | BGE_VCPU_STATUS_DRV_RESET);
2238 		ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2239 		CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2240 		    ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2241 
2242 		sc->bge_flags |= BGE_NO_EEPROM;
2243 	}
2244 
2245 	DELAY(1000);
2246 
2247 	if (sc->bge_flags & BGE_PCIE) {
2248 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2249 			pcireg_t v;
2250 
2251 			DELAY(500000); /* wait for link training to complete */
2252 			v = pci_conf_read(pa->pa_pc, pa->pa_tag, 0xc4);
2253 			pci_conf_write(pa->pa_pc, pa->pa_tag, 0xc4, v | (1<<15));
2254 		}
2255 
2256 		/*
2257 		 * Set PCI Express max payload size to 128 bytes
2258 		 * and clear error status.
2259 		 */
2260 		pci_conf_write(pa->pa_pc, pa->pa_tag,
2261 		    BGE_PCI_CONF_DEV_CTRL, 0xf5000);
2262 	}
2263 
2264 	/* Reset some of the PCI state that got zapped by reset */
2265 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
2266 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2267 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW);
2268 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
2269 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);
2270 	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2271 
2272 	/* Enable memory arbiter. */
2273 	if (BGE_IS_5714_FAMILY(sc)) {
2274 		u_int32_t val;
2275 
2276 		val = CSR_READ_4(sc, BGE_MARB_MODE);
2277 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2278 	} else
2279 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2280 
2281  	/*
2282 	 * Prevent PXE restart: write a magic number to the
2283 	 * general communications memory at 0xB50.
2284 	 */
2285 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2286 
2287 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2288 		for (i = 0; i < BGE_TIMEOUT; i++) {
2289 			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2290 			if (val & BGE_VCPU_STATUS_INIT_DONE)
2291 				break;
2292 			DELAY(100);
2293 		}
2294 
2295 		if (i >= BGE_TIMEOUT)
2296 			printf("%s: reset timed out\n", sc->bge_dev.dv_xname);
2297 	} else {
2298 		/*
2299 		 * Poll until we see 1's complement of the magic number.
2300 		 * This indicates that the firmware initialization
2301 		 * is complete.  We expect this to fail if no SEEPROM
2302 		 * is fitted.
2303 		 */
2304 		for (i = 0; i < BGE_TIMEOUT; i++) {
2305 			val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2306 			if (val == ~BGE_MAGIC_NUMBER)
2307 				break;
2308 			DELAY(10);
2309 		}
2310 
2311 		if (i >= BGE_TIMEOUT && (!(sc->bge_flags & BGE_NO_EEPROM)))
2312 			printf("%s: firmware handshake timed out\n",
2313 			   sc->bge_dev.dv_xname);
2314 	}
2315 
2316 	/*
2317 	 * XXX Wait for the value of the PCISTATE register to
2318 	 * return to its original pre-reset state. This is a
2319 	 * fairly good indicator of reset completion. If we don't
2320 	 * wait for the reset to fully complete, trying to read
2321 	 * from the device's non-PCI registers may yield garbage
2322 	 * results.
2323 	 */
2324 	for (i = 0; i < BGE_TIMEOUT; i++) {
2325 		new_pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag,
2326 		    BGE_PCI_PCISTATE);
2327 		if ((new_pcistate & ~BGE_PCISTATE_RESERVED) ==
2328 		    (pcistate & ~BGE_PCISTATE_RESERVED))
2329 			break;
2330 		DELAY(10);
2331 	}
2332 	if ((new_pcistate & ~BGE_PCISTATE_RESERVED) !=
2333 	    (pcistate & ~BGE_PCISTATE_RESERVED)) {
2334 		DPRINTFN(5, ("%s: pcistate failed to revert\n",
2335 		    sc->bge_dev.dv_xname));
2336 	}
2337 
2338 	/* Fix up byte swapping */
2339 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
2340 
2341 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2342 
2343 	/*
2344 	 * The 5704 in TBI mode apparently needs some special
2345 	 * adjustment to ensure the SERDES drive level is set
2346 	 * to 1.2V.
2347 	 */
2348 	if (sc->bge_flags & BGE_PHY_FIBER_TBI &&
2349 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
2350 		u_int32_t serdescfg;
2351 
2352 		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2353 		serdescfg = (serdescfg & ~0xFFF) | 0x880;
2354 		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2355 	}
2356 
2357 	if (sc->bge_flags & BGE_PCIE &&
2358 	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2359 		u_int32_t v;
2360 
2361 		/* Enable PCI Express bug fix */
2362 		v = CSR_READ_4(sc, 0x7c00);
2363 		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2364 	}
2365 	DELAY(10000);
2366 }
2367 
2368 /*
2369  * Frame reception handling. This is called if there's a frame
2370  * on the receive return list.
2371  *
2372  * Note: we have to be able to handle two possibilities here:
2373  * 1) the frame is from the jumbo receive ring
2374  * 2) the frame is from the standard receive ring
2375  */
2376 
2377 void
2378 bge_rxeof(struct bge_softc *sc)
2379 {
2380 	struct ifnet *ifp;
2381 	int stdcnt = 0, jumbocnt = 0;
2382 	bus_dmamap_t dmamap;
2383 	bus_addr_t offset, toff;
2384 	bus_size_t tlen;
2385 	int tosync;
2386 
2387 	/* Nothing to do */
2388 	if (sc->bge_rx_saved_considx ==
2389 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx)
2390 		return;
2391 
2392 	ifp = &sc->arpcom.ac_if;
2393 
2394 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2395 	    offsetof(struct bge_ring_data, bge_status_block),
2396 	    sizeof (struct bge_status_block),
2397 	    BUS_DMASYNC_POSTREAD);
2398 
2399 	offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
2400 	tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx -
2401 	    sc->bge_rx_saved_considx;
2402 
2403 	toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd));
2404 
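	/*
	 * A negative tosync means the producer index wrapped past the
	 * end of the return ring, so the dirty descriptors span the
	 * wrap point: sync the tail segment here, then fall through
	 * and sync the leading segment from the ring base below.
	 */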
2405 	if (tosync < 0) {
2406 		tlen = (sc->bge_return_ring_cnt - sc->bge_rx_saved_considx) *
2407 		    sizeof (struct bge_rx_bd);
2408 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2409 		    toff, tlen, BUS_DMASYNC_POSTREAD);
2410 		tosync = -tosync;
2411 	}
2412 
2413 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2414 	    offset, tosync * sizeof (struct bge_rx_bd),
2415 	    BUS_DMASYNC_POSTREAD);
2416 
2417 	while(sc->bge_rx_saved_considx !=
2418 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
2419 		struct bge_rx_bd	*cur_rx;
2420 		u_int32_t		rxidx;
2421 		struct mbuf		*m = NULL;
2422 #ifdef BGE_CHECKSUM
2423 		u_int16_t		sumflags = 0;
2424 #endif
2425 
2426 		cur_rx = &sc->bge_rdata->
2427 			bge_rx_return_ring[sc->bge_rx_saved_considx];
2428 
2429 		rxidx = cur_rx->bge_idx;
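		/*
		 * BGE_INC advances the saved consumer index by one,
		 * wrapping back to zero at the ring size.
		 */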
2430 		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2431 
2432 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2433 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2434 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2435 
2436 			jumbocnt++;
2437 			sc->bge_jumbo_cnt--;
2438 
2439 			dmamap = sc->bge_cdata.bge_rx_jumbo_map[rxidx];
2440 			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
2441 			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2442 			bus_dmamap_unload(sc->bge_dmatag, dmamap);
2443 
2444 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2445 				m_freem(m);
2446 				ifp->if_ierrors++;
2447 				continue;
2448 			}
2449 		} else {
2450 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2451 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2452 
2453 			stdcnt++;
2454 			sc->bge_std_cnt--;
2455 
2456 			dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
2457 			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
2458 			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2459 			bus_dmamap_unload(sc->bge_dmatag, dmamap);
2460 
2461 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2462 				m_freem(m);
2463 				ifp->if_ierrors++;
2464 				continue;
2465 			}
2466 		}
2467 
2468 		ifp->if_ipackets++;
2469 #ifdef __STRICT_ALIGNMENT
2470 		/*
2471 		 * The i386 allows unaligned accesses, but for other
2472 		 * platforms we must make sure the payload is aligned.
2473 		 */
2474 		if (sc->bge_flags & BGE_RX_ALIGNBUG) {
2475 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2476 			    cur_rx->bge_len);
2477 			m->m_data += ETHER_ALIGN;
2478 		}
2479 #endif
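		/*
		 * bge_len includes the 4-byte Ethernet FCS, which the
		 * chip leaves on the frame; strip it so the stack sees
		 * only the payload length.
		 */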
2480 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2481 		m->m_pkthdr.rcvif = ifp;
2482 
2483 #if NVLAN > 0
2484 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2485 			m->m_pkthdr.ether_vtag = cur_rx->bge_vlan_tag;
2486 			m->m_flags |= M_VLANTAG;
2487 		}
2488 #endif
2489 
2490 #if NBPFILTER > 0
2491 		/*
2492 		 * Handle BPF listeners. Let the BPF user see the packet.
2493 		 */
2494 		if (ifp->if_bpf)
2495 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
2496 #endif
2497 
2498 #ifdef BGE_CHECKSUM
2499 		if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2500 			sumflags |= M_IPV4_CSUM_IN_OK;
2501 		else
2502 			sumflags |= M_IPV4_CSUM_IN_BAD;
2503 
2504 		if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2505 			m->m_pkthdr.csum_data =
2506 				cur_rx->bge_tcp_udp_csum;
2507 			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
2508 		}
2509 
2510 		m->m_pkthdr.csum_flags = sumflags;
2511 		sumflags = 0;
2512 #endif
2513 		ether_input_mbuf(ifp, m);
2514 	}
2515 
2516 	bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2517 	if (stdcnt)
2518 		bge_fill_rx_ring_std(sc);
2519 	if (jumbocnt)
2520 		bge_fill_rx_ring_jumbo(sc);
2521 }
2522 
2523 void
2524 bge_txeof(struct bge_softc *sc)
2525 {
2526 	struct bge_tx_bd *cur_tx = NULL;
2527 	struct ifnet *ifp;
2528 	struct txdmamap_pool_entry *dma;
2529 	bus_addr_t offset, toff;
2530 	bus_size_t tlen;
2531 	int tosync;
2532 	struct mbuf *m;
2533 
2534 	/* Nothing to do */
2535 	if (sc->bge_tx_saved_considx ==
2536 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx)
2537 		return;
2538 
2539 	ifp = &sc->arpcom.ac_if;
2540 
2541 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2542 	    offsetof(struct bge_ring_data, bge_status_block),
2543 	    sizeof (struct bge_status_block),
2544 	    BUS_DMASYNC_POSTREAD);
2545 
2546 	offset = offsetof(struct bge_ring_data, bge_tx_ring);
2547 	tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
2548 	    sc->bge_tx_saved_considx;
2549 
2550 	toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
2551 
2552 	if (tosync < 0) {
2553 		tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
2554 		    sizeof (struct bge_tx_bd);
2555 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2556 		    toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2557 		tosync = -tosync;
2558 	}
2559 
2560 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2561 	    offset, tosync * sizeof (struct bge_tx_bd),
2562 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2563 
2564 	/*
2565 	 * Go through our tx ring and free mbufs for those
2566 	 * frames that have been sent.
2567 	 */
2568 	while (sc->bge_tx_saved_considx !=
2569 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
2570 		u_int32_t		idx = 0;
2571 
2572 		idx = sc->bge_tx_saved_considx;
2573 		cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2574 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2575 			ifp->if_opackets++;
2576 		m = sc->bge_cdata.bge_tx_chain[idx];
2577 		if (m != NULL) {
2578 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2579 			dma = sc->txdma[idx];
2580 			bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
2581 			    dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2582 			bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
2583 			SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
2584 			sc->txdma[idx] = NULL;
2585 
2586 			m_freem(m);
2587 		}
2588 		sc->bge_txcnt--;
2589 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2590 	}
2591 
2592 	if (sc->bge_txcnt < BGE_TX_RING_CNT - 16)
2593 		ifp->if_flags &= ~IFF_OACTIVE;
2594 	if (sc->bge_txcnt == 0)
2595 		ifp->if_timer = 0;
2596 }
2597 
2598 int
2599 bge_intr(void *xsc)
2600 {
2601 	struct bge_softc *sc;
2602 	struct ifnet *ifp;
2603 	u_int32_t statusword;
2604 
2605 	sc = xsc;
2606 	ifp = &sc->arpcom.ac_if;
2607 
2608 	/* It is possible for the interrupt to arrive before
2609 	 * the status block has been updated.
2610 	 * Reading the PCI State register will confirm whether the
2611 	 * interrupt is ours and will flush the status block.
2612 	 */
2613 
2614 	/* read status word from status block */
2615 	statusword = sc->bge_rdata->bge_status_block.bge_status;
2616 
2617 	if ((statusword & BGE_STATFLAG_UPDATED) ||
2618 	    (!(CSR_READ_4(sc, BGE_PCI_PCISTATE) & BGE_PCISTATE_INTR_NOT_ACTIVE))) {
2619 
2620 		/* Ack interrupt and stop others from occurring. */
2621 		bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
2622 
2623 		/* clear status word */
2624 		sc->bge_rdata->bge_status_block.bge_status = 0;
2625 
2626 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2627 		    statusword & BGE_STATFLAG_LINKSTATE_CHANGED ||
2628 		    BGE_STS_BIT(sc, BGE_STS_LINK_EVT))
2629 			bge_link_upd(sc);
2630 
2631 		if (ifp->if_flags & IFF_RUNNING) {
2632 			/* Check RX return ring producer/consumer */
2633 			bge_rxeof(sc);
2634 
2635 			/* Check TX ring producer/consumer */
2636 			bge_txeof(sc);
2637 		}
2638 
2639 		/* Re-enable interrupts. */
2640 		bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
2641 
2642 		bge_start(ifp);
2643 
2644 		return (1);
2645 	} else
2646 		return (0);
2647 }
2648 
2649 void
2650 bge_tick(void *xsc)
2651 {
2652 	struct bge_softc *sc = xsc;
2653 	struct mii_data *mii = &sc->bge_mii;
2654 	int s;
2655 
2656 	s = splnet();
2657 
2658 	if (BGE_IS_5705_OR_BEYOND(sc))
2659 		bge_stats_update_regs(sc);
2660 	else
2661 		bge_stats_update(sc);
2662 
2663 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
2664 		/*
2665 		 * Since auto-polling can't be used in TBI mode, we poll the
2666 		 * link status manually.  Here we register a pending link
2667 		 * event and trigger an interrupt.
2668 		 */
2669 		BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
2670 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
2671 	} else {
2672 		/*
2673 		 * Do not touch PHY if we have link up. This could break
2674 		 * IPMI/ASF mode or produce extra input errors.
2675 		 * (extra input errors were reported for bcm5701 & bcm5704).
2676 		 */
2677 		if (!BGE_STS_BIT(sc, BGE_STS_LINK))
2678 			mii_tick(mii);
2679 	}
2680 
2681 	timeout_add_sec(&sc->bge_timeout, 1);
2682 
2683 	splx(s);
2684 }
2685 
2686 void
2687 bge_stats_update_regs(struct bge_softc *sc)
2688 {
2689 	struct ifnet *ifp = &sc->arpcom.ac_if;
2690 
2691 	ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
2692 	    offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
2693 
2694 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
2695 
2696 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
2697 
2698 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
2699 }
2700 
2701 void
2702 bge_stats_update(struct bge_softc *sc)
2703 {
2704 	struct ifnet *ifp = &sc->arpcom.ac_if;
2705 	bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2706 	u_int32_t cnt;
2707 
2708 #define READ_STAT(sc, stats, stat) \
2709 	  CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
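/*
 * For example, READ_STAT(sc, stats, ifInDiscards.bge_addr_lo) expands to
 * CSR_READ_4(sc, stats + offsetof(struct bge_stats, ifInDiscards.bge_addr_lo)),
 * a 32-bit read of that counter's low word through the chip's memory window.
 */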
2710 
2711 	cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
2712 	ifp->if_collisions += (u_int32_t)(cnt - sc->bge_tx_collisions);
2713 	sc->bge_tx_collisions = cnt;
2714 
2715 	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
2716 	ifp->if_ierrors += (u_int32_t)(cnt - sc->bge_rx_discards);
2717 	sc->bge_rx_discards = cnt;
2718 
2719 	cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo);
2720 	ifp->if_ierrors += (u_int32_t)(cnt - sc->bge_rx_inerrors);
2721 	sc->bge_rx_inerrors = cnt;
2722 
2723 	cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo);
2724 	ifp->if_ierrors += (u_int32_t)(cnt - sc->bge_rx_overruns);
2725 	sc->bge_rx_overruns = cnt;
2726 
2727 	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
2728 	ifp->if_oerrors += (u_int32_t)(cnt - sc->bge_tx_discards);
2729 	sc->bge_tx_discards = cnt;
2730 
2731 #undef READ_STAT
2732 }
2733 
2734 /*
2735  * Compact outbound packets to avoid bug with DMA segments less than 8 bytes.
2736  */
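/*
 * Strategy, in order of preference (as implemented below): copy a runt into
 * trailing space in the previous mbuf; pull enough bytes forward from the
 * next mbuf to reach eight bytes; or, for a runt at the end of the chain,
 * steal bytes from the tail of the previous mbuf into a freshly allocated
 * mbuf that replaces the runt.
 */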
2737 int
2738 bge_compact_dma_runt(struct mbuf *pkt)
2739 {
2740 	struct mbuf	*m, *prev, *n = NULL;
2741 	int 		totlen, prevlen, newprevlen;
2742 
2743 	prev = NULL;
2744 	totlen = 0;
2745 	prevlen = -1;
2746 
2747 	for (m = pkt; m != NULL; prev = m,m = m->m_next) {
2748 		int mlen = m->m_len;
2749 		int shortfall = 8 - mlen ;
2750 
2751 		totlen += mlen;
2752 		if (mlen == 0)
2753 			continue;
2754 		if (mlen >= 8)
2755 			continue;
2756 
2757 		/* If we get here, mbuf data is too small for DMA engine.
2758 		 * Try to fix by shuffling data to prev or next in chain.
2759 		 * If that fails, do a compacting deep-copy of the whole chain.
2760 		 */
2761 
2762 		/* Internal frag.  If it fits in prev, copy it there. */
2763 		if (prev && M_TRAILINGSPACE(prev) >= m->m_len) {
2764 			bcopy(m->m_data,
2765 			      prev->m_data+prev->m_len,
2766 			      mlen);
2767 			prev->m_len += mlen;
2768 			m->m_len = 0;
2769 			/* XXX stitch chain */
2770 			prev->m_next = m_free(m);
2771 			m = prev;
2772 			continue;
2773 		} else if (m->m_next != NULL &&
2774 			   M_TRAILINGSPACE(m) >= shortfall &&
2775 			   m->m_next->m_len >= (8 + shortfall)) {
2776 			/* m is writable and next has enough data; pull it up. */
2777 
2778 			bcopy(m->m_next->m_data,
2779 			      m->m_data+m->m_len,
2780 			      shortfall);
2781 			m->m_len += shortfall;
2782 			m->m_next->m_len -= shortfall;
2783 			m->m_next->m_data += shortfall;
2784 		} else if (m->m_next == NULL || 1) {
2785 			/* Got a runt at the very end of the packet.
2786 			 * Borrow data from the tail of the preceding mbuf and
2787 			 * update its length in-place. (The original data is still
2788 			 * valid, so we can do this even if prev is not writable.)
2789 			 */
2790 
2791 			/* if we'd make prev a runt, just move all of its data. */
2792 #ifdef DEBUG
2793 			KASSERT(prev != NULL /*, ("runt but null PREV")*/);
2794 			KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
2795 #endif
2796 			if ((prev->m_len - shortfall) < 8)
2797 				shortfall = prev->m_len;
2798 
2799 			newprevlen = prev->m_len - shortfall;
2800 
2801 			MGET(n, M_NOWAIT, MT_DATA);
2802 			if (n == NULL)
2803 				return (ENOBUFS);
2804 			KASSERT(m->m_len + shortfall < MLEN
2805 				/*,
2806 				  ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
2807 
2808 			/* first copy the data we're stealing from prev */
2809 			bcopy(prev->m_data + newprevlen, n->m_data, shortfall);
2810 
2811 			/* update prev->m_len accordingly */
2812 			prev->m_len -= shortfall;
2813 
2814 			/* copy data from runt m */
2815 			bcopy(m->m_data, n->m_data + shortfall, m->m_len);
2816 
2817 			/* n holds what we stole from prev, plus m */
2818 			n->m_len = shortfall + m->m_len;
2819 
2820 			/* stitch n into chain and free m */
2821 			n->m_next = m->m_next;
2822 			prev->m_next = n;
2823 			/* KASSERT(m->m_next == NULL); */
2824 			m->m_next = NULL;
2825 			m_free(m);
2826 			m = n;	/* for continuing loop */
2827 		}
2828 		prevlen = m->m_len;
2829 	}
2830 	return (0);
2831 }
2832 
2833 /*
2834  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2835  * pointers to descriptors.
2836  */
2837 int
2838 bge_encap(struct bge_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
2839 {
2840 	struct bge_tx_bd	*f = NULL;
2841 	u_int32_t		frag, cur;
2842 	u_int16_t		csum_flags = 0;
2843 	struct txdmamap_pool_entry *dma;
2844 	bus_dmamap_t dmamap;
2845 	int			i = 0;
2846 
2847 	cur = frag = *txidx;
2848 
2849 #ifdef BGE_CHECKSUM
2850 	if (m_head->m_pkthdr.csum_flags) {
2851 		if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
2852 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2853 		if (m_head->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT |
2854 					     M_UDPV4_CSUM_OUT))
2855 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2856 #ifdef fake
2857 		if (m_head->m_flags & M_LASTFRAG)
2858 			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2859 		else if (m_head->m_flags & M_FRAG)
2860 			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2861 #endif
2862 	}
2863 #endif
2864 	if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX))
2865 		goto doit;
2866 
2867 	/*
2868 	 * bcm5700 Revision B silicon cannot handle DMA descriptors with
2869 	 * less than eight bytes.  If we encounter a teeny mbuf
2870 	 * at the end of a chain, we can pad.  Otherwise, copy.
2871 	 */
2872 	if (bge_compact_dma_runt(m_head) != 0)
2873 		return (ENOBUFS);
2874 
2875 doit:
2876 	dma = SLIST_FIRST(&sc->txdma_list);
2877 	if (dma == NULL)
2878 		return (ENOBUFS);
2879 	dmamap = dma->dmamap;
2880 
2881 	/*
2882 	 * Start packing the mbufs in this chain into
2883 	 * the fragment pointers. Stop when we run out
2884 	 * of fragments or hit the end of the mbuf chain.
2885 	 */
2886 	if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
2887 	    BUS_DMA_NOWAIT))
2888 		return (ENOBUFS);
2889 
2890 	/*
2891 	 * Sanity check: avoid coming within 16 descriptors
2892 	 * of the end of the ring.
2893 	 */
2894 	if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16))
2895 		goto fail_unload;
2896 
2897 	for (i = 0; i < dmamap->dm_nsegs; i++) {
2898 		f = &sc->bge_rdata->bge_tx_ring[frag];
2899 		if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
2900 			break;
2901 		BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
2902 		f->bge_len = dmamap->dm_segs[i].ds_len;
2903 		f->bge_flags = csum_flags;
2904 		f->bge_vlan_tag = 0;
2905 #if NVLAN > 0
2906 		if (m_head->m_flags & M_VLANTAG) {
2907 			f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2908 			f->bge_vlan_tag = m_head->m_pkthdr.ether_vtag;
2909 		}
2910 #endif
2911 		cur = frag;
2912 		BGE_INC(frag, BGE_TX_RING_CNT);
2913 	}
2914 
2915 	if (i < dmamap->dm_nsegs)
2916 		goto fail_unload;
2917 
2918 	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
2919 	    BUS_DMASYNC_PREWRITE);
2920 
2921 	if (frag == sc->bge_tx_saved_considx)
2922 		goto fail_unload;
2923 
2924 	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
2925 	sc->bge_cdata.bge_tx_chain[cur] = m_head;
2926 	SLIST_REMOVE_HEAD(&sc->txdma_list, link);
2927 	sc->txdma[cur] = dma;
2928 	sc->bge_txcnt += dmamap->dm_nsegs;
2929 
2930 	*txidx = frag;
2931 
2932 	return (0);
2933 
2934 fail_unload:
2935 	bus_dmamap_unload(sc->bge_dmatag, dmamap);
2936 
2937 	return (ENOBUFS);
2938 }
2939 
2940 /*
2941  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2942  * to the mbuf data regions directly in the transmit descriptors.
2943  */
2944 void
2945 bge_start(struct ifnet *ifp)
2946 {
2947 	struct bge_softc *sc;
2948 	struct mbuf *m_head = NULL;
2949 	u_int32_t prodidx;
2950 	int pkts = 0;
2951 
2952 	sc = ifp->if_softc;
2953 
2954 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
2955 		return;
2956 	if (!BGE_STS_BIT(sc, BGE_STS_LINK))
2957 		return;
2958 	if (IFQ_IS_EMPTY(&ifp->if_snd))
2959 		return;
2960 
2961 	prodidx = sc->bge_tx_prodidx;
2962 
2963 	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
2964 		IFQ_POLL(&ifp->if_snd, m_head);
2965 		if (m_head == NULL)
2966 			break;
2967 
2968 		/*
2969 		 * Pack the data into the transmit ring. If we
2970 		 * don't have room, set the OACTIVE flag and wait
2971 		 * for the NIC to drain the ring.
2972 		 */
2973 		if (bge_encap(sc, m_head, &prodidx)) {
2974 			ifp->if_flags |= IFF_OACTIVE;
2975 			break;
2976 		}
2977 
2978 		/* now we are committed to transmit the packet */
2979 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
2980 		pkts++;
2981 
2982 #if NBPFILTER > 0
2983 		/*
2984 		 * If there's a BPF listener, bounce a copy of this frame
2985 		 * to him.
2986 		 */
2987 		if (ifp->if_bpf)
2988 			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
2989 #endif
2990 	}
2991 	if (pkts == 0)
2992 		return;
2993 
2994 	/* Transmit */
2995 	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
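	/*
	 * 5700 B-step parts reportedly require the producer index to
	 * be written twice (an errata workaround), hence the second
	 * conditional write below.
	 */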
2996 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
2997 		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2998 
2999 	sc->bge_tx_prodidx = prodidx;
3000 
3001 	/*
3002 	 * Set a timeout in case the chip goes out to lunch.
3003 	 */
3004 	ifp->if_timer = 5;
3005 }
3006 
3007 void
3008 bge_init(void *xsc)
3009 {
3010 	struct bge_softc *sc = xsc;
3011 	struct ifnet *ifp;
3012 	u_int16_t *m;
3013 	int s;
3014 
3015 	s = splnet();
3016 
3017 	ifp = &sc->arpcom.ac_if;
3018 
3019 	/* Cancel pending I/O and flush buffers. */
3020 	bge_stop(sc);
3021 	bge_reset(sc);
3022 	bge_chipinit(sc);
3023 
3024 	/*
3025 	 * Init the various state machines, ring
3026 	 * control blocks and firmware.
3027 	 */
3028 	if (bge_blockinit(sc)) {
3029 		printf("%s: initialization failure\n", sc->bge_dev.dv_xname);
3030 		splx(s);
3031 		return;
3032 	}
3033 
3034 	ifp = &sc->arpcom.ac_if;
3035 
3036 	/* Specify MRU. */
3037 	if (BGE_IS_JUMBO_CAPABLE(sc))
3038 		CSR_WRITE_4(sc, BGE_RX_MTU,
3039 			BGE_JUMBO_FRAMELEN + ETHER_VLAN_ENCAP_LEN);
3040 	else
3041 		CSR_WRITE_4(sc, BGE_RX_MTU,
3042 			ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
3043 
3044 	/* Load our MAC address. */
3045 	m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
3046 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3047 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
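	/*
	 * As written above, the first 16-bit word of the station
	 * address lands in BGE_MAC_ADDR1_LO and the remaining two
	 * words in BGE_MAC_ADDR1_HI, each converted to network byte
	 * order with htons().
	 */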
3048 
3049 	if (!(ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)) {
3050 		/* Disable hardware decapsulation of VLAN frames. */
3051 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
3052 	}
3053 
3054 	/* Program promiscuous mode and multicast filters. */
3055 	bge_iff(sc);
3056 
3057 	/* Init RX ring. */
3058 	bge_init_rx_ring_std(sc);
3059 
3060 	/*
3061 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3062 	 * memory to ensure that the chip has in fact read the first
3063 	 * entry of the ring.
3064 	 */
3065 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3066 		u_int32_t		v, i;
3067 		for (i = 0; i < 10; i++) {
3068 			DELAY(20);
3069 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3070 			if (v == (MCLBYTES - ETHER_ALIGN))
3071 				break;
3072 		}
3073 		if (i == 10)
3074 			printf("%s: 5705 A0 chip failed to load RX ring\n",
3075 			    sc->bge_dev.dv_xname);
3076 	}
3077 
3078 	/* Init Jumbo RX ring. */
3079 	if (BGE_IS_JUMBO_CAPABLE(sc))
3080 		bge_init_rx_ring_jumbo(sc);
3081 
3082 	/* Init our RX return ring index */
3083 	sc->bge_rx_saved_considx = 0;
3084 
3085 	/* Init our RX/TX stat counters. */
3086 	sc->bge_tx_collisions = 0;
3087 	sc->bge_rx_discards = 0;
3088 	sc->bge_rx_inerrors = 0;
3089 	sc->bge_rx_overruns = 0;
3090 	sc->bge_tx_discards = 0;
3091 
3092 	/* Init TX ring. */
3093 	bge_init_tx_ring(sc);
3094 
3095 	/* Turn on transmitter */
3096 	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3097 
3098 	/* Turn on receiver */
3099 	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3100 
3101 	CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3102 
3103 	/* Tell firmware we're alive. */
3104 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3105 
3106 	/* Enable host interrupts. */
3107 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3108 	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3109 	bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3110 
3111 	bge_ifmedia_upd(ifp);
3112 
3113 	ifp->if_flags |= IFF_RUNNING;
3114 	ifp->if_flags &= ~IFF_OACTIVE;
3115 
3116 	splx(s);
3117 
3118 	timeout_add_sec(&sc->bge_timeout, 1);
3119 }
3120 
3121 /*
3122  * Set media options.
3123  */
3124 int
3125 bge_ifmedia_upd(struct ifnet *ifp)
3126 {
3127 	struct bge_softc *sc = ifp->if_softc;
3128 	struct mii_data *mii = &sc->bge_mii;
3129 	struct ifmedia *ifm = &sc->bge_ifmedia;
3130 
3131 	/* If this is a 1000baseX NIC, enable the TBI port. */
3132 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3133 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3134 			return (EINVAL);
3135 		switch(IFM_SUBTYPE(ifm->ifm_media)) {
3136 		case IFM_AUTO:
3137 			/*
3138 			 * The BCM5704 ASIC appears to have a special
3139 			 * mechanism for programming the autoneg
3140 			 * advertisement registers in TBI mode.
3141 			 */
3142 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
3143 				u_int32_t sgdig;
3144 				sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
3145 				if (sgdig & BGE_SGDIGSTS_DONE) {
3146 					CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3147 					sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3148 					sgdig |= BGE_SGDIGCFG_AUTO |
3149 					    BGE_SGDIGCFG_PAUSE_CAP |
3150 					    BGE_SGDIGCFG_ASYM_PAUSE;
3151 					CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3152 					    sgdig | BGE_SGDIGCFG_SEND);
3153 					DELAY(5);
3154 					CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3155 				}
3156 			}
3157 			break;
3158 		case IFM_1000_SX:
3159 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3160 				BGE_CLRBIT(sc, BGE_MAC_MODE,
3161 				    BGE_MACMODE_HALF_DUPLEX);
3162 			} else {
3163 				BGE_SETBIT(sc, BGE_MAC_MODE,
3164 				    BGE_MACMODE_HALF_DUPLEX);
3165 			}
3166 			break;
3167 		default:
3168 			return (EINVAL);
3169 		}
3170 		/* XXX 802.3x flow control for 1000BASE-SX */
3171 		return (0);
3172 	}
3173 
3174 	BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
3175 	if (mii->mii_instance) {
3176 		struct mii_softc *miisc;
3177 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3178 			mii_phy_reset(miisc);
3179 	}
3180 	mii_mediachg(mii);
3181 
3182 	/*
3183 	 * Force an interrupt so that we will call bge_link_upd
3184 	 * if needed and clear any pending link state attention.
3185 	 * Without this we would not get any further interrupts for
3186 	 * link state changes and thus would never bring the link UP
3187 	 * or be able to send in bge_start; the only known way to
3188 	 * recover was to receive a packet and take an RX interrupt.
3189 	 */
3190 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
3191 	    sc->bge_flags & BGE_IS_5788)
3192 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3193 	else
3194 		BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3195 
3196 	return (0);
3197 }
3198 
3199 /*
3200  * Report current media status.
3201  */
3202 void
3203 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3204 {
3205 	struct bge_softc *sc = ifp->if_softc;
3206 	struct mii_data *mii = &sc->bge_mii;
3207 
3208 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3209 		ifmr->ifm_status = IFM_AVALID;
3210 		ifmr->ifm_active = IFM_ETHER;
3211 		if (CSR_READ_4(sc, BGE_MAC_STS) &
3212 		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
3213 			ifmr->ifm_status |= IFM_ACTIVE;
3214 		} else {
3215 			ifmr->ifm_active |= IFM_NONE;
3216 			return;
3217 		}
3218 		ifmr->ifm_active |= IFM_1000_SX;
3219 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3220 			ifmr->ifm_active |= IFM_HDX;
3221 		else
3222 			ifmr->ifm_active |= IFM_FDX;
3223 		return;
3224 	}
3225 
3226 	mii_pollstat(mii);
3227 	ifmr->ifm_status = mii->mii_media_status;
3228 	ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
3229 	    sc->bge_flowflags;
3230 }
3231 
3232 int
3233 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3234 {
3235 	struct bge_softc *sc = ifp->if_softc;
3236 	struct ifaddr *ifa = (struct ifaddr *) data;
3237 	struct ifreq *ifr = (struct ifreq *) data;
3238 	int s, error = 0;
3239 	struct mii_data *mii;
3240 
3241 	s = splnet();
3242 
3243 	switch(command) {
3244 	case SIOCSIFADDR:
3245 		ifp->if_flags |= IFF_UP;
3246 		if (!(ifp->if_flags & IFF_RUNNING))
3247 			bge_init(sc);
3248 #ifdef INET
3249 		if (ifa->ifa_addr->sa_family == AF_INET)
3250 			arp_ifinit(&sc->arpcom, ifa);
3251 #endif /* INET */
3252 		break;
3253 
3254 	case SIOCSIFFLAGS:
3255 		if (ifp->if_flags & IFF_UP) {
3256 			if (ifp->if_flags & IFF_RUNNING)
3257 				bge_iff(sc);
3258 			else
3259 				bge_init(sc);
3260 		} else {
3261 			if (ifp->if_flags & IFF_RUNNING)
3262 				bge_stop(sc);
3263 		}
3264 		sc->bge_if_flags = ifp->if_flags;
3265 		break;
3266 
3267 	case SIOCSIFMEDIA:
3268 		/* XXX Flow control is not supported for 1000BASE-SX */
3269 		if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3270 			ifr->ifr_media &= ~IFM_ETH_FMASK;
3271 			sc->bge_flowflags = 0;
3272 		}
3273 
3274 		/* Flow control requires full-duplex mode. */
3275 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3276 		    (ifr->ifr_media & IFM_FDX) == 0) {
3277 		    	ifr->ifr_media &= ~IFM_ETH_FMASK;
3278 		}
3279 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3280 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3281 				/* We can do both TXPAUSE and RXPAUSE. */
3282 				ifr->ifr_media |=
3283 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3284 			}
3285 			sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3286 		}
3287 		/* FALLTHROUGH */
3288 	case SIOCGIFMEDIA:
3289 		if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3290 			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
3291 			    command);
3292 		} else {
3293 			mii = &sc->bge_mii;
3294 			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
3295 			    command);
3296 		}
3297 		break;
3298 
3299 	default:
3300 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
3301 	}
3302 
3303 	if (error == ENETRESET) {
3304 		if (ifp->if_flags & IFF_RUNNING)
3305 			bge_iff(sc);
3306 		error = 0;
3307 	}
3308 
3309 	splx(s);
3310 	return (error);
3311 }
3312 
3313 void
3314 bge_watchdog(struct ifnet *ifp)
3315 {
3316 	struct bge_softc *sc;
3317 
3318 	sc = ifp->if_softc;
3319 
3320 	printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);
3321 
3322 	bge_init(sc);
3323 
3324 	ifp->if_oerrors++;
3325 }
3326 
3327 void
3328 bge_stop_block(struct bge_softc *sc, bus_size_t reg, u_int32_t bit)
3329 {
3330 	int i;
3331 
3332 	BGE_CLRBIT(sc, reg, bit);
3333 
3334 	for (i = 0; i < BGE_TIMEOUT; i++) {
3335 		if ((CSR_READ_4(sc, reg) & bit) == 0)
3336 			return;
3337 		delay(100);
3338 	}
3339 
3340 	DPRINTFN(5, ("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
3341 	    sc->bge_dev.dv_xname, (u_long) reg, bit));
3342 }
3343 
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
bge_stop(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmedia_entry *ifm;
	struct mii_data *mii;
	int mtmp, itmp;

	timeout_del(&sc->bge_timeout);
	timeout_del(&sc->bge_rxtimeout);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/*
	 * Disable all of the receiver blocks
	 */
	bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	if (!(BGE_IS_5705_OR_BEYOND(sc)))
		bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks
	 */
	bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	if (!(BGE_IS_5705_OR_BEYOND(sc)))
		bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	if (!(BGE_IS_5705_OR_BEYOND(sc)))
		bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

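	/* Pulse a reset through all of the flow-through queues (FTQs). */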
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
		bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
		bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}

	/* Disable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Tell firmware we're shutting down.
	 */
	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	if (BGE_IS_JUMBO_CAPABLE(sc))
		bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 */
	if (!(sc->bge_flags & BGE_PHY_FIBER_TBI)) {
		mii = &sc->bge_mii;
		itmp = ifp->if_flags;
		ifp->if_flags |= IFF_UP;
		ifm = mii->mii_media.ifm_cur;
		mtmp = ifm->ifm_media;
		ifm->ifm_media = IFM_ETHER|IFM_NONE;
		mii_mediachg(mii);
		ifm->ifm_media = mtmp;
		ifp->if_flags = itmp;
	}

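	/* Invalidate the saved TX ring consumer index. */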
	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	/* Clear MAC's link state (PHY may still have link UP). */
	BGE_STS_CLRBIT(sc, BGE_STS_LINK);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
void
bge_shutdown(void *xsc)
{
	struct bge_softc *sc = (struct bge_softc *)xsc;

	bge_stop(sc);
	bge_reset(sc);
}

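/*
 * Handle a link state change. Depending on the hardware, the change is
 * detected in one of three ways: through a MII interrupt on BCM5700
 * chips (whose status-block link bit is unreliable), through the PCS
 * sync bit in the MAC status register on TBI fiber cards, or through
 * the MI status register when MI auto-polling is in use.
 */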
void
bge_link_upd(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = &sc->bge_mii;
	u_int32_t status;
	int link;

	/* Clear 'pending link event' flag */
	BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT);

	/*
	 * Process link state changes.
	 * Grrr. The link status word in the status block does
	 * not work correctly on the BCM5700 rev AX and BX chips,
	 * according to all available information. Hence, we have
	 * to enable MII interrupts in order to properly obtain
	 * async link changes. Unfortunately, this also means that
	 * we have to read the MAC status register to detect link
	 * changes, thereby adding an additional register access to
	 * the interrupt handler.
	 */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_MI_INTERRUPT) {
			mii_pollstat(mii);

			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
				BGE_STS_SETBIT(sc, BGE_STS_LINK);
			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
				BGE_STS_CLRBIT(sc, BGE_STS_LINK);

			/* Clear the interrupt */
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
			bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR);
			bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR,
			    BRGPHY_INTRS);
		}
		return;
	}

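	/* TBI fiber: derive link state from the MAC's PCS sync status. */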
	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
			if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
				BGE_STS_SETBIT(sc, BGE_STS_LINK);
				if (BGE_ASICREV(sc->bge_chipid) ==
				    BGE_ASICREV_BCM5704)
					BGE_CLRBIT(sc, BGE_MAC_MODE,
					    BGE_MACMODE_TBI_SEND_CFGS);
				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
				status = CSR_READ_4(sc, BGE_MAC_MODE);
				ifp->if_link_state =
				    (status & BGE_MACMODE_HALF_DUPLEX) ?
				    LINK_STATE_HALF_DUPLEX :
				    LINK_STATE_FULL_DUPLEX;
				if_link_state_change(ifp);
				ifp->if_baudrate = IF_Gbps(1);
			}
		} else if (BGE_STS_BIT(sc, BGE_STS_LINK)) {
			BGE_STS_CLRBIT(sc, BGE_STS_LINK);
			ifp->if_link_state = LINK_STATE_DOWN;
			if_link_state_change(ifp);
			ifp->if_baudrate = 0;
		}
	/*
	 * Discard link events for MII/GMII cards if MI auto-polling is
	 * disabled. This should not happen since the MII callouts are
	 * locked now, but we keep this check for debugging.
	 */
	} else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) {
		/*
		 * Some broken BCM chips have the
		 * BGE_STATFLAG_LINKSTATE_CHANGED bit in the status word
		 * always set. Work around this bug by reading the PHY
		 * link status directly.
		 */
		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ?
		    BGE_STS_LINK : 0;

		if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) {
			mii_pollstat(mii);

			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
				BGE_STS_SETBIT(sc, BGE_STS_LINK);
			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
		}
	}

	/* Clear the attention */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
}

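/*
 * Power management hook. On resume, reinitialize the chip and restart
 * transmission if the interface was up before suspend.
 */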
void
bge_power(int why, void *xsc)
{
	struct bge_softc *sc = (struct bge_softc *)xsc;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			bge_init(xsc);
			bge_start(ifp);
		}
	}
}