1 /*	$OpenBSD: if_bge.c,v 1.286 2009/10/11 16:53:13 sthen Exp $	*/
2 
3 /*
4  * Copyright (c) 2001 Wind River Systems
5  * Copyright (c) 1997, 1998, 1999, 2001
6  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by Bill Paul.
19  * 4. Neither the name of the author nor the names of any co-contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
27  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33  * THE POSSIBILITY OF SUCH DAMAGE.
34  *
35  * $FreeBSD: if_bge.c,v 1.25 2002/11/14 23:54:49 sam Exp $
36  */
37 
38 /*
39  * Broadcom BCM57xx/BCM590x family ethernet driver for OpenBSD.
40  *
41  * Written by Bill Paul <wpaul@windriver.com>
42  * Senior Engineer, Wind River Systems
43  */
44 
45 /*
46  * The Broadcom BCM5700 is based on technology originally developed by
47  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
48  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
49  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
50  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, Jumbo
51  * frames, highly configurable RX filtering, and 16 RX and TX queues
52  * (which, along with RX filter rules, can be used for QOS applications).
53  * Other features, such as TCP segmentation, may be available as part
54  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
55  * firmware images can be stored in hardware and need not be compiled
56  * into the driver.
57  *
58  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
59  * function on a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
60  *
61  * The BCM5701 is a single-chip solution incorporating both the BCM5700
62  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
63  * does not support external SSRAM.
64  *
65  * Broadcom also produces a variation of the BCM5700 under the "Altima"
66  * brand name, which is functionally similar but lacks PCI-X support.
67  *
68  * Without external SSRAM, you can have at most 4 TX rings,
69  * and the use of the mini RX ring is disabled. This seems to imply
70  * that these features are simply not available on the BCM5701. As a
71  * result, this driver does not implement any support for the mini RX
72  * ring.
73  */
74 
75 #include "bpfilter.h"
76 #include "vlan.h"
77 
78 #include <sys/param.h>
79 #include <sys/systm.h>
80 #include <sys/sockio.h>
81 #include <sys/mbuf.h>
82 #include <sys/malloc.h>
83 #include <sys/kernel.h>
84 #include <sys/device.h>
85 #include <sys/timeout.h>
86 #include <sys/socket.h>
87 
88 #include <net/if.h>
89 #include <net/if_dl.h>
90 #include <net/if_media.h>
91 
92 #ifdef INET
93 #include <netinet/in.h>
94 #include <netinet/in_systm.h>
95 #include <netinet/in_var.h>
96 #include <netinet/ip.h>
97 #include <netinet/if_ether.h>
98 #endif
99 
100 #if NVLAN > 0
101 #include <net/if_types.h>
102 #include <net/if_vlan_var.h>
103 #endif
104 
105 #if NBPFILTER > 0
106 #include <net/bpf.h>
107 #endif
108 
109 #ifdef __sparc64__
110 #include <sparc64/autoconf.h>
111 #include <dev/ofw/openfirm.h>
112 #endif
113 
114 #include <dev/pci/pcireg.h>
115 #include <dev/pci/pcivar.h>
116 #include <dev/pci/pcidevs.h>
117 
118 #include <dev/mii/mii.h>
119 #include <dev/mii/miivar.h>
120 #include <dev/mii/miidevs.h>
121 #include <dev/mii/brgphyreg.h>
122 
123 #include <dev/pci/if_bgereg.h>
124 
125 #define ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
126 
127 const struct bge_revision * bge_lookup_rev(u_int32_t);
128 int bge_probe(struct device *, void *, void *);
129 void bge_attach(struct device *, struct device *, void *);
130 
131 struct cfattach bge_ca = {
132 	sizeof(struct bge_softc), bge_probe, bge_attach
133 };
134 
135 struct cfdriver bge_cd = {
136 	NULL, "bge", DV_IFNET
137 };
138 
139 void bge_txeof(struct bge_softc *);
140 void bge_rxeof(struct bge_softc *);
141 
142 void bge_tick(void *);
143 void bge_stats_update(struct bge_softc *);
144 void bge_stats_update_regs(struct bge_softc *);
145 int bge_cksum_pad(struct mbuf *);
146 int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *);
147 int bge_compact_dma_runt(struct mbuf *);
148 
149 int bge_intr(void *);
150 void bge_start(struct ifnet *);
151 int bge_ioctl(struct ifnet *, u_long, caddr_t);
152 void bge_init(void *);
153 void bge_power(int, void *);
154 void bge_stop_block(struct bge_softc *, bus_size_t, u_int32_t);
155 void bge_stop(struct bge_softc *);
156 void bge_watchdog(struct ifnet *);
157 void bge_shutdown(void *);
158 int bge_ifmedia_upd(struct ifnet *);
159 void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
160 
161 u_int8_t bge_nvram_getbyte(struct bge_softc *, int, u_int8_t *);
162 int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
163 u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *);
164 int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
165 
166 void bge_iff(struct bge_softc *);
167 
168 int bge_newbuf_jumbo(struct bge_softc *, int);
169 int bge_init_rx_ring_jumbo(struct bge_softc *);
170 void bge_fill_rx_ring_jumbo(struct bge_softc *);
171 void bge_free_rx_ring_jumbo(struct bge_softc *);
172 
173 int bge_newbuf(struct bge_softc *, int);
174 int bge_init_rx_ring_std(struct bge_softc *);
175 void bge_rxtick(void *);
176 void bge_fill_rx_ring_std(struct bge_softc *);
177 void bge_free_rx_ring_std(struct bge_softc *);
178 
179 void bge_free_tx_ring(struct bge_softc *);
180 int bge_init_tx_ring(struct bge_softc *);
181 
182 void bge_chipinit(struct bge_softc *);
183 int bge_blockinit(struct bge_softc *);
184 
185 u_int32_t bge_readmem_ind(struct bge_softc *, int);
186 void bge_writemem_ind(struct bge_softc *, int, int);
187 void bge_writereg_ind(struct bge_softc *, int, int);
188 void bge_writembx(struct bge_softc *, int, int);
189 
190 int bge_miibus_readreg(struct device *, int, int);
191 void bge_miibus_writereg(struct device *, int, int, int);
192 void bge_miibus_statchg(struct device *);
193 
194 void bge_reset(struct bge_softc *);
195 void bge_link_upd(struct bge_softc *);
196 
197 #ifdef BGE_DEBUG
198 #define DPRINTF(x)	do { if (bgedebug) printf x; } while (0)
199 #define DPRINTFN(n,x)	do { if (bgedebug >= (n)) printf x; } while (0)
200 int	bgedebug = 0;
201 #else
202 #define DPRINTF(x)
203 #define DPRINTFN(n,x)
204 #endif
205 
206 /*
207  * Various supported device vendors/types and their names. Note: the
208  * spec seems to indicate that the hardware still has Alteon's vendor
209  * ID burned into it, though it will always be overridden by the vendor
210  * ID in the EEPROM. Just to be safe, we cover all possibilities.
211  */
212 const struct pci_matchid bge_devices[] = {
213 	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700 },
214 	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701 },
215 
216 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000 },
217 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001 },
218 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1003 },
219 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100 },
220 
221 	{ PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701 },
222 
223 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700 },
224 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701 },
225 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702 },
226 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT },
227 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X },
228 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703 },
229 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703_ALT },
230 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X },
231 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C },
232 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S },
233 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT },
234 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705 },
235 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F },
236 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K },
237 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M },
238 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT },
239 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714 },
240 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S },
241 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715 },
242 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S },
243 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717C },
244 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717S },
245 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718C },
246 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718S },
247 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720 },
248 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721 },
249 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722 },
250 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5723 },
251 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750 },
252 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M },
253 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751 },
254 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F },
255 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M },
256 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752 },
257 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M },
258 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753 },
259 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F },
260 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M },
261 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754 },
262 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M },
263 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755 },
264 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M },
265 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756 },
266 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761 },
267 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761E },
268 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761S },
269 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761SE },
270 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5764 },
271 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780 },
272 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S },
273 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781 },
274 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782 },
275 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5784 },
276 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785F },
277 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785G },
278 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786 },
279 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787 },
280 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F },
281 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M },
282 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788 },
283 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789 },
284 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901 },
285 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2 },
286 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M },
287 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906 },
288 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M },
289 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57760 },
290 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57780 },
291 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57788 },
292 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57790 },
293 
294 	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE4 },
295 	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE5 },
296 	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PP250_450_LAN },
297 
298 	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9D21 },
299 
300 	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996 }
301 };
302 
303 #define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_5705_PLUS)
304 #define BGE_IS_5750_PLUS(sc)		((sc)->bge_flags & BGE_5750_PLUS)
305 #define BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGE_5755_PLUS)
306 #define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_5700_FAMILY)
307 #define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_5714_FAMILY)
308 #define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_JUMBO_CAPABLE)
309 
310 static const struct bge_revision {
311 	u_int32_t		br_chipid;
312 	const char		*br_name;
313 } bge_revisions[] = {
314 	{ BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
315 	{ BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
316 	{ BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
317 	{ BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
318 	{ BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
319 	{ BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
320 	{ BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
321 	{ BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
322 	{ BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
323 	{ BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
324 	{ BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
325 	{ BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
326 	/* the 5702 and 5703 share the same ASIC ID */
327 	{ BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" },
328 	{ BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" },
329 	{ BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" },
330 	{ BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" },
331 	{ BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" },
332 	{ BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
333 	{ BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
334 	{ BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
335 	{ BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
336 	{ BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
337 	{ BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
338 	{ BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
339 	{ BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
340 	{ BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
341 	{ BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
342 	{ BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
343 	{ BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
344 	{ BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
345 	{ BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
346 	{ BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
347 	{ BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
348 	{ BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
349 	{ BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
350 	{ BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
351 	{ BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
352 	{ BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
353 	{ BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
354 	{ BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
355 	{ BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
356 	{ BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
357 	{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
358 	{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
359 	{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
360 	{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
361 	{ BGE_CHIPID_BCM5755_C0, "BCM5755 C0" },
362 	{ BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
363 	{ BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
364 	{ BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
365 	{ BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
366 	/* the 5754 and 5787 share the same ASIC ID */
367 	{ BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
368 	{ BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
369 	{ BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
370 	{ BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
371 	{ BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
372 	{ BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
373 	{ BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
374 
375 	{ 0, NULL }
376 };
377 
378 /*
379  * Some defaults for major revisions, so that newer steppings
380  * that we don't know about have a shot at working.
381  */
382 static const struct bge_revision bge_majorrevs[] = {
383 	{ BGE_ASICREV_BCM5700, "unknown BCM5700" },
384 	{ BGE_ASICREV_BCM5701, "unknown BCM5701" },
385 	/* 5702 and 5703 share the same ASIC ID */
386 	{ BGE_ASICREV_BCM5703, "unknown BCM5703" },
387 	{ BGE_ASICREV_BCM5704, "unknown BCM5704" },
388 	{ BGE_ASICREV_BCM5705, "unknown BCM5705" },
389 	{ BGE_ASICREV_BCM5750, "unknown BCM5750" },
390 	{ BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
391 	{ BGE_ASICREV_BCM5752, "unknown BCM5752" },
392 	{ BGE_ASICREV_BCM5780, "unknown BCM5780" },
393 	{ BGE_ASICREV_BCM5714, "unknown BCM5714" },
394 	{ BGE_ASICREV_BCM5755, "unknown BCM5755" },
395 	{ BGE_ASICREV_BCM5761, "unknown BCM5761" },
396 	{ BGE_ASICREV_BCM5784, "unknown BCM5784" },
397 	{ BGE_ASICREV_BCM5785, "unknown BCM5785" },
398 	/* 5754 and 5787 share the same ASIC ID */
399 	{ BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
400 	{ BGE_ASICREV_BCM5906, "unknown BCM5906" },
401 	{ BGE_ASICREV_BCM57780, "unknown BCM57780" },
402 	{ BGE_ASICREV_BCM5717, "unknown BCM5717" },
403 
404 	{ 0, NULL }
405 };
406 
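/*
 * Indirect access to NIC-internal memory: the chip maps a window of
 * its RAM into PCI config space.  BGE_PCI_MEMWIN_BASEADDR selects the
 * target offset and BGE_PCI_MEMWIN_DATA moves the data; the helpers
 * below wrap that two-step access.
 */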
407 u_int32_t
408 bge_readmem_ind(struct bge_softc *sc, int off)
409 {
410 	struct pci_attach_args	*pa = &(sc->bge_pa);
411 
412 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
413 	return (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA));
414 }
415 
416 void
417 bge_writemem_ind(struct bge_softc *sc, int off, int val)
418 {
419 	struct pci_attach_args	*pa = &(sc->bge_pa);
420 
421 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
422 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val);
423 }
424 
425 void
426 bge_writereg_ind(struct bge_softc *sc, int off, int val)
427 {
428 	struct pci_attach_args	*pa = &(sc->bge_pa);
429 
430 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
431 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val);
432 }
433 
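/*
 * Write a mailbox register.  The BCM5906 keeps its mailboxes in a
 * different window (the BGE_LPMBX_* range), so remap the offset for
 * that chip before writing.
 */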
434 void
435 bge_writembx(struct bge_softc *sc, int off, int val)
436 {
437 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
438 		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
439 
440 	CSR_WRITE_4(sc, off, val);
441 }
442 
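/*
 * Read one byte of NVRAM (BCM5906 only).  Takes the software
 * arbitration lock and enables NVRAM access for the duration of the
 * read, then releases both.
 */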
443 u_int8_t
444 bge_nvram_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
445 {
446 	u_int32_t access, byte = 0;
447 	int i;
448 
449 	/* Lock. */
450 	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
451 	for (i = 0; i < 8000; i++) {
452 		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
453 			break;
454 		DELAY(20);
455 	}
456 	if (i == 8000)
457 		return (1);
458 
459 	/* Enable access. */
460 	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
461 	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
462 
463 	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
464 	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
465 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
466 		DELAY(10);
467 		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
468 			DELAY(10);
469 			break;
470 		}
471 	}
472 
473 	if (i == BGE_TIMEOUT * 10) {
474 		printf("%s: nvram read timed out\n", sc->bge_dev.dv_xname);
475 		return (1);
476 	}
477 
478 	/* Get result. */
479 	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
480 
481 	*dest = (swap32(byte) >> ((addr % 4) * 8)) & 0xFF;
482 
483 	/* Disable access. */
484 	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
485 
486 	/* Unlock. */
487 	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
488 	CSR_READ_4(sc, BGE_NVRAM_SWARB);
489 
490 	return (0);
491 }
492 
493 /*
494  * Read a sequence of bytes from NVRAM.
495  */
496 
497 int
498 bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
499 {
500 	int err = 0, i;
501 	u_int8_t byte = 0;
502 
503 	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
504 		return (1);
505 
506 	for (i = 0; i < cnt; i++) {
507 		err = bge_nvram_getbyte(sc, off + i, &byte);
508 		if (err)
509 			break;
510 		*(dest + i) = byte;
511 	}
512 
513 	return (err ? 1 : 0);
514 }
515 
516 /*
517  * Read a byte of data stored in the EEPROM at address 'addr.' The
518  * BCM570x supports both the traditional bitbang interface and an
519  * auto access interface for reading the EEPROM. We use the auto
520  * access method.
521  */
522 u_int8_t
523 bge_eeprom_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
524 {
525 	int i;
526 	u_int32_t byte = 0;
527 
528 	/*
529 	 * Enable use of auto EEPROM access so we can avoid
530 	 * having to use the bitbang method.
531 	 */
532 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
533 
534 	/* Reset the EEPROM, load the clock period. */
535 	CSR_WRITE_4(sc, BGE_EE_ADDR,
536 	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
537 	DELAY(20);
538 
539 	/* Issue the read EEPROM command. */
540 	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
541 
542 	/* Wait for completion */
543 	for(i = 0; i < BGE_TIMEOUT * 10; i++) {
544 		DELAY(10);
545 		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
546 			break;
547 	}
548 
549 	if (i == BGE_TIMEOUT * 10) {
550 		printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
551 		return (1);
552 	}
553 
554 	/* Get result. */
555 	byte = CSR_READ_4(sc, BGE_EE_DATA);
556 
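	/*
	 * The chip hands back a full 32-bit word; shift the byte we
	 * asked for down into the low 8 bits (e.g. for addr = 6 the
	 * byte sits in bits 23:16 of the word).
	 */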
557 	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;
558 
559 	return (0);
560 }
561 
562 /*
563  * Read a sequence of bytes from the EEPROM.
564  */
565 int
566 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
567 {
568 	int err = 0, i;
569 	u_int8_t byte = 0;
570 
571 	for (i = 0; i < cnt; i++) {
572 		err = bge_eeprom_getbyte(sc, off + i, &byte);
573 		if (err)
574 			break;
575 		*(dest + i) = byte;
576 	}
577 
578 	return (err ? 1 : 0);
579 }
580 
581 int
582 bge_miibus_readreg(struct device *dev, int phy, int reg)
583 {
584 	struct bge_softc *sc = (struct bge_softc *)dev;
585 	u_int32_t val, autopoll;
586 	int i;
587 
588 	/*
589 	 * Broadcom's own driver always assumes the internal
590 	 * PHY is at GMII address 1. On some chips, the PHY responds
591 	 * to accesses at all addresses, which could cause us to
592  * bogusly attach the PHY 32 times at probe time. Always
593  * restricting the lookup to address 1 is simpler than
594  * trying to figure out which chip revisions should be
595 	 * special-cased.
596 	 */
597 	if (phy != 1)
598 		return (0);
599 
600 	/* Reading with autopolling on may trigger PCI errors */
601 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
602 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
603 		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
604 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
605 		DELAY(40);
606 	}
607 
608 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
609 	    BGE_MIPHY(phy)|BGE_MIREG(reg));
610 
611 	for (i = 0; i < 200; i++) {
612 		delay(1);
613 		val = CSR_READ_4(sc, BGE_MI_COMM);
614 		if (!(val & BGE_MICOMM_BUSY))
615 			break;
616 		delay(10);
617 	}
618 
619 	if (i == 200) {
620 		printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
621 		val = 0;
622 		goto done;
623 	}
624 
625 	val = CSR_READ_4(sc, BGE_MI_COMM);
626 
627 done:
628 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
629 		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
630 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
631 		DELAY(40);
632 	}
633 
634 	if (val & BGE_MICOMM_READFAIL)
635 		return (0);
636 
637 	return (val & 0xFFFF);
638 }
639 
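/*
 * Write a PHY register through the MI communication interface,
 * pausing autopolling around the access just as in the read path.
 */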
640 void
641 bge_miibus_writereg(struct device *dev, int phy, int reg, int val)
642 {
643 	struct bge_softc *sc = (struct bge_softc *)dev;
644 	u_int32_t autopoll;
645 	int i;
646 
647 	/* Writing with autopolling on may trigger PCI errors */
648 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
649 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
650 		DELAY(40);
651 		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
652 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
653 		DELAY(10); /* 40 usec is supposed to be adequate */
654 	}
655 
656 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
657 	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
658 
659 	for (i = 0; i < 200; i++) {
660 		delay(1);
661 		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
662 			break;
663 		delay(10);
664 	}
665 
666 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
667 		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
668 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
669 		DELAY(40);
670 	}
671 
672 	if (i == 200) {
673 		printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
674 	}
675 }
676 
677 void
678 bge_miibus_statchg(struct device *dev)
679 {
680 	struct bge_softc *sc = (struct bge_softc *)dev;
681 	struct mii_data *mii = &sc->bge_mii;
682 
683 	/*
684 	 * Get flow control negotiation result.
685 	 */
686 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
687 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) {
688 		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
689 		mii->mii_media_active &= ~IFM_ETH_FMASK;
690 	}
691 
692 	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
693 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
694 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
695 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
696 	else
697 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
698 
699 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
700 		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
701 	else
702 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
703 
704 	/*
705 	 * 802.3x flow control
706 	 */
707 	if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
708 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
709 	else
710 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
711 
712 	if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
713 		BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
714 	else
715 		BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
716 }
717 
718 /*
719  * Initialize a standard receive ring descriptor.
720  */
721 int
722 bge_newbuf(struct bge_softc *sc, int i)
723 {
724 	bus_dmamap_t		dmap = sc->bge_cdata.bge_rx_std_map[i];
725 	struct bge_rx_bd	*r = &sc->bge_rdata->bge_rx_std_ring[i];
726 	struct mbuf		*m;
727 	int			error;
728 
729 	m = MCLGETI(NULL, M_DONTWAIT, &sc->arpcom.ac_if, MCLBYTES);
730 	if (!m)
731 		return (ENOBUFS);
732 	m->m_len = m->m_pkthdr.len = MCLBYTES;
733 	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
734 	    m_adj(m, ETHER_ALIGN);
735 
736 	error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmap, m,
737 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
738 	if (error) {
739 		m_freem(m);
740 		return (ENOBUFS);
741 	}
742 
743 	bus_dmamap_sync(sc->bge_dmatag, dmap, 0, dmap->dm_mapsize,
744 	    BUS_DMASYNC_PREREAD);
745 	sc->bge_cdata.bge_rx_std_chain[i] = m;
746 
747 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
748 	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
749 		i * sizeof (struct bge_rx_bd),
750 	    sizeof (struct bge_rx_bd),
751 	    BUS_DMASYNC_POSTWRITE);
752 
753 	BGE_HOSTADDR(r->bge_addr, dmap->dm_segs[0].ds_addr);
754 	r->bge_flags = BGE_RXBDFLAG_END;
755 	r->bge_len = m->m_len;
756 	r->bge_idx = i;
757 
758 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
759 	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
760 		i * sizeof (struct bge_rx_bd),
761 	    sizeof (struct bge_rx_bd),
762 	    BUS_DMASYNC_PREWRITE);
763 
764 	sc->bge_std_cnt++;
765 
766 	return (0);
767 }
768 
769 /*
770  * Initialize a Jumbo receive ring descriptor.
771  */
772 int
773 bge_newbuf_jumbo(struct bge_softc *sc, int i)
774 {
775 	bus_dmamap_t		dmap = sc->bge_cdata.bge_rx_jumbo_map[i];
776 	struct bge_ext_rx_bd	*r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
777 	struct mbuf		*m;
778 	int			error;
779 
780 	m = MCLGETI(NULL, M_DONTWAIT, &sc->arpcom.ac_if, BGE_JLEN);
781 	if (!m)
782 		return (ENOBUFS);
783 	m->m_len = m->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
784 	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
785 	    m_adj(m, ETHER_ALIGN);
786 
787 	error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmap, m,
788 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
789 	if (error) {
790 		m_freem(m);
791 		return (ENOBUFS);
792 	}
793 
794 	bus_dmamap_sync(sc->bge_dmatag, dmap, 0, dmap->dm_mapsize,
795 	    BUS_DMASYNC_PREREAD);
796 	sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
797 
798 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
799 	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
800 		i * sizeof (struct bge_ext_rx_bd),
801 	    sizeof (struct bge_ext_rx_bd),
802 	    BUS_DMASYNC_POSTWRITE);
803 
804 	/*
805 	 * Fill in the extended RX buffer descriptor (up to 4 DMA segments).
806 	 */
807 	r->bge_bd.bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
808 	r->bge_bd.bge_idx = i;
809 	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
810 	switch (dmap->dm_nsegs) {
811 	case 4:
812 		BGE_HOSTADDR(r->bge_addr3, dmap->dm_segs[3].ds_addr);
813 		r->bge_len3 = dmap->dm_segs[3].ds_len;
814 		/* FALLTHROUGH */
815 	case 3:
816 		BGE_HOSTADDR(r->bge_addr2, dmap->dm_segs[2].ds_addr);
817 		r->bge_len2 = dmap->dm_segs[2].ds_len;
818 		/* FALLTHROUGH */
819 	case 2:
820 		BGE_HOSTADDR(r->bge_addr1, dmap->dm_segs[1].ds_addr);
821 		r->bge_len1 = dmap->dm_segs[1].ds_len;
822 		/* FALLTHROUGH */
823 	case 1:
824 		BGE_HOSTADDR(r->bge_bd.bge_addr, dmap->dm_segs[0].ds_addr);
825 		r->bge_bd.bge_len = dmap->dm_segs[0].ds_len;
826 		break;
827 	default:
828 		panic("%s: %d segments", __func__, dmap->dm_nsegs);
829 	}
830 
831 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
832 	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
833 		i * sizeof (struct bge_ext_rx_bd),
834 	    sizeof (struct bge_ext_rx_bd),
835 	    BUS_DMASYNC_PREWRITE);
836 
837 	sc->bge_jumbo_cnt++;
838 
839 	return (0);
840 }
841 
842 /*
843  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
844  * that's 1MB of memory, which is a lot. For now, we fill only the first
845  * 256 ring entries and hope that our CPU is fast enough to keep up with
846  * the NIC.
847  */
848 int
849 bge_init_rx_ring_std(struct bge_softc *sc)
850 {
851 	int i;
852 
853 	if (ISSET(sc->bge_flags, BGE_RXRING_VALID))
854 		return (0);
855 
856 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
857 		if (bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1, MCLBYTES, 0,
858 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
859 		    &sc->bge_cdata.bge_rx_std_map[i]) != 0) {
860 			printf("%s: unable to create dmamap for slot %d\n",
861 			    sc->bge_dev.dv_xname, i);
862 			goto uncreate;
863 		}
864 		bzero((char *)&sc->bge_rdata->bge_rx_std_ring[i],
865 		    sizeof(struct bge_rx_bd));
866 	}
867 
868 	sc->bge_std = BGE_STD_RX_RING_CNT - 1;
869 	sc->bge_std_cnt = 0;
870 	bge_fill_rx_ring_std(sc);
871 
872 	SET(sc->bge_flags, BGE_RXRING_VALID);
873 
874 	return (0);
875 
876 uncreate:
877 	while (--i >= 0) {
878 		bus_dmamap_destroy(sc->bge_dmatag,
879 		    sc->bge_cdata.bge_rx_std_map[i]);
880 	}
881 	return (1);
882 }
883 
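/*
 * Timeout handler: if an earlier fill could not leave the chip the
 * 8+ buffers it needs to make progress, try topping the rings up
 * again.
 */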
884 void
885 bge_rxtick(void *arg)
886 {
887 	struct bge_softc *sc = arg;
888 	int s;
889 
890 	s = splnet();
891 	if (ISSET(sc->bge_flags, BGE_RXRING_VALID) &&
892 	    sc->bge_std_cnt <= 8)
893 		bge_fill_rx_ring_std(sc);
894 	if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID) &&
895 	    sc->bge_jumbo_cnt <= 8)
896 		bge_fill_rx_ring_jumbo(sc);
897 	splx(s);
898 }
899 
900 void
901 bge_fill_rx_ring_std(struct bge_softc *sc)
902 {
903 	int i;
904 	int post = 0;
905 
906 	i = sc->bge_std;
907 	while (sc->bge_std_cnt < BGE_STD_RX_RING_CNT) {
908 		BGE_INC(i, BGE_STD_RX_RING_CNT);
909 
910 		if (bge_newbuf(sc, i) != 0)
911 			break;
912 
913 		sc->bge_std = i;
914 		post = 1;
915 	}
916 
917 	if (post)
918 		bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
919 
920 	/*
921 	 * The bge chip always needs more than 8 packets on the ring. If we
922 	 * can't do that now, then try again later.
923 	 */
924 	if (sc->bge_std_cnt <= 8)
925 		timeout_add(&sc->bge_rxtimeout, 1);
926 }
927 
928 void
929 bge_free_rx_ring_std(struct bge_softc *sc)
930 {
931 	bus_dmamap_t dmap;
932 	struct mbuf *m;
933 	int i;
934 
935 	if (!ISSET(sc->bge_flags, BGE_RXRING_VALID))
936 		return;
937 
938 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
939 		dmap = sc->bge_cdata.bge_rx_std_map[i];
940 		m = sc->bge_cdata.bge_rx_std_chain[i];
941 		if (m != NULL) {
942 			bus_dmamap_sync(sc->bge_dmatag, dmap, 0,
943 			    dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
944 			bus_dmamap_unload(sc->bge_dmatag, dmap);
945 			m_freem(m);
946 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
947 		}
948 		bus_dmamap_destroy(sc->bge_dmatag, dmap);
949 		sc->bge_cdata.bge_rx_std_map[i] = NULL;
950 		bzero((char *)&sc->bge_rdata->bge_rx_std_ring[i],
951 		    sizeof(struct bge_rx_bd));
952 	}
953 
954 	CLR(sc->bge_flags, BGE_RXRING_VALID);
955 }
956 
957 int
958 bge_init_rx_ring_jumbo(struct bge_softc *sc)
959 {
960 	volatile struct bge_rcb *rcb;
961 	int i;
962 
963 	if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID))
964 		return (0);
965 
966 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
967 		if (bus_dmamap_create(sc->bge_dmatag, BGE_JLEN, 4, BGE_JLEN, 0,
968 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
969 		    &sc->bge_cdata.bge_rx_jumbo_map[i]) != 0) {
970 			printf("%s: unable to create dmamap for slot %d\n",
971 			    sc->bge_dev.dv_xname, i);
972 			goto uncreate;
973 		}
974 		bzero((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i],
975 		    sizeof(struct bge_ext_rx_bd));
976 	}
977 
978 	sc->bge_jumbo = BGE_JUMBO_RX_RING_CNT - 1;
979 	sc->bge_jumbo_cnt = 0;
980 	bge_fill_rx_ring_jumbo(sc);
981 
982 	SET(sc->bge_flags, BGE_JUMBO_RXRING_VALID);
983 
984 	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
985 	rcb->bge_maxlen_flags =
986 	    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
987 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
988 
989 	return (0);
990 
991 uncreate:
992 	while (--i >= 0) {
993 		bus_dmamap_destroy(sc->bge_dmatag,
994 		    sc->bge_cdata.bge_rx_jumbo_map[i]);
995 	}
996 	return (1);
997 }
998 
999 void
1000 bge_fill_rx_ring_jumbo(struct bge_softc *sc)
1001 {
1002 	int i;
1003 	int post = 0;
1004 
1005 	i = sc->bge_jumbo;
1006 	while (sc->bge_jumbo_cnt < BGE_JUMBO_RX_RING_CNT) {
1007 		BGE_INC(i, BGE_JUMBO_RX_RING_CNT);
1008 
1009 		if (bge_newbuf_jumbo(sc, i) != 0)
1010 			break;
1011 
1012 		sc->bge_jumbo = i;
1013 		post = 1;
1014 	}
1015 
1016 	if (post)
1017 		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1018 
1019 	/*
1020 	 * The bge chip always needs more than 8 packets on the ring. If we
1021 	 * can't do that now, then try again later.
1022 	 */
1023 	if (sc->bge_jumbo_cnt <= 8)
1024 		timeout_add(&sc->bge_rxtimeout, 1);
1025 }
1026 
1027 void
1028 bge_free_rx_ring_jumbo(struct bge_softc *sc)
1029 {
1030 	bus_dmamap_t dmap;
1031 	struct mbuf *m;
1032 	int i;
1033 
1034 	if (!ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID))
1035 		return;
1036 
1037 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1038 		dmap = sc->bge_cdata.bge_rx_jumbo_map[i];
1039 		m = sc->bge_cdata.bge_rx_jumbo_chain[i];
1040 		if (m != NULL) {
1041 			bus_dmamap_sync(sc->bge_dmatag, dmap, 0,
1042 			    dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1043 			bus_dmamap_unload(sc->bge_dmatag, dmap);
1044 			m_freem(m);
1045 			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1046 		}
1047 		bus_dmamap_destroy(sc->bge_dmatag, dmap);
1048 		sc->bge_cdata.bge_rx_jumbo_map[i] = NULL;
1049 		bzero((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i],
1050 		    sizeof(struct bge_ext_rx_bd));
1051 	}
1052 
1053 	CLR(sc->bge_flags, BGE_JUMBO_RXRING_VALID);
1054 }
1055 
1056 void
1057 bge_free_tx_ring(struct bge_softc *sc)
1058 {
1059 	int i;
1060 	struct txdmamap_pool_entry *dma;
1061 
1062 	if (!(sc->bge_flags & BGE_TXRING_VALID))
1063 		return;
1064 
1065 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1066 		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1067 			m_freem(sc->bge_cdata.bge_tx_chain[i]);
1068 			sc->bge_cdata.bge_tx_chain[i] = NULL;
1069 			SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
1070 					    link);
1071 			sc->txdma[i] = 0;
1072 		}
1073 		bzero((char *)&sc->bge_rdata->bge_tx_ring[i],
1074 		    sizeof(struct bge_tx_bd));
1075 	}
1076 
1077 	while ((dma = SLIST_FIRST(&sc->txdma_list))) {
1078 		SLIST_REMOVE_HEAD(&sc->txdma_list, link);
1079 		bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
1080 		free(dma, M_DEVBUF);
1081 	}
1082 
1083 	sc->bge_flags &= ~BGE_TXRING_VALID;
1084 }
1085 
1086 int
1087 bge_init_tx_ring(struct bge_softc *sc)
1088 {
1089 	int i;
1090 	bus_dmamap_t dmamap;
1091 	struct txdmamap_pool_entry *dma;
1092 
1093 	if (sc->bge_flags & BGE_TXRING_VALID)
1094 		return (0);
1095 
1096 	sc->bge_txcnt = 0;
1097 	sc->bge_tx_saved_considx = 0;
1098 
1099 	/* Initialize transmit producer index for host-memory send ring. */
1100 	sc->bge_tx_prodidx = 0;
1101 	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
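	/* The 5700 BX parts apparently want mailbox writes issued twice. */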
1102 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
1103 		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1104 
1105 	/* NIC-memory send ring not used; initialize to zero. */
1106 	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1107 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
1108 		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1109 
1110 	SLIST_INIT(&sc->txdma_list);
1111 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1112 		if (bus_dmamap_create(sc->bge_dmatag, BGE_JLEN,
1113 		    BGE_NTXSEG, BGE_JLEN, 0, BUS_DMA_NOWAIT,
1114 		    &dmamap))
1115 			return (ENOBUFS);
1116 		if (dmamap == NULL)
1117 			panic("dmamap NULL in bge_init_tx_ring");
1118 		dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
1119 		if (dma == NULL) {
1120 			printf("%s: can't alloc txdmamap_pool_entry\n",
1121 			    sc->bge_dev.dv_xname);
1122 			bus_dmamap_destroy(sc->bge_dmatag, dmamap);
1123 			return (ENOMEM);
1124 		}
1125 		dma->dmamap = dmamap;
1126 		SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
1127 	}
1128 
1129 	sc->bge_flags |= BGE_TXRING_VALID;
1130 
1131 	return (0);
1132 }
1133 
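/*
 * Program the RX filter: promiscuous mode, a catch-all filter for
 * ALLMULTI, or a 128-bit multicast hash table indexed by the low 7
 * bits of the little-endian CRC of each address.
 */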
1134 void
1135 bge_iff(struct bge_softc *sc)
1136 {
1137 	struct arpcom		*ac = &sc->arpcom;
1138 	struct ifnet		*ifp = &ac->ac_if;
1139 	struct ether_multi	*enm;
1140 	struct ether_multistep  step;
1141 	u_int8_t		hashes[16];
1142 	u_int32_t		h, rxmode;
1143 
1144 	/* First, zot all the existing filters. */
1145 	rxmode = CSR_READ_4(sc, BGE_RX_MODE) & ~BGE_RXMODE_RX_PROMISC;
1146 	ifp->if_flags &= ~IFF_ALLMULTI;
1147 	memset(hashes, 0x00, sizeof(hashes));
1148 
1149 	if (ifp->if_flags & IFF_PROMISC) {
1150 		ifp->if_flags |= IFF_ALLMULTI;
1151 		rxmode |= BGE_RXMODE_RX_PROMISC;
1152 	} else if (ac->ac_multirangecnt > 0) {
1153 		ifp->if_flags |= IFF_ALLMULTI;
1154 		memset(hashes, 0xff, sizeof(hashes));
1155 	} else {
1156 		ETHER_FIRST_MULTI(step, ac, enm);
1157 		while (enm != NULL) {
1158 			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
1159 
1160 			setbit(hashes, h & 0x7F);
1161 
1162 			ETHER_NEXT_MULTI(step, enm);
1163 		}
1164 	}
1165 
1166 	bus_space_write_raw_region_4(sc->bge_btag, sc->bge_bhandle, BGE_MAR0,
1167 	    hashes, sizeof(hashes));
1168 	CSR_WRITE_4(sc, BGE_RX_MODE, rxmode);
1169 }
1170 
1171 /*
1172  * Do endian, PCI and DMA initialization.
1173  */
1174 void
1175 bge_chipinit(struct bge_softc *sc)
1176 {
1177 	struct pci_attach_args	*pa = &(sc->bge_pa);
1178 	u_int32_t dma_rw_ctl;
1179 	int i;
1180 
1181 	/* Set endianness before we access any non-PCI registers. */
1182 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
1183 	    BGE_INIT);
1184 
1185 	/* Clear the MAC control register */
1186 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1187 
1188 	/*
1189 	 * Clear the MAC statistics block in the NIC's
1190 	 * internal memory.
1191 	 */
1192 	for (i = BGE_STATS_BLOCK;
1193 	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1194 		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
1195 
1196 	for (i = BGE_STATUS_BLOCK;
1197 	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1198 		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
1199 
1200 	/*
1201 	 * Set up the PCI DMA control register.
1202 	 */
1203 	dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1204 	    BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1205 
1206 	if (sc->bge_flags & BGE_PCIE) {
1207 		/* Read watermark not used, 128 bytes for write. */
1208 		dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1209 	} else if (sc->bge_flags & BGE_PCIX) {
1210 		/* PCI-X bus */
1211 		if (BGE_IS_5714_FAMILY(sc)) {
1212 			/* 256 bytes for read and write. */
1213 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1214 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1215 
1216 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
1217 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1218 			else
1219 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1220 		} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
1221 			/* 1536 bytes for read, 384 bytes for write. */
1222 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1223 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1224 		} else {
1225 			/* 384 bytes for read and write. */
1226 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1227 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1228 			    (0x0F);
1229 		}
1230 
1231 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
1232 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
1233 			u_int32_t tmp;
1234 
1235 			/* Set ONEDMA_ATONCE for hardware workaround. */
1236 			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1237 			if (tmp == 6 || tmp == 7)
1238 				dma_rw_ctl |=
1239 				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1240 
1241 			/* Set PCI-X DMA write workaround. */
1242 			dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1243 		}
1244 	} else {
1245 		/* Conventional PCI bus: 256 bytes for read and write. */
1246 		dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1247 		    BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1248 
1249 		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
1250 		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
1251 			dma_rw_ctl |= 0x0F;
1252 	}
1253 
1254 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
1255 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
1256 		dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1257 		    BGE_PCIDMARWCTL_ASRT_ALL_BE;
1258 
1259 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
1260 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
1261 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1262 
1263 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);
1264 
1265 	/*
1266 	 * Set up general mode register.
1267 	 */
1268 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1269 		    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1270 		    BGE_MODECTL_TX_NO_PHDR_CSUM);
1271 
1272 	/*
1273 	 * The BCM5701 B5 has a bug causing data corruption when using
1274 	 * 64-bit DMA reads, which can be terminated early and then
1275 	 * completed later as 32-bit accesses, in combination with
1276 	 * certain bridges.
1277 	 */
1278 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
1279 	    sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1280 		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1281 
1282 	/*
1283 	 * Disable memory write invalidate.  Apparently it is not supported
1284 	 * properly by these devices.
1285 	 */
1286 	PCI_CLRBIT(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
1287 	    PCI_COMMAND_INVALIDATE_ENABLE);
1288 
1289 #ifdef __brokenalpha__
1290 	/*
1291 	 * Must ensure that we do not cross an 8K (byte) boundary
1292 	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
1293 	 * restriction on some ALPHA platforms with early revision
1294 	 * 21174 PCI chipsets, such as the AlphaPC 164lx
1295 	 */
1296 	PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1297 	    BGE_PCI_READ_BNDRY_1024);
1298 #endif
1299 
1300 	/* Set the timer prescaler (always 66MHz) */
1301 	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1302 
1303 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1304 		DELAY(40);	/* XXX */
1305 
1306 		/* Put PHY into ready state */
1307 		BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1308 		CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1309 		DELAY(40);
1310 	}
1311 }
1312 
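/*
 * Bring the NIC up block by block: buffer manager, ring control
 * blocks, host coalescing and the various DMA and state machine
 * engines.
 */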
1313 int
1314 bge_blockinit(struct bge_softc *sc)
1315 {
1316 	volatile struct bge_rcb		*rcb;
1317 	vaddr_t			rcb_addr;
1318 	int			i;
1319 	bge_hostaddr		taddr;
1320 	u_int32_t		val;
1321 
1322 	/*
1323 	 * Initialize the memory window pointer register so that
1324 	 * we can access the first 32K of internal NIC RAM. This will
1325 	 * allow us to set up the TX send ring RCBs and the RX return
1326 	 * ring RCBs, plus other things which live in NIC memory.
1327 	 */
1328 	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1329 
1330 	/* Configure mbuf memory pool */
1331 	if (BGE_IS_5700_FAMILY(sc)) {
1332 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1333 		    BGE_BUFFPOOL_1);
1334 
1335 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
1336 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1337 		else
1338 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1339 
1340 		/* Configure DMA resource pool */
1341 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1342 		    BGE_DMA_DESCRIPTORS);
1343 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1344 	}
1345 
1346 	/* Configure mbuf pool watermarks */
1347 	/* new Broadcom docs strongly recommend these: */
1348 	if (BGE_IS_5705_PLUS(sc) &&
1349 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717) {
1350 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1351 
1352 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1353 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1354 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1355 		} else {
1356 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1357 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1358 		}
1359 	} else {
1360 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1361 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1362 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1363 	}
1364 
1365 	/* Configure DMA resource watermarks */
1366 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1367 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1368 
1369 	/* Enable buffer manager */
1370 	CSR_WRITE_4(sc, BGE_BMAN_MODE,
1371 	    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1372 
1373 	/* Poll for buffer manager start indication */
1374 	for (i = 0; i < 2000; i++) {
1375 		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1376 			break;
1377 		DELAY(10);
1378 	}
1379 
1380 	if (i == 2000) {
1381 		printf("%s: buffer manager failed to start\n",
1382 		    sc->bge_dev.dv_xname);
1383 		return (ENXIO);
1384 	}
1385 
1386 	/* Enable flow-through queues */
1387 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1388 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1389 
1390 	/* Wait until queue initialization is complete */
1391 	for (i = 0; i < 2000; i++) {
1392 		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1393 			break;
1394 		DELAY(10);
1395 	}
1396 
1397 	if (i == 2000) {
1398 		printf("%s: flow-through queue init failed\n",
1399 		    sc->bge_dev.dv_xname);
1400 		return (ENXIO);
1401 	}
1402 
1403 	/* Initialize the standard RX ring control block */
1404 	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
1405 	BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
1406 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717)
1407 		rcb->bge_maxlen_flags = (BGE_RCB_MAXLEN_FLAGS(512, 0) |
1408 					(ETHER_MAX_DIX_LEN << 2));
1409 	else if (BGE_IS_5705_PLUS(sc))
1410 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1411 	else
1412 		rcb->bge_maxlen_flags =
1413 		    BGE_RCB_MAXLEN_FLAGS(ETHER_MAX_DIX_LEN, 0);
1414 	rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1415 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1416 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1417 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1418 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1419 
1420 	/*
1421 	 * Initialize the Jumbo RX ring control block
1422 	 * We set the 'ring disabled' bit in the flags
1423 	 * field until we're actually ready to start
1424 	 * using this ring (i.e. once we set the MTU
1425 	 * high enough to require it).
1426 	 */
1427 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
1428 		rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1429 		BGE_HOSTADDR(rcb->bge_hostaddr,
1430 		    BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
1431 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1432 		    BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1433 		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1434 
1435 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1436 		    rcb->bge_hostaddr.bge_addr_hi);
1437 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1438 		    rcb->bge_hostaddr.bge_addr_lo);
1439 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1440 		    rcb->bge_maxlen_flags);
1441 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR,
1442 		    rcb->bge_nicaddr);
1443 
1444 		/* Set up dummy disabled mini ring RCB */
1445 		rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
1446 		rcb->bge_maxlen_flags =
1447 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1448 		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1449 		    rcb->bge_maxlen_flags);
1450 
1451 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1452 		    offsetof(struct bge_ring_data, bge_info),
1453 		    sizeof (struct bge_gib),
1454 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1455 	}
1456 
1457 	/*
1458 	 * Set the BD ring replenish thresholds. The recommended
1459 	 * values are 1/8th the number of descriptors allocated to
1460 	 * each ring, but since we try to avoid filling the entire
1461 	 * ring we set these to the minimal value of 8.  This needs to
1462 	 * be done on several of the supported chip revisions anyway,
1463 	 * to work around HW bugs.
1464 	 */
1465 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, 8);
1466 	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 8);
1467 
1468 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717) {
1469 		CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4);
1470 		CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4);
1471 	}
1472 
1473 	/*
1474 	 * Disable all unused send rings by setting the 'ring disabled'
1475 	 * bit in the flags field of all the TX send ring control blocks.
1476 	 * These are located in NIC memory.
1477 	 */
1478 	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1479 	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1480 		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1481 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1482 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
1483 		rcb_addr += sizeof(struct bge_rcb);
1484 	}
1485 
1486 	/* Configure TX RCB 0 (we use only the first ring) */
1487 	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1488 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
1489 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1490 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1491 	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
1492 		    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1493 	if (BGE_IS_5700_FAMILY(sc))
1494 		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1495 		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1496 
1497 	/* Disable all unused RX return rings */
1498 	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1499 	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1500 		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
1501 		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
1502 		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1503 		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1504 			BGE_RCB_FLAG_RING_DISABLED));
1505 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
1506 		bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1507 		    (i * (sizeof(u_int64_t))), 0);
1508 		rcb_addr += sizeof(struct bge_rcb);
1509 	}
1510 
1511 	/* Initialize RX ring indexes */
1512 	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1513 	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1514 	bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1515 
1516 	/*
1517 	 * Set up RX return ring 0
1518 	 * Note that the NIC address for RX return rings is 0x00000000.
1519 	 * The return rings live entirely within the host, so the
1520 	 * nicaddr field in the RCB isn't used.
1521 	 */
1522 	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1523 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
1524 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1525 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1526 	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
1527 	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1528 	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1529 
1530 	/* Set random backoff seed for TX */
1531 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1532 	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1533 	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1534 	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1535 	    BGE_TX_BACKOFF_SEED_MASK);
1536 
1537 	/* Set inter-packet gap */
1538 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1539 
1540 	/*
1541 	 * Specify which ring to use for packets that don't match
1542 	 * any RX rules.
1543 	 */
1544 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1545 
1546 	/*
1547 	 * Configure number of RX lists. One interrupt distribution
1548 	 * list, sixteen active lists, one bad frames class.
1549 	 */
1550 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1551 
1552 	/* Initialize RX list placement stats mask. */
1553 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1554 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1555 
1556 	/* Disable host coalescing until we get it set up */
1557 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1558 
1559 	/* Poll to make sure it's shut down. */
1560 	for (i = 0; i < 2000; i++) {
1561 		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1562 			break;
1563 		DELAY(10);
1564 	}
1565 
1566 	if (i == 2000) {
1567 		printf("%s: host coalescing engine failed to idle\n",
1568 		    sc->bge_dev.dv_xname);
1569 		return (ENXIO);
1570 	}
1571 
1572 	/* Set up host coalescing defaults */
1573 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1574 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1575 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1576 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1577 	if (BGE_IS_5700_FAMILY(sc)) {
1578 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1579 		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1580 	}
1581 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1582 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1583 
1584 	/* Set up address of statistics block */
1585 	if (BGE_IS_5700_FAMILY(sc)) {
1586 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0);
1587 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1588 			    BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
1589 
1590 		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1591 		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1592 		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1593 	}
1594 
1595 	/* Set up address of status block */
1596 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
1597 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
1598 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
1599 
1600 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
1601 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
1602 
1603 	/* Turn on host coalescing state machine */
1604 	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1605 
1606 	/* Turn on RX BD completion state machine and enable attentions */
1607 	CSR_WRITE_4(sc, BGE_RBDC_MODE,
1608 	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1609 
1610 	/* Turn on RX list placement state machine */
1611 	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1612 
1613 	/* Turn on RX list selector state machine. */
1614 	if (BGE_IS_5700_FAMILY(sc))
1615 		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1616 
1617 	val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
1618 	    BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
1619 	    BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
1620 	    BGE_MACMODE_FRMHDR_DMA_ENB;
1621 
1622 	if (sc->bge_flags & BGE_PHY_FIBER_TBI)
1623 	    val |= BGE_PORTMODE_TBI;
1624 	else if (sc->bge_flags & BGE_PHY_FIBER_MII)
1625 	    val |= BGE_PORTMODE_GMII;
1626 	else
1627 	    val |= BGE_PORTMODE_MII;
1628 
1629 	/* Turn on DMA, clear stats */
1630 	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
1631 
1632 	/* Set misc. local control, enable interrupts on attentions */
1633 	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1634 
1635 #ifdef notdef
1636 	/* Assert GPIO pins for PHY reset */
1637 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1638 	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1639 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1640 	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1641 #endif
1642 
1643 	/* Turn on DMA completion state machine */
1644 	if (BGE_IS_5700_FAMILY(sc))
1645 		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1646 
1647 	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
1648 
1649 	/* Enable host coalescing bug fix. */
1650 	if (BGE_IS_5755_PLUS(sc))
1651 		val |= BGE_WDMAMODE_STATUS_TAG_FIX;
1652 
1653 	/* Turn on write DMA state machine */
1654 	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1655 
1656 	val = BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS;
1657 
1658 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
1659 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
1660 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
1661 		val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
1662 		       BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
1663 		       BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
1664 
1665 	if (sc->bge_flags & BGE_PCIE)
1666 		val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1667 
1668 	/* Turn on read DMA state machine */
1669 	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1670 
1671 	/* Turn on RX data completion state machine */
1672 	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1673 
1674 	/* Turn on RX BD initiator state machine */
1675 	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1676 
1677 	/* Turn on RX data and RX BD initiator state machine */
1678 	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1679 
1680 	/* Turn on Mbuf cluster free state machine */
1681 	if (BGE_IS_5700_FAMILY(sc))
1682 		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1683 
1684 	/* Turn on send BD completion state machine */
1685 	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1686 
1687 	val = BGE_SDCMODE_ENABLE;
1688 
1689 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
1690 		val |= BGE_SDCMODE_CDELAY;
1691 
1692 	/* Turn on send data completion state machine */
1693 	CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1694 
1695 	/* Turn on send data initiator state machine */
1696 	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1697 
1698 	/* Turn on send BD initiator state machine */
1699 	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1700 
1701 	/* Turn on send BD selector state machine */
1702 	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1703 
1704 	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1705 	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1706 	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1707 
1708 	/* ack/clear link change events */
1709 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1710 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1711 	    BGE_MACSTAT_LINK_CHANGED);
1712 
1713 	/* Enable PHY auto polling (for MII/GMII only) */
1714 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
1715 		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1716 	} else {
1717 		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
1718 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1719 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700)
1720 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1721 			    BGE_EVTENB_MI_INTERRUPT);
1722 	}
1723 
1724 	/*
1725 	 * Clear any pending link state attention.
1726 	 * Otherwise some link state change events may be lost until attention
1727 	 * is cleared by bge_intr() -> bge_link_upd() sequence.
1728 	 * It's not necessary on newer BCM chips - perhaps enabling link
1729 	 * state change attentions implies clearing pending attention.
1730 	 */
1731 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1732 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1733 	    BGE_MACSTAT_LINK_CHANGED);
1734 
1735 	/* Enable link state change attentions. */
1736 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1737 
1738 	return (0);
1739 }
1740 
1741 const struct bge_revision *
1742 bge_lookup_rev(u_int32_t chipid)
1743 {
1744 	const struct bge_revision *br;
1745 
1746 	for (br = bge_revisions; br->br_name != NULL; br++) {
1747 		if (br->br_chipid == chipid)
1748 			return (br);
1749 	}
1750 
1751 	for (br = bge_majorrevs; br->br_name != NULL; br++) {
1752 		if (br->br_chipid == BGE_ASICREV(chipid))
1753 			return (br);
1754 	}
1755 
1756 	return (NULL);
1757 }
1758 
1759 /*
1760  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1761  * against our table and return a match if we find one.
1762  */
1767 int
1768 bge_probe(struct device *parent, void *match, void *aux)
1769 {
1770 	return (pci_matchbyid(aux, bge_devices, nitems(bge_devices)));
1771 }
1772 
1773 void
1774 bge_attach(struct device *parent, struct device *self, void *aux)
1775 {
1776 	struct bge_softc	*sc = (struct bge_softc *)self;
1777 	struct pci_attach_args	*pa = aux;
1778 	pci_chipset_tag_t	pc = pa->pa_pc;
1779 	const struct bge_revision *br;
1780 	pcireg_t		pm_ctl, memtype, subid;
1781 	pci_intr_handle_t	ih;
1782 	const char		*intrstr = NULL;
1783 	bus_size_t		size;
1784 	bus_dma_segment_t	seg;
1785 	int			rseg, gotenaddr = 0;
1786 	u_int32_t		hwcfg = 0;
1787 	u_int32_t		mac_addr = 0;
1788 	u_int32_t		misccfg;
1789 	struct ifnet		*ifp;
1790 	caddr_t			kva;
1791 #ifdef __sparc64__
1792 	char			name[32];
1793 #endif
1794 
1795 	sc->bge_pa = *pa;
1796 
1797 	subid = pci_conf_read(pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
1798 
1799 	/*
1800 	 * Map control/status registers.
1801 	 */
1802 	DPRINTFN(5, ("Map control/status regs\n"));
1803 
1804 	DPRINTFN(5, ("pci_mapreg_map\n"));
1805 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0);
1806 	if (pci_mapreg_map(pa, BGE_PCI_BAR0, memtype, 0, &sc->bge_btag,
1807 	    &sc->bge_bhandle, NULL, &size, 0)) {
1808 		printf(": can't find mem space\n");
1809 		return;
1810 	}
1811 
1812 	DPRINTFN(5, ("pci_intr_map\n"));
1813 	if (pci_intr_map(pa, &ih)) {
1814 		printf(": couldn't map interrupt\n");
1815 		goto fail_1;
1816 	}
1817 
1818 	DPRINTFN(5, ("pci_intr_string\n"));
1819 	intrstr = pci_intr_string(pc, ih);
1820 
1821 	/*
1822 	 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
1823 	 * can clobber the chip's PCI config-space power control registers,
1824 	 * leaving the card in D3 powersave state.
1825 	 * We do not have memory-mapped registers in this state,
1826 	 * so force device into D0 state before starting initialization.
1827 	 */
1828 	pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
1829 	pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
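	/* Bit 8 is apparently PME_EN in the PCI power management CSR. */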
1830 	pm_ctl |= (1 << 8) | PCI_PWR_D0;	/* D0 state */
1831 	pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
1832 	DELAY(1000);	/* 27 usec is allegedly sufficient */
1833 
1834 	/*
1835 	 * Save ASIC rev.
1836 	 */
1837 	sc->bge_chipid =
1838 	     (pci_conf_read(pc, pa->pa_tag, BGE_PCI_MISC_CTL)
1839 	      >> BGE_PCIMISCCTL_ASICREV_SHIFT);
1840 
1841 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
1842 		if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5717C ||
1843 		    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5717S ||
1844 		    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5718C ||
1845 		    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5718S)
1846 			sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
1847 			    BGE_PCI_GEN2_PRODID_ASICREV);
1848 		else
1849 			sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
1850 			    BGE_PCI_PRODID_ASICREV);
1851 	}
1852 
1853 	printf(", ");
1854 	br = bge_lookup_rev(sc->bge_chipid);
1855 	if (br == NULL)
1856 		printf("unknown ASIC (0x%x)", sc->bge_chipid);
1857 	else
1858 		printf("%s (0x%x)", br->br_name, sc->bge_chipid);
1859 
1860 	/*
1861 	 * PCI Express check.
1862 	 */
1863 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
1864 	    NULL, NULL) != 0)
1865 		sc->bge_flags |= BGE_PCIE;
1866 
1867 	/*
1868 	 * PCI-X check.
1869 	 */
1870 	if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
1871 	    BGE_PCISTATE_PCI_BUSMODE) == 0)
1872 		sc->bge_flags |= BGE_PCIX;
1873 
1874 	/*
1875 	 * SEEPROM check.
1876 	 */
1877 #ifdef __sparc64__
1878 	/*
1879 	 * Onboard interfaces on UltraSPARC systems generally don't
1880 	 * have a SEEPROM fitted.  These interfaces, and cards that
1881 	 * have FCode, are named "network" by the PROM, whereas cards
1882 	 * without FCode show up as "ethernet".  Since we don't really
1883 	 * need the information from the SEEPROM on cards that have
1884 	 * FCode it's fine to pretend they don't have one.
1885 	 */
1886 	if (OF_getprop(PCITAG_NODE(pa->pa_tag), "name", name,
1887 	    sizeof(name)) > 0 && strcmp(name, "network") == 0)
1888 		sc->bge_flags |= BGE_NO_EEPROM;
1889 #endif
1890 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
1891 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 ||
1892 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
1893 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
1894 		sc->bge_flags |= BGE_5700_FAMILY;
1895 
1896 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 ||
1897 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780 ||
1898 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714)
1899 		sc->bge_flags |= BGE_5714_FAMILY;
1900 
1901 	/* Intentionally exclude BGE_ASICREV_BCM5906 */
1902 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
1903 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
1904 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
1905 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
1906 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
1907 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787 ||
1908 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
1909 		sc->bge_flags |= BGE_5755_PLUS;
1910 
1911 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 ||
1912 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
1913 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 ||
1914 	    BGE_IS_5755_PLUS(sc) ||
1915 	    BGE_IS_5714_FAMILY(sc))
1916 		sc->bge_flags |= BGE_5750_PLUS;
1917 
1918 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 ||
1919 	    BGE_IS_5750_PLUS(sc))
1920 		sc->bge_flags |= BGE_5705_PLUS;
1921 
1922 	/*
1923 	 * When using the BCM5701 in PCI-X mode, data corruption has
1924 	 * been observed in the first few bytes of some received packets.
1925 	 * Aligning the packet buffer in memory eliminates the corruption.
1926 	 * Unfortunately, this misaligns the packet payloads.  On platforms
1927 	 * which do not support unaligned accesses, we will realign the
1928 	 * payloads by copying the received packets.
1929 	 */
1930 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
1931 	    sc->bge_flags & BGE_PCIX)
1932 		sc->bge_flags |= BGE_RX_ALIGNBUG;
1933 
1934 	if (BGE_IS_5700_FAMILY(sc))
1935 		sc->bge_flags |= BGE_JUMBO_CAPABLE;
1936 
1937 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
1938 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) &&
1939 	    PCI_VENDOR(subid) == DELL_VENDORID)
1940 		sc->bge_flags |= BGE_NO_3LED;
1941 
1942 	misccfg = CSR_READ_4(sc, BGE_MISC_CFG);
1943 	misccfg &= BGE_MISCCFG_BOARD_ID_MASK;
1944 
1945 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
1946 	    (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
1947 	     misccfg == BGE_MISCCFG_BOARD_ID_5788M))
1948 		sc->bge_flags |= BGE_IS_5788;
1949 
1950 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 &&
1951 	     (misccfg == 0x4000 || misccfg == 0x8000)) ||
1952 	    (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
1953 	     PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
1954 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 ||
1955 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
1956 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
1957 	    (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
1958 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F ||
1959 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F ||
1960 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
1961 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 ||
1962 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
1963 		sc->bge_flags |= BGE_10_100_ONLY;
1964 
1965 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
1966 	    (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
1967 	     (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
1968 	      sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
1969 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
1970 		sc->bge_flags |= BGE_NO_ETH_WIRE_SPEED;
1971 
1972 	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
1973 	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
1974 		sc->bge_flags |= BGE_PHY_CRC_BUG;
1975 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX ||
1976 	    BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX)
1977 		sc->bge_flags |= BGE_PHY_ADC_BUG;
1978 	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
1979 		sc->bge_flags |= BGE_PHY_5704_A0_BUG;
1980 
1981 	if ((BGE_IS_5705_PLUS(sc)) &&
1982 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 &&
1983 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
1984 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
1985 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780) {
1986 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
1987 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
1988 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
1989 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) {
1990 			if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 &&
1991 			    PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756)
1992 				sc->bge_flags |= BGE_PHY_JITTER_BUG;
1993 			if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M)
1994 				sc->bge_flags |= BGE_PHY_ADJUST_TRIM;
1995 		} else
1996 			sc->bge_flags |= BGE_PHY_BER_BUG;
1997 	}
1998 
1999 	/* Try to reset the chip. */
2000 	DPRINTFN(5, ("bge_reset\n"));
2001 	bge_reset(sc);
2002 
2003 	bge_chipinit(sc);
2004 
2005 #ifdef __sparc64__
2006 	if (!gotenaddr) {
2007 		if (OF_getprop(PCITAG_NODE(pa->pa_tag), "local-mac-address",
2008 		    sc->arpcom.ac_enaddr, ETHER_ADDR_LEN) == ETHER_ADDR_LEN)
2009 			gotenaddr = 1;
2010 	}
2011 #endif
2012 
2013 	/*
2014 	 * Get the station address: try NIC memory, then NVRAM, then
2015 	 * the EEPROM.
2016 	 */
2016 	if (!gotenaddr) {
2017 		mac_addr = bge_readmem_ind(sc, 0x0c14);
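		/*
		 * 0x484b ("HK" in ASCII) in the upper half apparently
		 * marks a firmware-supplied MAC address in NIC memory.
		 */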
2018 		if ((mac_addr >> 16) == 0x484b) {
2019 			sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
2020 			sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
2021 			mac_addr = bge_readmem_ind(sc, 0x0c18);
2022 			sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
2023 			sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
2024 			sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
2025 			sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
2026 			gotenaddr = 1;
2027 		}
2028 	}
2029 	if (!gotenaddr) {
2030 		int mac_offset = BGE_EE_MAC_OFFSET;
2031 
2032 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2033 			mac_offset = BGE_EE_MAC_OFFSET_5906;
2034 
2035 		if (bge_read_nvram(sc, (caddr_t)&sc->arpcom.ac_enaddr,
2036 		    mac_offset + 2, ETHER_ADDR_LEN) == 0)
2037 			gotenaddr = 1;
2038 	}
2039 	if (!gotenaddr && (!(sc->bge_flags & BGE_NO_EEPROM))) {
2040 		if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
2041 		    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN) == 0)
2042 			gotenaddr = 1;
2043 	}
2044 
2045 #ifdef __sparc64__
2046 	if (!gotenaddr) {
2047 		extern void myetheraddr(u_char *);
2048 
2049 		myetheraddr(sc->arpcom.ac_enaddr);
2050 		gotenaddr = 1;
2051 	}
2052 #endif
2053 
2054 	if (!gotenaddr) {
2055 		printf(": failed to read station address\n");
2056 		goto fail_1;
2057 	}
2058 
2059 	/* Allocate the general information block and ring buffers. */
2060 	sc->bge_dmatag = pa->pa_dmat;
2061 	DPRINTFN(5, ("bus_dmamem_alloc\n"));
2062 	if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
2063 			     PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
2064 		printf(": can't alloc rx buffers\n");
2065 		goto fail_1;
2066 	}
2067 	DPRINTFN(5, ("bus_dmamem_map\n"));
2068 	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg,
2069 			   sizeof(struct bge_ring_data), &kva,
2070 			   BUS_DMA_NOWAIT)) {
2071 		printf(": can't map dma buffers (%lu bytes)\n",
2072 		    sizeof(struct bge_ring_data));
2073 		goto fail_2;
2074 	}
2075 	DPRINTFN(5, ("bus_dmamap_create\n"));
2076 	if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
2077 	    sizeof(struct bge_ring_data), 0,
2078 	    BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
2079 		printf(": can't create dma map\n");
2080 		goto fail_3;
2081 	}
2082 	DPRINTFN(5, ("bus_dmamap_load\n"));
2083 	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
2084 			    sizeof(struct bge_ring_data), NULL,
2085 			    BUS_DMA_NOWAIT)) {
2086 		goto fail_4;
2087 	}
2088 
2089 	DPRINTFN(5, ("bzero\n"));
2090 	sc->bge_rdata = (struct bge_ring_data *)kva;
2091 
2092 	bzero(sc->bge_rdata, sizeof(struct bge_ring_data));
2093 
2094 	/* Set default tuneable values. */
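	/* The coalescing tick values are apparently in microseconds. */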
2095 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2096 	sc->bge_rx_coal_ticks = 150;
2097 	sc->bge_rx_max_coal_bds = 64;
2098 	sc->bge_tx_coal_ticks = 300;
2099 	sc->bge_tx_max_coal_bds = 400;
2100 
2101 	/* 5705 limits RX return ring to 512 entries. */
2102 	if (BGE_IS_5705_PLUS(sc))
2103 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2104 	else
2105 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2106 
2107 	/* Set up ifnet structure */
2108 	ifp = &sc->arpcom.ac_if;
2109 	ifp->if_softc = sc;
2110 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2111 	ifp->if_ioctl = bge_ioctl;
2112 	ifp->if_start = bge_start;
2113 	ifp->if_watchdog = bge_watchdog;
2114 	IFQ_SET_MAXLEN(&ifp->if_snd, BGE_TX_RING_CNT - 1);
2115 	IFQ_SET_READY(&ifp->if_snd);
2116 
2117 	/* The low water mark must be greater than the replenish threshold. */
2118 	m_clsetwms(ifp, MCLBYTES, 17, BGE_STD_RX_RING_CNT);
2119 	m_clsetwms(ifp, BGE_JLEN, 17, BGE_JUMBO_RX_RING_CNT);
2120 
2121 	DPRINTFN(5, ("bcopy\n"));
2122 	bcopy(sc->bge_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
2123 
2124 	ifp->if_capabilities = IFCAP_VLAN_MTU;
2125 
2126 #if NVLAN > 0
2127 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
2128 #endif
2129 
2130 	/*
2131 	 * 5700 B0 chips do not support checksumming correctly due
2132 	 * to hardware bugs.
2133 	 */
2134 	if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0)
2135 		ifp->if_capabilities |= IFCAP_CSUM_IPv4;
2136 #if 0	/* TCP/UDP checksum offload breaks with pf(4) */
2137 		ifp->if_capabilities |= IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4;
2138 #endif
2139 
2140 	if (BGE_IS_JUMBO_CAPABLE(sc))
2141 		ifp->if_hardmtu = BGE_JUMBO_MTU;
2142 
2143 	/*
2144 	 * Do MII setup.
2145 	 */
2146 	DPRINTFN(5, ("mii setup\n"));
2147 	sc->bge_mii.mii_ifp = ifp;
2148 	sc->bge_mii.mii_readreg = bge_miibus_readreg;
2149 	sc->bge_mii.mii_writereg = bge_miibus_writereg;
2150 	sc->bge_mii.mii_statchg = bge_miibus_statchg;
2151 
2152 	/*
2153 	 * Figure out what sort of media we have by checking the hardware
2154 	 * config word in the first 32K of internal NIC memory, or fall back to
2155 	 * examining the EEPROM if necessary.  Note: on some BCM5700 cards,
2156 	 * this value seems to be unset. If that's the case, we have to rely on
2157 	 * identifying the NIC by its PCI subsystem ID, as we do below for the
2158 	 * SysKonnect SK-9D41.
2159 	 */
2160 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2161 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2162 	else if (!(sc->bge_flags & BGE_NO_EEPROM)) {
2163 		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2164 		    sizeof(hwcfg))) {
2165 			printf(": failed to read media type\n");
2166 			goto fail_5;
2167 		}
2168 		hwcfg = ntohl(hwcfg);
2169 	}
2170 
2171 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
2172 	if (PCI_PRODUCT(subid) == SK_SUBSYSID_9D41 ||
2173 	    (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
2174 		if (BGE_IS_5714_FAMILY(sc))
2175 		    sc->bge_flags |= BGE_PHY_FIBER_MII;
2176 		else
2177 		    sc->bge_flags |= BGE_PHY_FIBER_TBI;
2178 	}
2179 
2180 	/* Hookup IRQ last. */
2181 	DPRINTFN(5, ("pci_intr_establish\n"));
2182 	sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc,
2183 	    sc->bge_dev.dv_xname);
2184 	if (sc->bge_intrhand == NULL) {
2185 		printf(": couldn't establish interrupt");
2186 		if (intrstr != NULL)
2187 			printf(" at %s", intrstr);
2188 		printf("\n");
2189 		goto fail_5;
2190 	}
2191 
2192 	/*
2193 	 * A Broadcom chip was detected. Inform the world.
2194 	 */
2195 	printf(": %s, address %s\n", intrstr,
2196 	    ether_sprintf(sc->arpcom.ac_enaddr));
2197 
2198 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
2199 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2200 		    bge_ifmedia_sts);
2201 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2202 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
2203 			    0, NULL);
2204 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2205 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2206 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2207 	} else {
2208 		int mii_flags;
2209 
2210 		/*
2211 		 * Do transceiver setup.
2212 		 */
2213 		ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
2214 			     bge_ifmedia_sts);
2215 		mii_flags = MIIF_DOPAUSE;
2216 		if (sc->bge_flags & BGE_PHY_FIBER_MII)
2217 			mii_flags |= MIIF_HAVEFIBER;
2218 		mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff,
2219 			   MII_PHY_ANY, MII_OFFSET_ANY, mii_flags);
2220 
2221 		if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) {
2222 			printf("%s: no PHY found!\n", sc->bge_dev.dv_xname);
2223 			ifmedia_add(&sc->bge_mii.mii_media,
2224 				    IFM_ETHER|IFM_MANUAL, 0, NULL);
2225 			ifmedia_set(&sc->bge_mii.mii_media,
2226 				    IFM_ETHER|IFM_MANUAL);
2227 		} else
2228 			ifmedia_set(&sc->bge_mii.mii_media,
2229 				    IFM_ETHER|IFM_AUTO);
2230 	}
2231 
2232 	/*
2233 	 * Call MI attach routine.
2234 	 */
2235 	if_attach(ifp);
2236 	ether_ifattach(ifp);
2237 
2238 	sc->sc_shutdownhook = shutdownhook_establish(bge_shutdown, sc);
2239 	sc->sc_powerhook = powerhook_establish(bge_power, sc);
2240 
2241 	timeout_set(&sc->bge_timeout, bge_tick, sc);
2242 	timeout_set(&sc->bge_rxtimeout, bge_rxtick, sc);
2243 	return;
2244 
2245 fail_5:
2246 	bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map);
2247 
2248 fail_4:
2249 	bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
2250 
2251 fail_3:
2252 	bus_dmamem_unmap(sc->bge_dmatag, kva,
2253 	    sizeof(struct bge_ring_data));
2254 
2255 fail_2:
2256 	bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2257 
2258 fail_1:
2259 	bus_space_unmap(sc->bge_btag, sc->bge_bhandle, size);
2260 }
2261 
2262 void
2263 bge_reset(struct bge_softc *sc)
2264 {
2265 	struct pci_attach_args *pa = &sc->bge_pa;
2266 	pcireg_t cachesize, command, pcistate, new_pcistate;
2267 	u_int32_t reset;
2268 	int i, val = 0;
2269 
2270 	/* Save some important PCI state. */
2271 	cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
2272 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD);
2273 	pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
2274 
2275 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
2276 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2277 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW);
2278 
2279 	/* Disable fastboot on controllers that support it. */
2280 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
2281 	    BGE_IS_5755_PLUS(sc))
2282 		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0);
2283 
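	/*
	 * (65 << 1) is apparently the 32-bit timer prescaler for a 66MHz
	 * core clock; FreeBSD spells the same value BGE_32BITTIME_66MHZ
	 * (0x41 << 1).
	 */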
2284 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2285 
2286 	if (sc->bge_flags & BGE_PCIE) {
2287 		if (CSR_READ_4(sc, 0x7e2c) == 0x60) {
2288 			/* PCI Express 1.0 system */
2289 			CSR_WRITE_4(sc, 0x7e2c, 0x20);
2290 		}
2291 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2292 			/*
2293 			 * Prevent PCI Express link training
2294 			 * during global reset.
2295 			 */
2296 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2297 			reset |= (1<<29);
2298 		}
2299 	}
2300 
2301 	/*
2302 	 * Set the GPHY Power Down Override so the GPHY stays powered
2303 	 * up while the chip is in the D0 uninitialized state.
2304 	 */
2305 	if (BGE_IS_5705_PLUS(sc))
2306 		reset |= BGE_MISCCFG_KEEP_GPHY_POWER;
2307 
2308 	/* Issue global reset */
2309 	bge_writereg_ind(sc, BGE_MISC_CFG, reset);
2310 
2311 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2312 		u_int32_t status, ctrl;
2313 
2314 		status = CSR_READ_4(sc, BGE_VCPU_STATUS);
2315 		CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2316 		    status | BGE_VCPU_STATUS_DRV_RESET);
2317 		ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2318 		CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2319 		    ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2320 
2321 		sc->bge_flags |= BGE_NO_EEPROM;
2322 	}
2323 
2324 	DELAY(1000);
2325 
2326 	if (sc->bge_flags & BGE_PCIE) {
2327 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2328 			pcireg_t v;
2329 
2330 			DELAY(500000); /* wait for link training to complete */
2331 			v = pci_conf_read(pa->pa_pc, pa->pa_tag, 0xc4);
2332 			pci_conf_write(pa->pa_pc, pa->pa_tag, 0xc4, v | (1<<15));
2333 		}
2334 
2335 		/*
2336 		 * Set PCI Express max payload size to 128 bytes
2337 		 * and clear error status.
2338 		 */
2339 		pci_conf_write(pa->pa_pc, pa->pa_tag,
2340 		    BGE_PCI_CONF_DEV_CTRL, 0xf5000);
2341 	}
2342 
2343 	/* Reset some of the PCI state that got zapped by reset */
2344 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
2345 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2346 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW);
2347 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
2348 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);
2349 	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2350 
2351 	/* Enable memory arbiter. */
2352 	if (BGE_IS_5714_FAMILY(sc)) {
2353 		u_int32_t val;
2354 
2355 		val = CSR_READ_4(sc, BGE_MARB_MODE);
2356 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2357 	} else
2358 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2359 
2360 	/*
2361 	 * Prevent PXE restart: write a magic number to the
2362 	 * general communications memory at 0xB50.
2363 	 */
2364 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2365 
2366 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2367 		for (i = 0; i < BGE_TIMEOUT; i++) {
2368 			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2369 			if (val & BGE_VCPU_STATUS_INIT_DONE)
2370 				break;
2371 			DELAY(100);
2372 		}
2373 
2374 		if (i >= BGE_TIMEOUT)
2375 			printf("%s: reset timed out\n", sc->bge_dev.dv_xname);
2376 	} else {
2377 		/*
2378 		 * Poll until we see 1's complement of the magic number.
2379 		 * This indicates that the firmware initialization
2380 		 * is complete.  We expect this to fail if no SEEPROM
2381 		 * is fitted.
2382 		 */
2383 		for (i = 0; i < BGE_TIMEOUT; i++) {
2384 			val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2385 			if (val == ~BGE_MAGIC_NUMBER)
2386 				break;
2387 			DELAY(10);
2388 		}
2389 
2390 		if (i >= BGE_TIMEOUT && (!(sc->bge_flags & BGE_NO_EEPROM)))
2391 			printf("%s: firmware handshake timed out\n",
2392 			   sc->bge_dev.dv_xname);
2393 	}
2394 
2395 	/*
2396 	 * XXX Wait for the value of the PCISTATE register to
2397 	 * return to its original pre-reset state. This is a
2398 	 * fairly good indicator of reset completion. If we don't
2399 	 * wait for the reset to fully complete, trying to read
2400 	 * from the device's non-PCI registers may yield garbage
2401 	 * results.
2402 	 */
2403 	for (i = 0; i < BGE_TIMEOUT; i++) {
2404 		new_pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag,
2405 		    BGE_PCI_PCISTATE);
2406 		if ((new_pcistate & ~BGE_PCISTATE_RESERVED) ==
2407 		    (pcistate & ~BGE_PCISTATE_RESERVED))
2408 			break;
2409 		DELAY(10);
2410 	}
2411 	if ((new_pcistate & ~BGE_PCISTATE_RESERVED) !=
2412 	    (pcistate & ~BGE_PCISTATE_RESERVED)) {
2413 		DPRINTFN(5, ("%s: pcistate failed to revert\n",
2414 		    sc->bge_dev.dv_xname));
2415 	}
2416 
2417 	/* Fix up byte swapping */
2418 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
2419 
2420 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2421 
2422 	/*
2423 	 * The 5704 in TBI mode apparently needs some special
2424 	 * adjustment to ensure the SERDES drive level is set
2425 	 * to 1.2V.
2426 	 */
2427 	if (sc->bge_flags & BGE_PHY_FIBER_TBI &&
2428 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
2429 		u_int32_t serdescfg;
2430 
2431 		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2432 		serdescfg = (serdescfg & ~0xFFF) | 0x880;
2433 		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2434 	}
2435 
2436 	if (sc->bge_flags & BGE_PCIE &&
2437 	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
2438 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
2439 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) {
2440 		u_int32_t v;
2441 
2442 		/* Enable PCI Express bug fix */
2443 		v = CSR_READ_4(sc, 0x7c00);
2444 		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2445 	}
2446 	DELAY(10000);
2447 }
2448 
2449 /*
2450  * Frame reception handling. This is called if there's a frame
2451  * on the receive return list.
2452  *
2453  * Note: we have to be able to handle two possibilities here:
2454  * 1) the frame is from the jumbo receive ring
2455  * 2) the frame is from the standard receive ring
2456  */
2457 
2458 void
2459 bge_rxeof(struct bge_softc *sc)
2460 {
2461 	struct ifnet *ifp;
2462 	int stdcnt = 0, jumbocnt = 0;
2463 	bus_dmamap_t dmamap;
2464 	bus_addr_t offset, toff;
2465 	bus_size_t tlen;
2466 	int tosync;
2467 
2468 	/* Nothing to do */
2469 	if (sc->bge_rx_saved_considx ==
2470 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx)
2471 		return;
2472 
2473 	ifp = &sc->arpcom.ac_if;
2474 
2475 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2476 	    offsetof(struct bge_ring_data, bge_status_block),
2477 	    sizeof (struct bge_status_block),
2478 	    BUS_DMASYNC_POSTREAD);
2479 
2480 	offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
2481 	tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx -
2482 	    sc->bge_rx_saved_considx;
2483 
2484 	toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd));
2485 
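	/*
	 * A negative delta means the producer index has wrapped around
	 * the end of the return ring: sync the tail segment (from the
	 * saved consumer index to the end of the ring) here, and the
	 * leading segment with the sync below.
	 */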
2486 	if (tosync < 0) {
2487 		tlen = (sc->bge_return_ring_cnt - sc->bge_rx_saved_considx) *
2488 		    sizeof (struct bge_rx_bd);
2489 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2490 		    toff, tlen, BUS_DMASYNC_POSTREAD);
2491 		tosync = -tosync;
2492 	}
2493 
2494 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2495 	    offset, tosync * sizeof (struct bge_rx_bd),
2496 	    BUS_DMASYNC_POSTREAD);
2497 
2498 	while (sc->bge_rx_saved_considx !=
2499 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
2500 		struct bge_rx_bd	*cur_rx;
2501 		u_int32_t		rxidx;
2502 		struct mbuf		*m = NULL;
2503 
2504 		cur_rx = &sc->bge_rdata->
2505 			bge_rx_return_ring[sc->bge_rx_saved_considx];
2506 
2507 		rxidx = cur_rx->bge_idx;
2508 		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2509 
2510 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2511 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2512 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2513 
2514 			jumbocnt++;
2515 			sc->bge_jumbo_cnt--;
2516 
2517 			dmamap = sc->bge_cdata.bge_rx_jumbo_map[rxidx];
2518 			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
2519 			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2520 			bus_dmamap_unload(sc->bge_dmatag, dmamap);
2521 
2522 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2523 				m_freem(m);
2524 				ifp->if_ierrors++;
2525 				continue;
2526 			}
2527 		} else {
2528 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2529 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2530 
2531 			stdcnt++;
2532 			sc->bge_std_cnt--;
2533 
2534 			dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
2535 			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
2536 			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2537 			bus_dmamap_unload(sc->bge_dmatag, dmamap);
2538 
2539 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2540 				m_freem(m);
2541 				ifp->if_ierrors++;
2542 				continue;
2543 			}
2544 		}
2545 
2546 		ifp->if_ipackets++;
2547 #ifdef __STRICT_ALIGNMENT
2548 		/*
2549 		 * The i386 allows unaligned accesses, but for other
2550 		 * platforms we must make sure the payload is aligned.
2551 		 */
2552 		if (sc->bge_flags & BGE_RX_ALIGNBUG) {
2553 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2554 			    cur_rx->bge_len);
2555 			m->m_data += ETHER_ALIGN;
2556 		}
2557 #endif
2558 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2559 		m->m_pkthdr.rcvif = ifp;
2560 
2561 		/*
2562 		 * 5700 B0 chips do not support checksumming correctly due
2563 		 * to hardware bugs.
2564 		 */
2565 		if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) {
2566 			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2567 				if (cur_rx->bge_ip_csum == 0xFFFF)
2568 					m->m_pkthdr.csum_flags |=
2569 					    M_IPV4_CSUM_IN_OK;
2570 				else
2571 					m->m_pkthdr.csum_flags |=
2572 					    M_IPV4_CSUM_IN_BAD;
2573 			}
2574 			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
2575 			    m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
2576 				if (cur_rx->bge_tcp_udp_csum == 0xFFFF)
2577 					m->m_pkthdr.csum_flags |=
2578 					    M_TCP_CSUM_IN_OK|M_UDP_CSUM_IN_OK;
2579 			}
2580 		}
2581 
2582 #if NVLAN > 0
2583 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2584 			m->m_pkthdr.ether_vtag = cur_rx->bge_vlan_tag;
2585 			m->m_flags |= M_VLANTAG;
2586 		}
2587 #endif
2588 
2589 #if NBPFILTER > 0
2590 		/*
2591 		 * Handle BPF listeners. Let the BPF user see the packet.
2592 		 */
2593 		if (ifp->if_bpf)
2594 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
2595 #endif
2596 
2597 		ether_input_mbuf(ifp, m);
2598 	}
2599 
2600 	bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2601 	if (stdcnt)
2602 		bge_fill_rx_ring_std(sc);
2603 	if (jumbocnt)
2604 		bge_fill_rx_ring_jumbo(sc);
2605 }
2606 
2607 void
2608 bge_txeof(struct bge_softc *sc)
2609 {
2610 	struct bge_tx_bd *cur_tx = NULL;
2611 	struct ifnet *ifp;
2612 	struct txdmamap_pool_entry *dma;
2613 	bus_addr_t offset, toff;
2614 	bus_size_t tlen;
2615 	int tosync;
2616 	struct mbuf *m;
2617 
2618 	/* Nothing to do */
2619 	if (sc->bge_tx_saved_considx ==
2620 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx)
2621 		return;
2622 
2623 	ifp = &sc->arpcom.ac_if;
2624 
2625 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2626 	    offsetof(struct bge_ring_data, bge_status_block),
2627 	    sizeof (struct bge_status_block),
2628 	    BUS_DMASYNC_POSTREAD);
2629 
2630 	offset = offsetof(struct bge_ring_data, bge_tx_ring);
2631 	tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
2632 	    sc->bge_tx_saved_considx;
2633 
2634 	toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
2635 
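	/*
	 * As in bge_rxeof(), a negative delta means the hardware's
	 * consumer index wrapped past the end of the TX ring; sync the
	 * tail segment first.
	 */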
2636 	if (tosync < 0) {
2637 		tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
2638 		    sizeof (struct bge_tx_bd);
2639 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2640 		    toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2641 		tosync = -tosync;
2642 	}
2643 
2644 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2645 	    offset, tosync * sizeof (struct bge_tx_bd),
2646 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2647 
2648 	/*
2649 	 * Go through our tx ring and free mbufs for those
2650 	 * frames that have been sent.
2651 	 */
2652 	while (sc->bge_tx_saved_considx !=
2653 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
2654 		u_int32_t		idx = 0;
2655 
2656 		idx = sc->bge_tx_saved_considx;
2657 		cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2658 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2659 			ifp->if_opackets++;
2660 		m = sc->bge_cdata.bge_tx_chain[idx];
2661 		if (m != NULL) {
2662 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2663 			dma = sc->txdma[idx];
2664 			bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
2665 			    dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2666 			bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
2667 			SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
2668 			sc->txdma[idx] = NULL;
2669 
2670 			m_freem(m);
2671 		}
2672 		sc->bge_txcnt--;
2673 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2674 	}
2675 
2676 	if (sc->bge_txcnt < BGE_TX_RING_CNT - 16)
2677 		ifp->if_flags &= ~IFF_OACTIVE;
2678 	if (sc->bge_txcnt == 0)
2679 		ifp->if_timer = 0;
2680 }
2681 
2682 int
2683 bge_intr(void *xsc)
2684 {
2685 	struct bge_softc *sc;
2686 	struct ifnet *ifp;
2687 	u_int32_t statusword;
2688 
2689 	sc = xsc;
2690 	ifp = &sc->arpcom.ac_if;
2691 
2692 	/* It is possible for the interrupt to arrive before the
2693 	 * status block has been updated.  Reading the PCI State
2694 	 * register will confirm whether the interrupt is ours and
2695 	 * will flush the status block.
2696 	 */
2697 
2698 	/* read status word from status block */
2699 	statusword = sc->bge_rdata->bge_status_block.bge_status;
2700 
2701 	if ((statusword & BGE_STATFLAG_UPDATED) ||
2702 	    (!(CSR_READ_4(sc, BGE_PCI_PCISTATE) & BGE_PCISTATE_INTR_NOT_ACTIVE))) {
2703 
2704 		/* Ack interrupt and stop others from occurring. */
2705 		bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
2706 
2707 		/* clear status word */
2708 		sc->bge_rdata->bge_status_block.bge_status = 0;
2709 
2710 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2711 		    statusword & BGE_STATFLAG_LINKSTATE_CHANGED ||
2712 		    BGE_STS_BIT(sc, BGE_STS_LINK_EVT))
2713 			bge_link_upd(sc);
2714 
2715 		if (ifp->if_flags & IFF_RUNNING) {
2716 			/* Check RX return ring producer/consumer */
2717 			bge_rxeof(sc);
2718 
2719 			/* Check TX ring producer/consumer */
2720 			bge_txeof(sc);
2721 		}
2722 
2723 		/* Re-enable interrupts. */
2724 		bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
2725 
2726 		bge_start(ifp);
2727 
2728 		return (1);
2729 	} else
2730 		return (0);
2731 }
2732 
2733 void
2734 bge_tick(void *xsc)
2735 {
2736 	struct bge_softc *sc = xsc;
2737 	struct mii_data *mii = &sc->bge_mii;
2738 	int s;
2739 
2740 	s = splnet();
2741 
2742 	if (BGE_IS_5705_PLUS(sc))
2743 		bge_stats_update_regs(sc);
2744 	else
2745 		bge_stats_update(sc);
2746 
2747 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
2748 		/*
2749 		 * Since auto-polling can't be used in TBI mode, we poll
2750 		 * link status manually: register a pending link event
2751 		 * and trigger an interrupt.
2752 		 */
2753 		BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
2754 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
2755 	} else {
2756 		/*
2757 		 * Do not touch PHY if we have link up. This could break
2758 		 * IPMI/ASF mode or produce extra input errors.
2759 		 * (extra input errors were reported for bcm5701 & bcm5704).
2760 		 */
2761 		if (!BGE_STS_BIT(sc, BGE_STS_LINK))
2762 			mii_tick(mii);
2763 	}
2764 
2765 	timeout_add_sec(&sc->bge_timeout, 1);
2766 
2767 	splx(s);
2768 }
2769 
2770 void
2771 bge_stats_update_regs(struct bge_softc *sc)
2772 {
2773 	struct ifnet *ifp = &sc->arpcom.ac_if;
2774 
2775 	ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
2776 	    offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
2777 
2778 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
2779 
2780 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
2781 
2782 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
2783 }
2784 
2785 void
2786 bge_stats_update(struct bge_softc *sc)
2787 {
2788 	struct ifnet *ifp = &sc->arpcom.ac_if;
2789 	bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2790 	u_int32_t cnt;
2791 
2792 #define READ_STAT(sc, stats, stat) \
2793 	  CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
2794 
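	/*
	 * Only the low 32 bits (bge_addr_lo) of each 64-bit counter are
	 * read; accumulating the unsigned delta is safe as long as we
	 * poll more often than a counter can wrap.
	 */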
2795 	cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
2796 	ifp->if_collisions += (u_int32_t)(cnt - sc->bge_tx_collisions);
2797 	sc->bge_tx_collisions = cnt;
2798 
2799 	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
2800 	ifp->if_ierrors += (u_int32_t)(cnt - sc->bge_rx_discards);
2801 	sc->bge_rx_discards = cnt;
2802 
2803 	cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo);
2804 	ifp->if_ierrors += (u_int32_t)(cnt - sc->bge_rx_inerrors);
2805 	sc->bge_rx_inerrors = cnt;
2806 
2807 	cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo);
2808 	ifp->if_ierrors += (u_int32_t)(cnt - sc->bge_rx_overruns);
2809 	sc->bge_rx_overruns = cnt;
2810 
2811 	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
2812 	ifp->if_oerrors += (u_int32_t)(cnt - sc->bge_tx_discards);
2813 	sc->bge_tx_discards = cnt;
2814 
2815 #undef READ_STAT
2816 }
2817 
2818 /*
2819  * Compact outbound packets to avoid a hardware bug with DMA segments of less than 8 bytes.
2820  */
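/*
 * Illustrative example (not from the original code): given a chain with
 * m_len values [14, 6, 60], and assuming the 14-byte mbuf has at least
 * 6 bytes of trailing space, the 6-byte runt is folded into it, giving
 * [20, 60].  If prev has no room but the runt has trailing space and
 * the next mbuf is large enough, bytes are pulled forward from next
 * instead.  A runt that can't be fixed either way is replaced by a
 * fresh mbuf holding prev's tail plus the runt's own data.
 */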
2821 int
2822 bge_compact_dma_runt(struct mbuf *pkt)
2823 {
2824 	struct mbuf	*m, *prev, *n = NULL;
2825 	int 		totlen, newprevlen;
2826 
2827 	prev = NULL;
2828 	totlen = 0;
2829 
2830 	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
2831 		int mlen = m->m_len;
2832 		int shortfall = 8 - mlen;
2833 
2834 		totlen += mlen;
2835 		if (mlen == 0)
2836 			continue;
2837 		if (mlen >= 8)
2838 			continue;
2839 
2840 		/* If we get here, mbuf data is too small for the DMA engine.
2841 		 * Try to fix by shuffling data to prev or next in the chain;
2842 		 * failing that, splice in a fresh mbuf holding prev's tail
2843 		 * plus the runt's data.
2844 		 */
2844 
2845 		/* Internal frag. If fits in prev, copy it there. */
2846 		if (prev && M_TRAILINGSPACE(prev) >= m->m_len) {
2847 			bcopy(m->m_data,
2848 			      prev->m_data+prev->m_len,
2849 			      mlen);
2850 			prev->m_len += mlen;
2851 			m->m_len = 0;
2852 			/* XXX stitch chain */
2853 			prev->m_next = m_free(m);
2854 			m = prev;
2855 			continue;
2856 		} else if (m->m_next != NULL &&
2857 			   M_TRAILINGSPACE(m) >= shortfall &&
2858 			   m->m_next->m_len >= (8 + shortfall)) {
2859 			/* m has trailing space and next has enough data; pull up. */
2860 
2861 			bcopy(m->m_next->m_data,
2862 			      m->m_data+m->m_len,
2863 			      shortfall);
2864 			m->m_len += shortfall;
2865 			m->m_next->m_len -= shortfall;
2866 			m->m_next->m_data += shortfall;
2867 		} else {
2868 			/* Got a runt we couldn't fix up in place, typically
2869 			 * at the very end of the packet.  Borrow data from
2870 			 * the tail of the preceding mbuf and update its
2871 			 * length in-place.  (The original data is still valid,
2872 			 * so this works even if prev is not writable.) */
2873 
2874 			/* if we'd make prev a runt, just move all of its data. */
2875 #ifdef DEBUG
2876 			KASSERT(prev != NULL /*, ("runt but null PREV")*/);
2877 			KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
2878 #endif
2879 			if ((prev->m_len - shortfall) < 8)
2880 				shortfall = prev->m_len;
2881 
2882 			newprevlen = prev->m_len - shortfall;
2883 
2884 			MGET(n, M_NOWAIT, MT_DATA);
2885 			if (n == NULL)
2886 				return (ENOBUFS);
2887 			KASSERT(m->m_len + shortfall < MLEN
2888 				/*,
2889 				  ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
2890 
2891 			/* first copy the data we're stealing from prev */
2892 			bcopy(prev->m_data + newprevlen, n->m_data, shortfall);
2893 
2894 			/* update prev->m_len accordingly */
2895 			prev->m_len -= shortfall;
2896 
2897 			/* copy data from runt m */
2898 			bcopy(m->m_data, n->m_data + shortfall, m->m_len);
2899 
2900 			/* n holds what we stole from prev, plus m */
2901 			n->m_len = shortfall + m->m_len;
2902 
2903 			/* stitch n into chain and free m */
2904 			n->m_next = m->m_next;
2905 			prev->m_next = n;
2906 			/* KASSERT(m->m_next == NULL); */
2907 			m->m_next = NULL;
2908 			m_free(m);
2909 			m = n;	/* for continuing loop */
2910 		}
2911 	}
2912 	return (0);
2913 }
2914 
2915 /*
2916  * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
2917  * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
2918  * but when such padded frames employ the bge IP/TCP checksum offload,
2919  * the hardware checksum assist gives incorrect results (possibly
2920  * from incorporating its own padding into the UDP/TCP checksum; who knows).
2921  * If we pad such runts with zeros, the onboard checksum comes out correct.
2922  */
2923 int
2924 bge_cksum_pad(struct mbuf *m)
2925 {
2926 	int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
2927 	struct mbuf *last;
2928 
2929 	/* If there's only the packet-header mbuf and we can pad there, use it. */
2930 	if (m->m_pkthdr.len == m->m_len && M_TRAILINGSPACE(m) >= padlen) {
2931 		last = m;
2932 	} else {
2933 		/*
2934 		 * Walk packet chain to find last mbuf. We will either
2935 		 * pad there, or append a new mbuf and pad it.
2936 		 */
2937 		for (last = m; last->m_next != NULL; last = last->m_next);
2938 		if (M_TRAILINGSPACE(last) < padlen) {
2939 			/* Allocate new empty mbuf, pad it. Compact later. */
2940 			struct mbuf *n;
2941 
2942 			MGET(n, M_DONTWAIT, MT_DATA);
2943 			if (n == NULL)
2944 				return (ENOBUFS);
2945 			n->m_len = 0;
2946 			last->m_next = n;
2947 			last = n;
2948 		}
2949 	}
2950 
2951 	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
2952 	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
2953 	last->m_len += padlen;
2954 	m->m_pkthdr.len += padlen;
2955 
2956 	return (0);
2957 }
2958 
2959 /*
2960  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2961  * pointers to descriptors.
2962  */
2963 int
2964 bge_encap(struct bge_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
2965 {
2966 	struct bge_tx_bd	*f = NULL;
2967 	u_int32_t		frag, cur;
2968 	u_int16_t		csum_flags = 0;
2969 	struct txdmamap_pool_entry *dma;
2970 	bus_dmamap_t dmamap;
2971 	int			i = 0;
2972 
2973 	cur = frag = *txidx;
2974 
2975 	if (m_head->m_pkthdr.csum_flags) {
2976 		if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
2977 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2978 		if (m_head->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT |
2979 		    M_UDPV4_CSUM_OUT)) {
2980 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2981 			if (m_head->m_pkthdr.len < ETHER_MIN_NOPAD &&
2982 			    bge_cksum_pad(m_head) != 0)
2983 				return (ENOBUFS);
2984 		}
2985 	}
2986 
2987 	if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX))
2988 		goto doit;
2989 
2990 	/*
2991 	 * bcm5700 Revision B silicon cannot handle DMA descriptors with
2992 	 * less than eight bytes.  If we encounter a teeny mbuf
2993 	 * at the end of a chain, we can pad.  Otherwise, copy.
2994 	 */
2995 	if (bge_compact_dma_runt(m_head) != 0)
2996 		return (ENOBUFS);
2997 
2998 doit:
2999 	dma = SLIST_FIRST(&sc->txdma_list);
3000 	if (dma == NULL)
3001 		return (ENOBUFS);
3002 	dmamap = dma->dmamap;
3003 
3004 	/*
3005 	 * Start packing the mbufs in this chain into
3006 	 * the fragment pointers. Stop when we run out
3007 	 * of fragments or hit the end of the mbuf chain.
3008 	 */
3009 	if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
3010 	    BUS_DMA_NOWAIT))
3011 		return (ENOBUFS);
3012 
3013 	/*
3014 	 * Sanity check: avoid coming within 16 descriptors
3015 	 * of the end of the ring.
3016 	 */
3017 	if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16))
3018 		goto fail_unload;
3019 
3020 	for (i = 0; i < dmamap->dm_nsegs; i++) {
3021 		f = &sc->bge_rdata->bge_tx_ring[frag];
3022 		if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
3023 			break;
3024 		BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
3025 		f->bge_len = dmamap->dm_segs[i].ds_len;
3026 		f->bge_flags = csum_flags;
3027 		f->bge_vlan_tag = 0;
3028 #if NVLAN > 0
3029 		if (m_head->m_flags & M_VLANTAG) {
3030 			f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3031 			f->bge_vlan_tag = m_head->m_pkthdr.ether_vtag;
3032 		}
3033 #endif
3034 		cur = frag;
3035 		BGE_INC(frag, BGE_TX_RING_CNT);
3036 	}
3037 
3038 	if (i < dmamap->dm_nsegs)
3039 		goto fail_unload;
3040 
3041 	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
3042 	    BUS_DMASYNC_PREWRITE);
3043 
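	/*
	 * If the new producer index has caught up with the saved
	 * consumer index, the ring is full; unload the map and let the
	 * caller back off.
	 */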
3044 	if (frag == sc->bge_tx_saved_considx)
3045 		goto fail_unload;
3046 
3047 	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
3048 	sc->bge_cdata.bge_tx_chain[cur] = m_head;
3049 	SLIST_REMOVE_HEAD(&sc->txdma_list, link);
3050 	sc->txdma[cur] = dma;
3051 	sc->bge_txcnt += dmamap->dm_nsegs;
3052 
3053 	*txidx = frag;
3054 
3055 	return (0);
3056 
3057 fail_unload:
3058 	bus_dmamap_unload(sc->bge_dmatag, dmamap);
3059 
3060 	return (ENOBUFS);
3061 }
3062 
3063 /*
3064  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3065  * to the mbuf data regions directly in the transmit descriptors.
3066  */
3067 void
3068 bge_start(struct ifnet *ifp)
3069 {
3070 	struct bge_softc *sc;
3071 	struct mbuf *m_head = NULL;
3072 	u_int32_t prodidx;
3073 	int pkts = 0;
3074 
3075 	sc = ifp->if_softc;
3076 
3077 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
3078 		return;
3079 	if (!BGE_STS_BIT(sc, BGE_STS_LINK))
3080 		return;
3081 	if (IFQ_IS_EMPTY(&ifp->if_snd))
3082 		return;
3083 
3084 	prodidx = sc->bge_tx_prodidx;
3085 
3086 	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3087 		IFQ_POLL(&ifp->if_snd, m_head);
3088 		if (m_head == NULL)
3089 			break;
3090 
3091 		/*
3092 		 * Pack the data into the transmit ring. If we
3093 		 * don't have room, set the OACTIVE flag and wait
3094 		 * for the NIC to drain the ring.
3095 		 */
3096 		if (bge_encap(sc, m_head, &prodidx)) {
3097 			ifp->if_flags |= IFF_OACTIVE;
3098 			break;
3099 		}
3100 
3101 		/* now we are committed to transmitting the packet */
3102 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
3103 		pkts++;
3104 
3105 #if NBPFILTER > 0
3106 		/*
3107 		 * If there's a BPF listener, bounce a copy of this frame
3108 		 * to it.
3109 		 */
3110 		if (ifp->if_bpf)
3111 			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
3112 #endif
3113 	}
3114 	if (pkts == 0)
3115 		return;
3116 
3117 	/* Transmit */
3118 	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
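	/* 5700 BX parts apparently need the producer index written twice
	 * (the "5700 b2 errata" workaround). */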
3119 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
3120 		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3121 
3122 	sc->bge_tx_prodidx = prodidx;
3123 
3124 	/*
3125 	 * Set a timeout in case the chip goes out to lunch.
3126 	 */
3127 	ifp->if_timer = 5;
3128 }
3129 
3130 void
3131 bge_init(void *xsc)
3132 {
3133 	struct bge_softc *sc = xsc;
3134 	struct ifnet *ifp;
3135 	u_int16_t *m;
3136 	u_int32_t rxmode;
3137 	int s;
3138 
3139 	s = splnet();
3140 
3141 	ifp = &sc->arpcom.ac_if;
3142 
3143 	/* Cancel pending I/O and flush buffers. */
3144 	bge_stop(sc);
3145 	bge_reset(sc);
3146 	bge_chipinit(sc);
3147 
3148 	/*
3149 	 * Init the various state machines, ring
3150 	 * control blocks and firmware.
3151 	 */
3152 	if (bge_blockinit(sc)) {
3153 		printf("%s: initialization failure\n", sc->bge_dev.dv_xname);
3154 		splx(s);
3155 		return;
3156 	}
3157 
3158 	/* Specify MRU. */
3159 	if (BGE_IS_JUMBO_CAPABLE(sc))
3160 		CSR_WRITE_4(sc, BGE_RX_MTU,
3161 			BGE_JUMBO_FRAMELEN + ETHER_VLAN_ENCAP_LEN);
3162 	else
3163 		CSR_WRITE_4(sc, BGE_RX_MTU,
3164 			ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
3165 
3166 	/* Load our MAC address. */
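	/*
	 * The hardware takes the station address as big-endian (network
	 * order) halfwords: the first two bytes in one register, the
	 * remaining four in the other.
	 */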
3167 	m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
3168 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3169 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3170 
3171 	if (!(ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)) {
3172 		/* Disable hardware decapsulation of VLAN frames. */
3173 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
3174 	}
3175 
3176 	/* Program promiscuous mode and multicast filters. */
3177 	bge_iff(sc);
3178 
3179 	/* Init RX ring. */
3180 	bge_init_rx_ring_std(sc);
3181 
3182 	/*
3183 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3184 	 * memory to ensure that the chip has in fact read the first
3185 	 * entry of the ring.
3186 	 */
3187 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3188 		u_int32_t		v, i;
3189 		for (i = 0; i < 10; i++) {
3190 			DELAY(20);
3191 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3192 			if (v == (MCLBYTES - ETHER_ALIGN))
3193 				break;
3194 		}
3195 		if (i == 10)
3196 			printf("%s: 5705 A0 chip failed to load RX ring\n",
3197 			    sc->bge_dev.dv_xname);
3198 	}
3199 
3200 	/* Init Jumbo RX ring. */
3201 	if (BGE_IS_JUMBO_CAPABLE(sc))
3202 		bge_init_rx_ring_jumbo(sc);
3203 
3204 	/* Init our RX return ring index */
3205 	sc->bge_rx_saved_considx = 0;
3206 
3207 	/* Init our RX/TX stat counters. */
3208 	sc->bge_tx_collisions = 0;
3209 	sc->bge_rx_discards = 0;
3210 	sc->bge_rx_inerrors = 0;
3211 	sc->bge_rx_overruns = 0;
3212 	sc->bge_tx_discards = 0;
3213 
3214 	/* Init TX ring. */
3215 	bge_init_tx_ring(sc);
3216 
3217 	/* Turn on transmitter */
3218 	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3219 
3220 	rxmode = BGE_RXMODE_ENABLE;
3221 
3222 	if (BGE_IS_5755_PLUS(sc))
3223 		rxmode |= BGE_RXMODE_RX_IPV6_CSUM_ENABLE;
3224 
3225 	/* Turn on receiver */
3226 	BGE_SETBIT(sc, BGE_RX_MODE, rxmode);
3227 
3228 	CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3229 
3230 	/* Tell firmware we're alive. */
3231 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3232 
3233 	/* Enable host interrupts. */
3234 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3235 	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3236 	bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3237 
3238 	bge_ifmedia_upd(ifp);
3239 
3240 	ifp->if_flags |= IFF_RUNNING;
3241 	ifp->if_flags &= ~IFF_OACTIVE;
3242 
3243 	splx(s);
3244 
3245 	timeout_add_sec(&sc->bge_timeout, 1);
3246 }
3247 
3248 /*
3249  * Set media options.
3250  */
3251 int
3252 bge_ifmedia_upd(struct ifnet *ifp)
3253 {
3254 	struct bge_softc *sc = ifp->if_softc;
3255 	struct mii_data *mii = &sc->bge_mii;
3256 	struct ifmedia *ifm = &sc->bge_ifmedia;
3257 
3258 	/* If this is a 1000baseX NIC, enable the TBI port. */
3259 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3260 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3261 			return (EINVAL);
3262 		switch (IFM_SUBTYPE(ifm->ifm_media)) {
3263 		case IFM_AUTO:
3264 			/*
3265 			 * The BCM5704 ASIC appears to have a special
3266 			 * mechanism for programming the autoneg
3267 			 * advertisement registers in TBI mode.
3268 			 */
3269 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
3270 				u_int32_t sgdig;
3271 				sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
3272 				if (sgdig & BGE_SGDIGSTS_DONE) {
3273 					CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3274 					sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3275 					sgdig |= BGE_SGDIGCFG_AUTO |
3276 					    BGE_SGDIGCFG_PAUSE_CAP |
3277 					    BGE_SGDIGCFG_ASYM_PAUSE;
3278 					CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3279 					    sgdig | BGE_SGDIGCFG_SEND);
3280 					DELAY(5);
3281 					CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3282 				}
3283 			}
3284 			break;
3285 		case IFM_1000_SX:
3286 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3287 				BGE_CLRBIT(sc, BGE_MAC_MODE,
3288 				    BGE_MACMODE_HALF_DUPLEX);
3289 			} else {
3290 				BGE_SETBIT(sc, BGE_MAC_MODE,
3291 				    BGE_MACMODE_HALF_DUPLEX);
3292 			}
3293 			break;
3294 		default:
3295 			return (EINVAL);
3296 		}
3297 		/* XXX 802.3x flow control for 1000BASE-SX */
3298 		return (0);
3299 	}
3300 
3301 	BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
3302 	if (mii->mii_instance) {
3303 		struct mii_softc *miisc;
3304 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3305 			mii_phy_reset(miisc);
3306 	}
3307 	mii_mediachg(mii);
3308 
3309 	/*
3310 	 * Force an interrupt so that we will call bge_link_upd
3311 	 * if needed and clear any pending link state attention.
3312 	 * Without this we get no further interrupts for link
3313 	 * state changes, so the link never comes up and we can
3314 	 * never send in bge_start; the only other way to recover
3315 	 * was to receive a packet and take an RX interrupt.
3316 	 */
3317 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
3318 	    sc->bge_flags & BGE_IS_5788)
3319 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3320 	else
3321 		BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3322 
3323 	return (0);
3324 }
3325 
3326 /*
3327  * Report current media status.
3328  */
3329 void
3330 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3331 {
3332 	struct bge_softc *sc = ifp->if_softc;
3333 	struct mii_data *mii = &sc->bge_mii;
3334 
3335 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3336 		ifmr->ifm_status = IFM_AVALID;
3337 		ifmr->ifm_active = IFM_ETHER;
3338 		if (CSR_READ_4(sc, BGE_MAC_STS) &
3339 		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
3340 			ifmr->ifm_status |= IFM_ACTIVE;
3341 		} else {
3342 			ifmr->ifm_active |= IFM_NONE;
3343 			return;
3344 		}
3345 		ifmr->ifm_active |= IFM_1000_SX;
3346 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3347 			ifmr->ifm_active |= IFM_HDX;
3348 		else
3349 			ifmr->ifm_active |= IFM_FDX;
3350 		return;
3351 	}
3352 
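	/*
	 * For copper, let the MII layer report status, but substitute
	 * the flow-control bits we track in sc->bge_flowflags for
	 * whatever the PHY reports.
	 */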
3353 	mii_pollstat(mii);
3354 	ifmr->ifm_status = mii->mii_media_status;
3355 	ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
3356 	    sc->bge_flowflags;
3357 }
3358 
3359 int
3360 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3361 {
3362 	struct bge_softc *sc = ifp->if_softc;
3363 	struct ifaddr *ifa = (struct ifaddr *) data;
3364 	struct ifreq *ifr = (struct ifreq *) data;
3365 	int s, error = 0;
3366 	struct mii_data *mii;
3367 
3368 	s = splnet();
3369 
3370 	switch (command) {
3371 	case SIOCSIFADDR:
3372 		ifp->if_flags |= IFF_UP;
3373 		if (!(ifp->if_flags & IFF_RUNNING))
3374 			bge_init(sc);
3375 #ifdef INET
3376 		if (ifa->ifa_addr->sa_family == AF_INET)
3377 			arp_ifinit(&sc->arpcom, ifa);
3378 #endif /* INET */
3379 		break;
3380 
3381 	case SIOCSIFFLAGS:
3382 		if (ifp->if_flags & IFF_UP) {
3383 			if (ifp->if_flags & IFF_RUNNING)
3384 				error = ENETRESET;
3385 			else
3386 				bge_init(sc);
3387 		} else {
3388 			if (ifp->if_flags & IFF_RUNNING)
3389 				bge_stop(sc);
3390 		}
3391 		break;
3392 
3393 	case SIOCSIFMEDIA:
3394 		/* XXX Flow control is not supported for 1000BASE-SX */
3395 		if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3396 			ifr->ifr_media &= ~IFM_ETH_FMASK;
3397 			sc->bge_flowflags = 0;
3398 		}
3399 
3400 		/* Flow control requires full-duplex mode. */
3401 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3402 		    (ifr->ifr_media & IFM_FDX) == 0) {
3403 			ifr->ifr_media &= ~IFM_ETH_FMASK;
3404 		}
3405 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3406 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3407 				/* We can do both TXPAUSE and RXPAUSE. */
3408 				ifr->ifr_media |=
3409 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3410 			}
3411 			sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3412 		}
3413 		/* FALLTHROUGH */
3414 	case SIOCGIFMEDIA:
3415 		if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3416 			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
3417 			    command);
3418 		} else {
3419 			mii = &sc->bge_mii;
3420 			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
3421 			    command);
3422 		}
3423 		break;
3424 
3425 	default:
3426 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
3427 	}
3428 
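	/*
	 * ENETRESET means only the RX filter needs reprogramming;
	 * bge_iff() takes care of promiscuous mode and multicast.
	 */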
3429 	if (error == ENETRESET) {
3430 		if (ifp->if_flags & IFF_RUNNING)
3431 			bge_iff(sc);
3432 		error = 0;
3433 	}
3434 
3435 	splx(s);
3436 	return (error);
3437 }
3438 
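/*
 * The transmitter made no progress before the watchdog timer expired;
 * assume the chip is wedged and reinitialize it.
 */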
3439 void
3440 bge_watchdog(struct ifnet *ifp)
3441 {
3442 	struct bge_softc *sc;
3443 
3444 	sc = ifp->if_softc;
3445 
3446 	printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);
3447 
3448 	bge_init(sc);
3449 
3450 	ifp->if_oerrors++;
3451 }
3452 
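/*
 * Clear the given enable bit in a block's mode register and poll, for
 * up to BGE_TIMEOUT iterations of 100us each, until the hardware
 * reports that the block has stopped.
 */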
3453 void
3454 bge_stop_block(struct bge_softc *sc, bus_size_t reg, u_int32_t bit)
3455 {
3456 	int i;
3457 
3458 	BGE_CLRBIT(sc, reg, bit);
3459 
3460 	for (i = 0; i < BGE_TIMEOUT; i++) {
3461 		if ((CSR_READ_4(sc, reg) & bit) == 0)
3462 			return;
3463 		delay(100);
3464 	}
3465 
3466 	DPRINTFN(5, ("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
3467 	    sc->bge_dev.dv_xname, (u_long) reg, bit));
3468 }
3469 
3470 /*
3471  * Stop the adapter and free any mbufs allocated to the
3472  * RX and TX lists.
3473  */
3474 void
3475 bge_stop(struct bge_softc *sc)
3476 {
3477 	struct ifnet *ifp = &sc->arpcom.ac_if;
3478 	struct ifmedia_entry *ifm;
3479 	struct mii_data *mii;
3480 	int mtmp, itmp;
3481 
3482 	timeout_del(&sc->bge_timeout);
3483 	timeout_del(&sc->bge_rxtimeout);
3484 
3485 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3486 
3487 	/*
3488 	 * Disable all of the receiver blocks
3489 	 */
3490 	bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3491 	bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3492 	bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3493 	if (BGE_IS_5700_FAMILY(sc))
3494 		bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3495 	bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3496 	bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3497 	bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3498 
3499 	/*
3500 	 * Disable all of the transmit blocks
3501 	 */
3502 	bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3503 	bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3504 	bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3505 	bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3506 	bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3507 	if (BGE_IS_5700_FAMILY(sc))
3508 		bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3509 	bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3510 
3511 	/*
3512 	 * Shut down all of the memory managers and related
3513 	 * state machines.
3514 	 */
3515 	bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3516 	bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3517 	if (BGE_IS_5700_FAMILY(sc))
3518 		bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3519 
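	/* Put all flow-through queues into reset, then release them. */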
3520 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3521 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3522 
3523 	if (BGE_IS_5700_FAMILY(sc)) {
3524 		bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3525 		bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3526 	}
3527 
3528 	/* Disable host interrupts. */
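	/*
	 * Masking PCI_INTR gates INTA#; the nonzero IRQ0 mailbox write
	 * mirrors the zero write in bge_init() and keeps the chip from
	 * raising new interrupts.
	 */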
3529 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3530 	bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3531 
3532 	/*
3533 	 * Tell firmware we're shutting down.
3534 	 */
3535 	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3536 
3537 	/* Free the RX lists. */
3538 	bge_free_rx_ring_std(sc);
3539 
3540 	/* Free jumbo RX list. */
3541 	if (BGE_IS_JUMBO_CAPABLE(sc))
3542 		bge_free_rx_ring_jumbo(sc);
3543 
3544 	/* Free TX buffers. */
3545 	bge_free_tx_ring(sc);
3546 
3547 	/*
3548 	 * Isolate/power down the PHY, but leave the media selection
3549 	 * unchanged so that things will be put back to normal when
3550 	 * we bring the interface back up.
3551 	 */
3552 	if (!(sc->bge_flags & BGE_PHY_FIBER_TBI)) {
3553 		mii = &sc->bge_mii;
3554 		itmp = ifp->if_flags;
3555 		ifp->if_flags |= IFF_UP;
3556 		ifm = mii->mii_media.ifm_cur;
3557 		mtmp = ifm->ifm_media;
3558 		ifm->ifm_media = IFM_ETHER|IFM_NONE;
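		/* IFM_NONE makes the MII layer isolate the PHY. */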
3559 		mii_mediachg(mii);
3560 		ifm->ifm_media = mtmp;
3561 		ifp->if_flags = itmp;
3562 	}
3563 
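	/* The saved TX consumer index is invalid once the chip stops. */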
3564 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3565 
3566 	/* Clear MAC's link state (PHY may still have link UP). */
3567 	BGE_STS_CLRBIT(sc, BGE_STS_LINK);
3568 }
3569 
3570 /*
3571  * Stop all chip I/O so that the kernel's probe routines don't
3572  * get confused by errant DMAs when rebooting.
3573  */
3574 void
3575 bge_shutdown(void *xsc)
3576 {
3577 	struct bge_softc *sc = (struct bge_softc *)xsc;
3578 
3579 	bge_stop(sc);
3580 	bge_reset(sc);
3581 }
3582 
3583 void
3584 bge_link_upd(struct bge_softc *sc)
3585 {
3586 	struct ifnet *ifp = &sc->arpcom.ac_if;
3587 	struct mii_data *mii = &sc->bge_mii;
3588 	u_int32_t status;
3589 	int link;
3590 
3591 	/* Clear 'pending link event' flag */
3592 	BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT);
3593 
3594 	/*
3595 	 * Process link state changes.
3596 	 * Grrr. The link status word in the status block does
3597 	 * not work correctly on the BCM5700 rev AX and BX chips,
3598 	 * according to all available information. Hence, we have
3599 	 * to enable MII interrupts in order to properly obtain
3600 	 * async link changes. Unfortunately, this also means that
3601 	 * we have to read the MAC status register to detect link
3602 	 * changes, thereby adding an additional register access to
3603 	 * the interrupt handler.
3604 	 *
3605 	 */
3606 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) {
3607 		status = CSR_READ_4(sc, BGE_MAC_STS);
3608 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
3609 			mii_pollstat(mii);
3610 
3611 			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
3612 			    mii->mii_media_status & IFM_ACTIVE &&
3613 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
3614 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
3615 			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
3616 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
3617 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
3618 				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
3619 
3620 			/* Clear the interrupt */
3621 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
3622 			    BGE_EVTENB_MI_INTERRUPT);
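			/*
			 * Reading the ISR acks the PHY interrupt and
			 * rewriting the IMR re-arms it.
			 */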
3623 			bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR);
3624 			bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR,
3625 			    BRGPHY_INTRS);
3626 		}
3627 		return;
3628 	}
3629 
3630 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3631 		status = CSR_READ_4(sc, BGE_MAC_STS);
3632 		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3633 			if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
3634 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
3635 				if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
3636 					BGE_CLRBIT(sc, BGE_MAC_MODE,
3637 					    BGE_MACMODE_TBI_SEND_CFGS);
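				/*
				 * Wipe all pending MAC attentions; the
				 * status bits are write-one-to-clear.
				 */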
3638 				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3639 				status = CSR_READ_4(sc, BGE_MAC_MODE);
3640 				ifp->if_link_state =
3641 				    (status & BGE_MACMODE_HALF_DUPLEX) ?
3642 				    LINK_STATE_HALF_DUPLEX :
3643 				    LINK_STATE_FULL_DUPLEX;
3644 				if_link_state_change(ifp);
3645 				ifp->if_baudrate = IF_Gbps(1);
3646 			}
3647 		} else if (BGE_STS_BIT(sc, BGE_STS_LINK)) {
3648 			BGE_STS_CLRBIT(sc, BGE_STS_LINK);
3649 			ifp->if_link_state = LINK_STATE_DOWN;
3650 			if_link_state_change(ifp);
3651 			ifp->if_baudrate = 0;
3652 		}
3653 	/*
3654 	 * Discard link events for MII/GMII cards if MI auto-polling
3655 	 * is disabled. This should not happen since mii callouts are
3656 	 * locked now, but we keep this check for debugging.
3657 	 */
3658 	} else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) {
3659 		/*
3660 		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
3661 		 * bit in the status word always set. Work around this bug by
3662 		 * reading the PHY link status directly.
3663 		 */
3664 		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ?
3665 		    BGE_STS_LINK : 0;
3666 
3667 		if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) {
3668 			mii_pollstat(mii);
3669 
3670 			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
3671 			    mii->mii_media_status & IFM_ACTIVE &&
3672 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
3673 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
3674 			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
3675 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
3676 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
3677 				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
3678 		}
3679 	}
3680 
3681 	/* Clear the attention */
3682 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
3683 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
3684 	    BGE_MACSTAT_LINK_CHANGED);
3685 }
3686 
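/*
 * Power-management hook (we assume it is registered at attach time):
 * on resume, reinitialize the chip and restart transmission if the
 * interface was configured up.
 */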
3687 void
3688 bge_power(int why, void *xsc)
3689 {
3690 	struct bge_softc *sc = (struct bge_softc *)xsc;
3691 	struct ifnet *ifp;
3692 
3693 	if (why == PWR_RESUME) {
3694 		ifp = &sc->arpcom.ac_if;
3695 		if (ifp->if_flags & IFF_UP) {
3696 			bge_init(xsc);
3697 			bge_start(ifp);
3698 		}
3699 	}
3700 }
3701