1 /*	$OpenBSD: if_bge.c,v 1.357 2014/07/12 18:48:51 tedu Exp $	*/
2 
3 /*
4  * Copyright (c) 2001 Wind River Systems
5  * Copyright (c) 1997, 1998, 1999, 2001
6  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by Bill Paul.
19  * 4. Neither the name of the author nor the names of any co-contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
27  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33  * THE POSSIBILITY OF SUCH DAMAGE.
34  *
35  * $FreeBSD: if_bge.c,v 1.25 2002/11/14 23:54:49 sam Exp $
36  */
37 
38 /*
39  * Broadcom BCM57xx/BCM590x family ethernet driver for OpenBSD.
40  *
41  * Written by Bill Paul <wpaul@windriver.com>
42  * Senior Engineer, Wind River Systems
43  */
44 
45 /*
46  * The Broadcom BCM5700 is based on technology originally developed by
47  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
48  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
49  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
50  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
51  * frames, highly configurable RX filtering, and 16 RX and TX queues
52  * (which, along with RX filter rules, can be used for QOS applications).
53  * Other features, such as TCP segmentation, may be available as part
54  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
55  * firmware images can be stored in hardware and need not be compiled
56  * into the driver.
57  *
58  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
59  * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
60  *
61  * The BCM5701 is a single-chip solution incorporating both the BCM5700
62  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
63  * does not support external SSRAM.
64  *
65  * Broadcom also produces a variation of the BCM5700 under the "Altima"
66  * brand name, which is functionally similar but lacks PCI-X support.
67  *
68  * Without external SSRAM, you can have at most 4 TX rings,
69  * and the use of the mini RX ring is disabled. This seems to imply
70  * that these features are simply not available on the BCM5701. As a
71  * result, this driver does not implement any support for the mini RX
72  * ring.
73  */
74 
75 #include "bpfilter.h"
76 #include "vlan.h"
77 
78 #include <sys/param.h>
79 #include <sys/systm.h>
80 #include <sys/sockio.h>
81 #include <sys/mbuf.h>
82 #include <sys/malloc.h>
83 #include <sys/kernel.h>
84 #include <sys/device.h>
85 #include <sys/timeout.h>
86 #include <sys/socket.h>
87 
88 #include <net/if.h>
89 #include <net/if_dl.h>
90 #include <net/if_media.h>
91 
92 #ifdef INET
93 #include <netinet/in.h>
94 #include <netinet/in_systm.h>
95 #include <netinet/ip.h>
96 #include <netinet/if_ether.h>
97 #endif
98 
99 #if NVLAN > 0
100 #include <net/if_types.h>
101 #include <net/if_vlan_var.h>
102 #endif
103 
104 #if NBPFILTER > 0
105 #include <net/bpf.h>
106 #endif
107 
108 #ifdef __sparc64__
109 #include <sparc64/autoconf.h>
110 #include <dev/ofw/openfirm.h>
111 #endif
112 
113 #include <dev/pci/pcireg.h>
114 #include <dev/pci/pcivar.h>
115 #include <dev/pci/pcidevs.h>
116 
117 #include <dev/mii/mii.h>
118 #include <dev/mii/miivar.h>
119 #include <dev/mii/miidevs.h>
120 #include <dev/mii/brgphyreg.h>
121 
122 #include <dev/pci/if_bgereg.h>
123 
124 #define ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
125 
126 const struct bge_revision * bge_lookup_rev(u_int32_t);
127 int bge_can_use_msi(struct bge_softc *);
128 int bge_probe(struct device *, void *, void *);
129 void bge_attach(struct device *, struct device *, void *);
130 int bge_activate(struct device *, int);
131 
132 struct cfattach bge_ca = {
133 	sizeof(struct bge_softc), bge_probe, bge_attach, NULL, bge_activate
134 };
135 
136 struct cfdriver bge_cd = {
137 	NULL, "bge", DV_IFNET
138 };
139 
140 void bge_txeof(struct bge_softc *);
141 void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
142 void bge_rxeof(struct bge_softc *);
143 
144 void bge_tick(void *);
145 void bge_stats_update(struct bge_softc *);
146 void bge_stats_update_regs(struct bge_softc *);
147 int bge_cksum_pad(struct mbuf *);
148 int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *);
149 int bge_compact_dma_runt(struct mbuf *);
150 
151 int bge_intr(void *);
152 void bge_start(struct ifnet *);
153 int bge_ioctl(struct ifnet *, u_long, caddr_t);
154 int bge_rxrinfo(struct bge_softc *, struct if_rxrinfo *);
155 void bge_init(void *);
156 void bge_stop_block(struct bge_softc *, bus_size_t, u_int32_t);
157 void bge_stop(struct bge_softc *);
158 void bge_watchdog(struct ifnet *);
159 int bge_ifmedia_upd(struct ifnet *);
160 void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
161 
162 u_int8_t bge_nvram_getbyte(struct bge_softc *, int, u_int8_t *);
163 int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
164 u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *);
165 int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
166 
167 void bge_iff(struct bge_softc *);
168 
169 int bge_newbuf_jumbo(struct bge_softc *, int);
170 int bge_init_rx_ring_jumbo(struct bge_softc *);
171 void bge_fill_rx_ring_jumbo(struct bge_softc *);
172 void bge_free_rx_ring_jumbo(struct bge_softc *);
173 
174 int bge_newbuf(struct bge_softc *, int);
175 int bge_init_rx_ring_std(struct bge_softc *);
176 void bge_rxtick(void *);
177 void bge_fill_rx_ring_std(struct bge_softc *);
178 void bge_free_rx_ring_std(struct bge_softc *);
179 
180 void bge_free_tx_ring(struct bge_softc *);
181 int bge_init_tx_ring(struct bge_softc *);
182 
183 void bge_chipinit(struct bge_softc *);
184 int bge_blockinit(struct bge_softc *);
185 u_int32_t bge_dma_swap_options(struct bge_softc *);
186 int bge_phy_addr(struct bge_softc *);
187 
188 u_int32_t bge_readmem_ind(struct bge_softc *, int);
189 void bge_writemem_ind(struct bge_softc *, int, int);
190 void bge_writereg_ind(struct bge_softc *, int, int);
191 void bge_writembx(struct bge_softc *, int, int);
192 
193 int bge_miibus_readreg(struct device *, int, int);
194 void bge_miibus_writereg(struct device *, int, int, int);
195 void bge_miibus_statchg(struct device *);
196 
197 #define BGE_RESET_SHUTDOWN	0
198 #define BGE_RESET_START		1
199 #define BGE_RESET_SUSPEND	2
200 void bge_sig_post_reset(struct bge_softc *, int);
201 void bge_sig_legacy(struct bge_softc *, int);
202 void bge_sig_pre_reset(struct bge_softc *, int);
203 void bge_stop_fw(struct bge_softc *, int);
204 void bge_reset(struct bge_softc *);
205 void bge_link_upd(struct bge_softc *);
206 
207 void bge_ape_lock_init(struct bge_softc *);
208 void bge_ape_read_fw_ver(struct bge_softc *);
209 int bge_ape_lock(struct bge_softc *, int);
210 void bge_ape_unlock(struct bge_softc *, int);
211 void bge_ape_send_event(struct bge_softc *, uint32_t);
212 void bge_ape_driver_state_change(struct bge_softc *, int);
213 
214 #ifdef BGE_DEBUG
215 #define DPRINTF(x)	do { if (bgedebug) printf x; } while (0)
216 #define DPRINTFN(n,x)	do { if (bgedebug >= (n)) printf x; } while (0)
217 int	bgedebug = 0;
218 #else
219 #define DPRINTF(x)
220 #define DPRINTFN(n,x)
221 #endif
222 
223 /*
224  * Various supported device vendors/types and their names. Note: the
225  * spec seems to indicate that the hardware still has Alteon's vendor
226  * ID burned into it, though it will always be overridden by the vendor
227  * ID in the EEPROM. Just to be safe, we cover all possibilities.
228  */
229 const struct pci_matchid bge_devices[] = {
230 	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700 },
231 	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701 },
232 
233 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000 },
234 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001 },
235 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1003 },
236 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100 },
237 
238 	{ PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701 },
239 
240 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700 },
241 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701 },
242 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702 },
243 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT },
244 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X },
245 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703 },
246 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703_ALT },
247 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X },
248 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C },
249 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S },
250 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT },
251 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705 },
252 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F },
253 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K },
254 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M },
255 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT },
256 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714 },
257 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S },
258 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715 },
259 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S },
260 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717 },
261 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717C },
262 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718 },
263 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719 },
264 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720 },
265 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721 },
266 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722 },
267 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5723 },
268 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5725 },
269 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5727 },
270 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751 },
271 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F },
272 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M },
273 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752 },
274 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M },
275 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753 },
276 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F },
277 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M },
278 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754 },
279 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M },
280 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755 },
281 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M },
282 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756 },
283 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761 },
284 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761E },
285 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761S },
286 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761SE },
287 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5762 },
288 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5764 },
289 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780 },
290 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S },
291 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781 },
292 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782 },
293 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5784 },
294 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785F },
295 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785G },
296 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786 },
297 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787 },
298 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F },
299 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M },
300 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788 },
301 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789 },
302 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901 },
303 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2 },
304 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M },
305 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906 },
306 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M },
307 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57760 },
308 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761 },
309 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762 },
310 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57764 },
311 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765 },
312 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766 },
313 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57767 },
314 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57780 },
315 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781 },
316 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782 },
317 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785 },
318 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786 },
319 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57787 },
320 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57788 },
321 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57790 },
322 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791 },
323 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795 },
324 
325 	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE4 },
326 	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE5 },
327 	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PP250_450_LAN },
328 
329 	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9D21 },
330 
331 	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996 }
332 };
333 
334 #define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_JUMBO_CAPABLE)
335 #define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_5700_FAMILY)
336 #define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_5705_PLUS)
337 #define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_5714_FAMILY)
338 #define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_575X_PLUS)
339 #define BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGE_5755_PLUS)
340 #define BGE_IS_5717_PLUS(sc)		((sc)->bge_flags & BGE_5717_PLUS)
341 #define BGE_IS_57765_PLUS(sc)		((sc)->bge_flags & BGE_57765_PLUS)
342 
343 static const struct bge_revision {
344 	u_int32_t		br_chipid;
345 	const char		*br_name;
346 } bge_revisions[] = {
347 	{ BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
348 	{ BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
349 	{ BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
350 	{ BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
351 	{ BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
352 	{ BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
353 	{ BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
354 	{ BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
355 	{ BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
356 	{ BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
357 	{ BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
358 	{ BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
359 	/* the 5702 and 5703 share the same ASIC ID */
360 	{ BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" },
361 	{ BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" },
362 	{ BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" },
363 	{ BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" },
364 	{ BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" },
365 	{ BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
366 	{ BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
367 	{ BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
368 	{ BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
369 	{ BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
370 	{ BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
371 	{ BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
372 	{ BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
373 	{ BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
374 	{ BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
375 	{ BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
376 	{ BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
377 	{ BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
378 	{ BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
379 	{ BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
380 	{ BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
381 	{ BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
382 	{ BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
383 	{ BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
384 	{ BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
385 	{ BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
386 	{ BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
387 	{ BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
388 	{ BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
389 	{ BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
390 	{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
391 	{ BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
392 	{ BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
393 	{ BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
394 	{ BGE_CHIPID_BCM5720_A0, "BCM5720 A0" },
395 	{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
396 	{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
397 	{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
398 	{ BGE_CHIPID_BCM5755_C0, "BCM5755 C0" },
399 	{ BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
400 	{ BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
401 	{ BGE_CHIPID_BCM5762_A0, "BCM5762 A0" },
402 	{ BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
403 	{ BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
404 	/* the 5754 and 5787 share the same ASIC ID */
405 	{ BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
406 	{ BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
407 	{ BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
408 	{ BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
409 	{ BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
410 	{ BGE_CHIPID_BCM57765_A0, "BCM57765 A0" },
411 	{ BGE_CHIPID_BCM57765_B0, "BCM57765 B0" },
412 	{ BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
413 	{ BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
414 
415 	{ 0, NULL }
416 };
417 
418 /*
419  * Some defaults for major revisions, so that newer steppings
420  * that we don't know about have a shot at working.
421  */
422 static const struct bge_revision bge_majorrevs[] = {
423 	{ BGE_ASICREV_BCM5700, "unknown BCM5700" },
424 	{ BGE_ASICREV_BCM5701, "unknown BCM5701" },
425 	/* 5702 and 5703 share the same ASIC ID */
426 	{ BGE_ASICREV_BCM5703, "unknown BCM5703" },
427 	{ BGE_ASICREV_BCM5704, "unknown BCM5704" },
428 	{ BGE_ASICREV_BCM5705, "unknown BCM5705" },
429 	{ BGE_ASICREV_BCM5750, "unknown BCM5750" },
430 	{ BGE_ASICREV_BCM5714, "unknown BCM5714" },
431 	{ BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
432 	{ BGE_ASICREV_BCM5752, "unknown BCM5752" },
433 	{ BGE_ASICREV_BCM5780, "unknown BCM5780" },
434 	{ BGE_ASICREV_BCM5755, "unknown BCM5755" },
435 	{ BGE_ASICREV_BCM5761, "unknown BCM5761" },
436 	{ BGE_ASICREV_BCM5784, "unknown BCM5784" },
437 	{ BGE_ASICREV_BCM5785, "unknown BCM5785" },
438 	/* 5754 and 5787 share the same ASIC ID */
439 	{ BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
440 	{ BGE_ASICREV_BCM5906, "unknown BCM5906" },
441 	{ BGE_ASICREV_BCM57765, "unknown BCM57765" },
442 	{ BGE_ASICREV_BCM57766, "unknown BCM57766" },
443 	{ BGE_ASICREV_BCM57780, "unknown BCM57780" },
444 	{ BGE_ASICREV_BCM5717, "unknown BCM5717" },
445 	{ BGE_ASICREV_BCM5719, "unknown BCM5719" },
446 	{ BGE_ASICREV_BCM5720, "unknown BCM5720" },
447 	{ BGE_ASICREV_BCM5762, "unknown BCM5762" },
448 
449 	{ 0, NULL }
450 };
451 
452 u_int32_t
453 bge_readmem_ind(struct bge_softc *sc, int off)
454 {
455 	struct pci_attach_args	*pa = &(sc->bge_pa);
456 	u_int32_t val;
457 
458 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
459 	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
460 		return (0);
461 
462 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
463 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA);
464 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, 0);
465 	return (val);
466 }
467 
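/*
 * Illustrative sketch of the indirect access pattern above: point the
 * PCI memory window at a NIC-internal offset, move data through the
 * window's data register, then park the window back at 0 so later
 * fixed-window accesses are not redirected.  For example, peeking at
 * the first word of the on-chip statistics block:
 *
 *	u_int32_t stats0 = bge_readmem_ind(sc, BGE_STATS_BLOCK);
 */
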
468 void
469 bge_writemem_ind(struct bge_softc *sc, int off, int val)
470 {
471 	struct pci_attach_args	*pa = &(sc->bge_pa);
472 
473 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
474 	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
475 		return;
476 
477 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
478 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val);
479 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, 0);
480 }
481 
482 void
483 bge_writereg_ind(struct bge_softc *sc, int off, int val)
484 {
485 	struct pci_attach_args	*pa = &(sc->bge_pa);
486 
487 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
488 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val);
489 }
490 
491 void
492 bge_writembx(struct bge_softc *sc, int off, int val)
493 {
494 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
495 		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
496 
497 	CSR_WRITE_4(sc, off, val);
498 }
499 
500 /*
501  * Clear all stale locks and select the lock for this driver instance.
502  */
503 void
504 bge_ape_lock_init(struct bge_softc *sc)
505 {
506 	struct pci_attach_args *pa = &(sc->bge_pa);
507 	uint32_t bit, regbase;
508 	int i;
509 
510 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
511 		regbase = BGE_APE_LOCK_GRANT;
512 	else
513 		regbase = BGE_APE_PER_LOCK_GRANT;
514 
515 	/* Clear any stale locks. */
516 	for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
517 		switch (i) {
518 		case BGE_APE_LOCK_PHY0:
519 		case BGE_APE_LOCK_PHY1:
520 		case BGE_APE_LOCK_PHY2:
521 		case BGE_APE_LOCK_PHY3:
522 			bit = BGE_APE_LOCK_GRANT_DRIVER0;
523 			break;
524 		default:
525 			if (pa->pa_function == 0)
526 				bit = BGE_APE_LOCK_GRANT_DRIVER0;
527 			else
528 				bit = (1 << pa->pa_function);
529 		}
530 		APE_WRITE_4(sc, regbase + 4 * i, bit);
531 	}
532 
533 	/* Select the PHY lock based on the device's function number. */
534 	switch (pa->pa_function) {
535 	case 0:
536 		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0;
537 		break;
538 	case 1:
539 		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1;
540 		break;
541 	case 2:
542 		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2;
543 		break;
544 	case 3:
545 		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3;
546 		break;
547 	default:
548 		printf("%s: PHY lock not supported on function %d\n",
549 		    sc->bge_dev.dv_xname, pa->pa_function);
550 		break;
551 	}
552 }
553 
554 /*
555  * Check for APE firmware, set flags, and print version info.
556  */
557 void
558 bge_ape_read_fw_ver(struct bge_softc *sc)
559 {
560 	const char *fwtype;
561 	uint32_t apedata, features;
562 
563 	/* Check for a valid APE signature in shared memory. */
564 	apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
565 	if (apedata != BGE_APE_SEG_SIG_MAGIC) {
566 		sc->bge_mfw_flags &= ~BGE_MFW_ON_APE;
567 		return;
568 	}
569 
570 	/* Check if APE firmware is running. */
571 	apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
572 	if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
573 		printf("%s: APE signature found but FW status not ready! "
574 		    "0x%08x\n", sc->bge_dev.dv_xname, apedata);
575 		return;
576 	}
577 
578 	sc->bge_mfw_flags |= BGE_MFW_ON_APE;
579 
580 	/* Fetch the APE firmware type and version. */
581 	apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
582 	features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
583 	if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) {
584 		sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI;
585 		fwtype = "NCSI";
586 	} else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) {
587 		sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH;
588 		fwtype = "DASH";
589 	} else
590 		fwtype = "UNKN";
591 
592 	/* Print the APE firmware version. */
593 	printf(", APE firmware %s %d.%d.%d.%d", fwtype,
594 	    (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
595 	    (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
596 	    (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
597 	    (apedata & BGE_APE_FW_VERSION_BLDMSK));
598 }
599 
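/*
 * Worked example of the decode above, assuming the usual byte-wide
 * version fields packed major/minor/revision/build from high byte to
 * low: apedata == 0x01020304 is printed as "1.2.3.4".
 */
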
600 int
601 bge_ape_lock(struct bge_softc *sc, int locknum)
602 {
603 	struct pci_attach_args *pa = &(sc->bge_pa);
604 	uint32_t bit, gnt, req, status;
605 	int i, off;
606 
607 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
608 		return (0);
609 
610 	/* Lock request/grant registers have different bases. */
611 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) {
612 		req = BGE_APE_LOCK_REQ;
613 		gnt = BGE_APE_LOCK_GRANT;
614 	} else {
615 		req = BGE_APE_PER_LOCK_REQ;
616 		gnt = BGE_APE_PER_LOCK_GRANT;
617 	}
618 
619 	off = 4 * locknum;
620 
621 	switch (locknum) {
622 	case BGE_APE_LOCK_GPIO:
623 		/* Lock required when using GPIO. */
624 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
625 			return (0);
626 		if (pa->pa_function == 0)
627 			bit = BGE_APE_LOCK_REQ_DRIVER0;
628 		else
629 			bit = (1 << pa->pa_function);
630 		break;
631 	case BGE_APE_LOCK_GRC:
632 		/* Lock required to reset the device. */
633 		if (pa->pa_function == 0)
634 			bit = BGE_APE_LOCK_REQ_DRIVER0;
635 		else
636 			bit = (1 << pa->pa_function);
637 		break;
638 	case BGE_APE_LOCK_MEM:
639 		/* Lock required when accessing certain APE memory. */
640 		if (pa->pa_function == 0)
641 			bit = BGE_APE_LOCK_REQ_DRIVER0;
642 		else
643 			bit = (1 << pa->pa_function);
644 		break;
645 	case BGE_APE_LOCK_PHY0:
646 	case BGE_APE_LOCK_PHY1:
647 	case BGE_APE_LOCK_PHY2:
648 	case BGE_APE_LOCK_PHY3:
649 		/* Lock required when accessing PHYs. */
650 		bit = BGE_APE_LOCK_REQ_DRIVER0;
651 		break;
652 	default:
653 		return (EINVAL);
654 	}
655 
656 	/* Request a lock. */
657 	APE_WRITE_4(sc, req + off, bit);
658 
659 	/* Wait up to 1 second to acquire lock. */
660 	for (i = 0; i < 20000; i++) {
661 		status = APE_READ_4(sc, gnt + off);
662 		if (status == bit)
663 			break;
664 		DELAY(50);
665 	}
666 
667 	/* Handle any errors. */
668 	if (status != bit) {
669 		printf("%s: APE lock %d request failed! "
670 		    "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
671 		    sc->bge_dev.dv_xname,
672 		    locknum, req + off, bit & 0xFFFF, gnt + off,
673 		    status & 0xFFFF);
674 		/* Revoke the lock request. */
675 		APE_WRITE_4(sc, gnt + off, bit);
676 		return (EBUSY);
677 	}
678 
679 	return (0);
680 }
681 
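/*
 * Minimal usage sketch, not compiled: hardware that the APE firmware
 * may also touch is bracketed by a lock/unlock pair, as the PHY
 * accessors below do with sc->bge_phy_ape_lock:
 *
 *	if (bge_ape_lock(sc, BGE_APE_LOCK_GRC) == 0) {
 *		(reset-related register accesses)
 *		bge_ape_unlock(sc, BGE_APE_LOCK_GRC);
 *	}
 */
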
682 void
683 bge_ape_unlock(struct bge_softc *sc, int locknum)
684 {
685 	struct pci_attach_args *pa = &(sc->bge_pa);
686 	uint32_t bit, gnt;
687 	int off;
688 
689 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
690 		return;
691 
692 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
693 		gnt = BGE_APE_LOCK_GRANT;
694 	else
695 		gnt = BGE_APE_PER_LOCK_GRANT;
696 
697 	off = 4 * locknum;
698 
699 	switch (locknum) {
700 	case BGE_APE_LOCK_GPIO:
701 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
702 			return;
703 		if (pa->pa_function == 0)
704 			bit = BGE_APE_LOCK_GRANT_DRIVER0;
705 		else
706 			bit = (1 << pa->pa_function);
707 		break;
708 	case BGE_APE_LOCK_GRC:
709 		if (pa->pa_function == 0)
710 			bit = BGE_APE_LOCK_GRANT_DRIVER0;
711 		else
712 			bit = (1 << pa->pa_function);
713 		break;
714 	case BGE_APE_LOCK_MEM:
715 		if (pa->pa_function == 0)
716 			bit = BGE_APE_LOCK_GRANT_DRIVER0;
717 		else
718 			bit = (1 << pa->pa_function);
719 		break;
720 	case BGE_APE_LOCK_PHY0:
721 	case BGE_APE_LOCK_PHY1:
722 	case BGE_APE_LOCK_PHY2:
723 	case BGE_APE_LOCK_PHY3:
724 		bit = BGE_APE_LOCK_GRANT_DRIVER0;
725 		break;
726 	default:
727 		return;
728 	}
729 
730 	APE_WRITE_4(sc, gnt + off, bit);
731 }
732 
733 /*
734  * Send an event to the APE firmware.
735  */
736 void
737 bge_ape_send_event(struct bge_softc *sc, uint32_t event)
738 {
739 	uint32_t apedata;
740 	int i;
741 
742 	/* NCSI does not support APE events. */
743 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
744 		return;
745 
746 	/* Wait up to 1ms for APE to service previous event. */
747 	for (i = 10; i > 0; i--) {
748 		if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
749 			break;
750 		apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
751 		if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
752 			APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
753 			    BGE_APE_EVENT_STATUS_EVENT_PENDING);
754 			bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
755 			APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
756 			break;
757 		}
758 		bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
759 		DELAY(100);
760 	}
761 	if (i == 0) {
762 		printf("%s: APE event 0x%08x send timed out\n",
763 		    sc->bge_dev.dv_xname, event);
764 	}
765 }
766 
767 void
768 bge_ape_driver_state_change(struct bge_softc *sc, int kind)
769 {
770 	uint32_t apedata, event;
771 
772 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
773 		return;
774 
775 	switch (kind) {
776 	case BGE_RESET_START:
777 		/* If this is the first load, clear the load counter. */
778 		apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
779 		if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC)
780 			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
781 		else {
782 			apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
783 			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
784 		}
785 		APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
786 		    BGE_APE_HOST_SEG_SIG_MAGIC);
787 		APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
788 		    BGE_APE_HOST_SEG_LEN_MAGIC);
789 
790 		/* Add some version info if bge(4) supports it. */
791 		APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
792 		    BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
793 		APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
794 		    BGE_APE_HOST_BEHAV_NO_PHYLOCK);
795 		APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
796 		    BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
797 		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
798 		    BGE_APE_HOST_DRVR_STATE_START);
799 		event = BGE_APE_EVENT_STATUS_STATE_START;
800 		break;
801 	case BGE_RESET_SHUTDOWN:
802 		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
803 		    BGE_APE_HOST_DRVR_STATE_UNLOAD);
804 		event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
805 		break;
806 	case BGE_RESET_SUSPEND:
807 		event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
808 		break;
809 	default:
810 		return;
811 	}
812 
813 	bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
814 	    BGE_APE_EVENT_STATUS_STATE_CHNGE);
815 }
816 
817 
818 u_int8_t
819 bge_nvram_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
820 {
821 	u_int32_t access, byte = 0;
822 	int i;
823 
824 	/* Lock. */
825 	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
826 	for (i = 0; i < 8000; i++) {
827 		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
828 			break;
829 		DELAY(20);
830 	}
831 	if (i == 8000)
832 		return (1);
833 
834 	/* Enable access. */
835 	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
836 	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
837 
838 	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
839 	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
840 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
841 		DELAY(10);
842 		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
843 			DELAY(10);
844 			break;
845 		}
846 	}
847 
848 	if (i == BGE_TIMEOUT * 10) {
849 		printf("%s: nvram read timed out\n", sc->bge_dev.dv_xname);
850 		return (1);
851 	}
852 
853 	/* Get result. */
854 	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
855 
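	/*
	 * The interface returns a whole 32-bit word; swap it and shift
	 * the byte selected by (addr % 4) down into the low 8 bits
	 * before masking it off.
	 */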
856 	*dest = (swap32(byte) >> ((addr % 4) * 8)) & 0xFF;
857 
858 	/* Disable access. */
859 	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
860 
861 	/* Unlock. */
862 	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
863 	CSR_READ_4(sc, BGE_NVRAM_SWARB);
864 
865 	return (0);
866 }
867 
868 /*
869  * Read a sequence of bytes from NVRAM.
870  */
871 
872 int
873 bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
874 {
875 	int err = 0, i;
876 	u_int8_t byte = 0;
877 
878 	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
879 		return (1);
880 
881 	for (i = 0; i < cnt; i++) {
882 		err = bge_nvram_getbyte(sc, off + i, &byte);
883 		if (err)
884 			break;
885 		*(dest + i) = byte;
886 	}
887 
888 	return (err ? 1 : 0);
889 }
890 
891 /*
892  * Read a byte of data stored in the EEPROM at address 'addr.' The
893  * BCM570x supports both the traditional bitbang interface and an
894  * auto access interface for reading the EEPROM. We use the auto
895  * access method.
896  */
897 u_int8_t
898 bge_eeprom_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
899 {
900 	int i;
901 	u_int32_t byte = 0;
902 
903 	/*
904 	 * Enable use of auto EEPROM access so we can avoid
905 	 * having to use the bitbang method.
906 	 */
907 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
908 
909 	/* Reset the EEPROM, load the clock period. */
910 	CSR_WRITE_4(sc, BGE_EE_ADDR,
911 	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
912 	DELAY(20);
913 
914 	/* Issue the read EEPROM command. */
915 	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
916 
917 	/* Wait for completion */
918 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
919 		DELAY(10);
920 		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
921 			break;
922 	}
923 
924 	if (i == BGE_TIMEOUT * 10) {
925 		printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
926 		return (1);
927 	}
928 
929 	/* Get result. */
930 	byte = CSR_READ_4(sc, BGE_EE_DATA);
931 
932 	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;
933 
934 	return (0);
935 }
936 
937 /*
938  * Read a sequence of bytes from the EEPROM.
939  */
940 int
941 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
942 {
943 	int i, error = 0;
944 	u_int8_t byte = 0;
945 
946 	for (i = 0; i < cnt; i++) {
947 		error = bge_eeprom_getbyte(sc, off + i, &byte);
948 		if (error)
949 			break;
950 		*(dest + i) = byte;
951 	}
952 
953 	return (error ? 1 : 0);
954 }
955 
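/*
 * Usage sketch, not compiled: attach-time code pulls multi-byte fields
 * such as the station address out of the EEPROM one byte at a time via
 * this helper; the offset name here is only illustrative.
 *
 *	u_int8_t eaddr[ETHER_ADDR_LEN];
 *
 *	if (bge_read_eeprom(sc, (caddr_t)eaddr, BGE_EE_MAC_OFFSET,
 *	    ETHER_ADDR_LEN))
 *		printf("%s: failed to read station address\n",
 *		    sc->bge_dev.dv_xname);
 */
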
956 int
957 bge_miibus_readreg(struct device *dev, int phy, int reg)
958 {
959 	struct bge_softc *sc = (struct bge_softc *)dev;
960 	u_int32_t val, autopoll;
961 	int i;
962 
963 	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
964 		return (0);
965 
966 	/* Reading with autopolling on may trigger PCI errors */
967 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
968 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
969 		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
970 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
971 		DELAY(80);
972 	}
973 
974 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
975 	    BGE_MIPHY(phy)|BGE_MIREG(reg));
976 	CSR_READ_4(sc, BGE_MI_COMM); /* force write */
977 
978 	for (i = 0; i < 200; i++) {
979 		delay(1);
980 		val = CSR_READ_4(sc, BGE_MI_COMM);
981 		if (!(val & BGE_MICOMM_BUSY))
982 			break;
983 		delay(10);
984 	}
985 
986 	if (i == 200) {
987 		printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
988 		val = 0;
989 		goto done;
990 	}
991 
992 	val = CSR_READ_4(sc, BGE_MI_COMM);
993 
994 done:
995 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
996 		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
997 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
998 		DELAY(80);
999 	}
1000 
1001 	bge_ape_unlock(sc, sc->bge_phy_ape_lock);
1002 
1003 	if (val & BGE_MICOMM_READFAIL)
1004 		return (0);
1005 
1006 	return (val & 0xFFFF);
1007 }
1008 
1009 void
1010 bge_miibus_writereg(struct device *dev, int phy, int reg, int val)
1011 {
1012 	struct bge_softc *sc = (struct bge_softc *)dev;
1013 	u_int32_t autopoll;
1014 	int i;
1015 
1016 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
1017 	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
1018 		return;
1019 
1020 	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
1021 		return;
1022 
1023 	/* Writing with autopolling on may trigger PCI errors */
1024 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
1025 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
1026 		DELAY(40);
1027 		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
1028 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
1029 		DELAY(40); /* 40 usec is supposed to be adequate */
1030 	}
1031 
1032 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
1033 	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
1034 	CSR_READ_4(sc, BGE_MI_COMM); /* force write */
1035 
1036 	for (i = 0; i < 200; i++) {
1037 		delay(1);
1038 		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
1039 			break;
1040 		delay(10);
1041 	}
1042 
1043 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
1044 		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
1045 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
1046 		DELAY(40);
1047 	}
1048 
1049 	bge_ape_unlock(sc, sc->bge_phy_ape_lock);
1050 
1051 	if (i == 200) {
1052 		printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
1053 	}
1054 }
1055 
1056 void
1057 bge_miibus_statchg(struct device *dev)
1058 {
1059 	struct bge_softc *sc = (struct bge_softc *)dev;
1060 	struct mii_data *mii = &sc->bge_mii;
1061 	u_int32_t mac_mode, rx_mode, tx_mode;
1062 
1063 	/*
1064 	 * Get flow control negotiation result.
1065 	 */
1066 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
1067 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags)
1068 		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
1069 
1070 	if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
1071 	    mii->mii_media_status & IFM_ACTIVE &&
1072 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
1073 		BGE_STS_SETBIT(sc, BGE_STS_LINK);
1074 	else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
1075 	    (!(mii->mii_media_status & IFM_ACTIVE) ||
1076 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
1077 		BGE_STS_CLRBIT(sc, BGE_STS_LINK);
1078 
1079 	if (!BGE_STS_BIT(sc, BGE_STS_LINK))
1080 		return;
1081 
1082 	/* Set the port mode (MII/GMII) to match the link speed. */
1083 	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
1084 	    ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);
1085 	tx_mode = CSR_READ_4(sc, BGE_TX_MODE);
1086 	rx_mode = CSR_READ_4(sc, BGE_RX_MODE);
1087 
1088 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
1089 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
1090 		mac_mode |= BGE_PORTMODE_GMII;
1091 	else
1092 		mac_mode |= BGE_PORTMODE_MII;
1093 
1094 	/* Set MAC flow control behavior to match link flow control settings. */
1095 	tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE;
1096 	rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE;
1097 	if (mii->mii_media_active & IFM_FDX) {
1098 		if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
1099 			tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE;
1100 		if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
1101 			rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE;
1102 	} else
1103 		mac_mode |= BGE_MACMODE_HALF_DUPLEX;
1104 
1105 	CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode);
1106 	DELAY(40);
1107 	CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode);
1108 	CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode);
1109 }
1110 
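/*
 * Worked example of the mapping above: a full-duplex link whose
 * autonegotiated flow control resolved to IFM_ETH_TXPAUSE and
 * IFM_ETH_RXPAUSE gets both BGE_TXMODE_FLOWCTL_ENABLE and
 * BGE_RXMODE_FLOWCTL_ENABLE; a half-duplex link gets neither, and
 * BGE_MACMODE_HALF_DUPLEX is set instead.
 */
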
1111 /*
1112  * Initialize a standard receive ring descriptor.
1113  */
1114 int
1115 bge_newbuf(struct bge_softc *sc, int i)
1116 {
1117 	bus_dmamap_t		dmap = sc->bge_cdata.bge_rx_std_map[i];
1118 	struct bge_rx_bd	*r = &sc->bge_rdata->bge_rx_std_ring[i];
1119 	struct mbuf		*m;
1120 	int			error;
1121 
1122 	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
1123 	if (!m)
1124 		return (ENOBUFS);
1125 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1126 	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
1127 	    m_adj(m, ETHER_ALIGN);
1128 
1129 	error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmap, m,
1130 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
1131 	if (error) {
1132 		m_freem(m);
1133 		return (ENOBUFS);
1134 	}
1135 
1136 	bus_dmamap_sync(sc->bge_dmatag, dmap, 0, dmap->dm_mapsize,
1137 	    BUS_DMASYNC_PREREAD);
1138 	sc->bge_cdata.bge_rx_std_chain[i] = m;
1139 
1140 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1141 	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
1142 		i * sizeof (struct bge_rx_bd),
1143 	    sizeof (struct bge_rx_bd),
1144 	    BUS_DMASYNC_POSTWRITE);
1145 
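	/*
	 * The POSTWRITE/PREWRITE syncs bracket the CPU's update of the
	 * descriptor: sync before writing it, fill it in, then sync
	 * again (below) so the chip sees a consistent bge_rx_bd.
	 */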
1146 	BGE_HOSTADDR(r->bge_addr, dmap->dm_segs[0].ds_addr);
1147 	r->bge_flags = BGE_RXBDFLAG_END;
1148 	r->bge_len = m->m_len;
1149 	r->bge_idx = i;
1150 
1151 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1152 	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
1153 		i * sizeof (struct bge_rx_bd),
1154 	    sizeof (struct bge_rx_bd),
1155 	    BUS_DMASYNC_PREWRITE);
1156 
1157 	return (0);
1158 }
1159 
1160 /*
1161  * Initialize a Jumbo receive ring descriptor.
1162  */
1163 int
1164 bge_newbuf_jumbo(struct bge_softc *sc, int i)
1165 {
1166 	bus_dmamap_t		dmap = sc->bge_cdata.bge_rx_jumbo_map[i];
1167 	struct bge_ext_rx_bd	*r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
1168 	struct mbuf		*m;
1169 	int			error;
1170 
1171 	m = MCLGETI(NULL, M_DONTWAIT, NULL, BGE_JLEN);
1172 	if (!m)
1173 		return (ENOBUFS);
1174 	m->m_len = m->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
1175 	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
1176 	    m_adj(m, ETHER_ALIGN);
1177 
1178 	error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmap, m,
1179 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
1180 	if (error) {
1181 		m_freem(m);
1182 		return (ENOBUFS);
1183 	}
1184 
1185 	bus_dmamap_sync(sc->bge_dmatag, dmap, 0, dmap->dm_mapsize,
1186 	    BUS_DMASYNC_PREREAD);
1187 	sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1188 
1189 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1190 	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
1191 		i * sizeof (struct bge_ext_rx_bd),
1192 	    sizeof (struct bge_ext_rx_bd),
1193 	    BUS_DMASYNC_POSTWRITE);
1194 
1195 	/*
1196 	 * Fill in the extended RX buffer descriptor.
1197 	 */
1198 	r->bge_bd.bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1199 	r->bge_bd.bge_idx = i;
1200 	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
1201 	switch (dmap->dm_nsegs) {
1202 	case 4:
1203 		BGE_HOSTADDR(r->bge_addr3, dmap->dm_segs[3].ds_addr);
1204 		r->bge_len3 = dmap->dm_segs[3].ds_len;
1205 		/* FALLTHROUGH */
1206 	case 3:
1207 		BGE_HOSTADDR(r->bge_addr2, dmap->dm_segs[2].ds_addr);
1208 		r->bge_len2 = dmap->dm_segs[2].ds_len;
1209 		/* FALLTHROUGH */
1210 	case 2:
1211 		BGE_HOSTADDR(r->bge_addr1, dmap->dm_segs[1].ds_addr);
1212 		r->bge_len1 = dmap->dm_segs[1].ds_len;
1213 		/* FALLTHROUGH */
1214 	case 1:
1215 		BGE_HOSTADDR(r->bge_bd.bge_addr, dmap->dm_segs[0].ds_addr);
1216 		r->bge_bd.bge_len = dmap->dm_segs[0].ds_len;
1217 		break;
1218 	default:
1219 		panic("%s: %d segments", __func__, dmap->dm_nsegs);
1220 	}
1221 
1222 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1223 	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
1224 		i * sizeof (struct bge_ext_rx_bd),
1225 	    sizeof (struct bge_ext_rx_bd),
1226 	    BUS_DMASYNC_PREWRITE);
1227 
1228 	return (0);
1229 }
1230 
1231 /*
1232  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
1233  * that's 1MB of memory, which is a lot. For now, we fill only the first
1234  * 256 ring entries and hope that our CPU is fast enough to keep up with
1235  * the NIC.
1236  */
1237 int
1238 bge_init_rx_ring_std(struct bge_softc *sc)
1239 {
1240 	int i;
1241 
1242 	if (ISSET(sc->bge_flags, BGE_RXRING_VALID))
1243 		return (0);
1244 
1245 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1246 		if (bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1, MCLBYTES, 0,
1247 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1248 		    &sc->bge_cdata.bge_rx_std_map[i]) != 0) {
1249 			printf("%s: unable to create dmamap for slot %d\n",
1250 			    sc->bge_dev.dv_xname, i);
1251 			goto uncreate;
1252 		}
1253 		bzero(&sc->bge_rdata->bge_rx_std_ring[i],
1254 		    sizeof(struct bge_rx_bd));
1255 	}
1256 
1257 	sc->bge_std = BGE_STD_RX_RING_CNT - 1;
1258 
1259 	/* lwm must be greater than the replenish threshold */
1260 	if_rxr_init(&sc->bge_std_ring, 17, BGE_STD_RX_RING_CNT);
1261 	bge_fill_rx_ring_std(sc);
1262 
1263 	SET(sc->bge_flags, BGE_RXRING_VALID);
1264 
1265 	return (0);
1266 
1267 uncreate:
1268 	while (i--) {
1269 		bus_dmamap_destroy(sc->bge_dmatag,
1270 		    sc->bge_cdata.bge_rx_std_map[i]);
1271 	}
1272 	return (1);
1273 }
1274 
1275 void
1276 bge_rxtick(void *arg)
1277 {
1278 	struct bge_softc *sc = arg;
1279 	int s;
1280 
1281 	s = splnet();
1282 	if (ISSET(sc->bge_flags, BGE_RXRING_VALID) &&
1283 	    if_rxr_inuse(&sc->bge_std_ring) <= 8)
1284 		bge_fill_rx_ring_std(sc);
1285 	if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID) &&
1286 	    if_rxr_inuse(&sc->bge_jumbo_ring) <= 8)
1287 		bge_fill_rx_ring_jumbo(sc);
1288 	splx(s);
1289 }
1290 
1291 void
1292 bge_fill_rx_ring_std(struct bge_softc *sc)
1293 {
1294 	int i;
1295 	int post = 0;
1296 	u_int slots;
1297 
1298 	i = sc->bge_std;
1299 	for (slots = if_rxr_get(&sc->bge_std_ring, BGE_STD_RX_RING_CNT);
1300 	    slots > 0; slots--) {
1301 		BGE_INC(i, BGE_STD_RX_RING_CNT);
1302 
1303 		if (bge_newbuf(sc, i) != 0)
1304 			break;
1305 
1306 		post = 1;
1307 	}
1308 	if_rxr_put(&sc->bge_std_ring, slots);
1309 
1310 	sc->bge_std = i;
1311 
1312 	if (post)
1313 		bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1314 
1315 	/*
1316 	 * bge always needs more than 8 packets on the ring. If we can't do
1317 	 * that now, then try again later.
1318 	 */
1319 	if (if_rxr_inuse(&sc->bge_std_ring) <= 8)
1320 		timeout_add(&sc->bge_rxtimeout, 1);
1321 }
1322 
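/*
 * Illustrative sketch, not compiled, of the if_rxr(9) accounting used
 * above: reserve up to the ring size, consume one reservation per
 * buffer actually posted, and return the rest.  fill_one() stands in
 * for bge_newbuf():
 *
 *	slots = if_rxr_get(&ring, RING_CNT);
 *	while (slots > 0 && fill_one() == 0)
 *		slots--;
 *	if_rxr_put(&ring, slots);
 */
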
1323 void
1324 bge_free_rx_ring_std(struct bge_softc *sc)
1325 {
1326 	bus_dmamap_t dmap;
1327 	struct mbuf *m;
1328 	int i;
1329 
1330 	if (!ISSET(sc->bge_flags, BGE_RXRING_VALID))
1331 		return;
1332 
1333 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1334 		dmap = sc->bge_cdata.bge_rx_std_map[i];
1335 		m = sc->bge_cdata.bge_rx_std_chain[i];
1336 		if (m != NULL) {
1337 			bus_dmamap_sync(sc->bge_dmatag, dmap, 0,
1338 			    dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1339 			bus_dmamap_unload(sc->bge_dmatag, dmap);
1340 			m_freem(m);
1341 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1342 		}
1343 		bus_dmamap_destroy(sc->bge_dmatag, dmap);
1344 		sc->bge_cdata.bge_rx_std_map[i] = NULL;
1345 		bzero(&sc->bge_rdata->bge_rx_std_ring[i],
1346 		    sizeof(struct bge_rx_bd));
1347 	}
1348 
1349 	CLR(sc->bge_flags, BGE_RXRING_VALID);
1350 }
1351 
1352 int
1353 bge_init_rx_ring_jumbo(struct bge_softc *sc)
1354 {
1355 	volatile struct bge_rcb *rcb;
1356 	int i;
1357 
1358 	if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID))
1359 		return (0);
1360 
1361 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1362 		if (bus_dmamap_create(sc->bge_dmatag, BGE_JLEN, 4, BGE_JLEN, 0,
1363 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1364 		    &sc->bge_cdata.bge_rx_jumbo_map[i]) != 0) {
1365 			printf("%s: unable to create dmamap for slot %d\n",
1366 			    sc->bge_dev.dv_xname, i);
1367 			goto uncreate;
1368 		}
1369 		bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i],
1370 		    sizeof(struct bge_ext_rx_bd));
1371 	}
1372 
1373 	sc->bge_jumbo = BGE_JUMBO_RX_RING_CNT - 1;
1374 
1375 	/* lwm must be greater than the replenish threshold */
1376 	if_rxr_init(&sc->bge_jumbo_ring, 17, BGE_JUMBO_RX_RING_CNT);
1377 	bge_fill_rx_ring_jumbo(sc);
1378 
1379 	SET(sc->bge_flags, BGE_JUMBO_RXRING_VALID);
1380 
1381 	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1382 	rcb->bge_maxlen_flags =
1383 	    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1384 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1385 
1386 	return (0);
1387 
1388 uncreate:
1389 	while (i--) {
1390 		bus_dmamap_destroy(sc->bge_dmatag,
1391 		    sc->bge_cdata.bge_rx_jumbo_map[i]);
1392 	}
1393 	return (1);
1394 }
1395 
1396 void
1397 bge_fill_rx_ring_jumbo(struct bge_softc *sc)
1398 {
1399 	int i;
1400 	int post = 0;
1401 	u_int slots;
1402 
1403 	i = sc->bge_jumbo;
1404 	for (slots = if_rxr_get(&sc->bge_jumbo_ring, BGE_JUMBO_RX_RING_CNT);
1405 	    slots > 0; slots--) {
1406 		BGE_INC(i, BGE_JUMBO_RX_RING_CNT);
1407 
1408 		if (bge_newbuf_jumbo(sc, i) != 0)
1409 			break;
1410 
1411 		post = 1;
1412 	}
1413 	if_rxr_put(&sc->bge_jumbo_ring, slots);
1414 
1415 	sc->bge_jumbo = i;
1416 
1417 	if (post)
1418 		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1419 
1420 	/*
1421 	 * bge always needs more than 8 packets on the ring. If we can't do
1422 	 * that now, then try again later.
1423 	 */
1424 	if (if_rxr_inuse(&sc->bge_jumbo_ring) <= 8)
1425 		timeout_add(&sc->bge_rxtimeout, 1);
1426 }
1427 
1428 void
1429 bge_free_rx_ring_jumbo(struct bge_softc *sc)
1430 {
1431 	bus_dmamap_t dmap;
1432 	struct mbuf *m;
1433 	int i;
1434 
1435 	if (!ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID))
1436 		return;
1437 
1438 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1439 		dmap = sc->bge_cdata.bge_rx_jumbo_map[i];
1440 		m = sc->bge_cdata.bge_rx_jumbo_chain[i];
1441 		if (m != NULL) {
1442 			bus_dmamap_sync(sc->bge_dmatag, dmap, 0,
1443 			    dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1444 			bus_dmamap_unload(sc->bge_dmatag, dmap);
1445 			m_freem(m);
1446 			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1447 		}
1448 		bus_dmamap_destroy(sc->bge_dmatag, dmap);
1449 		sc->bge_cdata.bge_rx_jumbo_map[i] = NULL;
1450 		bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i],
1451 		    sizeof(struct bge_ext_rx_bd));
1452 	}
1453 
1454 	CLR(sc->bge_flags, BGE_JUMBO_RXRING_VALID);
1455 }
1456 
1457 void
1458 bge_free_tx_ring(struct bge_softc *sc)
1459 {
1460 	int i;
1461 	struct txdmamap_pool_entry *dma;
1462 
1463 	if (!(sc->bge_flags & BGE_TXRING_VALID))
1464 		return;
1465 
1466 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1467 		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1468 			m_freem(sc->bge_cdata.bge_tx_chain[i]);
1469 			sc->bge_cdata.bge_tx_chain[i] = NULL;
1470 			SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
1471 					    link);
1472 			sc->txdma[i] = 0;
1473 		}
1474 		bzero(&sc->bge_rdata->bge_tx_ring[i],
1475 		    sizeof(struct bge_tx_bd));
1476 	}
1477 
1478 	while ((dma = SLIST_FIRST(&sc->txdma_list))) {
1479 		SLIST_REMOVE_HEAD(&sc->txdma_list, link);
1480 		bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
1481 		free(dma, M_DEVBUF, 0);
1482 	}
1483 
1484 	sc->bge_flags &= ~BGE_TXRING_VALID;
1485 }
1486 
1487 int
1488 bge_init_tx_ring(struct bge_softc *sc)
1489 {
1490 	int i;
1491 	bus_dmamap_t dmamap;
1492 	struct txdmamap_pool_entry *dma;
1493 
1494 	if (sc->bge_flags & BGE_TXRING_VALID)
1495 		return (0);
1496 
1497 	sc->bge_txcnt = 0;
1498 	sc->bge_tx_saved_considx = 0;
1499 
1500 	/* Initialize transmit producer index for host-memory send ring. */
1501 	sc->bge_tx_prodidx = 0;
1502 	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1503 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
1504 		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1505 
1506 	/* NIC-memory send ring not used; initialize to zero. */
1507 	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1508 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
1509 		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1510 
1511 	SLIST_INIT(&sc->txdma_list);
1512 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1513 		if (bus_dmamap_create(sc->bge_dmatag, BGE_JLEN,
1514 		    BGE_NTXSEG, BGE_JLEN, 0, BUS_DMA_NOWAIT,
1515 		    &dmamap))
1516 			return (ENOBUFS);
1517 		if (dmamap == NULL)
1518 			panic("dmamap NULL in bge_init_tx_ring");
1519 		dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
1520 		if (dma == NULL) {
1521 			printf("%s: can't alloc txdmamap_pool_entry\n",
1522 			    sc->bge_dev.dv_xname);
1523 			bus_dmamap_destroy(sc->bge_dmatag, dmamap);
1524 			return (ENOMEM);
1525 		}
1526 		dma->dmamap = dmamap;
1527 		SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
1528 	}
1529 
1530 	sc->bge_flags |= BGE_TXRING_VALID;
1531 
1532 	return (0);
1533 }
1534 
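/*
 * Sketch, not compiled, of how the map pool built above is consumed:
 * the encap path pops a preallocated map and the completion path
 * pushes it back, avoiding bus_dmamap_create() calls at transmit time:
 *
 *	dma = SLIST_FIRST(&sc->txdma_list);
 *	SLIST_REMOVE_HEAD(&sc->txdma_list, link);
 *	(bus_dmamap_load_mbuf(sc->bge_dmatag, dma->dmamap, m, ...))
 */
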
1535 void
1536 bge_iff(struct bge_softc *sc)
1537 {
1538 	struct arpcom		*ac = &sc->arpcom;
1539 	struct ifnet		*ifp = &ac->ac_if;
1540 	struct ether_multi	*enm;
1541 	struct ether_multistep  step;
1542 	u_int8_t		hashes[16];
1543 	u_int32_t		h, rxmode;
1544 
1545 	/* First, zot all the existing filters. */
1546 	rxmode = CSR_READ_4(sc, BGE_RX_MODE) & ~BGE_RXMODE_RX_PROMISC;
1547 	ifp->if_flags &= ~IFF_ALLMULTI;
1548 	memset(hashes, 0x00, sizeof(hashes));
1549 
1550 	if (ifp->if_flags & IFF_PROMISC) {
1551 		ifp->if_flags |= IFF_ALLMULTI;
1552 		rxmode |= BGE_RXMODE_RX_PROMISC;
1553 	} else if (ac->ac_multirangecnt > 0) {
1554 		ifp->if_flags |= IFF_ALLMULTI;
1555 		memset(hashes, 0xff, sizeof(hashes));
1556 	} else {
1557 		ETHER_FIRST_MULTI(step, ac, enm);
1558 		while (enm != NULL) {
1559 			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
1560 
1561 			setbit(hashes, h & 0x7F);
1562 
1563 			ETHER_NEXT_MULTI(step, enm);
1564 		}
1565 	}
1566 
1567 	bus_space_write_raw_region_4(sc->bge_btag, sc->bge_bhandle, BGE_MAR0,
1568 	    hashes, sizeof(hashes));
1569 	CSR_WRITE_4(sc, BGE_RX_MODE, rxmode);
1570 }
1571 
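/*
 * Worked example of the hash above: ether_crc32_le() of the group
 * address yields h, and the low 7 bits select one of 128 filter bits.
 * If (h & 0x7F) == 35, setbit() sets bit 3 of hashes[4], which is then
 * written into the BGE_MAR0 window.
 */
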
1572 void
1573 bge_sig_pre_reset(struct bge_softc *sc, int type)
1574 {
1575 	/* no bge_asf_mode. */
1576 
1577 	if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND)
1578 		bge_ape_driver_state_change(sc, type);
1579 }
1580 
1581 void
1582 bge_sig_post_reset(struct bge_softc *sc, int type)
1583 {
1584 	/* no bge_asf_mode. */
1585 
1586 	if (type == BGE_RESET_SHUTDOWN)
1587 		bge_ape_driver_state_change(sc, type);
1588 }
1589 
1590 void
1591 bge_sig_legacy(struct bge_softc *sc, int type)
1592 {
1593 	/* no bge_asf_mode. */
1594 }
1595 
1596 void
1597 bge_stop_fw(struct bge_softc *sc, int type)
1598 {
1599 	/* no bge_asf_mode. */
1600 }
1601 
1602 u_int32_t
1603 bge_dma_swap_options(struct bge_softc *sc)
1604 {
1605 	u_int32_t dma_options = BGE_DMA_SWAP_OPTIONS;
1606 
1607 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
1608 		dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
1609 		    BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
1610 		    BGE_MODECTL_HTX2B_ENABLE;
1611 	}
1612 
1613 	return (dma_options);
1614 }
1615 
1616 int
1617 bge_phy_addr(struct bge_softc *sc)
1618 {
1619 	struct pci_attach_args *pa = &(sc->bge_pa);
1620 	int phy_addr = 1;
1621 
1622 	switch (BGE_ASICREV(sc->bge_chipid)) {
1623 	case BGE_ASICREV_BCM5717:
1624 	case BGE_ASICREV_BCM5719:
1625 	case BGE_ASICREV_BCM5720:
1626 		phy_addr = pa->pa_function;
1627 		if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) {
1628 			phy_addr += (CSR_READ_4(sc, BGE_SGDIG_STS) &
1629 			    BGE_SGDIGSTS_IS_SERDES) ? 8 : 1;
1630 		} else {
1631 			phy_addr += (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
1632 			    BGE_CPMU_PHY_STRAP_IS_SERDES) ? 8 : 1;
1633 		}
1634 	}
1635 
1636 	return (phy_addr);
1637 }
1638 
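/*
 * Worked example: on a BCM5719, PCI function 1 with a copper PHY gets
 * address 1 + 1 = 2; the same function strapped for SerDes gets
 * 1 + 8 = 9.  Chips outside the 5717/5719/5720 group use the default
 * address of 1.
 */
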
1639 /*
1640  * Do endian, PCI and DMA initialization.
1641  */
1642 void
1643 bge_chipinit(struct bge_softc *sc)
1644 {
1645 	struct pci_attach_args	*pa = &(sc->bge_pa);
1646 	u_int32_t dma_rw_ctl, misc_ctl, mode_ctl;
1647 	int i;
1648 
1649 	/* Set endianness before we access any non-PCI registers. */
1650 	misc_ctl = BGE_INIT;
1651 	if (sc->bge_flags & BGE_TAGGED_STATUS)
1652 		misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1653 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
1654 	    misc_ctl);
1655 
1656 	/*
1657 	 * Clear the MAC statistics block in the NIC's
1658 	 * internal memory.
1659 	 */
1660 	for (i = BGE_STATS_BLOCK;
1661 	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1662 		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
1663 
1664 	for (i = BGE_STATUS_BLOCK;
1665 	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1666 		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
1667 
1668 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 ||
1669 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) {
1670 		/*
1671 		 * For the 57766 and non-Ax versions of the 57765, bootcode
1672 		 * needs to set up the PCIE Fast Training Sequence (FTS)
1673 		 * value to prevent transmit hangs.
1674 		 */
1675 		if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_57765_AX) {
1676 		    CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL,
1677 			CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL) |
1678 			BGE_CPMU_PADRNG_CTL_RDIV2);
1679 		}
1680 	}
1681 
1682 	/*
1683 	 * Set up the PCI DMA control register.
1684 	 */
1685 	dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1686 	    BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1687 
1688 	if (sc->bge_flags & BGE_PCIE) {
1689 		if (sc->bge_mps >= 256)
1690 			dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1691 		else
1692 			dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1693 	} else if (sc->bge_flags & BGE_PCIX) {
1694 		/* PCI-X bus */
1695 		if (BGE_IS_5714_FAMILY(sc)) {
1696 			/* 256 bytes for read and write. */
1697 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1698 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1699 
1700 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
1701 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1702 			else
1703 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1704 		} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
1705 			/* 1536 bytes for read, 384 bytes for write. */
1706 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1707 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1708 		} else {
1709 			/* 384 bytes for read and write. */
1710 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1711 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1712 			    (0x0F);
1713 		}
1714 
1715 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
1716 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
1717 			u_int32_t tmp;
1718 
1719 			/* Set ONEDMA_ATONCE for hardware workaround. */
1720 			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1721 			if (tmp == 6 || tmp == 7)
1722 				dma_rw_ctl |=
1723 				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1724 
1725 			/* Set PCI-X DMA write workaround. */
1726 			dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1727 		}
1728 	} else {
1729 		/* Conventional PCI bus: 256 bytes for read and write. */
1730 		dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1731 		    BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1732 
1733 		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
1734 		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
1735 			dma_rw_ctl |= 0x0F;
1736 	}
1737 
1738 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
1739 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
1740 		dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1741 		    BGE_PCIDMARWCTL_ASRT_ALL_BE;
1742 
1743 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
1744 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
1745 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1746 
1747 	if (BGE_IS_5717_PLUS(sc)) {
1748 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1749 		if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
1750 			dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1751 
1752 		/*
1753 		 * Enable HW workaround for controllers that misinterpret
1754 		 * a status tag update and leave interrupts permanently
1755 		 * disabled.
1756 		 */
1757 		if (!BGE_IS_57765_PLUS(sc) &&
1758 		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
1759 		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5762)
1760 			dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1761 	}
1762 
1763 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);
1764 
1765 	/*
1766 	 * Set up general mode register.
1767 	 */
1768 	mode_ctl = bge_dma_swap_options(sc);
1769 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
1770 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
1771 		/* Retain Host-2-BMC settings written by APE firmware. */
1772 		mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
1773 		    (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
1774 		    BGE_MODECTL_WORDSWAP_B2HRX_DATA |
1775 		    BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
1776 	}
1777 	mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1778 	    BGE_MODECTL_TX_NO_PHDR_CSUM;
1779 
1780 	/*
1781 	 * The BCM5701 B5 has a bug causing data corruption when using
1782 	 * 64-bit DMA reads, which can be terminated early and then
1783 	 * completed later as 32-bit accesses, in combination with
1784 	 * certain bridges.
1785 	 */
1786 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
1787 	    sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1788 		mode_ctl |= BGE_MODECTL_FORCE_PCI32;
1789 
1790 	CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1791 
1792 	/*
1793 	 * Disable memory write invalidate.  Apparently it is not supported
1794 	 * properly by these devices.
1795 	 */
1796 	PCI_CLRBIT(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
1797 	    PCI_COMMAND_INVALIDATE_ENABLE);
1798 
1799 #ifdef __brokenalpha__
1800 	/*
1801 	 * Must ensure that we do not cross an 8K byte boundary
1802 	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
1803 	 * restriction on some Alpha platforms with early revision
1804 	 * 21174 PCI chipsets, such as the AlphaPC 164lx.
1805 	 */
1806 	PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1807 	    BGE_PCI_READ_BNDRY_1024);
1808 #endif
1809 
1810 	/* Set the timer prescaler (always 66MHz) */
1811 	CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1812 
1813 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1814 		DELAY(40);	/* XXX */
1815 
1816 		/* Put PHY into ready state */
1817 		BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1818 		CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1819 		DELAY(40);
1820 	}
1821 }
1822 
1823 int
1824 bge_blockinit(struct bge_softc *sc)
1825 {
1826 	volatile struct bge_rcb		*rcb;
1827 	vaddr_t			rcb_addr;
1828 	bge_hostaddr		taddr;
1829 	u_int32_t		dmactl, rdmareg, mimode, val;
1830 	int			i, limit;
1831 
1832 	/*
1833 	 * Initialize the memory window pointer register so that
1834 	 * we can access the first 32K of internal NIC RAM. This will
1835 	 * allow us to set up the TX send ring RCBs and the RX return
1836 	 * ring RCBs, plus other things which live in NIC memory.
1837 	 */
1838 	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1839 
1840 	/* Configure mbuf memory pool */
1841 	if (!BGE_IS_5705_PLUS(sc)) {
1842 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1843 		    BGE_BUFFPOOL_1);
1844 
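		/*
		 * The BCM5704 is a dual-MAC chip, so it presumably has
		 * to split internal memory between its two ports; hence
		 * the smaller 64KB mbuf pool below vs. 96KB elsewhere.
		 */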
1845 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
1846 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1847 		else
1848 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1849 
1850 		/* Configure DMA resource pool */
1851 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1852 		    BGE_DMA_DESCRIPTORS);
1853 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1854 	}
1855 
1856 	/* Configure mbuf pool watermarks */
1857 	/* new Broadcom docs strongly recommend these: */
1858 	if (BGE_IS_5717_PLUS(sc)) {
1859 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1860 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1861 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1862 	} else if (BGE_IS_5705_PLUS(sc)) {
1863 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1864 
1865 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1866 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1867 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1868 		} else {
1869 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1870 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1871 		}
1872 	} else {
1873 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1874 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1875 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1876 	}
1877 
1878 	/* Configure DMA resource watermarks */
1879 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1880 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1881 
1882 	/* Enable buffer manager */
1883 	val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
1884 	/*
1885 	 * Change the arbitration algorithm of TXMBUF read request to
1886 	 * round-robin instead of priority-based for BCM5719.  When
1887 	 * TXFIFO is almost empty, RDMA will hold its request until
1888 	 * TXFIFO is not almost empty.
1889 	 */
1890 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
1891 		val |= BGE_BMANMODE_NO_TX_UNDERRUN;
1892 	CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1893 
1894 	/* Poll for buffer manager start indication */
1895 	for (i = 0; i < 2000; i++) {
1896 		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1897 			break;
1898 		DELAY(10);
1899 	}
1900 
1901 	if (i == 2000) {
1902 		printf("%s: buffer manager failed to start\n",
1903 		    sc->bge_dev.dv_xname);
1904 		return (ENXIO);
1905 	}
1906 
1907 	/* Enable flow-through queues */
1908 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1909 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1910 
1911 	/* Wait until queue initialization is complete */
1912 	for (i = 0; i < 2000; i++) {
1913 		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1914 			break;
1915 		DELAY(10);
1916 	}
1917 
1918 	if (i == 2000) {
1919 		printf("%s: flow-through queue init failed\n",
1920 		    sc->bge_dev.dv_xname);
1921 		return (ENXIO);
1922 	}
1923 
1924 	/*
1925 	 * Summary of rings supported by the controller:
1926 	 *
1927 	 * Standard Receive Producer Ring
1928 	 * - This ring is used to feed receive buffers for "standard"
1929 	 *   sized frames (typically 1536 bytes) to the controller.
1930 	 *
1931 	 * Jumbo Receive Producer Ring
1932 	 * - This ring is used to feed receive buffers for jumbo sized
1933 	 *   frames (i.e. anything bigger than the "standard" frames)
1934 	 *   to the controller.
1935 	 *
1936 	 * Mini Receive Producer Ring
1937 	 * - This ring is used to feed receive buffers for "mini"
1938 	 *   sized frames to the controller.
1939 	 * - This feature required external memory for the controller
1940 	 *   but was never used in a production system.  Should always
1941 	 *   be disabled.
1942 	 *
1943 	 * Receive Return Ring
1944 	 * - After the controller has placed an incoming frame into a
1945 	 *   receive buffer, that buffer is moved into a receive return
1946 	 *   ring.  The driver is then responsible for passing the
1947 	 *   buffer up to the stack.  Many versions of the controller
1948 	 *   support multiple RR rings.
1949 	 *
1950 	 * Send Ring
1951 	 * - This ring is used for outgoing frames.  Many versions of
1952 	 *   the controller support multiple send rings.
1953 	 */
1954 
1955 	/* Initialize the standard RX ring control block */
1956 	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
1957 	BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
1958 	if (BGE_IS_5717_PLUS(sc)) {
1959 		/*
1960 		 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1961 		 * Bits 15-2 : Maximum RX frame size
1962 		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
1963 		 * Bit 0     : Reserved
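		 *
		 * The maximum frame size is shifted left by two below so
		 * that it lands in bits 15-2.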
1964 		 */
1965 		rcb->bge_maxlen_flags =
1966 		    BGE_RCB_MAXLEN_FLAGS(512, ETHER_MAX_DIX_LEN << 2);
1967 	} else if (BGE_IS_5705_PLUS(sc)) {
1968 		/*
1969 		 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1970 		 * Bits 15-2 : Reserved (should be 0)
1971 		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
1972 		 * Bit 0     : Reserved
1973 		 */
1974 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1975 	} else {
1976 		/*
1977 		 * Ring size is always XXX entries
1978 		 * Bits 31-16: Maximum RX frame size
1979 		 * Bits 15-2 : Reserved (should be 0)
1980 		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
1981 		 * Bit 0     : Reserved
1982 		 */
1983 		rcb->bge_maxlen_flags =
1984 		    BGE_RCB_MAXLEN_FLAGS(ETHER_MAX_DIX_LEN, 0);
1985 	}
1986 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
1987 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
1988 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
1989 		rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1990 	else
1991 		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1992 	/* Write the standard receive producer ring control block. */
1993 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1994 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1995 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1996 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1997 
1998 	/* Reset the standard receive producer ring producer index. */
1999 	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
2000 
2001 	/*
2002 	 * Initialize the Jumbo RX ring control block
2003 	 * We set the 'ring disabled' bit in the flags
2004 	 * field until we're actually ready to start
2005 	 * using this ring (i.e. once we set the MTU
2006 	 * high enough to require it).
2007 	 */
2008 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
2009 		rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
2010 		BGE_HOSTADDR(rcb->bge_hostaddr,
2011 		    BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
2012 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
2013 		    BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
2014 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2015 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2016 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2017 			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
2018 		else
2019 			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
2020 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
2021 		    rcb->bge_hostaddr.bge_addr_hi);
2022 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
2023 		    rcb->bge_hostaddr.bge_addr_lo);
2024 		/* Program the jumbo receive producer ring RCB parameters. */
2025 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
2026 		    rcb->bge_maxlen_flags);
2027 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
2028 		/* Reset the jumbo receive producer ring producer index. */
2029 		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
2030 	}
2031 
2032 	/* Disable the mini receive producer ring RCB. */
2033 	if (BGE_IS_5700_FAMILY(sc)) {
2034 		/* Set up dummy disabled mini ring RCB */
2035 		rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
2036 		rcb->bge_maxlen_flags =
2037 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
2038 		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
2039 		    rcb->bge_maxlen_flags);
2040 		/* Reset the mini receive producer ring producer index. */
2041 		bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
2042 
2043 		/* XXX why? */
2044 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2045 		    offsetof(struct bge_ring_data, bge_info),
2046 		    sizeof (struct bge_gib),
2047 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2048 	}
2049 
2050 	/* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
2051 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2052 		if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
2053 		    sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
2054 		    sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
2055 			CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
2056 			    (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
2057 	}
2058 	/*
2059 	 * The BD ring replenish thresholds control how often the
2060 	 * hardware fetches new BDs from the producer rings in host
2061 	 * memory.  Setting the value too low on a busy system can
2062 	 * starve the hardware and reduce the throughput.
2063 	 *
2064 	 * Set the BD ring replenish thresholds. The recommended
2065 	 * values are 1/8th the number of descriptors allocated to
2066 	 * each ring, but since we try to avoid filling the entire
2067 	 * ring we set these to the minimal value of 8.  This needs to
2068 	 * be done on several of the supported chip revisions anyway,
2069 	 * to work around HW bugs.
2070 	 */
2071 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, 8);
2072 	if (BGE_IS_JUMBO_CAPABLE(sc))
2073 		CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 8);
2074 
2075 	if (BGE_IS_5717_PLUS(sc)) {
2076 		CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4);
2077 		CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4);
2078 	}
2079 
2080 	/*
2081 	 * Disable all send rings by setting the 'ring disabled' bit
2082 	 * in the flags field of all the TX send ring control blocks,
2083 	 * located in NIC memory.
2084 	 */
2085 	if (BGE_IS_5700_FAMILY(sc)) {
2086 		/* 5700 to 5704 had 16 send rings. */
2087 		limit = BGE_TX_RINGS_EXTSSRAM_MAX;
2088 	} else if (BGE_IS_57765_PLUS(sc) ||
2089 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
2090 		limit = 2;
2091 	else if (BGE_IS_5717_PLUS(sc))
2092 		limit = 4;
2093 	else
2094 		limit = 1;
2095 	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2096 	for (i = 0; i < limit; i++) {
2097 		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2098 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
2099 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
2100 		rcb_addr += sizeof(struct bge_rcb);
2101 	}
2102 
2103 	/* Configure send ring RCB 0 (we use only the first ring) */
2104 	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2105 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
2106 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2107 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2108 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2109 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2110 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2111 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, BGE_SEND_RING_5717);
2112 	else
2113 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
2114 		    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
2115 	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2116 	    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
2117 
2118 	/*
2119 	 * Disable all receive return rings by setting the
2120 	 * 'ring disabled' bit in the flags field of all the receive
2121 	 * return ring control blocks, located in NIC memory.
2122 	 */
2123 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2124 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2125 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
2126 		/* Should be 17, use 16 until we get an SRAM map. */
2127 		limit = 16;
2128 	} else if (BGE_IS_5700_FAMILY(sc))
2129 		limit = BGE_RX_RINGS_MAX;
2130 	else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2131 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762 ||
2132 	    BGE_IS_57765_PLUS(sc))
2133 		limit = 4;
2134 	else
2135 		limit = 1;
2136 	/* Disable all receive return rings */
2137 	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2138 	for (i = 0; i < limit; i++) {
2139 		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
2140 		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
2141 		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2142 		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
2143 			BGE_RCB_FLAG_RING_DISABLED));
2144 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
2145 		bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
2146 		    (i * (sizeof(u_int64_t))), 0);
2147 		rcb_addr += sizeof(struct bge_rcb);
2148 	}
2149 
2150 	/*
2151 	 * Set up receive return ring 0.  Note that the NIC address
2152 	 * for RX return rings is 0x0.  The return rings live entirely
2153 	 * within the host, so the nicaddr field in the RCB isn't used.
2154 	 */
2155 	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2156 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
2157 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2158 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2159 	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
2160 	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2161 	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
2162 
2163 	/* Set random backoff seed for TX */
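	/* (derived from the MAC address, so interfaces sharing a
	 *  segment back off differently after a collision) */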
2164 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
2165 	    (sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
2166 	     sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
2167 	     sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5]) &
2168 	    BGE_TX_BACKOFF_SEED_MASK);
2169 
2170 	/* Set inter-packet gap */
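	/* (0x2620 packs the standard slot time and inter-packet gap
	 *  timings; on BCM5720/5762 the firmware-programmed jumbo frame
	 *  length and count-down fields are preserved below) */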
2171 	val = 0x2620;
2172 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
2173 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
2174 		val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
2175 		    (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
2176 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
2177 
2178 	/*
2179 	 * Specify which ring to use for packets that don't match
2180 	 * any RX rules.
2181 	 */
2182 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
2183 
2184 	/*
2185 	 * Configure number of RX lists. One interrupt distribution
2186 	 * list, sixteen active lists, one bad frames class.
2187 	 */
2188 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
2189 
2190 	/* Initialize RX list placement stats mask. */
2191 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007BFFFF);
2192 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
2193 
2194 	/* Disable host coalescing until we get it set up */
2195 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
2196 
2197 	/* Poll to make sure it's shut down. */
2198 	for (i = 0; i < 2000; i++) {
2199 		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
2200 			break;
2201 		DELAY(10);
2202 	}
2203 
2204 	if (i == 2000) {
2205 		printf("%s: host coalescing engine failed to idle\n",
2206 		    sc->bge_dev.dv_xname);
2207 		return (ENXIO);
2208 	}
2209 
2210 	/* Set up host coalescing defaults */
2211 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
2212 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
2213 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
2214 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
2215 	if (!(BGE_IS_5705_PLUS(sc))) {
2216 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
2217 		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
2218 	}
2219 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
2220 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
2221 
2222 	/* Set up address of statistics block */
2223 	if (!(BGE_IS_5705_PLUS(sc))) {
2224 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0);
2225 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
2226 			    BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
2227 
2228 		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
2229 		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
2230 		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
2231 	}
2232 
2233 	/* Set up address of status block */
2234 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
2235 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
2236 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
2237 
2238 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
2239 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
2240 
2241 	/* Set up status block size. */
2242 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 &&
2243 	    sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
2244 		val = BGE_STATBLKSZ_FULL;
2245 		bzero(&sc->bge_rdata->bge_status_block, BGE_STATUS_BLK_SZ);
2246 	} else {
2247 		val = BGE_STATBLKSZ_32BYTE;
2248 		bzero(&sc->bge_rdata->bge_status_block, 32);
2249 	}
2250 
2251 	/* Turn on host coalescing state machine */
2252 	CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
2253 
2254 	/* Turn on RX BD completion state machine and enable attentions */
2255 	CSR_WRITE_4(sc, BGE_RBDC_MODE,
2256 	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
2257 
2258 	/* Turn on RX list placement state machine */
2259 	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2260 
2261 	/* Turn on RX list selector state machine. */
2262 	if (!(BGE_IS_5705_PLUS(sc)))
2263 		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2264 
2265 	val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
2266 	    BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
2267 	    BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
2268 	    BGE_MACMODE_FRMHDR_DMA_ENB;
2269 
2270 	if (sc->bge_flags & BGE_FIBER_TBI)
2271 		val |= BGE_PORTMODE_TBI;
2272 	else if (sc->bge_flags & BGE_FIBER_MII)
2273 		val |= BGE_PORTMODE_GMII;
2274 	else
2275 		val |= BGE_PORTMODE_MII;
2276 
2277 	/* Allow APE to send/receive frames. */
2278 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
2279 		val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
2280 
2281 	/* Turn on DMA, clear stats */
2282 	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2283 	DELAY(40);
2284 
2285 	/* Set misc. local control, enable interrupts on attentions */
2286 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
2287 
2288 #ifdef notdef
2289 	/* Assert GPIO pins for PHY reset */
2290 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
2291 	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
2292 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
2293 	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
2294 #endif
2295 
2296 	/* Turn on DMA completion state machine */
2297 	if (!(BGE_IS_5705_PLUS(sc)))
2298 		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2299 
2300 	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
2301 
2302 	/* Enable host coalescing bug fix. */
2303 	if (BGE_IS_5755_PLUS(sc))
2304 		val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2305 
2306 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)
2307 		val |= BGE_WDMAMODE_BURST_ALL_DATA;
2308 
2309 	/* Turn on write DMA state machine */
2310 	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
2311 	DELAY(40);
2312 
2313 	val = BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS;
2314 
2315 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717)
2316 		val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
2317 
2318 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2319 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2320 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
2321 		val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2322 		       BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2323 		       BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2324 
2325 	if (sc->bge_flags & BGE_PCIE)
2326 		val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2327 
2328 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
2329 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2330 		val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
2331 		    BGE_RDMAMODE_H2BNC_VLAN_DET;
2332 		/*
2333 		 * Allow multiple outstanding read requests from
2334 		 * non-LSO read DMA engine.
2335 		 */
2336 		val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
2337 	}
2338 
2339 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2340 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2341 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2342 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780 ||
2343 	    BGE_IS_5717_PLUS(sc) || BGE_IS_57765_PLUS(sc)) {
2344 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
2345 			rdmareg = BGE_RDMA_RSRVCTRL_REG2;
2346 		else
2347 			rdmareg = BGE_RDMA_RSRVCTRL;
2348 		dmactl = CSR_READ_4(sc, rdmareg);
2349 		/*
2350 		 * Adjust tx margin to prevent TX data corruption and
2351 		 * fix internal FIFO overflow.
2352 		 */
2353 		if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 ||
2354 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2355 			dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2356 			    BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
2357 			    BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
2358 			dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2359 			    BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
2360 			    BGE_RDMA_RSRVCTRL_TXMRGN_320B;
2361 		}
2362 		/*
2363 		 * Enable fix for read DMA FIFO overruns.
2364 		 * The fix is to limit the number of RX BDs
2365 	 * the hardware would fetch at a time.
2366 		 */
2367 		CSR_WRITE_4(sc, rdmareg, dmactl |
2368 		    BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2369 	}
2370 
2371 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) {
2372 		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2373 		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2374 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2375 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2376 	} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
2377 		/*
2378 		 * Allow 4KB burst length reads for non-LSO frames.
2379 		 * Enable 512B burst length reads for buffer descriptors.
2380 		 */
2381 		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2382 		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2383 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
2384 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2385 	} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2386 		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2,
2387 		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) |
2388 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2389 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2390 	}
2391 
2392 	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2393 	DELAY(40);
2394 
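	/*
	 * BCM5719/5720 TXMBUF workaround: scan the first half of the
	 * RDMA length registers and, if any channel reports a frame
	 * longer than ETHER_MAX_LEN in either halfword, set the
	 * chip-specific TX length workaround bit.
	 */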
2395 	if (sc->bge_flags & BGE_RDMA_BUG) {
2396 		for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) {
2397 			val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4);
2398 			if ((val & 0xFFFF) > ETHER_MAX_LEN)
2399 				break;
2400 			if (((val >> 16) & 0xFFFF) > ETHER_MAX_LEN)
2401 				break;
2402 		}
2403 		if (i != BGE_NUM_RDMA_CHANNELS / 2) {
2404 			val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
2405 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
2406 				val |= BGE_RDMA_TX_LENGTH_WA_5719;
2407 			else
2408 				val |= BGE_RDMA_TX_LENGTH_WA_5720;
2409 			CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
2410 		}
2411 	}
2412 
2413 	/* Turn on RX data completion state machine */
2414 	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2415 
2416 	/* Turn on RX BD initiator state machine */
2417 	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2418 
2419 	/* Turn on RX data and RX BD initiator state machine */
2420 	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2421 
2422 	/* Turn on Mbuf cluster free state machine */
2423 	if (!BGE_IS_5705_PLUS(sc))
2424 		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2425 
2426 	/* Turn on send BD completion state machine */
2427 	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2428 
2429 	/* Turn on send data completion state machine */
2430 	val = BGE_SDCMODE_ENABLE;
2431 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
2432 		val |= BGE_SDCMODE_CDELAY;
2433 	CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2434 
2435 	/* Turn on send data initiator state machine */
2436 	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2437 
2438 	/* Turn on send BD initiator state machine */
2439 	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2440 
2441 	/* Turn on send BD selector state machine */
2442 	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2443 
2444 	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007BFFFF);
2445 	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2446 	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
2447 
2448 	/* ack/clear link change events */
2449 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2450 	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2451 	    BGE_MACSTAT_LINK_CHANGED);
2452 
2453 	/* Enable PHY auto polling (for MII/GMII only) */
2454 	if (sc->bge_flags & BGE_FIBER_TBI) {
2455 		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2456 	} else {
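		/* Chips with a CPMU use a constant 500kHz MDIO clock. */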
2457 		if ((sc->bge_flags & BGE_CPMU_PRESENT) != 0)
2458 			mimode = BGE_MIMODE_500KHZ_CONST;
2459 		else
2460 			mimode = BGE_MIMODE_BASE;
2461 		if (BGE_IS_5700_FAMILY(sc) ||
2462 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705) {
2463 			mimode |= BGE_MIMODE_AUTOPOLL;
2464 			BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
2465 		}
2466 		mimode |= BGE_MIMODE_PHYADDR(sc->bge_phy_addr);
2467 		CSR_WRITE_4(sc, BGE_MI_MODE, mimode);
2468 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700)
2469 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2470 			    BGE_EVTENB_MI_INTERRUPT);
2471 	}
2472 
2473 	/*
2474 	 * Clear any pending link state attention.
2475 	 * Otherwise some link state change events may be lost until attention
2476 	 * is cleared by the bge_intr() -> bge_link_upd() sequence.
2477 	 * It's not necessary on newer BCM chips - perhaps enabling link
2478 	 * state change attentions implies clearing pending attention.
2479 	 */
2480 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2481 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2482 	    BGE_MACSTAT_LINK_CHANGED);
2483 
2484 	/* Enable link state change attentions. */
2485 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2486 
2487 	return (0);
2488 }
2489 
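/*
 * Map a chip ID to a human-readable revision name.  Look for an
 * exact chip ID match first, then fall back to the major ASIC
 * revision.
 */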
2490 const struct bge_revision *
2491 bge_lookup_rev(u_int32_t chipid)
2492 {
2493 	const struct bge_revision *br;
2494 
2495 	for (br = bge_revisions; br->br_name != NULL; br++) {
2496 		if (br->br_chipid == chipid)
2497 			return (br);
2498 	}
2499 
2500 	for (br = bge_majorrevs; br->br_name != NULL; br++) {
2501 		if (br->br_chipid == BGE_ASICREV(chipid))
2502 			return (br);
2503 	}
2504 
2505 	return (NULL);
2506 }
2507 
2508 int
2509 bge_can_use_msi(struct bge_softc *sc)
2510 {
2511 	int can_use_msi = 0;
2512 
2513 	switch (BGE_ASICREV(sc->bge_chipid)) {
2514 	case BGE_ASICREV_BCM5714_A0:
2515 	case BGE_ASICREV_BCM5714:
2516 		/*
2517 		 * Apparently, MSI doesn't work when these chips are
2518 		 * configured in single-port mode.
2519 		 */
2520 		break;
2521 	case BGE_ASICREV_BCM5750:
2522 		if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_AX &&
2523 		    BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_BX)
2524 			can_use_msi = 1;
2525 		break;
2526 	default:
2527 		if (BGE_IS_575X_PLUS(sc))
2528 			can_use_msi = 1;
2529 	}
2530 
2531 	return (can_use_msi);
2532 }
2533 
2534 /*
2535  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2536  * against our list and return its name if we find a match. Note
2537  * that since the Broadcom controller contains VPD support, we
2538  * can get the device name string from the controller itself instead
2539  * of the compiled-in string. This is a little slow, but it guarantees
2540  * we'll always announce the right product name.
2541  */
2542 int
2543 bge_probe(struct device *parent, void *match, void *aux)
2544 {
2545 	return (pci_matchbyid(aux, bge_devices, nitems(bge_devices)));
2546 }
2547 
2548 void
2549 bge_attach(struct device *parent, struct device *self, void *aux)
2550 {
2551 	struct bge_softc	*sc = (struct bge_softc *)self;
2552 	struct pci_attach_args	*pa = aux;
2553 	pci_chipset_tag_t	pc = pa->pa_pc;
2554 	const struct bge_revision *br;
2555 	pcireg_t		pm_ctl, memtype, subid, reg;
2556 	pci_intr_handle_t	ih;
2557 	const char		*intrstr = NULL;
2558 	bus_size_t		size, apesize;
2559 	bus_dma_segment_t	seg;
2560 	int			rseg, gotenaddr = 0;
2561 	u_int32_t		hwcfg = 0;
2562 	u_int32_t		mac_addr = 0;
2563 	u_int32_t		misccfg;
2564 	struct ifnet		*ifp;
2565 	caddr_t			kva;
2566 #ifdef __sparc64__
2567 	char			name[32];
2568 #endif
2569 
2570 	sc->bge_pa = *pa;
2571 
2572 	subid = pci_conf_read(pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
2573 
2574 	/*
2575 	 * Map control/status registers.
2576 	 */
2577 	DPRINTFN(5, ("Map control/status regs\n"));
2578 
2579 	DPRINTFN(5, ("pci_mapreg_map\n"));
2580 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0);
2581 	if (pci_mapreg_map(pa, BGE_PCI_BAR0, memtype, 0, &sc->bge_btag,
2582 	    &sc->bge_bhandle, NULL, &size, 0)) {
2583 		printf(": can't find mem space\n");
2584 		return;
2585 	}
2586 
2587 	/*
2588 	 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
2589 	 * can clobber the chip's PCI config-space power control registers,
2590 	 * leaving the card in D3 powersave state.
2591 	 * We do not have memory-mapped registers in this state,
2592 	 * so force the device into D0 state before starting initialization.
2593 	 */
2594 	pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
2595 	pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
2596 	pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */
2597 	pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
2598 	DELAY(1000);	/* 27 usec is allegedly sufficient */
2599 
2600 	/*
2601 	 * Save ASIC rev.
2602 	 */
2603 	sc->bge_chipid =
2604 	     (pci_conf_read(pc, pa->pa_tag, BGE_PCI_MISC_CTL)
2605 	      >> BGE_PCIMISCCTL_ASICREV_SHIFT);
2606 
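	/*
	 * Newer chips report BGE_ASICREV_USE_PRODID_REG here; for
	 * those, the real chip ID must be read from a product-ID
	 * register whose location depends on the controller generation.
	 */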
2607 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2608 		switch (PCI_PRODUCT(pa->pa_id)) {
2609 		case PCI_PRODUCT_BROADCOM_BCM5717:
2610 		case PCI_PRODUCT_BROADCOM_BCM5718:
2611 		case PCI_PRODUCT_BROADCOM_BCM5719:
2612 		case PCI_PRODUCT_BROADCOM_BCM5720:
2613 		case PCI_PRODUCT_BROADCOM_BCM5725:
2614 		case PCI_PRODUCT_BROADCOM_BCM5727:
2615 		case PCI_PRODUCT_BROADCOM_BCM5762:
2616 		case PCI_PRODUCT_BROADCOM_BCM57764:
2617 		case PCI_PRODUCT_BROADCOM_BCM57767:
2618 		case PCI_PRODUCT_BROADCOM_BCM57787:
2619 			sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2620 			    BGE_PCI_GEN2_PRODID_ASICREV);
2621 			break;
2622 		case PCI_PRODUCT_BROADCOM_BCM57761:
2623 		case PCI_PRODUCT_BROADCOM_BCM57762:
2624 		case PCI_PRODUCT_BROADCOM_BCM57765:
2625 		case PCI_PRODUCT_BROADCOM_BCM57766:
2626 		case PCI_PRODUCT_BROADCOM_BCM57781:
2627 		case PCI_PRODUCT_BROADCOM_BCM57782:
2628 		case PCI_PRODUCT_BROADCOM_BCM57785:
2629 		case PCI_PRODUCT_BROADCOM_BCM57786:
2630 		case PCI_PRODUCT_BROADCOM_BCM57791:
2631 		case PCI_PRODUCT_BROADCOM_BCM57795:
2632 			sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2633 			    BGE_PCI_GEN15_PRODID_ASICREV);
2634 			break;
2635 		default:
2636 			sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2637 			    BGE_PCI_PRODID_ASICREV);
2638 			break;
2639 		}
2640 	}
2641 
2642 	sc->bge_phy_addr = bge_phy_addr(sc);
2643 
2644 	printf(", ");
2645 	br = bge_lookup_rev(sc->bge_chipid);
2646 	if (br == NULL)
2647 		printf("unknown ASIC (0x%x)", sc->bge_chipid);
2648 	else
2649 		printf("%s (0x%x)", br->br_name, sc->bge_chipid);
2650 
2651 	/*
2652 	 * PCI Express or PCI-X controller check.
2653 	 */
2654 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
2655 	    &sc->bge_expcap, NULL) != 0) {
2656 		/* Extract supported maximum payload size. */
2657 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, sc->bge_expcap +
2658 		    PCI_PCIE_DCAP);
2659 		sc->bge_mps = 128 << (reg & 0x7);
2660 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2661 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2662 			sc->bge_expmrq = (fls(2048) - 8) << 12;
2663 		else
2664 			sc->bge_expmrq = (fls(4096) - 8) << 12;
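		/*
		 * (The PCIe maximum read request size is encoded as a
		 * power of two, 128 << n, with n stored in bits 14:12
		 * of the device control register, hence
		 * (fls(size) - 8) << 12.)
		 */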
2665 		/* Disable PCIe Active State Power Management (ASPM). */
2666 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
2667 		    sc->bge_expcap + PCI_PCIE_LCSR);
2668 		reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1);
2669 		pci_conf_write(pa->pa_pc, pa->pa_tag,
2670 		    sc->bge_expcap + PCI_PCIE_LCSR, reg);
2671 		sc->bge_flags |= BGE_PCIE;
2672 	} else {
2673 		if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
2674 		    BGE_PCISTATE_PCI_BUSMODE) == 0)
2675 			sc->bge_flags |= BGE_PCIX;
2676 	}
2677 
2678 	/*
2679 	 * SEEPROM check.
2680 	 */
2681 #ifdef __sparc64__
2682 	/*
2683 	 * Onboard interfaces on UltraSPARC systems generally don't
2684 	 * have a SEEPROM fitted.  These interfaces, and cards that
2685 	 * have FCode, are named "network" by the PROM, whereas cards
2686 	 * without FCode show up as "ethernet".  Since we don't really
2687 	 * need the information from the SEEPROM on cards that have
2688 	 * FCode it's fine to pretend they don't have one.
2689 	 */
2690 	if (OF_getprop(PCITAG_NODE(pa->pa_tag), "name", name,
2691 	    sizeof(name)) > 0 && strcmp(name, "network") == 0)
2692 		sc->bge_flags |= BGE_NO_EEPROM;
2693 #endif
2694 
2695 	/* Save chipset family. */
2696 	switch (BGE_ASICREV(sc->bge_chipid)) {
2697 	case BGE_ASICREV_BCM5762:
2698 	case BGE_ASICREV_BCM57765:
2699 	case BGE_ASICREV_BCM57766:
2700 		sc->bge_flags |= BGE_57765_PLUS;
2701 		/* FALLTHROUGH */
2702 	case BGE_ASICREV_BCM5717:
2703 	case BGE_ASICREV_BCM5719:
2704 	case BGE_ASICREV_BCM5720:
2705 		sc->bge_flags |= BGE_5717_PLUS | BGE_5755_PLUS | BGE_575X_PLUS |
2706 		    BGE_5705_PLUS;
2707 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2708 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
2709 			/*
2710 			 * Enable workaround for DMA engine miscalculation
2711 			 * of TXMBUF available space.
2712 			 */
2713 			sc->bge_flags |= BGE_RDMA_BUG;
2714 		}
2715 		break;
2716 	case BGE_ASICREV_BCM5755:
2717 	case BGE_ASICREV_BCM5761:
2718 	case BGE_ASICREV_BCM5784:
2719 	case BGE_ASICREV_BCM5785:
2720 	case BGE_ASICREV_BCM5787:
2721 	case BGE_ASICREV_BCM57780:
2722 		sc->bge_flags |= BGE_5755_PLUS | BGE_575X_PLUS | BGE_5705_PLUS;
2723 		break;
2724 	case BGE_ASICREV_BCM5700:
2725 	case BGE_ASICREV_BCM5701:
2726 	case BGE_ASICREV_BCM5703:
2727 	case BGE_ASICREV_BCM5704:
2728 		sc->bge_flags |= BGE_5700_FAMILY | BGE_JUMBO_CAPABLE;
2729 		break;
2730 	case BGE_ASICREV_BCM5714_A0:
2731 	case BGE_ASICREV_BCM5780:
2732 	case BGE_ASICREV_BCM5714:
2733 		sc->bge_flags |= BGE_5714_FAMILY;
2734 		/* FALLTHROUGH */
2735 	case BGE_ASICREV_BCM5750:
2736 	case BGE_ASICREV_BCM5752:
2737 	case BGE_ASICREV_BCM5906:
2738 		sc->bge_flags |= BGE_575X_PLUS;
2739 		/* FALLTHROUGH */
2740 	case BGE_ASICREV_BCM5705:
2741 		sc->bge_flags |= BGE_5705_PLUS;
2742 		break;
2743 	}
2744 
2745 	/*
2746 	 * When using the BCM5701 in PCI-X mode, data corruption has
2747 	 * been observed in the first few bytes of some received packets.
2748 	 * Aligning the packet buffer in memory eliminates the corruption.
2749 	 * Unfortunately, this misaligns the packet payloads.  On platforms
2750 	 * which do not support unaligned accesses, we will realign the
2751 	 * payloads by copying the received packets.
2752 	 */
2753 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
2754 	    sc->bge_flags & BGE_PCIX)
2755 		sc->bge_flags |= BGE_RX_ALIGNBUG;
2756 
2757 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2758 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) &&
2759 	    PCI_VENDOR(subid) == DELL_VENDORID)
2760 		sc->bge_phy_flags |= BGE_PHY_NO_3LED;
2761 
2762 	misccfg = CSR_READ_4(sc, BGE_MISC_CFG);
2763 	misccfg &= BGE_MISCCFG_BOARD_ID_MASK;
2764 
2765 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2766 	    (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2767 	     misccfg == BGE_MISCCFG_BOARD_ID_5788M))
2768 		sc->bge_flags |= BGE_IS_5788;
2769 
2770 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 &&
2771 	     (misccfg == 0x4000 || misccfg == 0x8000)) ||
2772 	    (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2773 	     PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
2774 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 ||
2775 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
2776 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
2777 	    (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
2778 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F ||
2779 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F ||
2780 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
2781 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 ||
2782 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 ||
2783 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795 ||
2784 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2785 		sc->bge_phy_flags |= BGE_PHY_10_100_ONLY;
2786 
2787 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2788 	    (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2789 	     (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2790 	      sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
2791 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2792 		sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
2793 
2794 	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2795 	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2796 		sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
2797 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX ||
2798 	    BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX)
2799 		sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
2800 	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2801 		sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
2802 
2803 	if ((BGE_IS_5705_PLUS(sc)) &&
2804 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 &&
2805 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
2806 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780 &&
2807 	    !BGE_IS_5717_PLUS(sc)) {
2808 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2809 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2810 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2811 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) {
2812 			if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 &&
2813 			    PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756)
2814 				sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
2815 			if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M)
2816 				sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
2817 		} else
2818 			sc->bge_phy_flags |= BGE_PHY_BER_BUG;
2819 	}
2820 
2821 	/* Identify chips with APE processor. */
2822 	switch (BGE_ASICREV(sc->bge_chipid)) {
2823 	case BGE_ASICREV_BCM5717:
2824 	case BGE_ASICREV_BCM5719:
2825 	case BGE_ASICREV_BCM5720:
2826 	case BGE_ASICREV_BCM5761:
2827 	case BGE_ASICREV_BCM5762:
2828 		sc->bge_flags |= BGE_APE;
2829 		break;
2830 	}
2831 
2832 	/* Chips with APE need BAR2 access for APE registers/memory. */
2833 	if ((sc->bge_flags & BGE_APE) != 0) {
2834 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2);
2835 		if (pci_mapreg_map(pa, BGE_PCI_BAR2, memtype, 0,
2836 		    &sc->bge_apetag, &sc->bge_apehandle, NULL, &apesize, 0)) {
2837 			printf(": couldn't map BAR2 memory\n");
2838 			goto fail_1;
2839 		}
2840 
2841 		/* Enable APE register/memory access by host driver. */
2842 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
2843 		reg |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
2844 		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
2845 		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
2846 		pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, reg);
2847 
2848 		bge_ape_lock_init(sc);
2849 		bge_ape_read_fw_ver(sc);
2850 	}
2851 
2852 	/* Identify the chips that use a CPMU. */
2853 	if (BGE_IS_5717_PLUS(sc) ||
2854 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2855 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2856 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2857 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
2858 		sc->bge_flags |= BGE_CPMU_PRESENT;
2859 
2860 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSI,
2861 	    &sc->bge_msicap, NULL)) {
2862 		if (bge_can_use_msi(sc) == 0)
2863 			pa->pa_flags &= ~PCI_FLAGS_MSI_ENABLED;
2864 	}
2865 
2866 	DPRINTFN(5, ("pci_intr_map\n"));
2867 	if (pci_intr_map_msi(pa, &ih) == 0)
2868 		sc->bge_flags |= BGE_MSI;
2869 	else if (pci_intr_map(pa, &ih)) {
2870 		printf(": couldn't map interrupt\n");
2871 		goto fail_1;
2872 	}
2873 
2874 	/*
2875 	 * All controllers except the BCM5700 support tagged status, but
2876 	 * we use tagged status only in the MSI case on BCM5717 and newer.
2877 	 * Otherwise MSI on those chips does not work.
2878 	 */
2879 	if (BGE_IS_5717_PLUS(sc) && sc->bge_flags & BGE_MSI)
2880 		sc->bge_flags |= BGE_TAGGED_STATUS;
2881 
2882 	DPRINTFN(5, ("pci_intr_string\n"));
2883 	intrstr = pci_intr_string(pc, ih);
2884 
2885 	/* Try to reset the chip. */
2886 	DPRINTFN(5, ("bge_reset\n"));
2887 	bge_sig_pre_reset(sc, BGE_RESET_START);
2888 	bge_reset(sc);
2889 
2890 	bge_sig_legacy(sc, BGE_RESET_START);
2891 	bge_sig_post_reset(sc, BGE_RESET_START);
2892 
2893 	bge_chipinit(sc);
2894 
2895 #ifdef __sparc64__
2896 	if (!gotenaddr) {
2897 		if (OF_getprop(PCITAG_NODE(pa->pa_tag), "local-mac-address",
2898 		    sc->arpcom.ac_enaddr, ETHER_ADDR_LEN) == ETHER_ADDR_LEN)
2899 			gotenaddr = 1;
2900 	}
2901 #endif
2902 
2903 	/*
2904 	 * Get station address from the EEPROM.
2905 	 */
2906 	if (!gotenaddr) {
2907 		mac_addr = bge_readmem_ind(sc, 0x0c14);
2908 		if ((mac_addr >> 16) == 0x484b) {
2909 			sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
2910 			sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
2911 			mac_addr = bge_readmem_ind(sc, 0x0c18);
2912 			sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
2913 			sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
2914 			sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
2915 			sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
2916 			gotenaddr = 1;
2917 		}
2918 	}
2919 	if (!gotenaddr) {
2920 		int mac_offset = BGE_EE_MAC_OFFSET;
2921 
2922 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2923 			mac_offset = BGE_EE_MAC_OFFSET_5906;
2924 
2925 		if (bge_read_nvram(sc, (caddr_t)&sc->arpcom.ac_enaddr,
2926 		    mac_offset + 2, ETHER_ADDR_LEN) == 0)
2927 			gotenaddr = 1;
2928 	}
2929 	if (!gotenaddr && (!(sc->bge_flags & BGE_NO_EEPROM))) {
2930 		if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
2931 		    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN) == 0)
2932 			gotenaddr = 1;
2933 	}
2934 
2935 #ifdef __sparc64__
2936 	if (!gotenaddr) {
2937 		extern void myetheraddr(u_char *);
2938 
2939 		myetheraddr(sc->arpcom.ac_enaddr);
2940 		gotenaddr = 1;
2941 	}
2942 #endif
2943 
2944 	if (!gotenaddr) {
2945 		printf(": failed to read station address\n");
2946 		goto fail_2;
2947 	}
2948 
2949 	/* Allocate the general information block and ring buffers. */
2950 	sc->bge_dmatag = pa->pa_dmat;
2951 	DPRINTFN(5, ("bus_dmamem_alloc\n"));
2952 	if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
2953 			     PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
2954 		printf(": can't alloc rx buffers\n");
2955 		goto fail_2;
2956 	}
2957 	DPRINTFN(5, ("bus_dmamem_map\n"));
2958 	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg,
2959 			   sizeof(struct bge_ring_data), &kva,
2960 			   BUS_DMA_NOWAIT)) {
2961 		printf(": can't map dma buffers (%lu bytes)\n",
2962 		    sizeof(struct bge_ring_data));
2963 		goto fail_3;
2964 	}
2965 	DPRINTFN(5, ("bus_dmamem_create\n"));
2966 	if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
2967 	    sizeof(struct bge_ring_data), 0,
2968 	    BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
2969 		printf(": can't create dma map\n");
2970 		goto fail_4;
2971 	}
2972 	DPRINTFN(5, ("bus_dmamem_load\n"));
2973 	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
2974 			    sizeof(struct bge_ring_data), NULL,
2975 			    BUS_DMA_NOWAIT)) {
2976 		goto fail_5;
2977 	}
2978 
2979 	DPRINTFN(5, ("bzero\n"));
2980 	sc->bge_rdata = (struct bge_ring_data *)kva;
2981 
2982 	bzero(sc->bge_rdata, sizeof(struct bge_ring_data));
2983 
2984 	/* Set default tuneable values. */
2985 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2986 	sc->bge_rx_coal_ticks = 150;
2987 	sc->bge_rx_max_coal_bds = 64;
2988 	sc->bge_tx_coal_ticks = 300;
2989 	sc->bge_tx_max_coal_bds = 400;
2990 
2991 	/* 5705 limits RX return ring to 512 entries. */
2992 	if (BGE_IS_5700_FAMILY(sc) || BGE_IS_5717_PLUS(sc))
2993 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2994 	else
2995 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2996 
2997 	/* Set up ifnet structure */
2998 	ifp = &sc->arpcom.ac_if;
2999 	ifp->if_softc = sc;
3000 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3001 	ifp->if_ioctl = bge_ioctl;
3002 	ifp->if_start = bge_start;
3003 	ifp->if_watchdog = bge_watchdog;
3004 	IFQ_SET_MAXLEN(&ifp->if_snd, BGE_TX_RING_CNT - 1);
3005 	IFQ_SET_READY(&ifp->if_snd);
3006 
3007 	DPRINTFN(5, ("bcopy\n"));
3008 	bcopy(sc->bge_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
3009 
3010 	ifp->if_capabilities = IFCAP_VLAN_MTU;
3011 
3012 #if NVLAN > 0
3013 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
3014 #endif
3015 
3016 	/*
3017 	 * 5700 B0 chips do not support checksumming correctly due
3018 	 * to hardware bugs.
3019 	 *
3020 	 * It seems all controllers have a bug that can generate UDP
3021 	 * datagrams with a checksum value of 0 when TX UDP checksum
3022 	 * offloading is enabled. Generating UDP checksum value 0 is
3023 	 * a violation of RFC 768.
3024 	 */
3025 	if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0)
3026 		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4;
3027 
3028 	if (BGE_IS_JUMBO_CAPABLE(sc))
3029 		ifp->if_hardmtu = BGE_JUMBO_MTU;
3030 
3031 	/*
3032 	 * Do MII setup.
3033 	 */
3034 	DPRINTFN(5, ("mii setup\n"));
3035 	sc->bge_mii.mii_ifp = ifp;
3036 	sc->bge_mii.mii_readreg = bge_miibus_readreg;
3037 	sc->bge_mii.mii_writereg = bge_miibus_writereg;
3038 	sc->bge_mii.mii_statchg = bge_miibus_statchg;
3039 
3040 	/*
3041 	 * Figure out what sort of media we have by checking the hardware
3042 	 * config word in the first 32K of internal NIC memory, or fall back to
3043 	 * examining the EEPROM if necessary.  Note: on some BCM5700 cards,
3044 	 * this value seems to be unset. If that's the case, we have to rely on
3045 	 * identifying the NIC by its PCI subsystem ID, as we do below for the
3046 	 * SysKonnect SK-9D41.
3047 	 */
3048 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
3049 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
3050 	else if (!(sc->bge_flags & BGE_NO_EEPROM)) {
3051 		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3052 		    sizeof(hwcfg))) {
3053 			printf(": failed to read media type\n");
3054 			goto fail_6;
3055 		}
3056 		hwcfg = ntohl(hwcfg);
3057 	}
3058 
3059 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
3060 	if (PCI_PRODUCT(subid) == SK_SUBSYSID_9D41 ||
3061 	    (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3062 		if (BGE_IS_5700_FAMILY(sc))
3063 			sc->bge_flags |= BGE_FIBER_TBI;
3064 		else
3065 			sc->bge_flags |= BGE_FIBER_MII;
3066 	}
3067 
3068 	/* Take advantage of single-shot MSI. */
3069 	if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_MSI)
3070 		CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3071 		    ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3072 
3073 	/* Hookup IRQ last. */
3074 	DPRINTFN(5, ("pci_intr_establish\n"));
3075 	sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc,
3076 	    sc->bge_dev.dv_xname);
3077 	if (sc->bge_intrhand == NULL) {
3078 		printf(": couldn't establish interrupt");
3079 		if (intrstr != NULL)
3080 			printf(" at %s", intrstr);
3081 		printf("\n");
3082 		goto fail_6;
3083 	}
3084 
3085 	/*
3086 	 * A Broadcom chip was detected. Inform the world.
3087 	 */
3088 	printf(": %s, address %s\n", intrstr,
3089 	    ether_sprintf(sc->arpcom.ac_enaddr));
3090 
3091 	if (sc->bge_flags & BGE_FIBER_TBI) {
3092 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3093 		    bge_ifmedia_sts);
3094 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
3095 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
3096 			    0, NULL);
3097 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
3098 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
3099 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3100 	} else {
3101 		int mii_flags;
3102 
3103 		/*
3104 		 * Do transceiver setup.
3105 		 */
3106 		ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
3107 			     bge_ifmedia_sts);
3108 		mii_flags = MIIF_DOPAUSE;
3109 		if (sc->bge_flags & BGE_FIBER_MII)
3110 			mii_flags |= MIIF_HAVEFIBER;
3111 		mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff,
3112 		    sc->bge_phy_addr, MII_OFFSET_ANY, mii_flags);
3113 
3114 		if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) {
3115 			printf("%s: no PHY found!\n", sc->bge_dev.dv_xname);
3116 			ifmedia_add(&sc->bge_mii.mii_media,
3117 				    IFM_ETHER|IFM_MANUAL, 0, NULL);
3118 			ifmedia_set(&sc->bge_mii.mii_media,
3119 				    IFM_ETHER|IFM_MANUAL);
3120 		} else
3121 			ifmedia_set(&sc->bge_mii.mii_media,
3122 				    IFM_ETHER|IFM_AUTO);
3123 	}
3124 
3125 	/*
3126 	 * Call MI attach routine.
3127 	 */
3128 	if_attach(ifp);
3129 	ether_ifattach(ifp);
3130 
3131 	timeout_set(&sc->bge_timeout, bge_tick, sc);
3132 	timeout_set(&sc->bge_rxtimeout, bge_rxtick, sc);
3133 	return;
3134 
3135 fail_6:
3136 	bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map);
3137 
3138 fail_5:
3139 	bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
3140 
3141 fail_4:
3142 	bus_dmamem_unmap(sc->bge_dmatag, kva,
3143 	    sizeof(struct bge_ring_data));
3144 
3145 fail_3:
3146 	bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
3147 
3148 fail_2:
3149 	if ((sc->bge_flags & BGE_APE) != 0)
3150 		bus_space_unmap(sc->bge_apetag, sc->bge_apehandle, apesize);
3151 
3152 fail_1:
3153 	bus_space_unmap(sc->bge_btag, sc->bge_bhandle, size);
3154 }
3155 
3156 int
3157 bge_activate(struct device *self, int act)
3158 {
3159 	struct bge_softc *sc = (struct bge_softc *)self;
3160 	struct ifnet *ifp = &sc->arpcom.ac_if;
3161 	int rv = 0;
3162 
3163 	switch (act) {
3164 	case DVACT_SUSPEND:
3165 		rv = config_activate_children(self, act);
3166 		if (ifp->if_flags & IFF_RUNNING)
3167 			bge_stop(sc);
3168 		break;
3169 	case DVACT_RESUME:
3170 		if (ifp->if_flags & IFF_UP)
3171 			bge_init(sc);
3172 		break;
3173 	default:
3174 		rv = config_activate_children(self, act);
3175 		break;
3176 	}
3177 	return (rv);
3178 }
3179 
3180 void
3181 bge_reset(struct bge_softc *sc)
3182 {
3183 	struct pci_attach_args *pa = &sc->bge_pa;
3184 	pcireg_t cachesize, command, devctl;
3185 	u_int32_t reset, mac_mode, mac_mode_mask, val;
3186 	void (*write_op)(struct bge_softc *, int, int);
3187 	int i;
3188 
3189 	mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
3190 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
3191 		mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
3192 	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;
3193 
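	/*
	 * Pick a register write method that will still work while the
	 * chip is in reset: newer PCIe parts apparently want mailbox
	 * writes, other 575X-plus parts indirect memory writes, and
	 * everything else indirect register writes.
	 */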
3194 	if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3195 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906) {
3196 		if (sc->bge_flags & BGE_PCIE)
3197 			write_op = bge_writembx;
3198 		else
3199 			write_op = bge_writemem_ind;
3200 	} else
3201 		write_op = bge_writereg_ind;
3202 
3203 	/* Take APE lock when performing reset. */
3204 	bge_ape_lock(sc, BGE_APE_LOCK_GRC);
3205 
3206 	/* Save some important PCI state. */
3207 	cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
3208 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD);
3209 
3210 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
3211 	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3212 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP | BGE_PCIMISCCTL_PCISTATE_RW);
3213 
3214 	/* Disable fastboot on controllers that support it. */
3215 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
3216 	    BGE_IS_5755_PLUS(sc))
3217 		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0);
3218 
	/*
	 * Write the magic number to SRAM at offset 0xB50.
	 * When firmware finishes its initialization it will
	 * write the one's complement, ~BGE_MAGIC_NUMBER, to the
	 * same location.
	 */
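	/*
	 * In outline, the handshake looks like this (a sketch; the
	 * matching poll loop runs after the reset is issued below):
	 *
	 *	host: SRAM[0xB50] = BGE_MAGIC_NUMBER
	 *	host: issue global reset
	 *	fw:   SRAM[0xB50] = ~BGE_MAGIC_NUMBER when initialized
	 *	host: poll SRAM[0xB50] until ~BGE_MAGIC_NUMBER appears
	 */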
3224 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
3225 
3226 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3227 
3228 	if (sc->bge_flags & BGE_PCIE) {
		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
		    !BGE_IS_5717_PLUS(sc)) {
3231 			if (CSR_READ_4(sc, 0x7e2c) == 0x60) {
3232 				/* PCI Express 1.0 system */
3233 				CSR_WRITE_4(sc, 0x7e2c, 0x20);
3234 			}
3235 		}
3236 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3237 			/*
3238 			 * Prevent PCI Express link training
3239 			 * during global reset.
3240 			 */
3241 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
3242 			reset |= (1<<29);
3243 		}
3244 	}
3245 
3246 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
3247 		val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3248 		CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3249 		    val | BGE_VCPU_STATUS_DRV_RESET);
		val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
		CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
		    val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);

		sc->bge_flags |= BGE_NO_EEPROM;
	}
3256 
3257 	/*
3258 	 * Set GPHY Power Down Override to leave GPHY
3259 	 * powered up in D0 uninitialized.
3260 	 */
3261 	if (BGE_IS_5705_PLUS(sc) &&
3262 	    (sc->bge_flags & BGE_CPMU_PRESENT) == 0)
3263 		reset |= BGE_MISCCFG_KEEP_GPHY_POWER;
3264 
3265 	/* Issue global reset */
3266 	write_op(sc, BGE_MISC_CFG, reset);
3267 
3268 	if (sc->bge_flags & BGE_PCIE)
3269 		DELAY(100 * 1000);
3270 	else
3271 		DELAY(1000);
3272 
3273 	if (sc->bge_flags & BGE_PCIE) {
3274 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3275 			pcireg_t v;
3276 
3277 			DELAY(500000); /* wait for link training to complete */
3278 			v = pci_conf_read(pa->pa_pc, pa->pa_tag, 0xc4);
3279 			pci_conf_write(pa->pa_pc, pa->pa_tag, 0xc4, v | (1<<15));
3280 		}
3281 
3282 		devctl = pci_conf_read(pa->pa_pc, pa->pa_tag, sc->bge_expcap +
3283 		    PCI_PCIE_DCSR);
		/* Disable the no-snoop feature and relaxed ordering. */
3285 		devctl &= ~(PCI_PCIE_DCSR_ERO | PCI_PCIE_DCSR_ENS);
3286 		/* Set PCI Express max payload size. */
3287 		devctl = (devctl & ~PCI_PCIE_DCSR_MPS) | sc->bge_expmrq;
3288 		/* Clear error status. */
3289 		devctl |= PCI_PCIE_DCSR_CEE | PCI_PCIE_DCSR_NFE |
3290 		    PCI_PCIE_DCSR_FEE | PCI_PCIE_DCSR_URE;
3291 		pci_conf_write(pa->pa_pc, pa->pa_tag, sc->bge_expcap +
3292 		    PCI_PCIE_DCSR, devctl);
3293 	}
3294 
3295 	/* Reset some of the PCI state that got zapped by reset */
3296 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
3297 	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3298 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP | BGE_PCIMISCCTL_PCISTATE_RW);
3299 	val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
3300 	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 &&
3301 	    (sc->bge_flags & BGE_PCIX) != 0)
3302 		val |= BGE_PCISTATE_RETRY_SAME_DMA;
3303 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
3304 		val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
3305 		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
3306 		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
3307 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, val);
3308 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
3309 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);
3310 
3311 	/* Re-enable MSI, if necessary, and enable memory arbiter. */
3312 	if (BGE_IS_5714_FAMILY(sc)) {
3313 		/* This chip disables MSI on reset. */
3314 		if (sc->bge_flags & BGE_MSI) {
3315 			val = pci_conf_read(pa->pa_pc, pa->pa_tag,
3316 			    sc->bge_msicap + PCI_MSI_MC);
3317 			pci_conf_write(pa->pa_pc, pa->pa_tag,
3318 			    sc->bge_msicap + PCI_MSI_MC,
3319 			    val | PCI_MSI_MC_MSIE);
3320 			val = CSR_READ_4(sc, BGE_MSI_MODE);
3321 			CSR_WRITE_4(sc, BGE_MSI_MODE,
3322 			    val | BGE_MSIMODE_ENABLE);
3323 		}
3324 		val = CSR_READ_4(sc, BGE_MARB_MODE);
3325 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3326 	} else
3327 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3328 
3329 	/* Fix up byte swapping */
3330 	CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc));
3331 
3332 	val = CSR_READ_4(sc, BGE_MAC_MODE);
3333 	val = (val & ~mac_mode_mask) | mac_mode;
3334 	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
3335 	DELAY(40);
3336 
3337 	bge_ape_unlock(sc, BGE_APE_LOCK_GRC);
3338 
3339 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
3340 		for (i = 0; i < BGE_TIMEOUT; i++) {
3341 			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3342 			if (val & BGE_VCPU_STATUS_INIT_DONE)
3343 				break;
3344 			DELAY(100);
3345 		}
3346 
3347 		if (i >= BGE_TIMEOUT)
3348 			printf("%s: reset timed out\n", sc->bge_dev.dv_xname);
3349 	} else {
3350 		/*
3351 		 * Poll until we see 1's complement of the magic number.
3352 		 * This indicates that the firmware initialization
3353 		 * is complete.  We expect this to fail if no SEEPROM
3354 		 * is fitted.
3355 		 */
3356 		for (i = 0; i < BGE_TIMEOUT; i++) {
3357 			val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
3358 			if (val == ~BGE_MAGIC_NUMBER)
3359 				break;
3360 			DELAY(10);
3361 		}
3362 
		if (i >= BGE_TIMEOUT && !(sc->bge_flags & BGE_NO_EEPROM))
			printf("%s: firmware handshake timed out\n",
			    sc->bge_dev.dv_xname);
3366 		/* BCM57765 A0 needs additional time before accessing. */
3367 		if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
3368 			DELAY(10 * 1000);       /* XXX */
3369 	}
3370 
3371 	/*
3372 	 * The 5704 in TBI mode apparently needs some special
3373 	 * adjustment to ensure the SERDES drive level is set
3374 	 * to 1.2V.
3375 	 */
3376 	if (sc->bge_flags & BGE_FIBER_TBI &&
3377 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
3378 		val = CSR_READ_4(sc, BGE_SERDES_CFG);
3379 		val = (val & ~0xFFF) | 0x880;
3380 		CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3381 	}
3382 
3383 	if (sc->bge_flags & BGE_PCIE &&
3384 	    !BGE_IS_5717_PLUS(sc) &&
3385 	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3386 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) {
3387 		/* Enable Data FIFO protection. */
3388 		val = CSR_READ_4(sc, 0x7c00);
3389 		CSR_WRITE_4(sc, 0x7c00, val | (1<<25));
3390 	}
3391 
3392 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
3393 		BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
3394 		    CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
3395 }
3396 
3397 /*
3398  * Frame reception handling. This is called if there's a frame
3399  * on the receive return list.
3400  *
3401  * Note: we have to be able to handle two possibilities here:
3402  * 1) the frame is from the jumbo receive ring
3403  * 2) the frame is from the standard receive ring
3404  */
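/*
 * The status block's RX producer index tells us how far the chip has
 * advanced; bge_rx_saved_considx records how far we have consumed.
 * The return ring is circular, so when the live region wraps past the
 * end of the ring, the DMA sync below is done in two pieces: first the
 * tail of the ring, then again from the start of the ring.
 */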
3405 
3406 void
3407 bge_rxeof(struct bge_softc *sc)
3408 {
3409 	struct ifnet *ifp;
3410 	uint16_t rx_prod, rx_cons;
3411 	int stdcnt = 0, jumbocnt = 0;
3412 	bus_dmamap_t dmamap;
3413 	bus_addr_t offset, toff;
3414 	bus_size_t tlen;
3415 	int tosync;
3416 
3417 	rx_cons = sc->bge_rx_saved_considx;
3418 	rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx;
3419 
3420 	/* Nothing to do */
3421 	if (rx_cons == rx_prod)
3422 		return;
3423 
3424 	ifp = &sc->arpcom.ac_if;
3425 
3426 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3427 	    offsetof(struct bge_ring_data, bge_status_block),
3428 	    sizeof (struct bge_status_block),
3429 	    BUS_DMASYNC_POSTREAD);
3430 
3431 	offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
3432 	tosync = rx_prod - rx_cons;
3433 
3434 	toff = offset + (rx_cons * sizeof (struct bge_rx_bd));
3435 
3436 	if (tosync < 0) {
3437 		tlen = (sc->bge_return_ring_cnt - rx_cons) *
3438 		    sizeof (struct bge_rx_bd);
3439 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3440 		    toff, tlen, BUS_DMASYNC_POSTREAD);
3441 		tosync = -tosync;
3442 	}
3443 
3444 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3445 	    offset, tosync * sizeof (struct bge_rx_bd),
3446 	    BUS_DMASYNC_POSTREAD);
3447 
3448 	while (rx_cons != rx_prod) {
3449 		struct bge_rx_bd	*cur_rx;
3450 		u_int32_t		rxidx;
3451 		struct mbuf		*m = NULL;
3452 
3453 		cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons];
3454 
3455 		rxidx = cur_rx->bge_idx;
3456 		BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3457 
3458 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3459 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3460 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
3461 
3462 			jumbocnt++;
3463 
3464 			dmamap = sc->bge_cdata.bge_rx_jumbo_map[rxidx];
3465 			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
3466 			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3467 			bus_dmamap_unload(sc->bge_dmatag, dmamap);
3468 
3469 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3470 				m_freem(m);
3471 				continue;
3472 			}
3473 		} else {
3474 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3475 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
3476 
3477 			stdcnt++;
3478 
3479 			dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
3480 			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
3481 			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3482 			bus_dmamap_unload(sc->bge_dmatag, dmamap);
3483 
3484 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3485 				m_freem(m);
3486 				continue;
3487 			}
3488 		}
3489 
3490 		ifp->if_ipackets++;
3491 #ifdef __STRICT_ALIGNMENT
3492 		/*
3493 		 * The i386 allows unaligned accesses, but for other
3494 		 * platforms we must make sure the payload is aligned.
3495 		 */
3496 		if (sc->bge_flags & BGE_RX_ALIGNBUG) {
3497 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3498 			    cur_rx->bge_len);
3499 			m->m_data += ETHER_ALIGN;
3500 		}
3501 #endif
3502 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3503 		m->m_pkthdr.rcvif = ifp;
3504 
3505 		bge_rxcsum(sc, cur_rx, m);
3506 
3507 #if NVLAN > 0
3508 		if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING &&
3509 		    cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3510 			m->m_pkthdr.ether_vtag = cur_rx->bge_vlan_tag;
3511 			m->m_flags |= M_VLANTAG;
3512 		}
3513 #endif
3514 
3515 #if NBPFILTER > 0
3516 		/*
3517 		 * Handle BPF listeners. Let the BPF user see the packet.
3518 		 */
3519 		if (ifp->if_bpf)
3520 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
3521 #endif
3522 
3523 		ether_input_mbuf(ifp, m);
3524 	}
3525 
3526 	sc->bge_rx_saved_considx = rx_cons;
3527 	bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3528 	if (stdcnt) {
3529 		if_rxr_put(&sc->bge_std_ring, stdcnt);
3530 		bge_fill_rx_ring_std(sc);
3531 	}
3532 	if (jumbocnt) {
3533 		if_rxr_put(&sc->bge_jumbo_ring, jumbocnt);
3534 		bge_fill_rx_ring_jumbo(sc);
3535 	}
3536 }
3537 
3538 void
3539 bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
3540 {
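	/*
	 * Three generations of RX checksum offload are handled here:
	 * 5700 B0 gets nothing because its checksum logic is broken;
	 * 5717 and newer report per-frame error flags; older parts
	 * only supply raw checksum values, which must be 0xFFFF to
	 * indicate a valid checksum.
	 */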
3541 	if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
3542 		/*
3543 		 * 5700 B0 chips do not support checksumming correctly due
3544 		 * to hardware bugs.
3545 		 */
3546 		return;
3547 	} else if (BGE_IS_5717_PLUS(sc)) {
3548 		if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
3549 			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM &&
3550 			    (cur_rx->bge_error_flag &
3551 			    BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
3552 				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
3553 
3554 			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
3555 				m->m_pkthdr.csum_flags |=
3556 				    M_TCP_CSUM_IN_OK|M_UDP_CSUM_IN_OK;
			}
		}
	} else {
3560 		if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM &&
3561 		    cur_rx->bge_ip_csum == 0xFFFF)
3562 			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
3563 
3564 		if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3565 		    m->m_pkthdr.len >= ETHER_MIN_NOPAD &&
3566 		    cur_rx->bge_tcp_udp_csum == 0xFFFF) {
3567 			m->m_pkthdr.csum_flags |=
3568 			    M_TCP_CSUM_IN_OK|M_UDP_CSUM_IN_OK;
3569 		}
3570 	}
3571 }
3572 
3573 void
3574 bge_txeof(struct bge_softc *sc)
3575 {
3576 	struct bge_tx_bd *cur_tx = NULL;
3577 	struct ifnet *ifp;
3578 	struct txdmamap_pool_entry *dma;
3579 	bus_addr_t offset, toff;
3580 	bus_size_t tlen;
3581 	int tosync;
3582 	struct mbuf *m;
3583 
3584 	/* Nothing to do */
3585 	if (sc->bge_tx_saved_considx ==
3586 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx)
3587 		return;
3588 
3589 	ifp = &sc->arpcom.ac_if;
3590 
3591 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3592 	    offsetof(struct bge_ring_data, bge_status_block),
3593 	    sizeof (struct bge_status_block),
3594 	    BUS_DMASYNC_POSTREAD);
3595 
3596 	offset = offsetof(struct bge_ring_data, bge_tx_ring);
3597 	tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
3598 	    sc->bge_tx_saved_considx;
3599 
3600 	toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
3601 
3602 	if (tosync < 0) {
3603 		tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
3604 		    sizeof (struct bge_tx_bd);
3605 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3606 		    toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3607 		tosync = -tosync;
3608 	}
3609 
3610 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3611 	    offset, tosync * sizeof (struct bge_tx_bd),
3612 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3613 
3614 	/*
3615 	 * Go through our tx ring and free mbufs for those
3616 	 * frames that have been sent.
3617 	 */
3618 	while (sc->bge_tx_saved_considx !=
3619 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
3620 		u_int32_t		idx = 0;
3621 
3622 		idx = sc->bge_tx_saved_considx;
3623 		cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
3624 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3625 			ifp->if_opackets++;
3626 		m = sc->bge_cdata.bge_tx_chain[idx];
3627 		if (m != NULL) {
3628 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
3629 			dma = sc->txdma[idx];
3630 			bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
3631 			    dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3632 			bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
3633 			SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
3634 			sc->txdma[idx] = NULL;
3635 
3636 			m_freem(m);
3637 		}
3638 		sc->bge_txcnt--;
3639 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3640 	}
3641 
3642 	if (sc->bge_txcnt < BGE_TX_RING_CNT - 16)
3643 		ifp->if_flags &= ~IFF_OACTIVE;
3644 	if (sc->bge_txcnt == 0)
3645 		ifp->if_timer = 0;
3646 }
3647 
3648 int
3649 bge_intr(void *xsc)
3650 {
3651 	struct bge_softc *sc;
3652 	struct ifnet *ifp;
3653 	u_int32_t statusword, statustag;
3654 
3655 	sc = xsc;
3656 	ifp = &sc->arpcom.ac_if;
3657 
3658 	/* read status word from status block */
3659 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3660 	    offsetof(struct bge_ring_data, bge_status_block),
3661 	    sizeof (struct bge_status_block),
3662 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3663 
3664 	statusword = sc->bge_rdata->bge_status_block.bge_status;
3665 	statustag = sc->bge_rdata->bge_status_block.bge_status_tag << 24;
3666 
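	/*
	 * With tagged status, the chip bumps the tag every time it
	 * updates the status block.  If the tag is unchanged and the
	 * INTA line is deasserted, the interrupt was not ours.  Writing
	 * the last tag back to the mailbox (at the bottom) acks the
	 * interrupt and re-enables further ones.
	 */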
3667 	if (sc->bge_flags & BGE_TAGGED_STATUS) {
3668 		if (sc->bge_lasttag == statustag &&
3669 		    (CSR_READ_4(sc, BGE_PCI_PCISTATE) &
3670 		     BGE_PCISTATE_INTR_NOT_ACTIVE))
3671 			return (0);
3672 		sc->bge_lasttag = statustag;
3673 	} else {
3674 		if (!(statusword & BGE_STATFLAG_UPDATED) &&
3675 		    (CSR_READ_4(sc, BGE_PCI_PCISTATE) &
3676 		     BGE_PCISTATE_INTR_NOT_ACTIVE))
3677 			return (0);
3678 		/* Ack interrupt and stop others from occurring. */
3679 		bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3680 		statustag = 0;
3681 	}
3682 
3683 	/* clear status word */
3684 	sc->bge_rdata->bge_status_block.bge_status = 0;
3685 
3686 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3687 	    offsetof(struct bge_ring_data, bge_status_block),
3688 	    sizeof (struct bge_status_block),
3689 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3690 
3691 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
3692 	    statusword & BGE_STATFLAG_LINKSTATE_CHANGED ||
3693 	    BGE_STS_BIT(sc, BGE_STS_LINK_EVT))
3694 		bge_link_upd(sc);
3695 
3696 	/* Re-enable interrupts. */
3697 	bge_writembx(sc, BGE_MBX_IRQ0_LO, statustag);
3698 
3699 	if (ifp->if_flags & IFF_RUNNING) {
3700 		/* Check RX return ring producer/consumer */
3701 		bge_rxeof(sc);
3702 
3703 		/* Check TX ring producer/consumer */
3704 		bge_txeof(sc);
3705 
3706 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
3707 			bge_start(ifp);
3708 	}
3709 
3710 	return (1);
3711 }
3712 
3713 void
3714 bge_tick(void *xsc)
3715 {
3716 	struct bge_softc *sc = xsc;
3717 	struct mii_data *mii = &sc->bge_mii;
3718 	int s;
3719 
3720 	s = splnet();
3721 
3722 	if (BGE_IS_5705_PLUS(sc))
3723 		bge_stats_update_regs(sc);
3724 	else
3725 		bge_stats_update(sc);
3726 
3727 	if (sc->bge_flags & BGE_FIBER_TBI) {
		/*
		 * Since auto-polling can't be used in TBI mode, poll the
		 * link status manually: register a pending link event and
		 * trigger an interrupt.
		 */
3733 		BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
3734 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3735 	} else {
		/*
		 * Do not touch the PHY if we have link up. Doing so could
		 * break IPMI/ASF mode or produce extra input errors
		 * (extra input errors were reported for bcm5701 & bcm5704).
		 */
3741 		if (!BGE_STS_BIT(sc, BGE_STS_LINK))
3742 			mii_tick(mii);
3743 	}
3744 
3745 	timeout_add_sec(&sc->bge_timeout, 1);
3746 
3747 	splx(s);
3748 }
3749 
3750 void
3751 bge_stats_update_regs(struct bge_softc *sc)
3752 {
3753 	struct ifnet *ifp = &sc->arpcom.ac_if;
3754 
3755 	sc->bge_tx_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
3756 	    offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
3757 
3758 	sc->bge_rx_overruns += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
3759 
	/*
	 * XXX
	 * Unlike other controllers, the BGE_RXLP_LOCSTAT_IFIN_DROPS counter
	 * of the BCM5717, BCM5718, BCM5762, BCM5719 A0 and BCM5720 A0
	 * controllers includes the number of unwanted multicast frames.
	 * This comes from a silicon bug, and the known workaround to get a
	 * rough (not exact) counter is to enable an interrupt on MBUF low
	 * watermark attention.  This can be accomplished by setting the
	 * BGE_HCCMODE_ATTN bit of BGE_HDD_MODE, the BGE_BMANMODE_LOMBUF_ATTN
	 * bit of BGE_BMAN_MODE and the BGE_MODECTL_FLOWCTL_ATTN_INTR bit of
	 * BGE_MODE_CTL.  However, that change would generate more interrupts,
	 * and multiple frames could still be lost while handling the
	 * BGE_MODECTL_FLOWCTL_ATTN_INTR interrupt.  Since the workaround
	 * still would not yield a correct counter, it does not seem worth
	 * implementing, so simply skip reading the counter on controllers
	 * that have the silicon bug.
	 */
3777 	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
3778 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5762 &&
3779 	    sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
3780 	    sc->bge_chipid != BGE_CHIPID_BCM5720_A0)
3781 		sc->bge_rx_discards += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3782 
3783 	sc->bge_rx_inerrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
3784 
3785 	ifp->if_collisions = sc->bge_tx_collisions;
3786 	ifp->if_ierrors = sc->bge_rx_discards + sc->bge_rx_inerrors;
3787 
3788 	if (sc->bge_flags & BGE_RDMA_BUG) {
3789 		u_int32_t val, ucast, mcast, bcast;
3790 
3791 		ucast = CSR_READ_4(sc, BGE_MAC_STATS +
3792 		    offsetof(struct bge_mac_stats_regs, ifHCOutUcastPkts));
3793 		mcast = CSR_READ_4(sc, BGE_MAC_STATS +
3794 		    offsetof(struct bge_mac_stats_regs, ifHCOutMulticastPkts));
3795 		bcast = CSR_READ_4(sc, BGE_MAC_STATS +
3796 		    offsetof(struct bge_mac_stats_regs, ifHCOutBroadcastPkts));
3797 
		/*
		 * If the controller has transmitted more than
		 * BGE_NUM_RDMA_CHANNELS frames, it's safe to disable the
		 * workaround for the DMA engine's miscalculation of TXMBUF
		 * space.
		 */
3803 		if (ucast + mcast + bcast > BGE_NUM_RDMA_CHANNELS) {
3804 			val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
3805 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
3806 				val &= ~BGE_RDMA_TX_LENGTH_WA_5719;
3807 			else
3808 				val &= ~BGE_RDMA_TX_LENGTH_WA_5720;
3809 			CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
3810 			sc->bge_flags &= ~BGE_RDMA_BUG;
3811 		}
3812 	}
3813 }
3814 
3815 void
3816 bge_stats_update(struct bge_softc *sc)
3817 {
3818 	struct ifnet *ifp = &sc->arpcom.ac_if;
3819 	bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3820 	u_int32_t cnt;
3821 
3822 #define READ_STAT(sc, stats, stat) \
3823 	  CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
3824 
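	/*
	 * The statistics block lives in NIC memory, reached through the
	 * memory window; each counter is a 64-bit lo/hi pair of which
	 * only the low word is read here.  Running totals are kept in
	 * the softc and only the deltas are folded into the ifnet
	 * counters, so the unsigned subtraction copes with 32-bit wrap.
	 */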
3825 	cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
3826 	ifp->if_collisions += (u_int32_t)(cnt - sc->bge_tx_collisions);
3827 	sc->bge_tx_collisions = cnt;
3828 
3829 	cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo);
3830 	sc->bge_rx_overruns = cnt;
3831 	cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo);
	ifp->if_ierrors += (u_int32_t)(cnt - sc->bge_rx_inerrors);
3833 	sc->bge_rx_inerrors = cnt;
3834 	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
3835 	ifp->if_ierrors += (u_int32_t)(cnt - sc->bge_rx_discards);
3836 	sc->bge_rx_discards = cnt;
3837 
3838 	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
3839 	ifp->if_oerrors += (u_int32_t)(cnt - sc->bge_tx_discards);
3840 	sc->bge_tx_discards = cnt;
3841 
3842 #undef READ_STAT
3843 }
3844 
3845 /*
3846  * Compact outbound packets to avoid bug with DMA segments less than 8 bytes.
3847  */
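/*
 * For each mbuf shorter than eight bytes, try in order:
 *
 *	1. copy its data into trailing space in the previous mbuf;
 *	2. steal enough bytes from the head of the next mbuf, if the
 *	   next mbuf can spare them and stay at least eight bytes;
 *	3. allocate a fresh mbuf combining the runt with bytes borrowed
 *	   from the tail of the previous mbuf.
 */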
3848 int
3849 bge_compact_dma_runt(struct mbuf *pkt)
3850 {
3851 	struct mbuf	*m, *prev, *n = NULL;
3852 	int 		totlen, newprevlen;
3853 
3854 	prev = NULL;
3855 	totlen = 0;
3856 
	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
		int mlen = m->m_len;
		int shortfall = 8 - mlen;
3860 
3861 		totlen += mlen;
3862 		if (mlen == 0)
3863 			continue;
3864 		if (mlen >= 8)
3865 			continue;
3866 
		/* If we get here, the mbuf data is too small for the DMA
		 * engine.  Try to fix it by shuffling data to prev or next
		 * in the chain.  If that fails, do a compacting deep-copy
		 * of the whole chain.
		 */
3871 
3872 		/* Internal frag. If fits in prev, copy it there. */
3873 		if (prev && M_TRAILINGSPACE(prev) >= m->m_len) {
3874 			bcopy(m->m_data, prev->m_data+prev->m_len, mlen);
3875 			prev->m_len += mlen;
3876 			m->m_len = 0;
3877 			/* XXX stitch chain */
3878 			prev->m_next = m_free(m);
3879 			m = prev;
3880 			continue;
3881 		} else if (m->m_next != NULL &&
3882 			   M_TRAILINGSPACE(m) >= shortfall &&
3883 			   m->m_next->m_len >= (8 + shortfall)) {
			/* m has room and the next mbuf has data to spare; pull up. */
3885 
3886 			bcopy(m->m_next->m_data, m->m_data+m->m_len, shortfall);
3887 			m->m_len += shortfall;
3888 			m->m_next->m_len -= shortfall;
3889 			m->m_next->m_data += shortfall;
		} else {
			/* Got a runt at the very end of the packet, or
			 * could not pull up from the next mbuf.  Borrow
			 * data from the tail of the preceding mbuf and
			 * merge it with the runt into a new mbuf.  (The
			 * original data is still valid, so we can do this
			 * even if prev is not writable.)
			 */
3896 
3897 			/* if we'd make prev a runt, just move all of its data. */
3898 #ifdef DEBUG
3899 			KASSERT(prev != NULL /*, ("runt but null PREV")*/);
3900 			KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
3901 #endif
3902 			if ((prev->m_len - shortfall) < 8)
3903 				shortfall = prev->m_len;
3904 
3905 			newprevlen = prev->m_len - shortfall;
3906 
			MGET(n, M_DONTWAIT, MT_DATA);
3908 			if (n == NULL)
3909 				return (ENOBUFS);
3910 			KASSERT(m->m_len + shortfall < MLEN
3911 				/*,
3912 				  ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
3913 
3914 			/* first copy the data we're stealing from prev */
3915 			bcopy(prev->m_data + newprevlen, n->m_data, shortfall);
3916 
3917 			/* update prev->m_len accordingly */
3918 			prev->m_len -= shortfall;
3919 
3920 			/* copy data from runt m */
3921 			bcopy(m->m_data, n->m_data + shortfall, m->m_len);
3922 
3923 			/* n holds what we stole from prev, plus m */
3924 			n->m_len = shortfall + m->m_len;
3925 
3926 			/* stitch n into chain and free m */
3927 			n->m_next = m->m_next;
3928 			prev->m_next = n;
3929 			/* KASSERT(m->m_next == NULL); */
3930 			m->m_next = NULL;
3931 			m_free(m);
3932 			m = n;	/* for continuing loop */
3933 		}
3934 	}
3935 	return (0);
3936 }
3937 
3938 /*
3939  * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3940  * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3941  * but when such padded frames employ the bge IP/TCP checksum offload,
3942  * the hardware checksum assist gives incorrect results (possibly
3943  * from incorporating its own padding into the UDP/TCP checksum; who knows).
3944  * If we pad such runts with zeros, the onboard checksum comes out correct.
3945  */
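/*
 * ETHER_MIN_NOPAD is the minimum frame length exclusive of the CRC,
 * so padlen below is the number of zero bytes needed to reach it.
 */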
3946 int
3947 bge_cksum_pad(struct mbuf *m)
3948 {
3949 	int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
3950 	struct mbuf *last;
3951 
3952 	/* If there's only the packet-header and we can pad there, use it. */
3953 	if (m->m_pkthdr.len == m->m_len && M_TRAILINGSPACE(m) >= padlen) {
3954 		last = m;
3955 	} else {
3956 		/*
3957 		 * Walk packet chain to find last mbuf. We will either
3958 		 * pad there, or append a new mbuf and pad it.
3959 		 */
3960 		for (last = m; last->m_next != NULL; last = last->m_next);
3961 		if (M_TRAILINGSPACE(last) < padlen) {
3962 			/* Allocate new empty mbuf, pad it. Compact later. */
3963 			struct mbuf *n;
3964 
3965 			MGET(n, M_DONTWAIT, MT_DATA);
3966 			if (n == NULL)
3967 				return (ENOBUFS);
3968 			n->m_len = 0;
3969 			last->m_next = n;
3970 			last = n;
3971 		}
3972 	}
3973 
3974 	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
3975 	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
3976 	last->m_len += padlen;
3977 	m->m_pkthdr.len += padlen;
3978 
3979 	return (0);
3980 }
3981 
3982 /*
3983  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3984  * pointers to descriptors.
3985  */
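/*
 * Each DMA segment becomes one send BD.  The checksum and VLAN flags
 * are replicated into every BD of the frame; only the final BD gets
 * BGE_TXBDFLAG_END set.
 */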
3986 int
3987 bge_encap(struct bge_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
3988 {
3989 	struct bge_tx_bd	*f = NULL;
3990 	u_int32_t		frag, cur;
3991 	u_int16_t		csum_flags = 0;
3992 	struct txdmamap_pool_entry *dma;
3993 	bus_dmamap_t dmamap;
3994 	int			i = 0;
3995 
3996 	cur = frag = *txidx;
3997 
3998 	if (m_head->m_pkthdr.csum_flags) {
3999 		if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
4000 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4001 		if (m_head->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT |
4002 		    M_UDP_CSUM_OUT)) {
4003 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4004 			if (m_head->m_pkthdr.len < ETHER_MIN_NOPAD &&
4005 			    bge_cksum_pad(m_head) != 0)
4006 				return (ENOBUFS);
4007 		}
4008 	}
4009 
	if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5700_BX)
4011 		goto doit;
4012 
	/*
	 * The bcm5700 revision B silicon cannot handle DMA descriptors
	 * of less than eight bytes.  If we encounter a teeny mbuf at
	 * the end of a chain, we can pad; otherwise, copy.
	 */
4018 	if (bge_compact_dma_runt(m_head) != 0)
4019 		return (ENOBUFS);
4020 
4021 doit:
4022 	dma = SLIST_FIRST(&sc->txdma_list);
4023 	if (dma == NULL)
4024 		return (ENOBUFS);
4025 	dmamap = dma->dmamap;
4026 
4027 	/*
4028 	 * Start packing the mbufs in this chain into
4029 	 * the fragment pointers. Stop when we run out
4030 	 * of fragments or hit the end of the mbuf chain.
4031 	 */
4032 	switch (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
4033 	    BUS_DMA_NOWAIT)) {
4034 	case 0:
4035 		break;
4036 	case EFBIG:
4037 		if (m_defrag(m_head, M_DONTWAIT) == 0 &&
4038 		    bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
4039 		     BUS_DMA_NOWAIT) == 0)
4040 			break;
4041 
4042 		/* FALLTHROUGH */
4043 	default:
4044 		return (ENOBUFS);
4045 	}
4046 
4047 	/* Check if we have enough free send BDs. */
4048 	if (sc->bge_txcnt + dmamap->dm_nsegs >= BGE_TX_RING_CNT)
4049 		goto fail_unload;
4050 
4051 	for (i = 0; i < dmamap->dm_nsegs; i++) {
4052 		f = &sc->bge_rdata->bge_tx_ring[frag];
4053 		if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
4054 			break;
4055 		BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
4056 		f->bge_len = dmamap->dm_segs[i].ds_len;
4057 		f->bge_flags = csum_flags;
4058 		f->bge_vlan_tag = 0;
4059 #if NVLAN > 0
4060 		if (m_head->m_flags & M_VLANTAG) {
4061 			f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
4062 			f->bge_vlan_tag = m_head->m_pkthdr.ether_vtag;
4063 		}
4064 #endif
4065 		cur = frag;
4066 		BGE_INC(frag, BGE_TX_RING_CNT);
4067 	}
4068 
4069 	if (i < dmamap->dm_nsegs)
4070 		goto fail_unload;
4071 
4072 	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
4073 	    BUS_DMASYNC_PREWRITE);
4074 
4075 	if (frag == sc->bge_tx_saved_considx)
4076 		goto fail_unload;
4077 
4078 	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
4079 	sc->bge_cdata.bge_tx_chain[cur] = m_head;
4080 	SLIST_REMOVE_HEAD(&sc->txdma_list, link);
4081 	sc->txdma[cur] = dma;
4082 	sc->bge_txcnt += dmamap->dm_nsegs;
4083 
4084 	*txidx = frag;
4085 
4086 	return (0);
4087 
4088 fail_unload:
4089 	bus_dmamap_unload(sc->bge_dmatag, dmamap);
4090 
4091 	return (ENOBUFS);
4092 }
4093 
4094 /*
4095  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4096  * to the mbuf data regions directly in the transmit descriptors.
4097  */
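/*
 * The ring is treated as full once fewer than 16 descriptors remain;
 * IFF_OACTIVE is then raised and cleared again by bge_txeof once the
 * chip has drained some of the ring.
 */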
4098 void
4099 bge_start(struct ifnet *ifp)
4100 {
4101 	struct bge_softc *sc;
4102 	struct mbuf *m_head;
4103 	u_int32_t prodidx;
4104 	int pkts;
4105 
4106 	sc = ifp->if_softc;
4107 
4108 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
4109 		return;
4110 	if (!BGE_STS_BIT(sc, BGE_STS_LINK))
4111 		return;
4112 
4113 	prodidx = sc->bge_tx_prodidx;
4114 
4115 	for (pkts = 0; !IFQ_IS_EMPTY(&ifp->if_snd);) {
4116 		if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
4117 			ifp->if_flags |= IFF_OACTIVE;
4118 			break;
4119 		}
4120 
4121 		IFQ_POLL(&ifp->if_snd, m_head);
4122 		if (m_head == NULL)
4123 			break;
4124 
4125 		/*
4126 		 * Pack the data into the transmit ring. If we
4127 		 * don't have room, set the OACTIVE flag and wait
4128 		 * for the NIC to drain the ring.
4129 		 */
4130 		if (bge_encap(sc, m_head, &prodidx)) {
4131 			ifp->if_flags |= IFF_OACTIVE;
4132 			break;
4133 		}
4134 
4135 		/* now we are committed to transmit the packet */
4136 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
4137 		pkts++;
4138 
4139 #if NBPFILTER > 0
4140 		/*
4141 		 * If there's a BPF listener, bounce a copy of this frame
4142 		 * to him.
4143 		 */
4144 		if (ifp->if_bpf)
4145 			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
4146 #endif
4147 	}
4148 	if (pkts == 0)
4149 		return;
4150 
4151 	/* Transmit */
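	/*
	 * Ring the send producer doorbell; 5700_BX parts apparently need
	 * the index written twice before it takes effect, hence the
	 * duplicated write below.
	 */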
4152 	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4153 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
4154 		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4155 
4156 	sc->bge_tx_prodidx = prodidx;
4157 
4158 	/*
4159 	 * Set a timeout in case the chip goes out to lunch.
4160 	 */
4161 	ifp->if_timer = 5;
4162 }
4163 
4164 void
4165 bge_init(void *xsc)
4166 {
4167 	struct bge_softc *sc = xsc;
4168 	struct ifnet *ifp;
4169 	u_int16_t *m;
4170 	u_int32_t mode;
4171 	int s;
4172 
4173 	s = splnet();
4174 
4175 	ifp = &sc->arpcom.ac_if;
4176 
4177 	/* Cancel pending I/O and flush buffers. */
4178 	bge_stop(sc);
4179 	bge_sig_pre_reset(sc, BGE_RESET_START);
4180 	bge_reset(sc);
4181 	bge_sig_legacy(sc, BGE_RESET_START);
4182 	bge_sig_post_reset(sc, BGE_RESET_START);
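	/*
	 * The bge_sig_* calls bracket the reset so that any ASF/IPMI
	 * management firmware running on the NIC is told what is
	 * happening and can preserve its state across the reset.
	 */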
4183 
4184 	bge_chipinit(sc);
4185 
4186 	/*
4187 	 * Init the various state machines, ring
4188 	 * control blocks and firmware.
4189 	 */
4190 	if (bge_blockinit(sc)) {
4191 		printf("%s: initialization failure\n", sc->bge_dev.dv_xname);
4192 		splx(s);
4193 		return;
4194 	}
4195 
4196 	/* Specify MRU. */
4197 	if (BGE_IS_JUMBO_CAPABLE(sc))
4198 		CSR_WRITE_4(sc, BGE_RX_MTU,
4199 			BGE_JUMBO_FRAMELEN + ETHER_VLAN_ENCAP_LEN);
4200 	else
4201 		CSR_WRITE_4(sc, BGE_RX_MTU,
4202 			ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
4203 
4204 	/* Load our MAC address. */
4205 	m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
4206 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4207 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4208 
4209 	if (!(ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)) {
4210 		/* Disable hardware decapsulation of VLAN frames. */
4211 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
4212 	}
4213 
4214 	/* Program promiscuous mode and multicast filters. */
4215 	bge_iff(sc);
4216 
4217 	/* Init RX ring. */
4218 	bge_init_rx_ring_std(sc);
4219 
4220 	/*
4221 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4222 	 * memory to ensure that the chip has in fact read the first
4223 	 * entry of the ring.
4224 	 */
4225 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4226 		u_int32_t		v, i;
4227 		for (i = 0; i < 10; i++) {
4228 			DELAY(20);
4229 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4230 			if (v == (MCLBYTES - ETHER_ALIGN))
4231 				break;
4232 		}
4233 		if (i == 10)
4234 			printf("%s: 5705 A0 chip failed to load RX ring\n",
4235 			    sc->bge_dev.dv_xname);
4236 	}
4237 
4238 	/* Init Jumbo RX ring. */
4239 	if (BGE_IS_JUMBO_CAPABLE(sc))
4240 		bge_init_rx_ring_jumbo(sc);
4241 
4242 	/* Init our RX return ring index */
4243 	sc->bge_rx_saved_considx = 0;
4244 
4245 	/* Init our RX/TX stat counters. */
4246 	sc->bge_tx_collisions = 0;
4247 	sc->bge_rx_discards = 0;
4248 	sc->bge_rx_inerrors = 0;
4249 	sc->bge_rx_overruns = 0;
4250 	sc->bge_tx_discards = 0;
4251 
4252 	/* Init TX ring. */
4253 	bge_init_tx_ring(sc);
4254 
4255 	/* Enable TX MAC state machine lockup fix. */
4256 	mode = CSR_READ_4(sc, BGE_TX_MODE);
4257 	if (BGE_IS_5755_PLUS(sc) ||
4258 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
4259 		mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
4260 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
4261 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
4262 		mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
4263 		mode |= CSR_READ_4(sc, BGE_TX_MODE) &
4264 		    (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
4265 	}
4266 
4267 	/* Turn on transmitter */
4268 	CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
4269 	DELAY(100);
4270 
4271 	mode = CSR_READ_4(sc, BGE_RX_MODE);
4272 	if (BGE_IS_5755_PLUS(sc))
4273 		mode |= BGE_RXMODE_IPV6_ENABLE;
4274 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
4275 		mode |= BGE_RXMODE_IPV4_FRAG_FIX;
4276 
4277 	/* Turn on receiver */
4278 	CSR_WRITE_4(sc, BGE_RX_MODE, mode | BGE_RXMODE_ENABLE);
4279 	DELAY(10);
4280 
4281 	/*
4282 	 * Set the number of good frames to receive after RX MBUF
4283 	 * Low Watermark has been reached. After the RX MAC receives
4284 	 * this number of frames, it will drop subsequent incoming
4285 	 * frames until the MBUF High Watermark is reached.
4286 	 */
4287 	if (BGE_IS_57765_PLUS(sc))
4288 		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
4289 	else
4290 		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4291 
4292 	/* Tell firmware we're alive. */
4293 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4294 
4295 	/* Enable host interrupts. */
4296 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4297 	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4298 	bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4299 
4300 	bge_ifmedia_upd(ifp);
4301 
4302 	ifp->if_flags |= IFF_RUNNING;
4303 	ifp->if_flags &= ~IFF_OACTIVE;
4304 
4305 	splx(s);
4306 
4307 	timeout_add_sec(&sc->bge_timeout, 1);
4308 }
4309 
4310 /*
4311  * Set media options.
4312  */
4313 int
4314 bge_ifmedia_upd(struct ifnet *ifp)
4315 {
4316 	struct bge_softc *sc = ifp->if_softc;
4317 	struct mii_data *mii = &sc->bge_mii;
4318 	struct ifmedia *ifm = &sc->bge_ifmedia;
4319 
4320 	/* If this is a 1000baseX NIC, enable the TBI port. */
4321 	if (sc->bge_flags & BGE_FIBER_TBI) {
4322 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4323 			return (EINVAL);
		switch (IFM_SUBTYPE(ifm->ifm_media)) {
4325 		case IFM_AUTO:
4326 			/*
4327 			 * The BCM5704 ASIC appears to have a special
4328 			 * mechanism for programming the autoneg
4329 			 * advertisement registers in TBI mode.
4330 			 */
4331 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
4332 				u_int32_t sgdig;
4333 				sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4334 				if (sgdig & BGE_SGDIGSTS_DONE) {
4335 					CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4336 					sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4337 					sgdig |= BGE_SGDIGCFG_AUTO |
4338 					    BGE_SGDIGCFG_PAUSE_CAP |
4339 					    BGE_SGDIGCFG_ASYM_PAUSE;
4340 					CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4341 					    sgdig | BGE_SGDIGCFG_SEND);
4342 					DELAY(5);
4343 					CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4344 				}
4345 			}
4346 			break;
4347 		case IFM_1000_SX:
4348 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4349 				BGE_CLRBIT(sc, BGE_MAC_MODE,
4350 				    BGE_MACMODE_HALF_DUPLEX);
4351 			} else {
4352 				BGE_SETBIT(sc, BGE_MAC_MODE,
4353 				    BGE_MACMODE_HALF_DUPLEX);
4354 			}
4355 			DELAY(40);
4356 			break;
4357 		default:
4358 			return (EINVAL);
4359 		}
4360 		/* XXX 802.3x flow control for 1000BASE-SX */
4361 		return (0);
4362 	}
4363 
4364 	BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
4365 	if (mii->mii_instance) {
4366 		struct mii_softc *miisc;
4367 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4368 			mii_phy_reset(miisc);
4369 	}
4370 	mii_mediachg(mii);
4371 
	/*
	 * Force an interrupt so that we will call bge_link_upd
	 * if needed and clear any pending link state attention.
	 * Without this we do not get any further interrupts for
	 * link state changes and thus will not bring the link up
	 * or be able to send in bge_start.  The only other way to
	 * get things working was to receive a packet and get an
	 * RX intr.
	 */
4380 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
4381 	    sc->bge_flags & BGE_IS_5788)
4382 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4383 	else
4384 		BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4385 
4386 	return (0);
4387 }
4388 
4389 /*
4390  * Report current media status.
4391  */
4392 void
4393 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4394 {
4395 	struct bge_softc *sc = ifp->if_softc;
4396 	struct mii_data *mii = &sc->bge_mii;
4397 
4398 	if (sc->bge_flags & BGE_FIBER_TBI) {
4399 		ifmr->ifm_status = IFM_AVALID;
4400 		ifmr->ifm_active = IFM_ETHER;
4401 		if (CSR_READ_4(sc, BGE_MAC_STS) &
4402 		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
4403 			ifmr->ifm_status |= IFM_ACTIVE;
4404 		} else {
4405 			ifmr->ifm_active |= IFM_NONE;
4406 			return;
4407 		}
4408 		ifmr->ifm_active |= IFM_1000_SX;
4409 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4410 			ifmr->ifm_active |= IFM_HDX;
4411 		else
4412 			ifmr->ifm_active |= IFM_FDX;
4413 		return;
4414 	}
4415 
4416 	mii_pollstat(mii);
4417 	ifmr->ifm_status = mii->mii_media_status;
4418 	ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
4419 	    sc->bge_flowflags;
4420 }
4421 
4422 int
4423 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4424 {
4425 	struct bge_softc *sc = ifp->if_softc;
4426 	struct ifaddr *ifa = (struct ifaddr *) data;
4427 	struct ifreq *ifr = (struct ifreq *) data;
4428 	int s, error = 0;
4429 	struct mii_data *mii;
4430 
4431 	s = splnet();
4432 
	switch (command) {
4434 	case SIOCSIFADDR:
4435 		ifp->if_flags |= IFF_UP;
4436 		if (!(ifp->if_flags & IFF_RUNNING))
4437 			bge_init(sc);
4438 #ifdef INET
4439 		if (ifa->ifa_addr->sa_family == AF_INET)
4440 			arp_ifinit(&sc->arpcom, ifa);
4441 #endif /* INET */
4442 		break;
4443 
4444 	case SIOCSIFFLAGS:
4445 		if (ifp->if_flags & IFF_UP) {
4446 			if (ifp->if_flags & IFF_RUNNING)
4447 				error = ENETRESET;
4448 			else
4449 				bge_init(sc);
4450 		} else {
4451 			if (ifp->if_flags & IFF_RUNNING)
4452 				bge_stop(sc);
4453 		}
4454 		break;
4455 
4456 	case SIOCSIFMEDIA:
4457 		/* XXX Flow control is not supported for 1000BASE-SX */
4458 		if (sc->bge_flags & BGE_FIBER_TBI) {
4459 			ifr->ifr_media &= ~IFM_ETH_FMASK;
4460 			sc->bge_flowflags = 0;
4461 		}
4462 
4463 		/* Flow control requires full-duplex mode. */
4464 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
4465 		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
4467 		}
4468 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
4469 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
4470 				/* We can do both TXPAUSE and RXPAUSE. */
4471 				ifr->ifr_media |=
4472 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
4473 			}
4474 			sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
4475 		}
4476 		/* FALLTHROUGH */
4477 	case SIOCGIFMEDIA:
4478 		if (sc->bge_flags & BGE_FIBER_TBI) {
4479 			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
4480 			    command);
4481 		} else {
4482 			mii = &sc->bge_mii;
4483 			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
4484 			    command);
4485 		}
4486 		break;
4487 
4488 	case SIOCGIFRXR:
4489 		error = bge_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
4490 		break;
4491 
4492 	default:
4493 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
4494 	}
4495 
4496 	if (error == ENETRESET) {
4497 		if (ifp->if_flags & IFF_RUNNING)
4498 			bge_iff(sc);
4499 		error = 0;
4500 	}
4501 
4502 	splx(s);
4503 	return (error);
4504 }
4505 
4506 int
4507 bge_rxrinfo(struct bge_softc *sc, struct if_rxrinfo *ifri)
4508 {
4509 	struct if_rxring_info ifr[2];
4510 	u_int n = 0;
4511 
4512 	memset(ifr, 0, sizeof(ifr));
4513 
4514 	if (ISSET(sc->bge_flags, BGE_RXRING_VALID)) {
4515 		ifr[n].ifr_size = MCLBYTES;
4516 		strlcpy(ifr[n].ifr_name, "std", sizeof(ifr[n].ifr_name));
4517 		ifr[n].ifr_info = sc->bge_std_ring;
4518 
4519 		n++;
4520 	}
4521 
4522 	if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID)) {
4523 		ifr[n].ifr_size = BGE_JLEN;
4524 		strlcpy(ifr[n].ifr_name, "jumbo", sizeof(ifr[n].ifr_name));
4525 		ifr[n].ifr_info = sc->bge_jumbo_ring;
4526 
4527 		n++;
4528 	}
4529 
4530 	return (if_rxr_info_ioctl(ifri, n, ifr));
4531 }
4532 
4533 void
4534 bge_watchdog(struct ifnet *ifp)
4535 {
4536 	struct bge_softc *sc;
4537 
4538 	sc = ifp->if_softc;
4539 
4540 	printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);
4541 
4542 	bge_init(sc);
4543 
4544 	ifp->if_oerrors++;
4545 }
4546 
4547 void
4548 bge_stop_block(struct bge_softc *sc, bus_size_t reg, u_int32_t bit)
4549 {
4550 	int i;
4551 
4552 	BGE_CLRBIT(sc, reg, bit);
4553 
4554 	for (i = 0; i < BGE_TIMEOUT; i++) {
4555 		if ((CSR_READ_4(sc, reg) & bit) == 0)
4556 			return;
4557 		delay(100);
4558 	}
4559 
4560 	DPRINTFN(5, ("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
4561 	    sc->bge_dev.dv_xname, (u_long) reg, bit));
4562 }
4563 
4564 /*
4565  * Stop the adapter and free any mbufs allocated to the
4566  * RX and TX lists.
4567  */
4568 void
4569 bge_stop(struct bge_softc *sc)
4570 {
4571 	struct ifnet *ifp = &sc->arpcom.ac_if;
4572 	struct ifmedia_entry *ifm;
4573 	struct mii_data *mii;
4574 	int mtmp, itmp;
4575 
4576 	timeout_del(&sc->bge_timeout);
4577 	timeout_del(&sc->bge_rxtimeout);
4578 
4579 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4580 
4581 	/*
4582 	 * Tell firmware we're shutting down.
4583 	 */
4584 	/* bge_stop_fw(sc); */
4585 	bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
4586 
4587 
4588 	/*
4589 	 * Disable all of the receiver blocks
4590 	 */
4591 	bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4592 	bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
4593 	bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
4594 	if (BGE_IS_5700_FAMILY(sc))
4595 		bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
4596 	bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
4597 	bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
4598 	bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
4599 
4600 	/*
4601 	 * Disable all of the transmit blocks
4602 	 */
4603 	bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
4604 	bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
4605 	bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
4606 	bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
4607 	bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
4608 	if (BGE_IS_5700_FAMILY(sc))
4609 		bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
4610 	bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
4611 
4612 	/*
4613 	 * Shut down all of the memory managers and related
4614 	 * state machines.
4615 	 */
4616 	bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
4617 	bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
4618 	if (BGE_IS_5700_FAMILY(sc))
4619 		bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
4620 
4621 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
4622 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
4623 
4624 	if (!BGE_IS_5705_PLUS(sc)) {
4625 		bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
4626 		bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4627 	}
4628 
4629 	bge_reset(sc);
4630 	bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
4631 	bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
4632 
4633 	/*
4634 	 * Tell firmware we're shutting down.
4635 	 */
4636 	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4637 
4638 	/* Free the RX lists. */
4639 	bge_free_rx_ring_std(sc);
4640 
4641 	/* Free jumbo RX list. */
4642 	if (BGE_IS_JUMBO_CAPABLE(sc))
4643 		bge_free_rx_ring_jumbo(sc);
4644 
4645 	/* Free TX buffers. */
4646 	bge_free_tx_ring(sc);
4647 
4648 	/*
4649 	 * Isolate/power down the PHY, but leave the media selection
4650 	 * unchanged so that things will be put back to normal when
4651 	 * we bring the interface back up.
4652 	 */
4653 	if (!(sc->bge_flags & BGE_FIBER_TBI)) {
4654 		mii = &sc->bge_mii;
4655 		itmp = ifp->if_flags;
4656 		ifp->if_flags |= IFF_UP;
4657 		ifm = mii->mii_media.ifm_cur;
4658 		mtmp = ifm->ifm_media;
4659 		ifm->ifm_media = IFM_ETHER|IFM_NONE;
4660 		mii_mediachg(mii);
4661 		ifm->ifm_media = mtmp;
4662 		ifp->if_flags = itmp;
4663 	}
4664 
4665 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
4666 
4667 	/* Clear MAC's link state (PHY may still have link UP). */
4668 	BGE_STS_CLRBIT(sc, BGE_STS_LINK);
4669 }
4670 
4671 void
4672 bge_link_upd(struct bge_softc *sc)
4673 {
4674 	struct ifnet *ifp = &sc->arpcom.ac_if;
4675 	struct mii_data *mii = &sc->bge_mii;
4676 	u_int32_t status;
4677 	int link;
4678 
4679 	/* Clear 'pending link event' flag */
4680 	BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT);
4681 
4682 	/*
4683 	 * Process link state changes.
4684 	 * Grrr. The link status word in the status block does
4685 	 * not work correctly on the BCM5700 rev AX and BX chips,
4686 	 * according to all available information. Hence, we have
4687 	 * to enable MII interrupts in order to properly obtain
4688 	 * async link changes. Unfortunately, this also means that
4689 	 * we have to read the MAC status register to detect link
4690 	 * changes, thereby adding an additional register access to
4691 	 * the interrupt handler.
	 */
4694 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) {
4695 		status = CSR_READ_4(sc, BGE_MAC_STS);
4696 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
4697 			mii_pollstat(mii);
4698 
4699 			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
4700 			    mii->mii_media_status & IFM_ACTIVE &&
4701 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
4702 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
4703 			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
4704 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
4705 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
4706 				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
4707 
4708 			/* Clear the interrupt */
4709 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
4710 			    BGE_EVTENB_MI_INTERRUPT);
4711 			bge_miibus_readreg(&sc->bge_dev, sc->bge_phy_addr,
4712 			    BRGPHY_MII_ISR);
4713 			bge_miibus_writereg(&sc->bge_dev, sc->bge_phy_addr,
4714 			    BRGPHY_MII_IMR, BRGPHY_INTRS);
4715 		}
4716 		return;
4717 	}
4718 
4719 	if (sc->bge_flags & BGE_FIBER_TBI) {
4720 		status = CSR_READ_4(sc, BGE_MAC_STS);
4721 		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
4722 			if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
4723 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
4724 				if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
4725 					BGE_CLRBIT(sc, BGE_MAC_MODE,
4726 					    BGE_MACMODE_TBI_SEND_CFGS);
4727 				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
4728 				status = CSR_READ_4(sc, BGE_MAC_MODE);
4729 				link = (status & BGE_MACMODE_HALF_DUPLEX) ?
4730 				    LINK_STATE_HALF_DUPLEX :
4731 				    LINK_STATE_FULL_DUPLEX;
4732 				ifp->if_baudrate = IF_Gbps(1);
4733 				if (ifp->if_link_state != link) {
4734 					ifp->if_link_state = link;
4735 					if_link_state_change(ifp);
4736 				}
4737 			}
4738 		} else if (BGE_STS_BIT(sc, BGE_STS_LINK)) {
4739 			BGE_STS_CLRBIT(sc, BGE_STS_LINK);
4740 			link = LINK_STATE_DOWN;
4741 			ifp->if_baudrate = 0;
4742 			if (ifp->if_link_state != link) {
4743 				ifp->if_link_state = link;
4744 				if_link_state_change(ifp);
4745 			}
4746 		}
4747 	} else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) {
4748 		/*
4749 		 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit
4750 		 * in status word always set. Workaround this bug by reading
4751 		 * PHY link status directly.
4752 		 */
		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ?
		    BGE_STS_LINK : 0;
4755 
4756 		if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) {
4757 			mii_pollstat(mii);
4758 
4759 			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
4760 			    mii->mii_media_status & IFM_ACTIVE &&
4761 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
4762 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
4763 			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
4764 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
4765 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
4766 				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
4767 		}
4768 	} else {
4769 		/*
4770 		 * For controllers that call mii_tick, we have to poll
4771 		 * link status.
4772 		 */
4773 		mii_pollstat(mii);
4774 	}
4775 
4776 	/* Clear the attention */
4777 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
4778 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
4779 	    BGE_MACSTAT_LINK_CHANGED);
4780 }
4781