1 /*	$OpenBSD: if_bge.c,v 1.392 2020/07/26 17:44:15 kettenis Exp $	*/
2 
3 /*
4  * Copyright (c) 2001 Wind River Systems
5  * Copyright (c) 1997, 1998, 1999, 2001
6  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by Bill Paul.
19  * 4. Neither the name of the author nor the names of any co-contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
27  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33  * THE POSSIBILITY OF SUCH DAMAGE.
34  *
35  * $FreeBSD: if_bge.c,v 1.25 2002/11/14 23:54:49 sam Exp $
36  */
37 
38 /*
39  * Broadcom BCM57xx/BCM590x family ethernet driver for OpenBSD.
40  *
41  * Written by Bill Paul <wpaul@windriver.com>
42  * Senior Engineer, Wind River Systems
43  */
44 
45 /*
46  * The Broadcom BCM5700 is based on technology originally developed by
47  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
48  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
49  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
50  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
51  * frames, highly configurable RX filtering, and 16 RX and TX queues
52  * (which, along with RX filter rules, can be used for QOS applications).
53  * Other features, such as TCP segmentation, may be available as part
54  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
55  * firmware images can be stored in hardware and need not be compiled
56  * into the driver.
57  *
58  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and can
59  * run on a 32-bit or 64-bit bus at 33/66MHz, or on a 64-bit PCI-X bus at 133MHz.
60  *
61  * The BCM5701 is a single-chip solution incorporating both the BCM5700
62  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
63  * does not support external SSRAM.
64  *
65  * Broadcom also produces a variation of the BCM5700 under the "Altima"
66  * brand name, which is functionally similar but lacks PCI-X support.
67  *
68  * Without external SSRAM, you can only have at most 4 TX rings,
69  * and the use of the mini RX ring is disabled. This seems to imply
70  * that these features are simply not available on the BCM5701. As a
71  * result, this driver does not implement any support for the mini RX
72  * ring.
73  */
74 
75 #include "bpfilter.h"
76 #include "vlan.h"
77 
78 #include <sys/param.h>
79 #include <sys/systm.h>
80 #include <sys/sockio.h>
81 #include <sys/mbuf.h>
82 #include <sys/malloc.h>
83 #include <sys/kernel.h>
84 #include <sys/device.h>
85 #include <sys/timeout.h>
86 #include <sys/socket.h>
87 #include <sys/atomic.h>
88 
89 #include <net/if.h>
90 #include <net/if_media.h>
91 
92 #include <netinet/in.h>
93 #include <netinet/if_ether.h>
94 
95 #if NBPFILTER > 0
96 #include <net/bpf.h>
97 #endif
98 
99 #ifdef __sparc64__
100 #include <sparc64/autoconf.h>
101 #include <dev/ofw/openfirm.h>
102 #endif
103 
104 #include <dev/pci/pcireg.h>
105 #include <dev/pci/pcivar.h>
106 #include <dev/pci/pcidevs.h>
107 
108 #include <dev/mii/mii.h>
109 #include <dev/mii/miivar.h>
110 #include <dev/mii/miidevs.h>
111 #include <dev/mii/brgphyreg.h>
112 
113 #include <dev/pci/if_bgereg.h>
114 
115 #define ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
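/*
 * Frames shorter than ETHER_MIN_NOPAD must be padded up to it before
 * transmission; see bge_cksum_pad(), which (as we understand the
 * hardware) keeps checksum offload working on short frames.
 */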
116 
117 const struct bge_revision *bge_lookup_rev(u_int32_t);
118 int bge_can_use_msi(struct bge_softc *);
119 int bge_probe(struct device *, void *, void *);
120 void bge_attach(struct device *, struct device *, void *);
121 int bge_detach(struct device *, int);
122 int bge_activate(struct device *, int);
123 
124 struct cfattach bge_ca = {
125 	sizeof(struct bge_softc), bge_probe, bge_attach, bge_detach,
126 	bge_activate
127 };
128 
129 struct cfdriver bge_cd = {
130 	NULL, "bge", DV_IFNET
131 };
132 
133 void bge_txeof(struct bge_softc *);
134 void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
135 void bge_rxeof(struct bge_softc *);
136 
137 void bge_tick(void *);
138 void bge_stats_update(struct bge_softc *);
139 void bge_stats_update_regs(struct bge_softc *);
140 int bge_cksum_pad(struct mbuf *);
141 int bge_encap(struct bge_softc *, struct mbuf *, int *);
142 int bge_compact_dma_runt(struct mbuf *);
143 
144 int bge_intr(void *);
145 void bge_start(struct ifqueue *);
146 int bge_ioctl(struct ifnet *, u_long, caddr_t);
147 int bge_rxrinfo(struct bge_softc *, struct if_rxrinfo *);
148 void bge_init(void *);
149 void bge_stop_block(struct bge_softc *, bus_size_t, u_int32_t);
150 void bge_stop(struct bge_softc *, int);
151 void bge_watchdog(struct ifnet *);
152 int bge_ifmedia_upd(struct ifnet *);
153 void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
154 
155 u_int8_t bge_nvram_getbyte(struct bge_softc *, int, u_int8_t *);
156 int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
157 u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *);
158 int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
159 
160 void bge_iff(struct bge_softc *);
161 
162 int bge_newbuf_jumbo(struct bge_softc *, int);
163 int bge_init_rx_ring_jumbo(struct bge_softc *);
164 void bge_fill_rx_ring_jumbo(struct bge_softc *);
165 void bge_free_rx_ring_jumbo(struct bge_softc *);
166 
167 int bge_newbuf(struct bge_softc *, int);
168 int bge_init_rx_ring_std(struct bge_softc *);
169 void bge_rxtick(void *);
170 void bge_fill_rx_ring_std(struct bge_softc *);
171 void bge_free_rx_ring_std(struct bge_softc *);
172 
173 void bge_free_tx_ring(struct bge_softc *);
174 int bge_init_tx_ring(struct bge_softc *);
175 
176 void bge_chipinit(struct bge_softc *);
177 int bge_blockinit(struct bge_softc *);
178 u_int32_t bge_dma_swap_options(struct bge_softc *);
179 int bge_phy_addr(struct bge_softc *);
180 
181 u_int32_t bge_readmem_ind(struct bge_softc *, int);
182 void bge_writemem_ind(struct bge_softc *, int, int);
183 void bge_writereg_ind(struct bge_softc *, int, int);
184 void bge_writembx(struct bge_softc *, int, int);
185 
186 int bge_miibus_readreg(struct device *, int, int);
187 void bge_miibus_writereg(struct device *, int, int, int);
188 void bge_miibus_statchg(struct device *);
189 
190 #define BGE_RESET_SHUTDOWN	0
191 #define BGE_RESET_START		1
192 #define BGE_RESET_SUSPEND	2
193 void bge_sig_post_reset(struct bge_softc *, int);
194 void bge_sig_legacy(struct bge_softc *, int);
195 void bge_sig_pre_reset(struct bge_softc *, int);
196 void bge_stop_fw(struct bge_softc *, int);
197 void bge_reset(struct bge_softc *);
198 void bge_link_upd(struct bge_softc *);
199 
200 void bge_ape_lock_init(struct bge_softc *);
201 void bge_ape_read_fw_ver(struct bge_softc *);
202 int bge_ape_lock(struct bge_softc *, int);
203 void bge_ape_unlock(struct bge_softc *, int);
204 void bge_ape_send_event(struct bge_softc *, uint32_t);
205 void bge_ape_driver_state_change(struct bge_softc *, int);
206 
207 #ifdef BGE_DEBUG
208 #define DPRINTF(x)	do { if (bgedebug) printf x; } while (0)
209 #define DPRINTFN(n,x)	do { if (bgedebug >= (n)) printf x; } while (0)
210 int	bgedebug = 0;
211 #else
212 #define DPRINTF(x)
213 #define DPRINTFN(n,x)
214 #endif
215 
216 /*
217  * Various supported device vendors/types and their names. Note: the
218  * spec seems to indicate that the hardware still has Alteon's vendor
219  * ID burned into it, though it will always be overridden by the vendor
220  * ID in the EEPROM. Just to be safe, we cover all possibilities.
221  */
222 const struct pci_matchid bge_devices[] = {
223 	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700 },
224 	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701 },
225 
226 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000 },
227 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001 },
228 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1003 },
229 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100 },
230 
231 	{ PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701 },
232 
233 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700 },
234 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701 },
235 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702 },
236 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT },
237 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X },
238 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703 },
239 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703_ALT },
240 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X },
241 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C },
242 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S },
243 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT },
244 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705 },
245 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F },
246 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K },
247 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M },
248 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT },
249 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714 },
250 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S },
251 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715 },
252 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S },
253 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717 },
254 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717C },
255 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718 },
256 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719 },
257 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720 },
258 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721 },
259 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722 },
260 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5723 },
261 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5725 },
262 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5727 },
263 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751 },
264 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F },
265 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M },
266 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752 },
267 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M },
268 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753 },
269 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F },
270 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M },
271 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754 },
272 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M },
273 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755 },
274 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M },
275 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756 },
276 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761 },
277 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761E },
278 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761S },
279 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761SE },
280 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5762 },
281 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5764 },
282 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780 },
283 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S },
284 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781 },
285 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782 },
286 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5784 },
287 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785F },
288 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785G },
289 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786 },
290 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787 },
291 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F },
292 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M },
293 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788 },
294 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789 },
295 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901 },
296 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2 },
297 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M },
298 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906 },
299 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M },
300 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57760 },
301 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761 },
302 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762 },
303 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57764 },
304 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765 },
305 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766 },
306 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57767 },
307 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57780 },
308 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781 },
309 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782 },
310 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785 },
311 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786 },
312 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57787 },
313 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57788 },
314 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57790 },
315 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791 },
316 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795 },
317 
318 	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE4 },
319 	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE5 },
320 	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PP250_450_LAN },
321 
322 	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9D21 },
323 
324 	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996 }
325 };
326 
327 #define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_JUMBO_CAPABLE)
328 #define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_5700_FAMILY)
329 #define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_5705_PLUS)
330 #define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_5714_FAMILY)
331 #define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_575X_PLUS)
332 #define BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGE_5755_PLUS)
333 #define BGE_IS_5717_PLUS(sc)		((sc)->bge_flags & BGE_5717_PLUS)
334 #define BGE_IS_57765_PLUS(sc)		((sc)->bge_flags & BGE_57765_PLUS)
335 
336 static const struct bge_revision {
337 	u_int32_t		br_chipid;
338 	const char		*br_name;
339 } bge_revisions[] = {
340 	{ BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
341 	{ BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
342 	{ BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
343 	{ BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
344 	{ BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
345 	{ BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
346 	{ BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
347 	{ BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
348 	{ BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
349 	{ BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
350 	{ BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
351 	{ BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
352 	/* the 5702 and 5703 share the same ASIC ID */
353 	{ BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" },
354 	{ BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" },
355 	{ BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" },
356 	{ BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" },
357 	{ BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" },
358 	{ BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
359 	{ BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
360 	{ BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
361 	{ BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
362 	{ BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
363 	{ BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
364 	{ BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
365 	{ BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
366 	{ BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
367 	{ BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
368 	{ BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
369 	{ BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
370 	{ BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
371 	{ BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
372 	{ BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
373 	{ BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
374 	{ BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
375 	{ BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
376 	{ BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
377 	{ BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
378 	{ BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
379 	{ BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
380 	{ BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
381 	{ BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
382 	{ BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
383 	{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
384 	{ BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
385 	{ BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
386 	{ BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
387 	{ BGE_CHIPID_BCM5719_A1, "BCM5719 A1" },
388 	{ BGE_CHIPID_BCM5720_A0, "BCM5720 A0" },
389 	{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
390 	{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
391 	{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
392 	{ BGE_CHIPID_BCM5755_C0, "BCM5755 C0" },
393 	{ BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
394 	{ BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
395 	{ BGE_CHIPID_BCM5762_A0, "BCM5762 A0" },
396 	{ BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
397 	{ BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
398 	/* the 5754 and 5787 share the same ASIC ID */
399 	{ BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
400 	{ BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
401 	{ BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
402 	{ BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
403 	{ BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
404 	{ BGE_CHIPID_BCM57765_A0, "BCM57765 A0" },
405 	{ BGE_CHIPID_BCM57765_B0, "BCM57765 B0" },
406 	{ BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
407 	{ BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
408 
409 	{ 0, NULL }
410 };
411 
412 /*
413  * Some defaults for major revisions, so that newer steppings
414  * that we don't know about have a shot at working.
415  */
416 static const struct bge_revision bge_majorrevs[] = {
417 	{ BGE_ASICREV_BCM5700, "unknown BCM5700" },
418 	{ BGE_ASICREV_BCM5701, "unknown BCM5701" },
419 	/* 5702 and 5703 share the same ASIC ID */
420 	{ BGE_ASICREV_BCM5703, "unknown BCM5703" },
421 	{ BGE_ASICREV_BCM5704, "unknown BCM5704" },
422 	{ BGE_ASICREV_BCM5705, "unknown BCM5705" },
423 	{ BGE_ASICREV_BCM5750, "unknown BCM5750" },
424 	{ BGE_ASICREV_BCM5714, "unknown BCM5714" },
425 	{ BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
426 	{ BGE_ASICREV_BCM5752, "unknown BCM5752" },
427 	{ BGE_ASICREV_BCM5780, "unknown BCM5780" },
428 	{ BGE_ASICREV_BCM5755, "unknown BCM5755" },
429 	{ BGE_ASICREV_BCM5761, "unknown BCM5761" },
430 	{ BGE_ASICREV_BCM5784, "unknown BCM5784" },
431 	{ BGE_ASICREV_BCM5785, "unknown BCM5785" },
432 	/* 5754 and 5787 share the same ASIC ID */
433 	{ BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
434 	{ BGE_ASICREV_BCM5906, "unknown BCM5906" },
435 	{ BGE_ASICREV_BCM57765, "unknown BCM57765" },
436 	{ BGE_ASICREV_BCM57766, "unknown BCM57766" },
437 	{ BGE_ASICREV_BCM57780, "unknown BCM57780" },
438 	{ BGE_ASICREV_BCM5717, "unknown BCM5717" },
439 	{ BGE_ASICREV_BCM5719, "unknown BCM5719" },
440 	{ BGE_ASICREV_BCM5720, "unknown BCM5720" },
441 	{ BGE_ASICREV_BCM5762, "unknown BCM5762" },
442 
443 	{ 0, NULL }
444 };
445 
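/*
 * Indirect access to the chip's internal memory: an offset written to
 * BGE_PCI_MEMWIN_BASEADDR points a window at that region, and the word
 * is then transferred through BGE_PCI_MEMWIN_DATA.  The window base is
 * reset to 0 afterwards so other code always finds it in a known state.
 */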
446 u_int32_t
447 bge_readmem_ind(struct bge_softc *sc, int off)
448 {
449 	struct pci_attach_args	*pa = &(sc->bge_pa);
450 	u_int32_t val;
451 
452 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
453 	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
454 		return (0);
455 
456 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
457 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA);
458 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, 0);
459 	return (val);
460 }
461 
462 void
463 bge_writemem_ind(struct bge_softc *sc, int off, int val)
464 {
465 	struct pci_attach_args	*pa = &(sc->bge_pa);
466 
467 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
468 	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
469 		return;
470 
471 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
472 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val);
473 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, 0);
474 }
475 
476 void
477 bge_writereg_ind(struct bge_softc *sc, int off, int val)
478 {
479 	struct pci_attach_args	*pa = &(sc->bge_pa);
480 
481 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
482 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val);
483 }
484 
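/*
 * Mailbox writes.  The BCM5906 keeps its mailboxes in an alternate
 * register range (a "low priority" block, going by the BGE_LPMBX
 * prefix), so the offset is rebased before the write.
 */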
485 void
486 bge_writembx(struct bge_softc *sc, int off, int val)
487 {
488 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
489 		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
490 
491 	CSR_WRITE_4(sc, off, val);
492 }
493 
494 /*
495  * Clear all stale locks and select the lock for this driver instance.
496  */
497 void
498 bge_ape_lock_init(struct bge_softc *sc)
499 {
500 	struct pci_attach_args *pa = &(sc->bge_pa);
501 	uint32_t bit, regbase;
502 	int i;
503 
504 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
505 		regbase = BGE_APE_LOCK_GRANT;
506 	else
507 		regbase = BGE_APE_PER_LOCK_GRANT;
508 
509 	/* Clear any stale locks. */
510 	for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
511 		switch (i) {
512 		case BGE_APE_LOCK_PHY0:
513 		case BGE_APE_LOCK_PHY1:
514 		case BGE_APE_LOCK_PHY2:
515 		case BGE_APE_LOCK_PHY3:
516 			bit = BGE_APE_LOCK_GRANT_DRIVER0;
517 			break;
518 		default:
519 			if (pa->pa_function == 0)
520 				bit = BGE_APE_LOCK_GRANT_DRIVER0;
521 			else
522 				bit = (1 << pa->pa_function);
523 		}
524 		APE_WRITE_4(sc, regbase + 4 * i, bit);
525 	}
526 
527 	/* Select the PHY lock based on the device's function number. */
528 	switch (pa->pa_function) {
529 	case 0:
530 		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0;
531 		break;
532 	case 1:
533 		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1;
534 		break;
535 	case 2:
536 		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2;
537 		break;
538 	case 3:
539 		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3;
540 		break;
541 	default:
542 		printf("%s: PHY lock not supported on function %d\n",
543 		    sc->bge_dev.dv_xname, pa->pa_function);
544 		break;
545 	}
546 }
547 
548 /*
549  * Check for APE firmware, set flags, and print version info.
550  */
551 void
552 bge_ape_read_fw_ver(struct bge_softc *sc)
553 {
554 	const char *fwtype;
555 	uint32_t apedata, features;
556 
557 	/* Check for a valid APE signature in shared memory. */
558 	apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
559 	if (apedata != BGE_APE_SEG_SIG_MAGIC) {
560 		sc->bge_mfw_flags &= ~BGE_MFW_ON_APE;
561 		return;
562 	}
563 
564 	/* Check if APE firmware is running. */
565 	apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
566 	if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
567 		printf("%s: APE signature found but FW status not ready! "
568 		    "0x%08x\n", sc->bge_dev.dv_xname, apedata);
569 		return;
570 	}
571 
572 	sc->bge_mfw_flags |= BGE_MFW_ON_APE;
573 
574 	/* Fetch the APE firmware type and version. */
575 	apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
576 	features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
577 	if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) {
578 		sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI;
579 		fwtype = "NCSI";
580 	} else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) {
581 		sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH;
582 		fwtype = "DASH";
583 	} else
584 		fwtype = "UNKN";
585 
586 	/* Print the APE firmware version. */
587 	printf(", APE firmware %s %d.%d.%d.%d", fwtype,
588 	    (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
589 	    (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
590 	    (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
591 	    (apedata & BGE_APE_FW_VERSION_BLDMSK));
592 }
593 
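/*
 * Acquire one of the arbitration locks shared with the APE firmware.
 * The handshake: set our request bit in the lock's REQ register, then
 * poll the GRANT register until exactly our bit is set.  On timeout the
 * request is revoked and EBUSY returned.  Callers pair this with
 * bge_ape_unlock(), e.g.:
 *
 *	if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) == 0) {
 *		... access shared APE memory ...
 *		bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
 *	}
 */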
594 int
595 bge_ape_lock(struct bge_softc *sc, int locknum)
596 {
597 	struct pci_attach_args *pa = &(sc->bge_pa);
598 	uint32_t bit, gnt, req, status;
599 	int i, off;
600 
601 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
602 		return (0);
603 
604 	/* Lock request/grant registers have different bases. */
605 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) {
606 		req = BGE_APE_LOCK_REQ;
607 		gnt = BGE_APE_LOCK_GRANT;
608 	} else {
609 		req = BGE_APE_PER_LOCK_REQ;
610 		gnt = BGE_APE_PER_LOCK_GRANT;
611 	}
612 
613 	off = 4 * locknum;
614 
615 	switch (locknum) {
616 	case BGE_APE_LOCK_GPIO:
617 		/* Lock required when using GPIO. */
618 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
619 			return (0);
620 		if (pa->pa_function == 0)
621 			bit = BGE_APE_LOCK_REQ_DRIVER0;
622 		else
623 			bit = (1 << pa->pa_function);
624 		break;
625 	case BGE_APE_LOCK_GRC:
626 		/* Lock required to reset the device. */
627 		if (pa->pa_function == 0)
628 			bit = BGE_APE_LOCK_REQ_DRIVER0;
629 		else
630 			bit = (1 << pa->pa_function);
631 		break;
632 	case BGE_APE_LOCK_MEM:
633 		/* Lock required when accessing certain APE memory. */
634 		if (pa->pa_function == 0)
635 			bit = BGE_APE_LOCK_REQ_DRIVER0;
636 		else
637 			bit = (1 << pa->pa_function);
638 		break;
639 	case BGE_APE_LOCK_PHY0:
640 	case BGE_APE_LOCK_PHY1:
641 	case BGE_APE_LOCK_PHY2:
642 	case BGE_APE_LOCK_PHY3:
643 		/* Lock required when accessing PHYs. */
644 		bit = BGE_APE_LOCK_REQ_DRIVER0;
645 		break;
646 	default:
647 		return (EINVAL);
648 	}
649 
650 	/* Request a lock. */
651 	APE_WRITE_4(sc, req + off, bit);
652 
653 	/* Wait up to 1 second to acquire lock. */
654 	for (i = 0; i < 20000; i++) {
655 		status = APE_READ_4(sc, gnt + off);
656 		if (status == bit)
657 			break;
658 		DELAY(50);
659 	}
660 
661 	/* Handle any errors. */
662 	if (status != bit) {
663 		printf("%s: APE lock %d request failed! "
664 		    "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
665 		    sc->bge_dev.dv_xname,
666 		    locknum, req + off, bit & 0xFFFF, gnt + off,
667 		    status & 0xFFFF);
668 		/* Revoke the lock request. */
669 		APE_WRITE_4(sc, gnt + off, bit);
670 		return (EBUSY);
671 	}
672 
673 	return (0);
674 }
675 
676 void
677 bge_ape_unlock(struct bge_softc *sc, int locknum)
678 {
679 	struct pci_attach_args *pa = &(sc->bge_pa);
680 	uint32_t bit, gnt;
681 	int off;
682 
683 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
684 		return;
685 
686 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
687 		gnt = BGE_APE_LOCK_GRANT;
688 	else
689 		gnt = BGE_APE_PER_LOCK_GRANT;
690 
691 	off = 4 * locknum;
692 
693 	switch (locknum) {
694 	case BGE_APE_LOCK_GPIO:
695 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
696 			return;
697 		if (pa->pa_function == 0)
698 			bit = BGE_APE_LOCK_GRANT_DRIVER0;
699 		else
700 			bit = (1 << pa->pa_function);
701 		break;
702 	case BGE_APE_LOCK_GRC:
703 		if (pa->pa_function == 0)
704 			bit = BGE_APE_LOCK_GRANT_DRIVER0;
705 		else
706 			bit = (1 << pa->pa_function);
707 		break;
708 	case BGE_APE_LOCK_MEM:
709 		if (pa->pa_function == 0)
710 			bit = BGE_APE_LOCK_GRANT_DRIVER0;
711 		else
712 			bit = (1 << pa->pa_function);
713 		break;
714 	case BGE_APE_LOCK_PHY0:
715 	case BGE_APE_LOCK_PHY1:
716 	case BGE_APE_LOCK_PHY2:
717 	case BGE_APE_LOCK_PHY3:
718 		bit = BGE_APE_LOCK_GRANT_DRIVER0;
719 		break;
720 	default:
721 		return;
722 	}
723 
724 	APE_WRITE_4(sc, gnt + off, bit);
725 }
726 
727 /*
728  * Send an event to the APE firmware.
729  */
730 void
731 bge_ape_send_event(struct bge_softc *sc, uint32_t event)
732 {
733 	uint32_t apedata;
734 	int i;
735 
736 	/* NCSI does not support APE events. */
737 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
738 		return;
739 
740 	/* Wait up to 1ms for APE to service previous event. */
741 	for (i = 10; i > 0; i--) {
742 		if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
743 			break;
744 		apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
745 		if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
746 			APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
747 			    BGE_APE_EVENT_STATUS_EVENT_PENDING);
748 			bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
749 			APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
750 			break;
751 		}
752 		bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
753 		DELAY(100);
754 	}
755 	if (i == 0) {
756 		printf("%s: APE event 0x%08x send timed out\n",
757 		    sc->bge_dev.dv_xname, event);
758 	}
759 }
760 
761 void
762 bge_ape_driver_state_change(struct bge_softc *sc, int kind)
763 {
764 	uint32_t apedata, event;
765 
766 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
767 		return;
768 
769 	switch (kind) {
770 	case BGE_RESET_START:
771 		/* If this is the first load, clear the load counter. */
772 		apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
773 		if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC)
774 			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
775 		else {
776 			apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
777 			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
778 		}
779 		APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
780 		    BGE_APE_HOST_SEG_SIG_MAGIC);
781 		APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
782 		    BGE_APE_HOST_SEG_LEN_MAGIC);
783 
784 		/* Add some version info if bge(4) supports it. */
785 		APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
786 		    BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
787 		APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
788 		    BGE_APE_HOST_BEHAV_NO_PHYLOCK);
789 		APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
790 		    BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
791 		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
792 		    BGE_APE_HOST_DRVR_STATE_START);
793 		event = BGE_APE_EVENT_STATUS_STATE_START;
794 		break;
795 	case BGE_RESET_SHUTDOWN:
796 		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
797 		    BGE_APE_HOST_DRVR_STATE_UNLOAD);
798 		event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
799 		break;
800 	case BGE_RESET_SUSPEND:
801 		event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
802 		break;
803 	default:
804 		return;
805 	}
806 
807 	bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
808 	    BGE_APE_EVENT_STATUS_STATE_CHNGE);
809 }
810 
811 
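/*
 * Read one byte from NVRAM (BCM5906 only).  The software arbitration
 * semaphore (SWARB) must be held around the access; the controller
 * always returns a 32-bit word, from which the addressed byte is
 * extracted after byte-swapping.
 */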
812 u_int8_t
813 bge_nvram_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
814 {
815 	u_int32_t access, byte = 0;
816 	int i;
817 
818 	/* Lock. */
819 	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
820 	for (i = 0; i < 8000; i++) {
821 		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
822 			break;
823 		DELAY(20);
824 	}
825 	if (i == 8000)
826 		return (1);
827 
828 	/* Enable access. */
829 	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
830 	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
831 
832 	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
833 	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
834 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
835 		DELAY(10);
836 		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
837 			DELAY(10);
838 			break;
839 		}
840 	}
841 
842 	if (i == BGE_TIMEOUT * 10) {
843 		printf("%s: nvram read timed out\n", sc->bge_dev.dv_xname);
844 		return (1);
845 	}
846 
847 	/* Get result. */
848 	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
849 
850 	*dest = (swap32(byte) >> ((addr % 4) * 8)) & 0xFF;
851 
852 	/* Disable access. */
853 	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
854 
855 	/* Unlock. */
856 	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
857 	CSR_READ_4(sc, BGE_NVRAM_SWARB);
858 
859 	return (0);
860 }
861 
862 /*
863  * Read a sequence of bytes from NVRAM.
864  */
865 
866 int
867 bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
868 {
869 	int err = 0, i;
870 	u_int8_t byte = 0;
871 
872 	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
873 		return (1);
874 
875 	for (i = 0; i < cnt; i++) {
876 		err = bge_nvram_getbyte(sc, off + i, &byte);
877 		if (err)
878 			break;
879 		*(dest + i) = byte;
880 	}
881 
882 	return (err ? 1 : 0);
883 }
884 
885 /*
886  * Read a byte of data stored in the EEPROM at address 'addr'. The
887  * BCM570x supports both the traditional bitbang interface and an
888  * auto access interface for reading the EEPROM. We use the auto
889  * access method.
890  */
891 u_int8_t
892 bge_eeprom_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
893 {
894 	int i;
895 	u_int32_t byte = 0;
896 
897 	/*
898 	 * Enable use of auto EEPROM access so we can avoid
899 	 * having to use the bitbang method.
900 	 */
901 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
902 
903 	/* Reset the EEPROM, load the clock period. */
904 	CSR_WRITE_4(sc, BGE_EE_ADDR,
905 	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
906 	DELAY(20);
907 
908 	/* Issue the read EEPROM command. */
909 	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
910 
911 	/* Wait for completion */
912 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
913 		DELAY(10);
914 		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
915 			break;
916 	}
917 
918 	if (i == BGE_TIMEOUT * 10) {
919 		printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
920 		return (1);
921 	}
922 
923 	/* Get result. */
924 	byte = CSR_READ_4(sc, BGE_EE_DATA);
925 
926 	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;
927 
928 	return (0);
929 }
930 
931 /*
932  * Read a sequence of bytes from the EEPROM.
933  */
934 int
935 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
936 {
937 	int i, error = 0;
938 	u_int8_t byte = 0;
939 
940 	for (i = 0; i < cnt; i++) {
941 		error = bge_eeprom_getbyte(sc, off + i, &byte);
942 		if (error)
943 			break;
944 		*(dest + i) = byte;
945 	}
946 
947 	return (error ? 1 : 0);
948 }
949 
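/*
 * Read a PHY register through the MAC's MI communication interface.
 * Autopolling is paused around the manual access: with autopoll
 * enabled, a concurrent access may trigger PCI errors, as noted below.
 * A return value of 0 signals a failed read.
 */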
950 int
951 bge_miibus_readreg(struct device *dev, int phy, int reg)
952 {
953 	struct bge_softc *sc = (struct bge_softc *)dev;
954 	u_int32_t val, autopoll;
955 	int i;
956 
957 	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
958 		return (0);
959 
960 	/* Reading with autopolling on may trigger PCI errors */
961 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
962 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
963 		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
964 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
965 		DELAY(80);
966 	}
967 
968 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
969 	    BGE_MIPHY(phy)|BGE_MIREG(reg));
970 	CSR_READ_4(sc, BGE_MI_COMM); /* force write */
971 
972 	for (i = 0; i < 200; i++) {
973 		delay(1);
974 		val = CSR_READ_4(sc, BGE_MI_COMM);
975 		if (!(val & BGE_MICOMM_BUSY))
976 			break;
977 		delay(10);
978 	}
979 
980 	if (i == 200) {
981 		printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
982 		val = 0;
983 		goto done;
984 	}
985 
986 	val = CSR_READ_4(sc, BGE_MI_COMM);
987 
988 done:
989 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
990 		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
991 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
992 		DELAY(80);
993 	}
994 
995 	bge_ape_unlock(sc, sc->bge_phy_ape_lock);
996 
997 	if (val & BGE_MICOMM_READFAIL)
998 		return (0);
999 
1000 	return (val & 0xFFFF);
1001 }
1002 
1003 void
1004 bge_miibus_writereg(struct device *dev, int phy, int reg, int val)
1005 {
1006 	struct bge_softc *sc = (struct bge_softc *)dev;
1007 	u_int32_t autopoll;
1008 	int i;
1009 
1010 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
1011 	    (reg == MII_100T2CR || reg == BRGPHY_MII_AUXCTL))
1012 		return;
1013 
1014 	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
1015 		return;
1016 
1017 	/* Writing with autopolling on may trigger PCI errors */
1018 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
1019 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
1020 		DELAY(40);
1021 		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
1022 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
1023 		DELAY(40); /* 40 usec is supposed to be adequate */
1024 	}
1025 
1026 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
1027 	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
1028 	CSR_READ_4(sc, BGE_MI_COMM); /* force write */
1029 
1030 	for (i = 0; i < 200; i++) {
1031 		delay(1);
1032 		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
1033 			break;
1034 		delay(10);
1035 	}
1036 
1037 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
1038 		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
1039 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
1040 		DELAY(40);
1041 	}
1042 
1043 	bge_ape_unlock(sc, sc->bge_phy_ape_lock);
1044 
1045 	if (i == 200) {
1046 		printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
1047 	}
1048 }
1049 
1050 void
1051 bge_miibus_statchg(struct device *dev)
1052 {
1053 	struct bge_softc *sc = (struct bge_softc *)dev;
1054 	struct mii_data *mii = &sc->bge_mii;
1055 	u_int32_t mac_mode, rx_mode, tx_mode;
1056 
1057 	/*
1058 	 * Get flow control negotiation result.
1059 	 */
1060 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
1061 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags)
1062 		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
1063 
1064 	if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
1065 	    mii->mii_media_status & IFM_ACTIVE &&
1066 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
1067 		BGE_STS_SETBIT(sc, BGE_STS_LINK);
1068 	else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
1069 	    (!(mii->mii_media_status & IFM_ACTIVE) ||
1070 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
1071 		BGE_STS_CLRBIT(sc, BGE_STS_LINK);
1072 
1073 	if (!BGE_STS_BIT(sc, BGE_STS_LINK))
1074 		return;
1075 
1076 	/* Set the port mode (MII/GMII) to match the link speed. */
1077 	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
1078 	    ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);
1079 	tx_mode = CSR_READ_4(sc, BGE_TX_MODE);
1080 	rx_mode = CSR_READ_4(sc, BGE_RX_MODE);
1081 
1082 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
1083 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
1084 		mac_mode |= BGE_PORTMODE_GMII;
1085 	else
1086 		mac_mode |= BGE_PORTMODE_MII;
1087 
1088 	/* Set MAC flow control behavior to match link flow control settings. */
1089 	tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE;
1090 	rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE;
1091 	if (mii->mii_media_active & IFM_FDX) {
1092 		if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
1093 			tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE;
1094 		if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
1095 			rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE;
1096 	} else
1097 		mac_mode |= BGE_MACMODE_HALF_DUPLEX;
1098 
1099 	CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode);
1100 	DELAY(40);
1101 	CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode);
1102 	CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode);
1103 }
1104 
1105 /*
1106  * Initialize a standard receive ring descriptor.
1107  */
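/*
 * Note the bus_dmamap_sync() discipline below: the descriptor is synced
 * POSTWRITE before the CPU rewrites it and PREWRITE once the new
 * contents are in place, so the update is flushed before the chip can
 * fetch it, and the mbuf's own map is synced PREREAD before the chip
 * may DMA into the buffer.
 */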
1108 int
1109 bge_newbuf(struct bge_softc *sc, int i)
1110 {
1111 	bus_dmamap_t		dmap = sc->bge_cdata.bge_rx_std_map[i];
1112 	struct bge_rx_bd	*r = &sc->bge_rdata->bge_rx_std_ring[i];
1113 	struct mbuf		*m;
1114 	int			error;
1115 
1116 	m = MCLGETI(NULL, M_DONTWAIT, NULL, sc->bge_rx_std_len);
1117 	if (!m)
1118 		return (ENOBUFS);
1119 	m->m_len = m->m_pkthdr.len = sc->bge_rx_std_len;
1120 	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
1121 		m_adj(m, ETHER_ALIGN);
1122 
1123 	error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmap, m,
1124 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
1125 	if (error) {
1126 		m_freem(m);
1127 		return (ENOBUFS);
1128 	}
1129 
1130 	bus_dmamap_sync(sc->bge_dmatag, dmap, 0, dmap->dm_mapsize,
1131 	    BUS_DMASYNC_PREREAD);
1132 	sc->bge_cdata.bge_rx_std_chain[i] = m;
1133 
1134 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1135 	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
1136 		i * sizeof (struct bge_rx_bd),
1137 	    sizeof (struct bge_rx_bd),
1138 	    BUS_DMASYNC_POSTWRITE);
1139 
1140 	BGE_HOSTADDR(r->bge_addr, dmap->dm_segs[0].ds_addr);
1141 	r->bge_flags = BGE_RXBDFLAG_END;
1142 	r->bge_len = m->m_len;
1143 	r->bge_idx = i;
1144 
1145 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1146 	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
1147 		i * sizeof (struct bge_rx_bd),
1148 	    sizeof (struct bge_rx_bd),
1149 	    BUS_DMASYNC_PREWRITE);
1150 
1151 	return (0);
1152 }
1153 
1154 /*
1155  * Initialize a Jumbo receive ring descriptor.
1156  */
1157 int
1158 bge_newbuf_jumbo(struct bge_softc *sc, int i)
1159 {
1160 	bus_dmamap_t		dmap = sc->bge_cdata.bge_rx_jumbo_map[i];
1161 	struct bge_ext_rx_bd	*r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
1162 	struct mbuf		*m;
1163 	int			error;
1164 
1165 	m = MCLGETI(NULL, M_DONTWAIT, NULL, BGE_JLEN);
1166 	if (!m)
1167 		return (ENOBUFS);
1168 	m->m_len = m->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
1169 	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
1170 		m_adj(m, ETHER_ALIGN);
1171 
1172 	error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmap, m,
1173 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
1174 	if (error) {
1175 		m_freem(m);
1176 		return (ENOBUFS);
1177 	}
1178 
1179 	bus_dmamap_sync(sc->bge_dmatag, dmap, 0, dmap->dm_mapsize,
1180 	    BUS_DMASYNC_PREREAD);
1181 	sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1182 
1183 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1184 	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
1185 		i * sizeof (struct bge_ext_rx_bd),
1186 	    sizeof (struct bge_ext_rx_bd),
1187 	    BUS_DMASYNC_POSTWRITE);
1188 
1189 	/*
1190 	 * Fill in the extended RX buffer descriptor.
1191 	 */
1192 	r->bge_bd.bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1193 	r->bge_bd.bge_idx = i;
1194 	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
1195 	switch (dmap->dm_nsegs) {
1196 	case 4:
1197 		BGE_HOSTADDR(r->bge_addr3, dmap->dm_segs[3].ds_addr);
1198 		r->bge_len3 = dmap->dm_segs[3].ds_len;
1199 		/* FALLTHROUGH */
1200 	case 3:
1201 		BGE_HOSTADDR(r->bge_addr2, dmap->dm_segs[2].ds_addr);
1202 		r->bge_len2 = dmap->dm_segs[2].ds_len;
1203 		/* FALLTHROUGH */
1204 	case 2:
1205 		BGE_HOSTADDR(r->bge_addr1, dmap->dm_segs[1].ds_addr);
1206 		r->bge_len1 = dmap->dm_segs[1].ds_len;
1207 		/* FALLTHROUGH */
1208 	case 1:
1209 		BGE_HOSTADDR(r->bge_bd.bge_addr, dmap->dm_segs[0].ds_addr);
1210 		r->bge_bd.bge_len = dmap->dm_segs[0].ds_len;
1211 		break;
1212 	default:
1213 		panic("%s: %d segments", __func__, dmap->dm_nsegs);
1214 	}
1215 
1216 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1217 	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
1218 		i * sizeof (struct bge_ext_rx_bd),
1219 	    sizeof (struct bge_ext_rx_bd),
1220 	    BUS_DMASYNC_PREWRITE);
1221 
1222 	return (0);
1223 }
1224 
1225 int
1226 bge_init_rx_ring_std(struct bge_softc *sc)
1227 {
1228 	int i;
1229 
1230 	if (ISSET(sc->bge_flags, BGE_RXRING_VALID))
1231 		return (0);
1232 
1233 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1234 		if (bus_dmamap_create(sc->bge_dmatag, sc->bge_rx_std_len, 1,
1235 		    sc->bge_rx_std_len, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1236 		    &sc->bge_cdata.bge_rx_std_map[i]) != 0) {
1237 			printf("%s: unable to create dmamap for slot %d\n",
1238 			    sc->bge_dev.dv_xname, i);
1239 			goto uncreate;
1240 		}
1241 		bzero(&sc->bge_rdata->bge_rx_std_ring[i],
1242 		    sizeof(struct bge_rx_bd));
1243 	}
1244 
1245 	sc->bge_std = BGE_STD_RX_RING_CNT - 1;
1246 
1247 	/* The low water mark must be greater than the replenish threshold. */
1248 	if_rxr_init(&sc->bge_std_ring, 17, BGE_STD_RX_RING_CNT);
1249 	bge_fill_rx_ring_std(sc);
1250 
1251 	SET(sc->bge_flags, BGE_RXRING_VALID);
1252 
1253 	return (0);
1254 
1255 uncreate:
1256 	while (--i >= 0) {
1257 		bus_dmamap_destroy(sc->bge_dmatag,
1258 		    sc->bge_cdata.bge_rx_std_map[i]);
1259 	}
1260 	return (1);
1261 }
1262 
1263 /*
1264  * When the refill timeout for a ring is active, that ring is so empty
1265  * that no more packets can be received on it, so the interrupt handler
1266  * will not attempt to refill it, meaning we don't need to protect against
1267  * interrupts here.
1268  */
1269 
1270 void
1271 bge_rxtick(void *arg)
1272 {
1273 	struct bge_softc *sc = arg;
1274 
1275 	if (ISSET(sc->bge_flags, BGE_RXRING_VALID) &&
1276 	    if_rxr_inuse(&sc->bge_std_ring) <= 8)
1277 		bge_fill_rx_ring_std(sc);
1278 }
1279 
1280 void
1281 bge_rxtick_jumbo(void *arg)
1282 {
1283 	struct bge_softc *sc = arg;
1284 
1285 	if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID) &&
1286 	    if_rxr_inuse(&sc->bge_jumbo_ring) <= 8)
1287 		bge_fill_rx_ring_jumbo(sc);
1288 }
1289 
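/*
 * Refill the standard RX ring.  if_rxr_get() says how many slots we may
 * fill (respecting the ring's accounting); whatever we fail to fill is
 * handed back with if_rxr_put().  The producer index is only pushed to
 * the chip's mailbox once at least one buffer was posted.
 */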
1290 void
1291 bge_fill_rx_ring_std(struct bge_softc *sc)
1292 {
1293 	int i;
1294 	int post = 0;
1295 	u_int slots;
1296 
1297 	i = sc->bge_std;
1298 	for (slots = if_rxr_get(&sc->bge_std_ring, BGE_STD_RX_RING_CNT);
1299 	    slots > 0; slots--) {
1300 		BGE_INC(i, BGE_STD_RX_RING_CNT);
1301 
1302 		if (bge_newbuf(sc, i) != 0)
1303 			break;
1304 
1305 		sc->bge_std = i;
1306 		post = 1;
1307 	}
1308 	if_rxr_put(&sc->bge_std_ring, slots);
1309 
1310 	if (post)
1311 		bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1312 
1313 	/*
1314 	 * bge always needs more than 8 packets on the ring; if we can't
1315 	 * manage that now, try again later.
1316 	 */
1317 	if (if_rxr_inuse(&sc->bge_std_ring) <= 8)
1318 		timeout_add(&sc->bge_rxtimeout, 1);
1319 }
1320 
1321 void
1322 bge_free_rx_ring_std(struct bge_softc *sc)
1323 {
1324 	bus_dmamap_t dmap;
1325 	struct mbuf *m;
1326 	int i;
1327 
1328 	if (!ISSET(sc->bge_flags, BGE_RXRING_VALID))
1329 		return;
1330 
1331 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1332 		dmap = sc->bge_cdata.bge_rx_std_map[i];
1333 		m = sc->bge_cdata.bge_rx_std_chain[i];
1334 		if (m != NULL) {
1335 			bus_dmamap_sync(sc->bge_dmatag, dmap, 0,
1336 			    dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1337 			bus_dmamap_unload(sc->bge_dmatag, dmap);
1338 			m_freem(m);
1339 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1340 		}
1341 		bus_dmamap_destroy(sc->bge_dmatag, dmap);
1342 		sc->bge_cdata.bge_rx_std_map[i] = NULL;
1343 		bzero(&sc->bge_rdata->bge_rx_std_ring[i],
1344 		    sizeof(struct bge_rx_bd));
1345 	}
1346 
1347 	CLR(sc->bge_flags, BGE_RXRING_VALID);
1348 }
1349 
1350 int
1351 bge_init_rx_ring_jumbo(struct bge_softc *sc)
1352 {
1353 	volatile struct bge_rcb *rcb;
1354 	int i;
1355 
1356 	if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID))
1357 		return (0);
1358 
1359 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1360 		if (bus_dmamap_create(sc->bge_dmatag, BGE_JLEN, 4, BGE_JLEN, 0,
1361 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1362 		    &sc->bge_cdata.bge_rx_jumbo_map[i]) != 0) {
1363 			printf("%s: unable to create dmamap for slot %d\n",
1364 			    sc->bge_dev.dv_xname, i);
1365 			goto uncreate;
1366 		}
1367 		bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i],
1368 		    sizeof(struct bge_ext_rx_bd));
1369 	}
1370 
1371 	sc->bge_jumbo = BGE_JUMBO_RX_RING_CNT - 1;
1372 
1373 	/* The low water mark must be greater than the replenish threshold. */
1374 	if_rxr_init(&sc->bge_jumbo_ring, 17, BGE_JUMBO_RX_RING_CNT);
1375 	bge_fill_rx_ring_jumbo(sc);
1376 
1377 	SET(sc->bge_flags, BGE_JUMBO_RXRING_VALID);
1378 
1379 	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1380 	rcb->bge_maxlen_flags =
1381 	    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1382 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1383 
1384 	return (0);
1385 
1386 uncreate:
1387 	while (--i >= 0) {
1388 		bus_dmamap_destroy(sc->bge_dmatag,
1389 		    sc->bge_cdata.bge_rx_jumbo_map[i]);
1390 	}
1391 	return (1);
1392 }
1393 
1394 void
1395 bge_fill_rx_ring_jumbo(struct bge_softc *sc)
1396 {
1397 	int i;
1398 	int post = 0;
1399 	u_int slots;
1400 
1401 	i = sc->bge_jumbo;
1402 	for (slots = if_rxr_get(&sc->bge_jumbo_ring, BGE_JUMBO_RX_RING_CNT);
1403 	    slots > 0; slots--) {
1404 		BGE_INC(i, BGE_JUMBO_RX_RING_CNT);
1405 
1406 		if (bge_newbuf_jumbo(sc, i) != 0)
1407 			break;
1408 
1409 		sc->bge_jumbo = i;
1410 		post = 1;
1411 	}
1412 	if_rxr_put(&sc->bge_jumbo_ring, slots);
1413 
1414 	if (post)
1415 		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1416 
1417 	/*
1418 	 * bge always needs more than 8 packets on the ring; if we can't
1419 	 * manage that now, try again later.
1420 	 */
1421 	if (if_rxr_inuse(&sc->bge_jumbo_ring) <= 8)
1422 		timeout_add(&sc->bge_rxtimeout_jumbo, 1);
1423 }
1424 
1425 void
1426 bge_free_rx_ring_jumbo(struct bge_softc *sc)
1427 {
1428 	bus_dmamap_t dmap;
1429 	struct mbuf *m;
1430 	int i;
1431 
1432 	if (!ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID))
1433 		return;
1434 
1435 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1436 		dmap = sc->bge_cdata.bge_rx_jumbo_map[i];
1437 		m = sc->bge_cdata.bge_rx_jumbo_chain[i];
1438 		if (m != NULL) {
1439 			bus_dmamap_sync(sc->bge_dmatag, dmap, 0,
1440 			    dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1441 			bus_dmamap_unload(sc->bge_dmatag, dmap);
1442 			m_freem(m);
1443 			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1444 		}
1445 		bus_dmamap_destroy(sc->bge_dmatag, dmap);
1446 		sc->bge_cdata.bge_rx_jumbo_map[i] = NULL;
1447 		bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i],
1448 		    sizeof(struct bge_ext_rx_bd));
1449 	}
1450 
1451 	CLR(sc->bge_flags, BGE_JUMBO_RXRING_VALID);
1452 }
1453 
1454 void
1455 bge_free_tx_ring(struct bge_softc *sc)
1456 {
1457 	int i;
1458 
1459 	if (!(sc->bge_flags & BGE_TXRING_VALID))
1460 		return;
1461 
1462 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1463 		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1464 			m_freem(sc->bge_cdata.bge_tx_chain[i]);
1465 			sc->bge_cdata.bge_tx_chain[i] = NULL;
1466 			sc->bge_cdata.bge_tx_map[i] = NULL;
1467 		}
1468 		bzero(&sc->bge_rdata->bge_tx_ring[i],
1469 		    sizeof(struct bge_tx_bd));
1470 
1471 		bus_dmamap_destroy(sc->bge_dmatag, sc->bge_txdma[i]);
1472 	}
1473 
1474 	sc->bge_flags &= ~BGE_TXRING_VALID;
1475 }
1476 
1477 int
1478 bge_init_tx_ring(struct bge_softc *sc)
1479 {
1480 	int i;
1481 	bus_size_t txsegsz, txmaxsegsz;
1482 
1483 	if (sc->bge_flags & BGE_TXRING_VALID)
1484 		return (0);
1485 
1486 	sc->bge_txcnt = 0;
1487 	sc->bge_tx_saved_considx = 0;
1488 
1489 	/* Initialize transmit producer index for host-memory send ring. */
1490 	sc->bge_tx_prodidx = 0;
1491 	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
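	/*
	 * On 5700 B-step chips the producer mailbox write is repeated
	 * below; as we understand it this works around a chip erratum
	 * where a single write can be dropped.
	 */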
1492 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
1493 		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1494 
1495 	/* NIC-memory send ring not used; initialize to zero. */
1496 	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1497 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
1498 		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1499 
1500 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
1501 		txsegsz = 4096;
1502 		txmaxsegsz = BGE_JLEN;
1503 	} else {
1504 		txsegsz = MCLBYTES;
1505 		txmaxsegsz = MCLBYTES;
1506 	}
1507 
1508 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1509 		if (bus_dmamap_create(sc->bge_dmatag, txmaxsegsz,
1510 		    BGE_NTXSEG, txsegsz, 0, BUS_DMA_NOWAIT, &sc->bge_txdma[i]))
1511 			return (ENOBUFS);
1512 	}
1513 
1514 	sc->bge_flags |= BGE_TXRING_VALID;
1515 
1516 	return (0);
1517 }
1518 
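/*
 * Program the receive filters.  Multicast addresses are hashed with
 * CRC32; the low 7 bits of the hash select one of the 128 bits in the
 * BGE_MAR0 hash window.  Promiscuous mode bypasses the filter entirely,
 * and multicast ranges fall back to all-multi by setting every hash bit.
 */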
1519 void
1520 bge_iff(struct bge_softc *sc)
1521 {
1522 	struct arpcom		*ac = &sc->arpcom;
1523 	struct ifnet		*ifp = &ac->ac_if;
1524 	struct ether_multi	*enm;
1525 	struct ether_multistep  step;
1526 	u_int8_t		hashes[16];
1527 	u_int32_t		h, rxmode;
1528 
1529 	/* First, zot all the existing filters. */
1530 	rxmode = CSR_READ_4(sc, BGE_RX_MODE) & ~BGE_RXMODE_RX_PROMISC;
1531 	ifp->if_flags &= ~IFF_ALLMULTI;
1532 	memset(hashes, 0x00, sizeof(hashes));
1533 
1534 	if (ifp->if_flags & IFF_PROMISC) {
1535 		ifp->if_flags |= IFF_ALLMULTI;
1536 		rxmode |= BGE_RXMODE_RX_PROMISC;
1537 	} else if (ac->ac_multirangecnt > 0) {
1538 		ifp->if_flags |= IFF_ALLMULTI;
1539 		memset(hashes, 0xff, sizeof(hashes));
1540 	} else {
1541 		ETHER_FIRST_MULTI(step, ac, enm);
1542 		while (enm != NULL) {
1543 			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
1544 
1545 			setbit(hashes, h & 0x7F);
1546 
1547 			ETHER_NEXT_MULTI(step, enm);
1548 		}
1549 	}
1550 
1551 	bus_space_write_raw_region_4(sc->bge_btag, sc->bge_bhandle, BGE_MAR0,
1552 	    hashes, sizeof(hashes));
1553 	CSR_WRITE_4(sc, BGE_RX_MODE, rxmode);
1554 }
1555 
1556 void
1557 bge_sig_pre_reset(struct bge_softc *sc, int type)
1558 {
1559 	/* no bge_asf_mode. */
1560 
1561 	if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND)
1562 		bge_ape_driver_state_change(sc, type);
1563 }
1564 
1565 void
1566 bge_sig_post_reset(struct bge_softc *sc, int type)
1567 {
1568 	/* no bge_asf_mode. */
1569 
1570 	if (type == BGE_RESET_SHUTDOWN)
1571 		bge_ape_driver_state_change(sc, type);
1572 }
1573 
1574 void
1575 bge_sig_legacy(struct bge_softc *sc, int type)
1576 {
1577 	/* no bge_asf_mode. */
1578 }
1579 
1580 void
1581 bge_stop_fw(struct bge_softc *sc, int type)
1582 {
1583 	/* no bge_asf_mode. */
1584 }
1585 
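/*
 * Return the BGE_MODE_CTL byte/word-swap bits this host needs.  On the
 * 5720 the BMC-to-host (B2H) channel gets the same swap treatment,
 * matching the Host-2-BMC settings that bge_chipinit() later preserves.
 */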
1586 u_int32_t
1587 bge_dma_swap_options(struct bge_softc *sc)
1588 {
1589 	u_int32_t dma_options = BGE_DMA_SWAP_OPTIONS;
1590 
1591 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
1592 		dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
1593 		    BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
1594 		    BGE_MODECTL_HTX2B_ENABLE;
1595 	}
1596 
1597 	return (dma_options);
1598 }
1599 
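/*
 * Derive the PHY address.  It is normally 1, but on the 5717/5719/5720
 * each PCI function has its own PHY: the function number is offset by 8
 * for SerDes or 1 for copper, read from the SGDIG status register, or
 * from the CPMU strap register on 5717 A0 (our reading of the code
 * below).
 */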
1600 int
1601 bge_phy_addr(struct bge_softc *sc)
1602 {
1603 	struct pci_attach_args *pa = &(sc->bge_pa);
1604 	int phy_addr = 1;
1605 
1606 	switch (BGE_ASICREV(sc->bge_chipid)) {
1607 	case BGE_ASICREV_BCM5717:
1608 	case BGE_ASICREV_BCM5719:
1609 	case BGE_ASICREV_BCM5720:
1610 		phy_addr = pa->pa_function;
1611 		if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) {
1612 			phy_addr += (CSR_READ_4(sc, BGE_SGDIG_STS) &
1613 			    BGE_SGDIGSTS_IS_SERDES) ? 8 : 1;
1614 		} else {
1615 			phy_addr += (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
1616 			    BGE_CPMU_PHY_STRAP_IS_SERDES) ? 8 : 1;
1617 		}
1618 	}
1619 
1620 	return (phy_addr);
1621 }
1622 
1623 /*
1624  * Do endian, PCI and DMA initialization.
1625  */
1626 void
1627 bge_chipinit(struct bge_softc *sc)
1628 {
1629 	struct pci_attach_args	*pa = &(sc->bge_pa);
1630 	u_int32_t dma_rw_ctl, misc_ctl, mode_ctl;
1631 	int i;
1632 
1633 	/* Set endianness before we access any non-PCI registers. */
1634 	misc_ctl = BGE_INIT;
1635 	if (sc->bge_flags & BGE_TAGGED_STATUS)
1636 		misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1637 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
1638 	    misc_ctl);
1639 
1640 	/*
1641 	 * Clear the MAC statistics block in the NIC's
1642 	 * internal memory.
1643 	 */
1644 	for (i = BGE_STATS_BLOCK;
1645 	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1646 		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
1647 
1648 	for (i = BGE_STATUS_BLOCK;
1649 	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1650 		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
1651 
1652 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 ||
1653 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) {
1654 		/*
1655 		 * For the 57766 and non-Ax versions of 57765, the bootcode
1656 		 * needs to set up the PCIE Fast Training Sequence (FTS)
1657 		 * value to prevent transmit hangs.
1658 		 */
1659 		if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_57765_AX) {
1660 			CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL,
1661 			    CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL) |
1662 			    BGE_CPMU_PADRNG_CTL_RDIV2);
1663 		}
1664 	}
1665 
1666 	/*
1667 	 * Set up the PCI DMA control register.
1668 	 */
1669 	dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1670 	    BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1671 
1672 	if (sc->bge_flags & BGE_PCIE) {
1673 		if (sc->bge_mps >= 256)
1674 			dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1675 		else
1676 			dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1677 	} else if (sc->bge_flags & BGE_PCIX) {
1678 		/* PCI-X bus */
1679 		if (BGE_IS_5714_FAMILY(sc)) {
1680 			/* 256 bytes for read and write. */
1681 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1682 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1683 
1684 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
1685 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1686 			else
1687 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1688 		} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
1689 			/* 1536 bytes for read, 384 bytes for write. */
1690 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1691 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1692 		} else {
1693 			/* 384 bytes for read and write. */
1694 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1695 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1696 			    (0x0F);
1697 		}
1698 
1699 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
1700 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
1701 			u_int32_t tmp;
1702 
1703 			/* Set ONEDMA_ATONCE for hardware workaround. */
1704 			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1705 			if (tmp == 6 || tmp == 7)
1706 				dma_rw_ctl |=
1707 				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1708 
1709 			/* Set PCI-X DMA write workaround. */
1710 			dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1711 		}
1712 	} else {
1713 		/* Conventional PCI bus: 256 bytes for read and write. */
1714 		dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1715 		    BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1716 
1717 		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
1718 		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
1719 			dma_rw_ctl |= 0x0F;
1720 	}
1721 
1722 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
1723 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
1724 		dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1725 		    BGE_PCIDMARWCTL_ASRT_ALL_BE;
1726 
1727 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
1728 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
1729 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1730 
1731 	if (BGE_IS_5717_PLUS(sc)) {
1732 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1733 		if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
1734 			dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1735 
1736 		/*
1737 		 * Enable HW workaround for controllers that misinterpret
1738 		 * a status tag update and leave interrupts permanently
1739 		 * disabled.
1740 		 */
1741 		if (!BGE_IS_57765_PLUS(sc) &&
1742 		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
1743 		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5762)
1744 			dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1745 	}
1746 
1747 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);
1748 
1749 	/*
1750 	 * Set up general mode register.
1751 	 */
1752 	mode_ctl = bge_dma_swap_options(sc);
1753 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
1754 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
1755 		/* Retain Host-2-BMC settings written by APE firmware. */
1756 		mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
1757 		    (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
1758 		    BGE_MODECTL_WORDSWAP_B2HRX_DATA |
1759 		    BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
1760 	}
1761 	mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1762 	    BGE_MODECTL_TX_NO_PHDR_CSUM;
1763 
1764 	/*
1765 	 * BCM5701 B5 have a bug causing data corruption when using
1766 	 * 64-bit DMA reads, which can be terminated early and then
1767 	 * completed later as 32-bit accesses, in combination with
1768 	 * certain bridges.
1769 	 */
1770 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
1771 	    sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1772 		mode_ctl |= BGE_MODECTL_FORCE_PCI32;
1773 
1774 	CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1775 
1776 	/*
1777 	 * Disable memory write invalidate.  Apparently it is not supported
1778 	 * properly by these devices.
1779 	 */
1780 	PCI_CLRBIT(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
1781 	    PCI_COMMAND_INVALIDATE_ENABLE);
1782 
1783 #ifdef __brokenalpha__
1784 	/*
1785 	 * Must ensure that we do not cross an 8K (bytes) boundary
1786 	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
1787 	 * restriction on some ALPHA platforms with early revision
1788 	 * 21174 PCI chipsets, such as the AlphaPC 164lx.
1789 	 */
1790 	PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1791 	    BGE_PCI_READ_BNDRY_1024);
1792 #endif
1793 
1794 	/* Set the timer prescaler (always 66MHz) */
1795 	CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1796 
1797 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1798 		DELAY(40);	/* XXX */
1799 
1800 		/* Put PHY into ready state */
1801 		BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1802 		CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1803 		DELAY(40);
1804 	}
1805 }
1806 
1807 int
1808 bge_blockinit(struct bge_softc *sc)
1809 {
1810 	volatile struct bge_rcb		*rcb;
1811 	vaddr_t			rcb_addr;
1812 	bge_hostaddr		taddr;
1813 	u_int32_t		dmactl, rdmareg, mimode, val;
1814 	int			i, limit;
1815 
1816 	/*
1817 	 * Initialize the memory window pointer register so that
1818 	 * we can access the first 32K of internal NIC RAM. This will
1819 	 * allow us to set up the TX send ring RCBs and the RX return
1820 	 * ring RCBs, plus other things which live in NIC memory.
1821 	 */
1822 	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1823 
1824 	/* Configure mbuf memory pool */
1825 	if (!BGE_IS_5705_PLUS(sc)) {
1826 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1827 		    BGE_BUFFPOOL_1);
1828 
1829 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
1830 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1831 		else
1832 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1833 
1834 		/* Configure DMA resource pool */
1835 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1836 		    BGE_DMA_DESCRIPTORS);
1837 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1838 	}
1839 
1840 	/* Configure mbuf pool watermarks */
1841 	/* new Broadcom docs strongly recommend these: */
1842 	if (BGE_IS_5717_PLUS(sc)) {
1843 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1844 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1845 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1846 	} else if (BGE_IS_5705_PLUS(sc)) {
1847 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1848 
1849 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1850 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1851 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1852 		} else {
1853 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1854 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1855 		}
1856 	} else {
1857 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1858 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1859 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1860 	}
1861 
1862 	/* Configure DMA resource watermarks */
1863 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1864 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1865 
1866 	/* Enable buffer manager */
1867 	val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
1868 	/*
1869 	 * Change the arbitration algorithm for TXMBUF read requests to
1870 	 * round-robin instead of priority-based for the BCM5719.  When
1871 	 * TXFIFO is almost empty, RDMA will hold its request until
1872 	 * TXFIFO is not almost empty.
1873 	 */
1874 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
1875 		val |= BGE_BMANMODE_NO_TX_UNDERRUN;
1876 	CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1877 
1878 	/* Poll for buffer manager start indication */
1879 	for (i = 0; i < 2000; i++) {
1880 		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1881 			break;
1882 		DELAY(10);
1883 	}
1884 
1885 	if (i == 2000) {
1886 		printf("%s: buffer manager failed to start\n",
1887 		    sc->bge_dev.dv_xname);
1888 		return (ENXIO);
1889 	}
1890 
1891 	/* Enable flow-through queues */
1892 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1893 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1894 
1895 	/* Wait until queue initialization is complete */
1896 	for (i = 0; i < 2000; i++) {
1897 		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1898 			break;
1899 		DELAY(10);
1900 	}
1901 
1902 	if (i == 2000) {
1903 		printf("%s: flow-through queue init failed\n",
1904 		    sc->bge_dev.dv_xname);
1905 		return (ENXIO);
1906 	}
1907 
1908 	/*
1909 	 * Summary of rings supported by the controller:
1910 	 *
1911 	 * Standard Receive Producer Ring
1912 	 * - This ring is used to feed receive buffers for "standard"
1913 	 *   sized frames (typically 1536 bytes) to the controller.
1914 	 *
1915 	 * Jumbo Receive Producer Ring
1916 	 * - This ring is used to feed receive buffers for jumbo sized
1917 	 *   frames (i.e. anything bigger than the "standard" frames)
1918 	 *   to the controller.
1919 	 *
1920 	 * Mini Receive Producer Ring
1921 	 * - This ring is used to feed receive buffers for "mini"
1922 	 *   sized frames to the controller.
1923 	 * - This feature required external memory for the controller
1924 	 *   but was never used in a production system.  Should always
1925 	 *   be disabled.
1926 	 *
1927 	 * Receive Return Ring
1928 	 * - After the controller has placed an incoming frame into a
1929 	 *   receive buffer, that buffer is moved into a receive return
1930 	 *   ring.  The driver is then responsible for passing the
1931 	 *   buffer up to the stack.  Many versions of the controller
1932 	 *   support multiple RR rings.
1933 	 *
1934 	 * Send Ring
1935 	 * - This ring is used for outgoing frames.  Many versions of
1936 	 *   the controller support multiple send rings.
1937 	 */
1938 
1939 	/* Initialize the standard RX ring control block */
1940 	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
1941 	BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
1942 	if (BGE_IS_5717_PLUS(sc)) {
1943 		/*
1944 		 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1945 		 * Bits 15-2 : Maximum RX frame size
1946 	 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
1947 		 * Bit 0     : Reserved
1948 		 */
1949 		rcb->bge_maxlen_flags =
1950 		    BGE_RCB_MAXLEN_FLAGS(512, ETHER_MAX_DIX_LEN << 2);
1951 	} else if (BGE_IS_5705_PLUS(sc)) {
1952 		/*
1953 		 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1954 		 * Bits 15-2 : Reserved (should be 0)
1955 		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
1956 		 * Bit 0     : Reserved
1957 		 */
1958 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1959 	} else {
1960 		/*
1961 		 * Ring size is always XXX entries
1962 		 * Bits 31-16: Maximum RX frame size
1963 		 * Bits 15-2 : Reserved (should be 0)
1964 		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
1965 		 * Bit 0     : Reserved
1966 		 */
1967 		rcb->bge_maxlen_flags =
1968 		    BGE_RCB_MAXLEN_FLAGS(ETHER_MAX_DIX_LEN, 0);
1969 	}
1970 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
1971 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
1972 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
1973 		rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1974 	else
1975 		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1976 	/* Write the standard receive producer ring control block. */
1977 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1978 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1979 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1980 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1981 
1982 	/* Reset the standard receive producer ring producer index. */
1983 	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1984 
1985 	/*
1986 	 * Initialize the Jumbo RX ring control block
1987 	 * We set the 'ring disabled' bit in the flags
1988 	 * field until we're actually ready to start
1989 	 * using this ring (i.e. once we set the MTU
1990 	 * high enough to require it).
1991 	 */
1992 	if (sc->bge_flags & BGE_JUMBO_RING) {
1993 		rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1994 		BGE_HOSTADDR(rcb->bge_hostaddr,
1995 		    BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
1996 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1997 		    BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
1998 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
1999 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2000 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2001 			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
2002 		else
2003 			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
2004 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
2005 		    rcb->bge_hostaddr.bge_addr_hi);
2006 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
2007 		    rcb->bge_hostaddr.bge_addr_lo);
2008 		/* Program the jumbo receive producer ring RCB parameters. */
2009 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
2010 		    rcb->bge_maxlen_flags);
2011 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
2012 		/* Reset the jumbo receive producer ring producer index. */
2013 		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
2014 	}
2015 
2016 	/* Disable the mini receive producer ring RCB. */
2017 	if (BGE_IS_5700_FAMILY(sc)) {
2018 		/* Set up dummy disabled mini ring RCB */
2019 		rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
2020 		rcb->bge_maxlen_flags =
2021 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
2022 		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
2023 		    rcb->bge_maxlen_flags);
2024 		/* Reset the mini receive producer ring producer index. */
2025 		bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
2026 
2027 		/* XXX why? */
2028 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2029 		    offsetof(struct bge_ring_data, bge_info),
2030 		    sizeof (struct bge_gib),
2031 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2032 	}
2033 
2034 	/* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
2035 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2036 		if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
2037 		    sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
2038 		    sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
2039 			CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
2040 			    (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
2041 	}
2042 	/*
2043 	 * The BD ring replenish thresholds control how often the
2044 	 * hardware fetches new BDs from the producer rings in host
2045 	 * memory.  Setting the value too low on a busy system can
2046 	 * starve the hardware and reduce the throughput.
2047 	 *
2048 	 * Set the BD ring replenish thresholds. The recommended
2049 	 * values are 1/8th the number of descriptors allocated to
2050 	 * each ring, but since we try to avoid filling the entire
2051 	 * ring we set these to the minimal value of 8.  This needs to
2052 	 * be done on several of the supported chip revisions anyway,
2053 	 * to work around HW bugs.
2054 	 */
2055 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, 8);
2056 	if (sc->bge_flags & BGE_JUMBO_RING)
2057 		CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 8);
2058 
2059 	if (BGE_IS_5717_PLUS(sc)) {
2060 		CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4);
2061 		CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4);
2062 	}
2063 
2064 	/*
2065 	 * Disable all send rings by setting the 'ring disabled' bit
2066 	 * in the flags field of all the TX send ring control blocks,
2067 	 * located in NIC memory.
2068 	 */
2069 	if (BGE_IS_5700_FAMILY(sc)) {
2070 		/* 5700 to 5704 had 16 send rings. */
2071 		limit = BGE_TX_RINGS_EXTSSRAM_MAX;
2072 	} else if (BGE_IS_57765_PLUS(sc) ||
2073 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
2074 		limit = 2;
2075 	else if (BGE_IS_5717_PLUS(sc))
2076 		limit = 4;
2077 	else
2078 		limit = 1;
2079 	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2080 	for (i = 0; i < limit; i++) {
2081 		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2082 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
2083 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
2084 		rcb_addr += sizeof(struct bge_rcb);
2085 	}
2086 
2087 	/* Configure send ring RCB 0 (we use only the first ring) */
2088 	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2089 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
2090 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2091 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2092 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2093 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2094 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2095 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, BGE_SEND_RING_5717);
2096 	else
2097 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
2098 		    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
2099 	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2100 	    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
2101 
2102 	/*
2103 	 * Disable all receive return rings by setting the
2104 	 * 'ring disabled' bit in the flags field of all the receive
2105 	 * return ring control blocks, located in NIC memory.
2106 	 */
2107 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2108 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2109 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
2110 		/* Should be 17, use 16 until we get an SRAM map. */
2111 		limit = 16;
2112 	} else if (BGE_IS_5700_FAMILY(sc))
2113 		limit = BGE_RX_RINGS_MAX;
2114 	else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2115 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762 ||
2116 	    BGE_IS_57765_PLUS(sc))
2117 		limit = 4;
2118 	else
2119 		limit = 1;
2120 	/* Disable all receive return rings */
2121 	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2122 	for (i = 0; i < limit; i++) {
2123 		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
2124 		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
2125 		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2126 		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
2127 			BGE_RCB_FLAG_RING_DISABLED));
2128 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
2129 		bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
2130 		    (i * (sizeof(u_int64_t))), 0);
2131 		rcb_addr += sizeof(struct bge_rcb);
2132 	}
2133 
2134 	/*
2135 	 * Set up receive return ring 0.  Note that the NIC address
2136 	 * for RX return rings is 0x0.  The return rings live entirely
2137 	 * within the host, so the nicaddr field in the RCB isn't used.
2138 	 */
2139 	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2140 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
2141 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2142 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2143 	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
2144 	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2145 	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
2146 
2147 	/* Set random backoff seed for TX */
2148 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
2149 	    (sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
2150 	     sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
2151 	     sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5]) &
2152 	    BGE_TX_BACKOFF_SEED_MASK);
2153 
2154 	/* Set inter-packet gap */
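	/*
	 * 0x2620 packs a slot time of 0x20 bit-times with IPG 6 and
	 * CRS IPG 2 (the same BGE_TX_LENGTHS layout the Linux tg3
	 * driver programs).
	 */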
2155 	val = 0x2620;
2156 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
2157 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
2158 		val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
2159 		    (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
2160 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
2161 
2162 	/*
2163 	 * Specify which ring to use for packets that don't match
2164 	 * any RX rules.
2165 	 */
2166 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
2167 
2168 	/*
2169 	 * Configure number of RX lists. One interrupt distribution
2170 	 * list, sixteen active lists, one bad frames class.
2171 	 */
2172 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
2173 
2174 	/* Initialize RX list placement stats mask. */
2175 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007BFFFF);
2176 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
2177 
2178 	/* Disable host coalescing until we get it set up */
2179 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
2180 
2181 	/* Poll to make sure it's shut down. */
2182 	for (i = 0; i < 2000; i++) {
2183 		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
2184 			break;
2185 		DELAY(10);
2186 	}
2187 
2188 	if (i == 2000) {
2189 		printf("%s: host coalescing engine failed to idle\n",
2190 		    sc->bge_dev.dv_xname);
2191 		return (ENXIO);
2192 	}
2193 
2194 	/* Set up host coalescing defaults */
2195 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
2196 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
2197 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
2198 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
2199 	if (!(BGE_IS_5705_PLUS(sc))) {
2200 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
2201 		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
2202 	}
2203 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
2204 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
2205 
2206 	/* Set up address of statistics block */
2207 	if (!(BGE_IS_5705_PLUS(sc))) {
2208 		BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
2209 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
2210 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);
2211 
2212 		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
2213 		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
2214 		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
2215 	}
2216 
2217 	/* Set up address of status block */
2218 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
2219 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
2220 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
2221 
2222 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
2223 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
2224 
2225 	/* Set up status block size. */
2226 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 &&
2227 	    sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
2228 		val = BGE_STATBLKSZ_FULL;
2229 		bzero(&sc->bge_rdata->bge_status_block, BGE_STATUS_BLK_SZ);
2230 	} else {
2231 		val = BGE_STATBLKSZ_32BYTE;
2232 		bzero(&sc->bge_rdata->bge_status_block, 32);
2233 	}
2234 
2235 	/* Turn on host coalescing state machine */
2236 	CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
2237 
2238 	/* Turn on RX BD completion state machine and enable attentions */
2239 	CSR_WRITE_4(sc, BGE_RBDC_MODE,
2240 	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
2241 
2242 	/* Turn on RX list placement state machine */
2243 	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2244 
2245 	/* Turn on RX list selector state machine. */
2246 	if (!(BGE_IS_5705_PLUS(sc)))
2247 		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2248 
2249 	val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
2250 	    BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
2251 	    BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
2252 	    BGE_MACMODE_FRMHDR_DMA_ENB;
2253 
2254 	if (sc->bge_flags & BGE_FIBER_TBI)
2255 	    val |= BGE_PORTMODE_TBI;
2256 	else if (sc->bge_flags & BGE_FIBER_MII)
2257 	    val |= BGE_PORTMODE_GMII;
2258 	else
2259 	    val |= BGE_PORTMODE_MII;
2260 
2261 	/* Allow APE to send/receive frames. */
2262 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
2263 		val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
2264 
2265 	/* Turn on DMA, clear stats */
2266 	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2267 	DELAY(40);
2268 
2269 	/* Set misc. local control, enable interrupts on attentions */
2270 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
2271 
2272 #ifdef notdef
2273 	/* Assert GPIO pins for PHY reset */
2274 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
2275 	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
2276 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
2277 	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
2278 #endif
2279 
2280 	/* Turn on DMA completion state machine */
2281 	if (!(BGE_IS_5705_PLUS(sc)))
2282 		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2283 
2284 	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
2285 
2286 	/* Enable host coalescing bug fix. */
2287 	if (BGE_IS_5755_PLUS(sc))
2288 		val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2289 
2290 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)
2291 		val |= BGE_WDMAMODE_BURST_ALL_DATA;
2292 
2293 	/* Turn on write DMA state machine */
2294 	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
2295 	DELAY(40);
2296 
2297 	val = BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS;
2298 
2299 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717)
2300 		val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
2301 
2302 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2303 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2304 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
2305 		val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2306 		       BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2307 		       BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2308 
2309 	if (sc->bge_flags & BGE_PCIE)
2310 		val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2311 
2312 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
2313 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2314 		val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
2315 		    BGE_RDMAMODE_H2BNC_VLAN_DET;
2316 		/*
2317 		 * Allow multiple outstanding read requests from
2318 		 * non-LSO read DMA engine.
2319 		 */
2320 		val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
2321 	}
2322 
2323 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2324 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2325 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2326 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780 ||
2327 	    BGE_IS_5717_PLUS(sc) || BGE_IS_57765_PLUS(sc)) {
2328 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
2329 			rdmareg = BGE_RDMA_RSRVCTRL_REG2;
2330 		else
2331 			rdmareg = BGE_RDMA_RSRVCTRL;
2332 		dmactl = CSR_READ_4(sc, rdmareg);
2333 		/*
2334 		 * Adjust tx margin to prevent TX data corruption and
2335 		 * fix internal FIFO overflow.
2336 		 */
2337 		if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 ||
2338 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2339 			dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2340 			    BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
2341 			    BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
2342 			dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2343 			    BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
2344 			    BGE_RDMA_RSRVCTRL_TXMRGN_320B;
2345 		}
2346 		/*
2347 		 * Enable fix for read DMA FIFO overruns.
2348 		 * The fix is to limit the number of RX BDs
2349 	 * the hardware would fetch at a time.
2350 		 */
2351 		CSR_WRITE_4(sc, rdmareg, dmactl |
2352 		    BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2353 	}
2354 
2355 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) {
2356 		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2357 		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2358 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2359 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2360 	} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
2361 		/*
2362 		 * Allow 4KB burst length reads for non-LSO frames.
2363 		 * Enable 512B burst length reads for buffer descriptors.
2364 		 */
2365 		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2366 		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2367 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
2368 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2369 	} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2370 		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2,
2371 		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) |
2372 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2373 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2374 	}
2375 
2376 	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2377 	DELAY(40);
2378 
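	/*
	 * 5719/5720 TXMBUF workaround: scan the first half of the
	 * RDMA channel length registers and, if any channel has seen
	 * a frame longer than ETHER_MAX_LEN, set the chip-specific
	 * TX length workaround bit.
	 */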
2379 	if (sc->bge_flags & BGE_RDMA_BUG) {
2380 		for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) {
2381 			val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4);
2382 			if ((val & 0xFFFF) > ETHER_MAX_LEN)
2383 				break;
2384 			if (((val >> 16) & 0xFFFF) > ETHER_MAX_LEN)
2385 				break;
2386 		}
2387 		if (i != BGE_NUM_RDMA_CHANNELS / 2) {
2388 			val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
2389 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
2390 				val |= BGE_RDMA_TX_LENGTH_WA_5719;
2391 			else
2392 				val |= BGE_RDMA_TX_LENGTH_WA_5720;
2393 			CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
2394 		}
2395 	}
2396 
2397 	/* Turn on RX data completion state machine */
2398 	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2399 
2400 	/* Turn on RX BD initiator state machine */
2401 	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2402 
2403 	/* Turn on RX data and RX BD initiator state machine */
2404 	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2405 
2406 	/* Turn on Mbuf cluster free state machine */
2407 	if (!BGE_IS_5705_PLUS(sc))
2408 		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2409 
2410 	/* Turn on send BD completion state machine */
2411 	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2412 
2413 	/* Turn on send data completion state machine */
2414 	val = BGE_SDCMODE_ENABLE;
2415 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
2416 		val |= BGE_SDCMODE_CDELAY;
2417 	CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2418 
2419 	/* Turn on send data initiator state machine */
2420 	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2421 
2422 	/* Turn on send BD initiator state machine */
2423 	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2424 
2425 	/* Turn on send BD selector state machine */
2426 	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2427 
2428 	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007BFFFF);
2429 	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2430 	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
2431 
2432 	/* ack/clear link change events */
2433 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2434 	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2435 	    BGE_MACSTAT_LINK_CHANGED);
2436 
2437 	/* Enable PHY auto polling (for MII/GMII only) */
2438 	if (sc->bge_flags & BGE_FIBER_TBI) {
2439 		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2440 	} else {
2441 		if ((sc->bge_flags & BGE_CPMU_PRESENT) != 0)
2442 			mimode = BGE_MIMODE_500KHZ_CONST;
2443 		else
2444 			mimode = BGE_MIMODE_BASE;
2445 		if (BGE_IS_5700_FAMILY(sc) ||
2446 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705) {
2447 			mimode |= BGE_MIMODE_AUTOPOLL;
2448 			BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
2449 		}
2450 		mimode |= BGE_MIMODE_PHYADDR(sc->bge_phy_addr);
2451 		CSR_WRITE_4(sc, BGE_MI_MODE, mimode);
2452 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700)
2453 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2454 			    BGE_EVTENB_MI_INTERRUPT);
2455 	}
2456 
2457 	/*
2458 	 * Clear any pending link state attention.
2459 	 * Otherwise some link state change events may be lost until attention
2460 	 * is cleared by bge_intr() -> bge_link_upd() sequence.
2461 	 * It's not necessary on newer BCM chips - perhaps enabling link
2462 	 * state change attentions implies clearing pending attention.
2463 	 */
2464 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2465 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2466 	    BGE_MACSTAT_LINK_CHANGED);
2467 
2468 	/* Enable link state change attentions. */
2469 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2470 
2471 	return (0);
2472 }
2473 
2474 const struct bge_revision *
2475 bge_lookup_rev(u_int32_t chipid)
2476 {
2477 	const struct bge_revision *br;
2478 
2479 	for (br = bge_revisions; br->br_name != NULL; br++) {
2480 		if (br->br_chipid == chipid)
2481 			return (br);
2482 	}
2483 
2484 	for (br = bge_majorrevs; br->br_name != NULL; br++) {
2485 		if (br->br_chipid == BGE_ASICREV(chipid))
2486 			return (br);
2487 	}
2488 
2489 	return (NULL);
2490 }
2491 
2492 int
2493 bge_can_use_msi(struct bge_softc *sc)
2494 {
2495 	int can_use_msi = 0;
2496 
2497 	switch (BGE_ASICREV(sc->bge_chipid)) {
2498 	case BGE_ASICREV_BCM5714_A0:
2499 	case BGE_ASICREV_BCM5714:
2500 		/*
2501 		 * Apparently, MSI doesn't work when these chips are
2502 		 * configured in single-port mode.
2503 		 */
2504 		break;
2505 	case BGE_ASICREV_BCM5750:
2506 		if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_AX &&
2507 		    BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_BX)
2508 			can_use_msi = 1;
2509 		break;
2510 	default:
2511 		if (BGE_IS_575X_PLUS(sc))
2512 			can_use_msi = 1;
2513 	}
2514 
2515 	return (can_use_msi);
2516 }
2517 
2518 /*
2519  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2520  * against our list and return its name if we find a match. Note
2521  * that since the Broadcom controller contains VPD support, we
2522  * can get the device name string from the controller itself instead
2523  * of the compiled-in string. This is a little slow, but it guarantees
2524  * we'll always announce the right product name.
2525  */
2526 int
2527 bge_probe(struct device *parent, void *match, void *aux)
2528 {
2529 	return (pci_matchbyid(aux, bge_devices, nitems(bge_devices)));
2530 }
2531 
2532 void
2533 bge_attach(struct device *parent, struct device *self, void *aux)
2534 {
2535 	struct bge_softc	*sc = (struct bge_softc *)self;
2536 	struct pci_attach_args	*pa = aux;
2537 	pci_chipset_tag_t	pc = pa->pa_pc;
2538 	const struct bge_revision *br;
2539 	pcireg_t		pm_ctl, memtype, subid, reg;
2540 	pci_intr_handle_t	ih;
2541 	const char		*intrstr = NULL;
2542 	int			gotenaddr = 0;
2543 	u_int32_t		hwcfg = 0;
2544 	u_int32_t		mac_addr = 0;
2545 	u_int32_t		misccfg;
2546 	struct ifnet		*ifp;
2547 	caddr_t			kva;
2548 #ifdef __sparc64__
2549 	char			name[32];
2550 #endif
2551 
2552 	sc->bge_pa = *pa;
2553 
2554 	subid = pci_conf_read(pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
2555 
2556 	/*
2557 	 * Map control/status registers.
2558 	 */
2559 	DPRINTFN(5, ("Map control/status regs\n"));
2560 
2561 	DPRINTFN(5, ("pci_mapreg_map\n"));
2562 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0);
2563 	if (pci_mapreg_map(pa, BGE_PCI_BAR0, memtype, 0, &sc->bge_btag,
2564 	    &sc->bge_bhandle, NULL, &sc->bge_bsize, 0)) {
2565 		printf(": can't find mem space\n");
2566 		return;
2567 	}
2568 
2569 	/*
2570 	 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
2571 	 * can clobber the chip's PCI config-space power control registers,
2572 	 * leaving the card in D3 powersave state.
2573 	 * We do not have memory-mapped registers in this state,
2574 	 * so force the device into D0 state before starting initialization.
2575 	 */
2576 	pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
2577 	pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
2578 	pm_ctl |= (1 << 8) | PCI_PWR_D0;	/* D0 state */
2579 	pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
2580 	DELAY(1000);	/* 27 usec is allegedly sufficient */
2581 
2582 	/*
2583 	 * Save ASIC rev.
2584 	 */
2585 	sc->bge_chipid =
2586 	     (pci_conf_read(pc, pa->pa_tag, BGE_PCI_MISC_CTL)
2587 	      >> BGE_PCIMISCCTL_ASICREV_SHIFT);
2588 
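	/*
	 * Newer chips report BGE_ASICREV_USE_PRODID_REG here; for
	 * those the real revision lives in a generation-specific
	 * product ID register selected by PCI device ID below.
	 */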
2589 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2590 		switch (PCI_PRODUCT(pa->pa_id)) {
2591 		case PCI_PRODUCT_BROADCOM_BCM5717:
2592 		case PCI_PRODUCT_BROADCOM_BCM5718:
2593 		case PCI_PRODUCT_BROADCOM_BCM5719:
2594 		case PCI_PRODUCT_BROADCOM_BCM5720:
2595 		case PCI_PRODUCT_BROADCOM_BCM5725:
2596 		case PCI_PRODUCT_BROADCOM_BCM5727:
2597 		case PCI_PRODUCT_BROADCOM_BCM5762:
2598 		case PCI_PRODUCT_BROADCOM_BCM57764:
2599 		case PCI_PRODUCT_BROADCOM_BCM57767:
2600 		case PCI_PRODUCT_BROADCOM_BCM57787:
2601 			sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2602 			    BGE_PCI_GEN2_PRODID_ASICREV);
2603 			break;
2604 		case PCI_PRODUCT_BROADCOM_BCM57761:
2605 		case PCI_PRODUCT_BROADCOM_BCM57762:
2606 		case PCI_PRODUCT_BROADCOM_BCM57765:
2607 		case PCI_PRODUCT_BROADCOM_BCM57766:
2608 		case PCI_PRODUCT_BROADCOM_BCM57781:
2609 		case PCI_PRODUCT_BROADCOM_BCM57782:
2610 		case PCI_PRODUCT_BROADCOM_BCM57785:
2611 		case PCI_PRODUCT_BROADCOM_BCM57786:
2612 		case PCI_PRODUCT_BROADCOM_BCM57791:
2613 		case PCI_PRODUCT_BROADCOM_BCM57795:
2614 			sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2615 			    BGE_PCI_GEN15_PRODID_ASICREV);
2616 			break;
2617 		default:
2618 			sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2619 			    BGE_PCI_PRODID_ASICREV);
2620 			break;
2621 		}
2622 	}
2623 
2624 	sc->bge_phy_addr = bge_phy_addr(sc);
2625 
2626 	printf(", ");
2627 	br = bge_lookup_rev(sc->bge_chipid);
2628 	if (br == NULL)
2629 		printf("unknown ASIC (0x%x)", sc->bge_chipid);
2630 	else
2631 		printf("%s (0x%x)", br->br_name, sc->bge_chipid);
2632 
2633 	/*
2634 	 * PCI Express or PCI-X controller check.
2635 	 */
2636 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
2637 	    &sc->bge_expcap, NULL) != 0) {
2638 		/* Extract supported maximum payload size. */
2639 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, sc->bge_expcap +
2640 		    PCI_PCIE_DCAP);
2641 		sc->bge_mps = 128 << (reg & 0x7);
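		/*
		 * Both PCIe size fields are log2-encoded: max payload
		 * is 128 << encoding, and the max read request value
		 * programmed into the device control register later
		 * is (log2(size) - 7) in bits 14:12, hence
		 * (fls(size) - 8) << 12.
		 */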
2642 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2643 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2644 			sc->bge_expmrq = (fls(2048) - 8) << 12;
2645 		else
2646 			sc->bge_expmrq = (fls(4096) - 8) << 12;
2647 		/* Disable PCIe Active State Power Management (ASPM). */
2648 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
2649 		    sc->bge_expcap + PCI_PCIE_LCSR);
2650 		reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1);
2651 		pci_conf_write(pa->pa_pc, pa->pa_tag,
2652 		    sc->bge_expcap + PCI_PCIE_LCSR, reg);
2653 		sc->bge_flags |= BGE_PCIE;
2654 	} else {
2655 		if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
2656 		    BGE_PCISTATE_PCI_BUSMODE) == 0)
2657 			sc->bge_flags |= BGE_PCIX;
2658 	}
2659 
2660 	/*
2661 	 * SEEPROM check.
2662 	 */
2663 #ifdef __sparc64__
2664 	/*
2665 	 * Onboard interfaces on UltraSPARC systems generally don't
2666 	 * have a SEEPROM fitted.  These interfaces, and cards that
2667 	 * have FCode, are named "network" by the PROM, whereas cards
2668 	 * without FCode show up as "ethernet".  Since we don't really
2669 	 * need the information from the SEEPROM on cards that have
2670 	 * FCode it's fine to pretend they don't have one.
2671 	 */
2672 	if (OF_getprop(PCITAG_NODE(pa->pa_tag), "name", name,
2673 	    sizeof(name)) > 0 && strcmp(name, "network") == 0)
2674 		sc->bge_flags |= BGE_NO_EEPROM;
2675 #endif
2676 
2677 	/* Save chipset family. */
2678 	switch (BGE_ASICREV(sc->bge_chipid)) {
2679 	case BGE_ASICREV_BCM5762:
2680 	case BGE_ASICREV_BCM57765:
2681 	case BGE_ASICREV_BCM57766:
2682 		sc->bge_flags |= BGE_57765_PLUS;
2683 		/* FALLTHROUGH */
2684 	case BGE_ASICREV_BCM5717:
2685 	case BGE_ASICREV_BCM5719:
2686 	case BGE_ASICREV_BCM5720:
2687 		sc->bge_flags |= BGE_5717_PLUS | BGE_5755_PLUS | BGE_575X_PLUS |
2688 		    BGE_5705_PLUS | BGE_JUMBO_CAPABLE | BGE_JUMBO_RING |
2689 		    BGE_JUMBO_FRAME;
2690 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2691 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
2692 			/*
2693 			 * Enable workaround for DMA engine miscalculation
2694 			 * of TXMBUF available space.
2695 			 */
2696 			sc->bge_flags |= BGE_RDMA_BUG;
2697 
2698 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 &&
2699 			    sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
2700 				/* Jumbo frame on BCM5719 A0 does not work. */
2701 				sc->bge_flags &= ~(BGE_JUMBO_CAPABLE |
2702 				    BGE_JUMBO_RING | BGE_JUMBO_FRAME);
2703 			}
2704 		}
2705 		break;
2706 	case BGE_ASICREV_BCM5755:
2707 	case BGE_ASICREV_BCM5761:
2708 	case BGE_ASICREV_BCM5784:
2709 	case BGE_ASICREV_BCM5785:
2710 	case BGE_ASICREV_BCM5787:
2711 	case BGE_ASICREV_BCM57780:
2712 		sc->bge_flags |= BGE_5755_PLUS | BGE_575X_PLUS | BGE_5705_PLUS;
2713 		break;
2714 	case BGE_ASICREV_BCM5700:
2715 	case BGE_ASICREV_BCM5701:
2716 	case BGE_ASICREV_BCM5703:
2717 	case BGE_ASICREV_BCM5704:
2718 		sc->bge_flags |= BGE_5700_FAMILY | BGE_JUMBO_CAPABLE | BGE_JUMBO_RING;
2719 		break;
2720 	case BGE_ASICREV_BCM5714_A0:
2721 	case BGE_ASICREV_BCM5780:
2722 	case BGE_ASICREV_BCM5714:
2723 		sc->bge_flags |= BGE_5714_FAMILY | BGE_JUMBO_CAPABLE | BGE_JUMBO_STD;
2724 		/* FALLTHROUGH */
2725 	case BGE_ASICREV_BCM5750:
2726 	case BGE_ASICREV_BCM5752:
2727 	case BGE_ASICREV_BCM5906:
2728 		sc->bge_flags |= BGE_575X_PLUS;
2729 		/* FALLTHROUGH */
2730 	case BGE_ASICREV_BCM5705:
2731 		sc->bge_flags |= BGE_5705_PLUS;
2732 		break;
2733 	}
2734 
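	/*
	 * Size the standard RX ring buffers: chips that receive
	 * jumbo frames on the standard ring (BGE_JUMBO_STD) need
	 * jumbo-sized buffers, everything else gets a regular mbuf
	 * cluster.
	 */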
2735 	if (sc->bge_flags & BGE_JUMBO_STD)
2736 		sc->bge_rx_std_len = BGE_JLEN;
2737 	else
2738 		sc->bge_rx_std_len = MCLBYTES;
2739 
2740 	/*
2741 	 * When using the BCM5701 in PCI-X mode, data corruption has
2742 	 * been observed in the first few bytes of some received packets.
2743 	 * Aligning the packet buffer in memory eliminates the corruption.
2744 	 * Unfortunately, this misaligns the packet payloads.  On platforms
2745 	 * which do not support unaligned accesses, we will realign the
2746 	 * payloads by copying the received packets.
2747 	 */
2748 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
2749 	    sc->bge_flags & BGE_PCIX)
2750 		sc->bge_flags |= BGE_RX_ALIGNBUG;
2751 
2752 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2753 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) &&
2754 	    PCI_VENDOR(subid) == DELL_VENDORID)
2755 		sc->bge_phy_flags |= BGE_PHY_NO_3LED;
2756 
2757 	misccfg = CSR_READ_4(sc, BGE_MISC_CFG);
2758 	misccfg &= BGE_MISCCFG_BOARD_ID_MASK;
2759 
2760 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2761 	    (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2762 	     misccfg == BGE_MISCCFG_BOARD_ID_5788M))
2763 		sc->bge_flags |= BGE_IS_5788;
2764 
2765 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 &&
2766 	     (misccfg == 0x4000 || misccfg == 0x8000)) ||
2767 	    (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2768 	     PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
2769 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 ||
2770 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
2771 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
2772 	    (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
2773 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F ||
2774 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F ||
2775 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
2776 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 ||
2777 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 ||
2778 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795 ||
2779 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2780 		sc->bge_phy_flags |= BGE_PHY_10_100_ONLY;
2781 
2782 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2783 	    (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2784 	     (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2785 	      sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
2786 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2787 		sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
2788 
2789 	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2790 	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2791 		sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
2792 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX ||
2793 	    BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX)
2794 		sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
2795 	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2796 		sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
2797 
2798 	if ((BGE_IS_5705_PLUS(sc)) &&
2799 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 &&
2800 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
2801 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780 &&
2802 	    !BGE_IS_5717_PLUS(sc)) {
2803 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2804 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2805 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2806 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) {
2807 			if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 &&
2808 			    PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756)
2809 				sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
2810 			if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M)
2811 				sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
2812 		} else
2813 			sc->bge_phy_flags |= BGE_PHY_BER_BUG;
2814 	}
2815 
2816 	/* Identify chips with APE processor. */
2817 	switch (BGE_ASICREV(sc->bge_chipid)) {
2818 	case BGE_ASICREV_BCM5717:
2819 	case BGE_ASICREV_BCM5719:
2820 	case BGE_ASICREV_BCM5720:
2821 	case BGE_ASICREV_BCM5761:
2822 	case BGE_ASICREV_BCM5762:
2823 		sc->bge_flags |= BGE_APE;
2824 		break;
2825 	}
2826 
2827 	/* Chips with APE need BAR2 access for APE registers/memory. */
2828 	if ((sc->bge_flags & BGE_APE) != 0) {
2829 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2);
2830 		if (pci_mapreg_map(pa, BGE_PCI_BAR2, memtype, 0,
2831 		    &sc->bge_apetag, &sc->bge_apehandle, NULL,
2832 		    &sc->bge_apesize, 0)) {
2833 			printf(": couldn't map BAR2 memory\n");
2834 			goto fail_1;
2835 		}
2836 
2837 		/* Enable APE register/memory access by host driver. */
2838 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
2839 		reg |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
2840 		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
2841 		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
2842 		pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, reg);
2843 
2844 		bge_ape_lock_init(sc);
2845 		bge_ape_read_fw_ver(sc);
2846 	}
2847 
2848 	/* Identify the chips that use a CPMU. */
2849 	if (BGE_IS_5717_PLUS(sc) ||
2850 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2851 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2852 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2853 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
2854 		sc->bge_flags |= BGE_CPMU_PRESENT;
2855 
2856 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSI,
2857 	    &sc->bge_msicap, NULL)) {
2858 		if (bge_can_use_msi(sc) == 0)
2859 			pa->pa_flags &= ~PCI_FLAGS_MSI_ENABLED;
2860 	}
2861 
2862 	DPRINTFN(5, ("pci_intr_map\n"));
2863 	if (pci_intr_map_msi(pa, &ih) == 0)
2864 		sc->bge_flags |= BGE_MSI;
2865 	else if (pci_intr_map(pa, &ih)) {
2866 		printf(": couldn't map interrupt\n");
2867 		goto fail_1;
2868 	}
2869 
2870 	/*
2871 	 * All controllers except the BCM5700 support tagged status,
2872 	 * but we use tagged status only in the MSI case on BCM5717.
2873 	 * Otherwise MSI on BCM5717 does not work.
2874 	 */
2875 	if (BGE_IS_5717_PLUS(sc) && sc->bge_flags & BGE_MSI)
2876 		sc->bge_flags |= BGE_TAGGED_STATUS;
2877 
2878 	DPRINTFN(5, ("pci_intr_string\n"));
2879 	intrstr = pci_intr_string(pc, ih);
2880 
2881 	/* Try to reset the chip. */
2882 	DPRINTFN(5, ("bge_reset\n"));
2883 	bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
2884 	bge_reset(sc);
2885 
2886 	bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
2887 	bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
2888 
2889 	bge_chipinit(sc);
2890 
2891 #ifdef __sparc64__
2892 	if (!gotenaddr) {
2893 		if (OF_getprop(PCITAG_NODE(pa->pa_tag), "local-mac-address",
2894 		    sc->arpcom.ac_enaddr, ETHER_ADDR_LEN) == ETHER_ADDR_LEN)
2895 			gotenaddr = 1;
2896 	}
2897 #endif
2898 
2899 	/*
2900 	 * Get station address from the EEPROM.
2901 	 */
2902 	if (!gotenaddr) {
2903 		mac_addr = bge_readmem_ind(sc, 0x0c14);
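		/* 0x484b is ASCII "HK", apparently the bootcode's valid-MAC marker. */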
2904 		if ((mac_addr >> 16) == 0x484b) {
2905 			sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
2906 			sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
2907 			mac_addr = bge_readmem_ind(sc, 0x0c18);
2908 			sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
2909 			sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
2910 			sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
2911 			sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
2912 			gotenaddr = 1;
2913 		}
2914 	}
2915 	if (!gotenaddr) {
2916 		int mac_offset = BGE_EE_MAC_OFFSET;
2917 
2918 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2919 			mac_offset = BGE_EE_MAC_OFFSET_5906;
2920 
2921 		if (bge_read_nvram(sc, (caddr_t)&sc->arpcom.ac_enaddr,
2922 		    mac_offset + 2, ETHER_ADDR_LEN) == 0)
2923 			gotenaddr = 1;
2924 	}
2925 	if (!gotenaddr && (!(sc->bge_flags & BGE_NO_EEPROM))) {
2926 		if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
2927 		    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN) == 0)
2928 			gotenaddr = 1;
2929 	}
2930 
2931 #ifdef __sparc64__
2932 	if (!gotenaddr) {
2933 		extern void myetheraddr(u_char *);
2934 
2935 		myetheraddr(sc->arpcom.ac_enaddr);
2936 		gotenaddr = 1;
2937 	}
2938 #endif
2939 
2940 	if (!gotenaddr) {
2941 		printf(": failed to read station address\n");
2942 		goto fail_2;
2943 	}
2944 
2945 	/* Allocate the general information block and ring buffers. */
2946 	sc->bge_dmatag = pa->pa_dmat;
2947 	DPRINTFN(5, ("bus_dmamem_alloc\n"));
2948 	if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
2949 	    PAGE_SIZE, 0, &sc->bge_ring_seg, 1, &sc->bge_ring_nseg,
2950 	    BUS_DMA_NOWAIT)) {
2951 		printf(": can't alloc rx buffers\n");
2952 		goto fail_2;
2953 	}
2954 	DPRINTFN(5, ("bus_dmamem_map\n"));
2955 	if (bus_dmamem_map(sc->bge_dmatag, &sc->bge_ring_seg,
2956 	    sc->bge_ring_nseg, sizeof(struct bge_ring_data), &kva,
2957 	    BUS_DMA_NOWAIT)) {
2958 		printf(": can't map dma buffers (%lu bytes)\n",
2959 		    sizeof(struct bge_ring_data));
2960 		goto fail_3;
2961 	}
2962 	DPRINTFN(5, ("bus_dmamem_create\n"));
2963 	if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
2964 	    sizeof(struct bge_ring_data), 0,
2965 	    BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
2966 		printf(": can't create dma map\n");
2967 		goto fail_4;
2968 	}
2969 	DPRINTFN(5, ("bus_dmamem_load\n"));
2970 	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
2971 			    sizeof(struct bge_ring_data), NULL,
2972 			    BUS_DMA_NOWAIT)) {
2973 		goto fail_5;
2974 	}
2975 
2976 	DPRINTFN(5, ("bzero\n"));
2977 	sc->bge_rdata = (struct bge_ring_data *)kva;
2978 
2979 	bzero(sc->bge_rdata, sizeof(struct bge_ring_data));
2980 
2981 	/* Set default tuneable values. */
2982 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2983 	sc->bge_rx_coal_ticks = 150;
2984 	sc->bge_rx_max_coal_bds = 64;
2985 	sc->bge_tx_coal_ticks = 300;
2986 	sc->bge_tx_max_coal_bds = 400;
2987 
2988 	/* 5705 limits RX return ring to 512 entries. */
2989 	if (BGE_IS_5700_FAMILY(sc) || BGE_IS_5717_PLUS(sc))
2990 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2991 	else
2992 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2993 
2994 	/* Set up ifnet structure */
2995 	ifp = &sc->arpcom.ac_if;
2996 	ifp->if_softc = sc;
2997 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2998 	ifp->if_xflags = IFXF_MPSAFE;
2999 	ifp->if_ioctl = bge_ioctl;
3000 	ifp->if_qstart = bge_start;
3001 	ifp->if_watchdog = bge_watchdog;
3002 	ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
3003 
3004 	DPRINTFN(5, ("bcopy\n"));
3005 	bcopy(sc->bge_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
3006 
3007 	ifp->if_capabilities = IFCAP_VLAN_MTU;
3008 
3009 #if NVLAN > 0
3010 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
3011 #endif
3012 
3013 	/*
3014 	 * 5700 B0 chips do not support checksumming correctly due
3015 	 * to hardware bugs.
3016 	 *
3017 	 * It seems all controllers have a bug that can generate UDP
3018 	 * datagrams with a checksum value 0 when TX UDP checksum
3019 	 * offloading is enabled. Generating UDP checksum value 0 is
3020 	 * a violation of RFC 768.
3021 	 */
3022 	if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0)
3023 		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4;
3024 
3025 	if (BGE_IS_JUMBO_CAPABLE(sc))
3026 		ifp->if_hardmtu = BGE_JUMBO_MTU;
3027 
3028 	/*
3029 	 * Do MII setup.
3030 	 */
3031 	DPRINTFN(5, ("mii setup\n"));
3032 	sc->bge_mii.mii_ifp = ifp;
3033 	sc->bge_mii.mii_readreg = bge_miibus_readreg;
3034 	sc->bge_mii.mii_writereg = bge_miibus_writereg;
3035 	sc->bge_mii.mii_statchg = bge_miibus_statchg;
3036 
3037 	/*
3038 	 * Figure out what sort of media we have by checking the hardware
3039 	 * config word in the first 32K of internal NIC memory, or fall back to
3040 	 * examining the EEPROM if necessary.  Note: on some BCM5700 cards,
3041 	 * this value seems to be unset. If that's the case, we have to rely on
3042 	 * identifying the NIC by its PCI subsystem ID, as we do below for the
3043 	 * SysKonnect SK-9D41.
3044 	 */
3045 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
3046 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
3047 	else if (!(sc->bge_flags & BGE_NO_EEPROM)) {
3048 		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3049 		    sizeof(hwcfg))) {
3050 			printf(": failed to read media type\n");
3051 			goto fail_6;
3052 		}
3053 		hwcfg = ntohl(hwcfg);
3054 	}
3055 
3056 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
3057 	if (PCI_PRODUCT(subid) == SK_SUBSYSID_9D41 ||
3058 	    (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3059 		if (BGE_IS_5700_FAMILY(sc))
3060 		    sc->bge_flags |= BGE_FIBER_TBI;
3061 		else
3062 		    sc->bge_flags |= BGE_FIBER_MII;
3063 	}
3064 
3065 	/* Take advantage of single-shot MSI. */
3066 	if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_MSI)
3067 		CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3068 		    ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3069 
3070 	/* Hookup IRQ last. */
3071 	DPRINTFN(5, ("pci_intr_establish\n"));
3072 	sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE,
3073 	    bge_intr, sc, sc->bge_dev.dv_xname);
3074 	if (sc->bge_intrhand == NULL) {
3075 		printf(": couldn't establish interrupt");
3076 		if (intrstr != NULL)
3077 			printf(" at %s", intrstr);
3078 		printf("\n");
3079 		goto fail_6;
3080 	}
3081 
3082 	/*
3083 	 * A Broadcom chip was detected. Inform the world.
3084 	 */
3085 	printf(": %s, address %s\n", intrstr,
3086 	    ether_sprintf(sc->arpcom.ac_enaddr));
3087 
3088 	if (sc->bge_flags & BGE_FIBER_TBI) {
3089 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3090 		    bge_ifmedia_sts);
3091 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
3092 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
3093 			    0, NULL);
3094 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
3095 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
3096 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3097 	} else {
3098 		int mii_flags;
3099 
3100 		/*
3101 		 * Do transceiver setup.
3102 		 */
3103 		ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
3104 			     bge_ifmedia_sts);
3105 		mii_flags = MIIF_DOPAUSE;
3106 		if (sc->bge_flags & BGE_FIBER_MII)
3107 			mii_flags |= MIIF_HAVEFIBER;
3108 		mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff,
3109 		    sc->bge_phy_addr, MII_OFFSET_ANY, mii_flags);
3110 
3111 		if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) {
3112 			printf("%s: no PHY found!\n", sc->bge_dev.dv_xname);
3113 			ifmedia_add(&sc->bge_mii.mii_media,
3114 				    IFM_ETHER|IFM_MANUAL, 0, NULL);
3115 			ifmedia_set(&sc->bge_mii.mii_media,
3116 				    IFM_ETHER|IFM_MANUAL);
3117 		} else
3118 			ifmedia_set(&sc->bge_mii.mii_media,
3119 				    IFM_ETHER|IFM_AUTO);
3120 	}
3121 
3122 	/*
3123 	 * Call MI attach routine.
3124 	 */
3125 	if_attach(ifp);
3126 	ether_ifattach(ifp);
3127 
3128 	timeout_set(&sc->bge_timeout, bge_tick, sc);
3129 	timeout_set(&sc->bge_rxtimeout, bge_rxtick, sc);
3130 	timeout_set(&sc->bge_rxtimeout_jumbo, bge_rxtick_jumbo, sc);
3131 	return;
3132 
3133 fail_6:
3134 	bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map);
3135 
3136 fail_5:
3137 	bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
3138 
3139 fail_4:
3140 	bus_dmamem_unmap(sc->bge_dmatag, (caddr_t)sc->bge_rdata,
3141 	    sizeof(struct bge_ring_data));
3142 
3143 fail_3:
3144 	bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, sc->bge_ring_nseg);
3145 
3146 fail_2:
3147 	if ((sc->bge_flags & BGE_APE) != 0)
3148 		bus_space_unmap(sc->bge_apetag, sc->bge_apehandle,
3149 		    sc->bge_apesize);
3150 
3151 fail_1:
3152 	bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize);
3153 }
3154 
3155 int
3156 bge_detach(struct device *self, int flags)
3157 {
3158 	struct bge_softc *sc = (struct bge_softc *)self;
3159 	struct ifnet *ifp = &sc->arpcom.ac_if;
3160 
3161 	if (sc->bge_intrhand)
3162 		pci_intr_disestablish(sc->bge_pa.pa_pc, sc->bge_intrhand);
3163 
3164 	bge_stop(sc, 1);
3165 
3166 	/* Detach any PHYs we might have. */
3167 	if (LIST_FIRST(&sc->bge_mii.mii_phys) != NULL)
3168 		mii_detach(&sc->bge_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3169 
3170 	/* Delete any remaining media. */
3171 	ifmedia_delete_instance(&sc->bge_mii.mii_media, IFM_INST_ANY);
3172 
3173 	ether_ifdetach(ifp);
3174 	if_detach(ifp);
3175 
3176 	bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map);
3177 	bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
3178 	bus_dmamem_unmap(sc->bge_dmatag, (caddr_t)sc->bge_rdata,
3179 	    sizeof(struct bge_ring_data));
3180 	bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, sc->bge_ring_nseg);
3181 
3182 	if ((sc->bge_flags & BGE_APE) != 0)
3183 		bus_space_unmap(sc->bge_apetag, sc->bge_apehandle,
3184 		    sc->bge_apesize);
3185 
3186 	bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize);
3187 	return (0);
3188 }
3189 
3190 int
3191 bge_activate(struct device *self, int act)
3192 {
3193 	struct bge_softc *sc = (struct bge_softc *)self;
3194 	struct ifnet *ifp = &sc->arpcom.ac_if;
3195 	int rv = 0;
3196 
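	/*
	 * On suspend, quiesce any children first and then stop the chip;
	 * on resume, reinitialize only if the interface was configured up.
	 */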
3197 	switch (act) {
3198 	case DVACT_SUSPEND:
3199 		rv = config_activate_children(self, act);
3200 		if (ifp->if_flags & IFF_RUNNING)
3201 			bge_stop(sc, 0);
3202 		break;
3203 	case DVACT_RESUME:
3204 		if (ifp->if_flags & IFF_UP)
3205 			bge_init(sc);
3206 		break;
3207 	default:
3208 		rv = config_activate_children(self, act);
3209 		break;
3210 	}
3211 	return (rv);
3212 }
3213 
3214 void
3215 bge_reset(struct bge_softc *sc)
3216 {
3217 	struct pci_attach_args *pa = &sc->bge_pa;
3218 	pcireg_t cachesize, command, devctl;
3219 	u_int32_t reset, mac_mode, mac_mode_mask, val;
3220 	void (*write_op)(struct bge_softc *, int, int);
3221 	int i;
3222 
3223 	mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
3224 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
3225 		mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
3226 	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;
3227 
3228 	if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3229 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906) {
3230 		if (sc->bge_flags & BGE_PCIE)
3231 			write_op = bge_writembx;
3232 		else
3233 			write_op = bge_writemem_ind;
3234 	} else
3235 		write_op = bge_writereg_ind;
3236 
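	/*
	 * Acquire the NVRAM arbitration grant before resetting,
	 * presumably so an in-progress firmware NVRAM access is not
	 * clobbered by the reset.
	 */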
3237 	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5700 &&
3238 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5701 &&
3239 	    !(sc->bge_flags & BGE_NO_EEPROM)) {
3240 		CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
3241 		for (i = 0; i < 8000; i++) {
3242 			if (CSR_READ_4(sc, BGE_NVRAM_SWARB) &
3243 			    BGE_NVRAMSWARB_GNT1)
3244 				break;
3245 			DELAY(20);
3246 		}
3247 		if (i == 8000)
3248 			printf("%s: nvram lock timed out\n",
3249 			    sc->bge_dev.dv_xname);
3250 	}
3251 	/* Take APE lock when performing reset. */
3252 	bge_ape_lock(sc, BGE_APE_LOCK_GRC);
3253 
3254 	/* Save some important PCI state. */
3255 	cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
3256 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD);
3257 
3258 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
3259 	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3260 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP | BGE_PCIMISCCTL_PCISTATE_RW);
3261 
3262 	/* Disable fastboot on controllers that support it. */
3263 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
3264 	    BGE_IS_5755_PLUS(sc))
3265 		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0);
3266 
3267 	/*
3268 	 * Write the magic number to SRAM at offset 0xB50.
	 * When the firmware finishes its initialization, it will write
	 * the one's complement, ~BGE_MAGIC_NUMBER, to the same location.
3271 	 */
3272 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
3273 
3274 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3275 
3276 	if (sc->bge_flags & BGE_PCIE) {
3277 		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
3278 		    !BGE_IS_5717_PLUS(sc)) {
3279 			if (CSR_READ_4(sc, 0x7e2c) == 0x60) {
3280 				/* PCI Express 1.0 system */
3281 				CSR_WRITE_4(sc, 0x7e2c, 0x20);
3282 			}
3283 		}
3284 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3285 			/*
3286 			 * Prevent PCI Express link training
3287 			 * during global reset.
3288 			 */
3289 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
3290 			reset |= (1<<29);
3291 		}
3292 	}
3293 
3294 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
3295 		val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3296 		CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3297 		    val | BGE_VCPU_STATUS_DRV_RESET);
		val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
		CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
		    val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);

		sc->bge_flags |= BGE_NO_EEPROM;
	}
3304 
3305 	/*
3306 	 * Set GPHY Power Down Override to leave GPHY
3307 	 * powered up in D0 uninitialized.
3308 	 */
3309 	if (BGE_IS_5705_PLUS(sc) &&
3310 	    (sc->bge_flags & BGE_CPMU_PRESENT) == 0)
3311 		reset |= BGE_MISCCFG_KEEP_GPHY_POWER;
3312 
3313 	/* Issue global reset */
3314 	write_op(sc, BGE_MISC_CFG, reset);
3315 
3316 	if (sc->bge_flags & BGE_PCIE)
3317 		DELAY(100 * 1000);
3318 	else
3319 		DELAY(1000);
3320 
3321 	if (sc->bge_flags & BGE_PCIE) {
3322 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3323 			pcireg_t v;
3324 
3325 			DELAY(500000); /* wait for link training to complete */
3326 			v = pci_conf_read(pa->pa_pc, pa->pa_tag, 0xc4);
3327 			pci_conf_write(pa->pa_pc, pa->pa_tag, 0xc4, v | (1<<15));
3328 		}
3329 
3330 		devctl = pci_conf_read(pa->pa_pc, pa->pa_tag, sc->bge_expcap +
3331 		    PCI_PCIE_DCSR);
3332 		/* Clear enable no snoop and disable relaxed ordering. */
3333 		devctl &= ~(PCI_PCIE_DCSR_ERO | PCI_PCIE_DCSR_ENS);
3334 		/* Set PCI Express max payload size. */
3335 		devctl = (devctl & ~PCI_PCIE_DCSR_MPS) | sc->bge_expmrq;
3336 		/* Clear error status. */
3337 		devctl |= PCI_PCIE_DCSR_CEE | PCI_PCIE_DCSR_NFE |
3338 		    PCI_PCIE_DCSR_FEE | PCI_PCIE_DCSR_URE;
3339 		pci_conf_write(pa->pa_pc, pa->pa_tag, sc->bge_expcap +
3340 		    PCI_PCIE_DCSR, devctl);
3341 	}
3342 
3343 	/* Reset some of the PCI state that got zapped by reset */
3344 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
3345 	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3346 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP | BGE_PCIMISCCTL_PCISTATE_RW);
3347 	val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
3348 	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 &&
3349 	    (sc->bge_flags & BGE_PCIX) != 0)
3350 		val |= BGE_PCISTATE_RETRY_SAME_DMA;
3351 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
3352 		val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
3353 		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
3354 		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
3355 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, val);
3356 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
3357 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);
3358 
3359 	/* Re-enable MSI, if necessary, and enable memory arbiter. */
3360 	if (BGE_IS_5714_FAMILY(sc)) {
3361 		/* This chip disables MSI on reset. */
3362 		if (sc->bge_flags & BGE_MSI) {
3363 			val = pci_conf_read(pa->pa_pc, pa->pa_tag,
3364 			    sc->bge_msicap + PCI_MSI_MC);
3365 			pci_conf_write(pa->pa_pc, pa->pa_tag,
3366 			    sc->bge_msicap + PCI_MSI_MC,
3367 			    val | PCI_MSI_MC_MSIE);
3368 			val = CSR_READ_4(sc, BGE_MSI_MODE);
3369 			CSR_WRITE_4(sc, BGE_MSI_MODE,
3370 			    val | BGE_MSIMODE_ENABLE);
3371 		}
3372 		val = CSR_READ_4(sc, BGE_MARB_MODE);
3373 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3374 	} else
3375 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3376 
3377 	/* Fix up byte swapping */
3378 	CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc));
3379 
3380 	val = CSR_READ_4(sc, BGE_MAC_MODE);
3381 	val = (val & ~mac_mode_mask) | mac_mode;
3382 	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
3383 	DELAY(40);
3384 
3385 	bge_ape_unlock(sc, BGE_APE_LOCK_GRC);
3386 
3387 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
3388 		for (i = 0; i < BGE_TIMEOUT; i++) {
3389 			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3390 			if (val & BGE_VCPU_STATUS_INIT_DONE)
3391 				break;
3392 			DELAY(100);
3393 		}
3394 
3395 		if (i >= BGE_TIMEOUT)
3396 			printf("%s: reset timed out\n", sc->bge_dev.dv_xname);
3397 	} else {
3398 		/*
3399 		 * Poll until we see 1's complement of the magic number.
3400 		 * This indicates that the firmware initialization
3401 		 * is complete.  We expect this to fail if no SEEPROM
3402 		 * is fitted.
3403 		 */
3404 		for (i = 0; i < BGE_TIMEOUT * 10; i++) {
3405 			val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
3406 			if (val == ~BGE_MAGIC_NUMBER)
3407 				break;
3408 			DELAY(10);
3409 		}
3410 
3411 		if ((i >= BGE_TIMEOUT * 10) &&
3412 		    (!(sc->bge_flags & BGE_NO_EEPROM)))
3413 			printf("%s: firmware handshake timed out\n",
3414 			   sc->bge_dev.dv_xname);
3415 		/* BCM57765 A0 needs additional time before accessing. */
3416 		if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
3417 			DELAY(10 * 1000);       /* XXX */
3418 	}
3419 
3420 	/*
3421 	 * The 5704 in TBI mode apparently needs some special
3422 	 * adjustment to ensure the SERDES drive level is set
3423 	 * to 1.2V.
3424 	 */
3425 	if (sc->bge_flags & BGE_FIBER_TBI &&
3426 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
3427 		val = CSR_READ_4(sc, BGE_SERDES_CFG);
3428 		val = (val & ~0xFFF) | 0x880;
3429 		CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3430 	}
3431 
3432 	if (sc->bge_flags & BGE_PCIE &&
3433 	    !BGE_IS_5717_PLUS(sc) &&
3434 	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3435 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) {
3436 		/* Enable Data FIFO protection. */
3437 		val = CSR_READ_4(sc, 0x7c00);
3438 		CSR_WRITE_4(sc, 0x7c00, val | (1<<25));
3439 	}
3440 
3441 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
3442 		BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
3443 		    CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
3444 }
3445 
3446 /*
3447  * Frame reception handling. This is called if there's a frame
3448  * on the receive return list.
3449  *
3450  * Note: we have to be able to handle two possibilities here:
3451  * 1) the frame is from the jumbo receive ring
3452  * 2) the frame is from the standard receive ring
3453  */
3454 
3455 void
3456 bge_rxeof(struct bge_softc *sc)
3457 {
3458 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
3459 	struct ifnet *ifp;
3460 	uint16_t rx_prod, rx_cons;
3461 	int stdcnt = 0, jumbocnt = 0;
3462 	bus_dmamap_t dmamap;
3463 	bus_addr_t offset, toff;
3464 	bus_size_t tlen;
3465 	int tosync;
3466 	int livelocked;
3467 
3468 	rx_cons = sc->bge_rx_saved_considx;
3469 	rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx;
3470 
3471 	/* Nothing to do */
3472 	if (rx_cons == rx_prod)
3473 		return;
3474 
3475 	ifp = &sc->arpcom.ac_if;
3476 
3477 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3478 	    offsetof(struct bge_ring_data, bge_status_block),
3479 	    sizeof (struct bge_status_block),
3480 	    BUS_DMASYNC_POSTREAD);
3481 
3482 	offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
3483 	tosync = rx_prod - rx_cons;
3484 
3485 	toff = offset + (rx_cons * sizeof (struct bge_rx_bd));
3486 
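	/*
	 * The producer may have wrapped around the end of the circular
	 * return ring, so sync the tail of the ring first and then the
	 * remainder from the start.
	 */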
3487 	if (tosync < 0) {
3488 		tlen = (sc->bge_return_ring_cnt - rx_cons) *
3489 		    sizeof (struct bge_rx_bd);
3490 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3491 		    toff, tlen, BUS_DMASYNC_POSTREAD);
3492 		tosync = -tosync;
3493 	}
3494 
3495 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3496 	    offset, tosync * sizeof (struct bge_rx_bd),
3497 	    BUS_DMASYNC_POSTREAD);
3498 
3499 	while (rx_cons != rx_prod) {
3500 		struct bge_rx_bd	*cur_rx;
3501 		u_int32_t		rxidx;
3502 		struct mbuf		*m = NULL;
3503 
3504 		cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons];
3505 
3506 		rxidx = cur_rx->bge_idx;
3507 		BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3508 
3509 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3510 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3511 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
3512 
3513 			jumbocnt++;
3514 
3515 			dmamap = sc->bge_cdata.bge_rx_jumbo_map[rxidx];
3516 			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
3517 			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3518 			bus_dmamap_unload(sc->bge_dmatag, dmamap);
3519 
3520 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3521 				m_freem(m);
3522 				continue;
3523 			}
3524 		} else {
3525 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3526 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
3527 
3528 			stdcnt++;
3529 
3530 			dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
3531 			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
3532 			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3533 			bus_dmamap_unload(sc->bge_dmatag, dmamap);
3534 
3535 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3536 				m_freem(m);
3537 				continue;
3538 			}
3539 		}
3540 
3541 #ifdef __STRICT_ALIGNMENT
3542 		/*
3543 		 * The i386 allows unaligned accesses, but for other
3544 		 * platforms we must make sure the payload is aligned.
3545 		 */
3546 		if (sc->bge_flags & BGE_RX_ALIGNBUG) {
3547 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3548 			    cur_rx->bge_len);
3549 			m->m_data += ETHER_ALIGN;
3550 		}
3551 #endif
3552 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3553 
3554 		bge_rxcsum(sc, cur_rx, m);
3555 
3556 #if NVLAN > 0
3557 		if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING &&
3558 		    cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3559 			m->m_pkthdr.ether_vtag = cur_rx->bge_vlan_tag;
3560 			m->m_flags |= M_VLANTAG;
3561 		}
3562 #endif
3563 
3564 		ml_enqueue(&ml, m);
3565 	}
3566 
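	/* Tell the chip how far into the return ring we have consumed. */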
3567 	sc->bge_rx_saved_considx = rx_cons;
3568 	bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3569 
3570 	livelocked = ifiq_input(&ifp->if_rcv, &ml);
3571 	if (stdcnt) {
3572 		if_rxr_put(&sc->bge_std_ring, stdcnt);
3573 		if (livelocked)
3574 			if_rxr_livelocked(&sc->bge_std_ring);
3575 		bge_fill_rx_ring_std(sc);
3576 	}
3577 	if (jumbocnt) {
3578 		if_rxr_put(&sc->bge_jumbo_ring, jumbocnt);
3579 		if (livelocked)
3580 			if_rxr_livelocked(&sc->bge_jumbo_ring);
3581 		bge_fill_rx_ring_jumbo(sc);
3582 	}
3583 }
3584 
3585 void
3586 bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
3587 {
3588 	if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
3589 		/*
3590 		 * 5700 B0 chips do not support checksumming correctly due
3591 		 * to hardware bugs.
3592 		 */
3593 		return;
3594 	} else if (BGE_IS_5717_PLUS(sc)) {
3595 		if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
3596 			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM &&
3597 			    (cur_rx->bge_error_flag &
3598 			    BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
3599 				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
3600 
3601 			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
3602 				m->m_pkthdr.csum_flags |=
3603 				    M_TCP_CSUM_IN_OK|M_UDP_CSUM_IN_OK;
3604                         }
3605                 }
3606         } else {
3607 		if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM &&
3608 		    cur_rx->bge_ip_csum == 0xFFFF)
3609 			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
3610 
3611 		if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3612 		    m->m_pkthdr.len >= ETHER_MIN_NOPAD &&
3613 		    cur_rx->bge_tcp_udp_csum == 0xFFFF) {
3614 			m->m_pkthdr.csum_flags |=
3615 			    M_TCP_CSUM_IN_OK|M_UDP_CSUM_IN_OK;
3616 		}
3617 	}
3618 }
3619 
3620 void
3621 bge_txeof(struct bge_softc *sc)
3622 {
3623 	struct bge_tx_bd *cur_tx = NULL;
3624 	struct ifnet *ifp;
3625 	bus_dmamap_t dmamap;
3626 	bus_addr_t offset, toff;
3627 	bus_size_t tlen;
3628 	int tosync, freed, txcnt;
3629 	u_int32_t cons, newcons;
3630 	struct mbuf *m;
3631 
3632 	/* Nothing to do */
3633 	cons = sc->bge_tx_saved_considx;
3634 	newcons = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx;
3635 	if (cons == newcons)
3636 		return;
3637 
3638 	ifp = &sc->arpcom.ac_if;
3639 
3640 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3641 	    offsetof(struct bge_ring_data, bge_status_block),
3642 	    sizeof (struct bge_status_block),
3643 	    BUS_DMASYNC_POSTREAD);
3644 
3645 	offset = offsetof(struct bge_ring_data, bge_tx_ring);
3646 	tosync = newcons - cons;
3647 
3648 	toff = offset + (cons * sizeof (struct bge_tx_bd));
3649 
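	/* As in bge_rxeof(), handle wrap-around of the circular ring. */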
3650 	if (tosync < 0) {
3651 		tlen = (BGE_TX_RING_CNT - cons) * sizeof (struct bge_tx_bd);
3652 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3653 		    toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3654 		tosync = -tosync;
3655 	}
3656 
3657 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3658 	    offset, tosync * sizeof (struct bge_tx_bd),
3659 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3660 
3661 	/*
3662 	 * Go through our tx ring and free mbufs for those
3663 	 * frames that have been sent.
3664 	 */
3665 	freed = 0;
3666 	while (cons != newcons) {
3667 		cur_tx = &sc->bge_rdata->bge_tx_ring[cons];
3668 		m = sc->bge_cdata.bge_tx_chain[cons];
3669 		if (m != NULL) {
3670 			dmamap = sc->bge_cdata.bge_tx_map[cons];
3671 
3672 			sc->bge_cdata.bge_tx_chain[cons] = NULL;
3673 			sc->bge_cdata.bge_tx_map[cons] = NULL;
3674 			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
3675 			    dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3676 			bus_dmamap_unload(sc->bge_dmatag, dmamap);
3677 
3678 			m_freem(m);
3679 		}
3680 		freed++;
3681 		BGE_INC(cons, BGE_TX_RING_CNT);
3682 	}
3683 
3684 	txcnt = atomic_sub_int_nv(&sc->bge_txcnt, freed);
3685 
3686 	sc->bge_tx_saved_considx = cons;
3687 
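	/*
	 * Restart the send queue if it stalled for lack of descriptors;
	 * once all descriptors are reclaimed, clear the watchdog timer.
	 */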
3688 	if (ifq_is_oactive(&ifp->if_snd))
3689 		ifq_restart(&ifp->if_snd);
3690 	else if (txcnt == 0)
3691 		ifp->if_timer = 0;
3692 }
3693 
3694 int
3695 bge_intr(void *xsc)
3696 {
3697 	struct bge_softc *sc;
3698 	struct ifnet *ifp;
3699 	u_int32_t statusword, statustag;
3700 
3701 	sc = xsc;
3702 	ifp = &sc->arpcom.ac_if;
3703 
3704 	/* read status word from status block */
3705 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3706 	    offsetof(struct bge_ring_data, bge_status_block),
3707 	    sizeof (struct bge_status_block),
3708 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3709 
3710 	statusword = sc->bge_rdata->bge_status_block.bge_status;
3711 	statustag = sc->bge_rdata->bge_status_block.bge_status_tag << 24;
3712 
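	/*
	 * In tagged status mode the chip advances the status tag on every
	 * status block update, so an unchanged tag while the INTA line is
	 * deasserted means the interrupt is not ours.
	 */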
3713 	if (sc->bge_flags & BGE_TAGGED_STATUS) {
3714 		if (sc->bge_lasttag == statustag &&
3715 		    (CSR_READ_4(sc, BGE_PCI_PCISTATE) &
3716 		     BGE_PCISTATE_INTR_NOT_ACTIVE))
3717 			return (0);
3718 		sc->bge_lasttag = statustag;
3719 	} else {
3720 		if (!(statusword & BGE_STATFLAG_UPDATED) &&
3721 		    (CSR_READ_4(sc, BGE_PCI_PCISTATE) &
3722 		     BGE_PCISTATE_INTR_NOT_ACTIVE))
3723 			return (0);
3724 		/* Ack interrupt and stop others from occurring. */
3725 		bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3726 		statustag = 0;
3727 	}
3728 
3729 	/* clear status word */
3730 	sc->bge_rdata->bge_status_block.bge_status = 0;
3731 
3732 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3733 	    offsetof(struct bge_ring_data, bge_status_block),
3734 	    sizeof (struct bge_status_block),
3735 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3736 
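	/*
	 * The BCM5700's link state bit is unreliable, so check for link
	 * changes on every interrupt there; otherwise only when a link
	 * event has been flagged.
	 */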
3737 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
3738 	    statusword & BGE_STATFLAG_LINKSTATE_CHANGED ||
3739 	    BGE_STS_BIT(sc, BGE_STS_LINK_EVT)) {
3740 		KERNEL_LOCK();
3741 		bge_link_upd(sc);
3742 		KERNEL_UNLOCK();
3743 	}
3744 
3745 	/* Re-enable interrupts. */
3746 	bge_writembx(sc, BGE_MBX_IRQ0_LO, statustag);
3747 
3748 	if (ifp->if_flags & IFF_RUNNING) {
3749 		/* Check RX return ring producer/consumer */
3750 		bge_rxeof(sc);
3751 
3752 		/* Check TX ring producer/consumer */
3753 		bge_txeof(sc);
3754 	}
3755 
3756 	return (1);
3757 }
3758 
3759 void
3760 bge_tick(void *xsc)
3761 {
3762 	struct bge_softc *sc = xsc;
3763 	struct mii_data *mii = &sc->bge_mii;
3764 	int s;
3765 
3766 	s = splnet();
3767 
3768 	if (BGE_IS_5705_PLUS(sc))
3769 		bge_stats_update_regs(sc);
3770 	else
3771 		bge_stats_update(sc);
3772 
3773 	if (sc->bge_flags & BGE_FIBER_TBI) {
3774 		/*
		 * Since auto-polling can't be used in TBI mode, we have to
		 * poll link status manually. Here we register a pending link
		 * event and trigger an interrupt.
3778 		 */
3779 		BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
3780 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3781 	} else {
3782 		/*
3783 		 * Do not touch PHY if we have link up. This could break
3784 		 * IPMI/ASF mode or produce extra input errors.
3785 		 * (extra input errors was reported for bcm5701 & bcm5704).
3786 		 */
3787 		if (!BGE_STS_BIT(sc, BGE_STS_LINK))
3788 			mii_tick(mii);
3789 	}
3790 
3791 	timeout_add_sec(&sc->bge_timeout, 1);
3792 
3793 	splx(s);
3794 }
3795 
3796 void
3797 bge_stats_update_regs(struct bge_softc *sc)
3798 {
3799 	struct ifnet *ifp = &sc->arpcom.ac_if;
3800 
3801 	sc->bge_tx_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
3802 	    offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
3803 
3804 	sc->bge_rx_overruns += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
3805 
3806 	/*
3807 	 * XXX
3808 	 * Unlike other controllers, the BGE_RXLP_LOCSTAT_IFIN_DROPS counter
3809 	 * of the BCM5717, BCM5718, BCM5762, BCM5719 A0 and BCM5720 A0
3810 	 * controllers includes the number of unwanted multicast frames.
	 * This comes from a silicon bug, and the known workaround to get a
	 * rough (not exact) count is to enable an interrupt on MBUF low
	 * watermark attention. This can be accomplished by setting the
	 * BGE_HCCMODE_ATTN bit of BGE_HDD_MODE, the BGE_BMANMODE_LOMBUF_ATTN
	 * bit of BGE_BMAN_MODE and the BGE_MODECTL_FLOWCTL_ATTN_INTR bit of
	 * BGE_MODE_CTL. However, that change would generate more interrupts,
	 * and multiple frames could still be lost during
	 * BGE_MODECTL_FLOWCTL_ATTN_INTR interrupt handling. Given that the
	 * workaround would still not yield a correct counter, it doesn't
	 * seem worth implementing. So don't read the counter at all on
	 * controllers that have the silicon bug.
3822 	 */
3823 	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
3824 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5762 &&
3825 	    sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
3826 	    sc->bge_chipid != BGE_CHIPID_BCM5720_A0)
3827 		sc->bge_rx_discards += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3828 
3829 	sc->bge_rx_inerrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
3830 
3831 	ifp->if_collisions = sc->bge_tx_collisions;
3832 	ifp->if_ierrors = sc->bge_rx_discards + sc->bge_rx_inerrors;
3833 
3834 	if (sc->bge_flags & BGE_RDMA_BUG) {
3835 		u_int32_t val, ucast, mcast, bcast;
3836 
3837 		ucast = CSR_READ_4(sc, BGE_MAC_STATS +
3838 		    offsetof(struct bge_mac_stats_regs, ifHCOutUcastPkts));
3839 		mcast = CSR_READ_4(sc, BGE_MAC_STATS +
3840 		    offsetof(struct bge_mac_stats_regs, ifHCOutMulticastPkts));
3841 		bcast = CSR_READ_4(sc, BGE_MAC_STATS +
3842 		    offsetof(struct bge_mac_stats_regs, ifHCOutBroadcastPkts));
3843 
3844 		/*
3845 		 * If controller transmitted more than BGE_NUM_RDMA_CHANNELS
3846 		 * frames, it's safe to disable workaround for DMA engine's
3847 		 * miscalculation of TXMBUF space.
3848 		 */
3849 		if (ucast + mcast + bcast > BGE_NUM_RDMA_CHANNELS) {
3850 			val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
3851 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
3852 				val &= ~BGE_RDMA_TX_LENGTH_WA_5719;
3853 			else
3854 				val &= ~BGE_RDMA_TX_LENGTH_WA_5720;
3855 			CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
3856 			sc->bge_flags &= ~BGE_RDMA_BUG;
3857 		}
3858 	}
3859 }
3860 
3861 void
3862 bge_stats_update(struct bge_softc *sc)
3863 {
3864 	struct ifnet *ifp = &sc->arpcom.ac_if;
3865 	bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3866 	u_int32_t cnt;
3867 
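	/*
	 * The statistics block lives in NIC memory; each counter is a
	 * 64-bit value of which we read the low 32 bits through the
	 * memory window.
	 */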
3868 #define READ_STAT(sc, stats, stat) \
3869 	  CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
3870 
3871 	cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
3872 	ifp->if_collisions += (u_int32_t)(cnt - sc->bge_tx_collisions);
3873 	sc->bge_tx_collisions = cnt;
3874 
3875 	cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo);
3876 	sc->bge_rx_overruns = cnt;
3877 	cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo);
3878 	ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_inerrors);
3879 	sc->bge_rx_inerrors = cnt;
3880 	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
3881 	ifp->if_ierrors += (u_int32_t)(cnt - sc->bge_rx_discards);
3882 	sc->bge_rx_discards = cnt;
3883 
3884 	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
3885 	ifp->if_oerrors += (u_int32_t)(cnt - sc->bge_tx_discards);
3886 	sc->bge_tx_discards = cnt;
3887 
3888 #undef READ_STAT
3889 }
3890 
3891 /*
3892  * Compact outbound packets to avoid bug with DMA segments less than 8 bytes.
3893  */
3894 int
3895 bge_compact_dma_runt(struct mbuf *pkt)
3896 {
3897 	struct mbuf	*m, *prev, *n = NULL;
3898 	int 		totlen, newprevlen;
3899 
3900 	prev = NULL;
3901 	totlen = 0;
3902 
3903 	for (m = pkt; m != NULL; prev = m,m = m->m_next) {
3904 		int mlen = m->m_len;
3905 		int shortfall = 8 - mlen ;
3906 
3907 		totlen += mlen;
3908 		if (mlen == 0)
3909 			continue;
3910 		if (mlen >= 8)
3911 			continue;
3912 
		/* If we get here, the mbuf data is too small for the DMA engine.
3914 		 * Try to fix by shuffling data to prev or next in chain.
3915 		 * If that fails, do a compacting deep-copy of the whole chain.
3916 		 */
3917 
		/* Internal frag. If it fits in prev, copy it there. */
3919 		if (prev && m_trailingspace(prev) >= m->m_len) {
3920 			bcopy(m->m_data, prev->m_data+prev->m_len, mlen);
3921 			prev->m_len += mlen;
3922 			m->m_len = 0;
3923 			/* XXX stitch chain */
3924 			prev->m_next = m_free(m);
3925 			m = prev;
3926 			continue;
3927 		} else if (m->m_next != NULL &&
3928 			   m_trailingspace(m) >= shortfall &&
3929 			   m->m_next->m_len >= (8 + shortfall)) {
			/* m has room and the next mbuf has data to spare; pull up. */
3931 
3932 			bcopy(m->m_next->m_data, m->m_data+m->m_len, shortfall);
3933 			m->m_len += shortfall;
3934 			m->m_next->m_len -= shortfall;
3935 			m->m_next->m_data += shortfall;
3936 		} else if (m->m_next == NULL || 1) {
3937 			/* Got a runt at the very end of the packet.
3938 			 * borrow data from the tail of the preceding mbuf and
3939 			 * update its length in-place. (The original data is still
3940 			 * valid, so we can do this even if prev is not writable.)
3941 			 */
3942 
			/* If we'd make prev a runt, just move all of its data. */
3944 #ifdef DEBUG
3945 			KASSERT(prev != NULL /*, ("runt but null PREV")*/);
3946 			KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
3947 #endif
3948 			if ((prev->m_len - shortfall) < 8)
3949 				shortfall = prev->m_len;
3950 
3951 			newprevlen = prev->m_len - shortfall;
3952 
3953 			MGET(n, M_NOWAIT, MT_DATA);
3954 			if (n == NULL)
3955 				return (ENOBUFS);
3956 			KASSERT(m->m_len + shortfall < MLEN
3957 				/*,
3958 				  ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
3959 
3960 			/* first copy the data we're stealing from prev */
3961 			bcopy(prev->m_data + newprevlen, n->m_data, shortfall);
3962 
3963 			/* update prev->m_len accordingly */
3964 			prev->m_len -= shortfall;
3965 
3966 			/* copy data from runt m */
3967 			bcopy(m->m_data, n->m_data + shortfall, m->m_len);
3968 
3969 			/* n holds what we stole from prev, plus m */
3970 			n->m_len = shortfall + m->m_len;
3971 
3972 			/* stitch n into chain and free m */
3973 			n->m_next = m->m_next;
3974 			prev->m_next = n;
3975 			/* KASSERT(m->m_next == NULL); */
3976 			m->m_next = NULL;
3977 			m_free(m);
3978 			m = n;	/* for continuing loop */
3979 		}
3980 	}
3981 	return (0);
3982 }
3983 
3984 /*
3985  * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3986  * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3987  * but when such padded frames employ the bge IP/TCP checksum offload,
3988  * the hardware checksum assist gives incorrect results (possibly
3989  * from incorporating its own padding into the UDP/TCP checksum; who knows).
3990  * If we pad such runts with zeros, the onboard checksum comes out correct.
3991  */
3992 int
3993 bge_cksum_pad(struct mbuf *m)
3994 {
3995 	int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
3996 	struct mbuf *last;
3997 
3998 	/* If there's only the packet-header and we can pad there, use it. */
3999 	if (m->m_pkthdr.len == m->m_len && m_trailingspace(m) >= padlen) {
4000 		last = m;
4001 	} else {
4002 		/*
4003 		 * Walk packet chain to find last mbuf. We will either
4004 		 * pad there, or append a new mbuf and pad it.
4005 		 */
4006 		for (last = m; last->m_next != NULL; last = last->m_next);
4007 		if (m_trailingspace(last) < padlen) {
4008 			/* Allocate new empty mbuf, pad it. Compact later. */
4009 			struct mbuf *n;
4010 
4011 			MGET(n, M_DONTWAIT, MT_DATA);
4012 			if (n == NULL)
4013 				return (ENOBUFS);
4014 			n->m_len = 0;
4015 			last->m_next = n;
4016 			last = n;
4017 		}
4018 	}
4019 
4020 	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
4021 	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
4022 	last->m_len += padlen;
4023 	m->m_pkthdr.len += padlen;
4024 
4025 	return (0);
4026 }
4027 
4028 /*
4029  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4030  * pointers to descriptors.
4031  */
4032 int
4033 bge_encap(struct bge_softc *sc, struct mbuf *m, int *txinc)
4034 {
4035 	struct bge_tx_bd	*f = NULL;
4036 	u_int32_t		frag, cur;
4037 	u_int16_t		csum_flags = 0;
4038 	bus_dmamap_t		dmamap;
4039 	int			i = 0;
4040 
4041 	cur = frag = (sc->bge_tx_prodidx + *txinc) % BGE_TX_RING_CNT;
4042 
4043 	if (m->m_pkthdr.csum_flags) {
4044 		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
4045 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4046 		if (m->m_pkthdr.csum_flags &
4047 		    (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) {
4048 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4049 			if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4050 			    bge_cksum_pad(m) != 0)
4051 				return (ENOBUFS);
4052 		}
4053 	}
4054 
4055 	if (sc->bge_flags & BGE_JUMBO_FRAME &&
4056 	    m->m_pkthdr.len > ETHER_MAX_LEN)
4057 		csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
4058 
4059 	if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX))
4060 		goto doit;
4061 
4062 	/*
4063 	 * bcm5700 Revision B silicon cannot handle DMA descriptors with
4064 	 * less than eight bytes.  If we encounter a teeny mbuf
4065 	 * at the end of a chain, we can pad.  Otherwise, copy.
4066 	 */
4067 	if (bge_compact_dma_runt(m) != 0)
4068 		return (ENOBUFS);
4069 
4070 doit:
4071 	dmamap = sc->bge_txdma[cur];
4072 
4073 	/*
4074 	 * Start packing the mbufs in this chain into
4075 	 * the fragment pointers. Stop when we run out
4076 	 * of fragments or hit the end of the mbuf chain.
4077 	 */
4078 	switch (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m,
4079 	    BUS_DMA_NOWAIT)) {
4080 	case 0:
4081 		break;
4082 	case EFBIG:
4083 		if (m_defrag(m, M_DONTWAIT) == 0 &&
4084 		    bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m,
4085 		     BUS_DMA_NOWAIT) == 0)
4086 			break;
4087 
4088 		/* FALLTHROUGH */
4089 	default:
4090 		return (ENOBUFS);
4091 	}
4092 
4093 	for (i = 0; i < dmamap->dm_nsegs; i++) {
4094 		f = &sc->bge_rdata->bge_tx_ring[frag];
4095 		if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
4096 			break;
4097 		BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
4098 		f->bge_len = dmamap->dm_segs[i].ds_len;
4099 		f->bge_flags = csum_flags;
4100 		f->bge_vlan_tag = 0;
4101 #if NVLAN > 0
4102 		if (m->m_flags & M_VLANTAG) {
4103 			f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
4104 			f->bge_vlan_tag = m->m_pkthdr.ether_vtag;
4105 		}
4106 #endif
4107 		cur = frag;
4108 		BGE_INC(frag, BGE_TX_RING_CNT);
4109 	}
4110 
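	/*
	 * Bail if we ran into a descriptor still owned by an earlier
	 * packet, or if the producer index would catch up with the
	 * consumer (ring full).
	 */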
4111 	if (i < dmamap->dm_nsegs)
4112 		goto fail_unload;
4113 
4114 	if (frag == sc->bge_tx_saved_considx)
4115 		goto fail_unload;
4116 
4117 	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
4118 	    BUS_DMASYNC_PREWRITE);
4119 
4120 	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
4121 	sc->bge_cdata.bge_tx_chain[cur] = m;
4122 	sc->bge_cdata.bge_tx_map[cur] = dmamap;
4123 
4124 	*txinc += dmamap->dm_nsegs;
4125 
4126 	return (0);
4127 
4128 fail_unload:
4129 	bus_dmamap_unload(sc->bge_dmatag, dmamap);
4130 
4131 	return (ENOBUFS);
4132 }
4133 
4134 /*
4135  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4136  * to the mbuf data regions directly in the transmit descriptors.
4137  */
4138 void
4139 bge_start(struct ifqueue *ifq)
4140 {
4141 	struct ifnet *ifp = ifq->ifq_if;
4142 	struct bge_softc *sc = ifp->if_softc;
4143 	struct mbuf *m;
4144 	int txinc;
4145 
4146 	if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
4147 		ifq_purge(ifq);
4148 		return;
4149 	}
4150 
4151 	txinc = 0;
4152 	while (1) {
4153 		/* Check if we have enough free send BDs. */
4154 		if (sc->bge_txcnt + txinc + BGE_NTXSEG + 16 >=
4155 		    BGE_TX_RING_CNT) {
4156 			ifq_set_oactive(ifq);
4157 			break;
4158 		}
4159 
4160 		m = ifq_dequeue(ifq);
4161 		if (m == NULL)
4162 			break;
4163 
4164 		if (bge_encap(sc, m, &txinc) != 0) {
4165 			m_freem(m);
4166 			continue;
4167 		}
4168 
4169 #if NBPFILTER > 0
4170 		if (ifp->if_bpf)
4171 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
4172 #endif
4173 	}
4174 
4175 	if (txinc != 0) {
4176 		/* Transmit */
4177 		sc->bge_tx_prodidx = (sc->bge_tx_prodidx + txinc) %
4178 		    BGE_TX_RING_CNT;
4179 		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
4180 		if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
4181 			bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO,
4182 			    sc->bge_tx_prodidx);
4183 
4184 		atomic_add_int(&sc->bge_txcnt, txinc);
4185 
4186 		/*
4187 		 * Set a timeout in case the chip goes out to lunch.
4188 		 */
4189 		ifp->if_timer = 5;
4190 	}
4191 }
4192 
4193 void
4194 bge_init(void *xsc)
4195 {
4196 	struct bge_softc *sc = xsc;
4197 	struct ifnet *ifp;
4198 	u_int16_t *m;
4199 	u_int32_t mode;
4200 	int s;
4201 
4202 	s = splnet();
4203 
4204 	ifp = &sc->arpcom.ac_if;
4205 
4206 	/* Cancel pending I/O and flush buffers. */
4207 	bge_stop(sc, 0);
4208 	bge_sig_pre_reset(sc, BGE_RESET_START);
4209 	bge_reset(sc);
4210 	bge_sig_legacy(sc, BGE_RESET_START);
4211 	bge_sig_post_reset(sc, BGE_RESET_START);
4212 
4213 	bge_chipinit(sc);
4214 
4215 	/*
4216 	 * Init the various state machines, ring
4217 	 * control blocks and firmware.
4218 	 */
4219 	if (bge_blockinit(sc)) {
4220 		printf("%s: initialization failure\n", sc->bge_dev.dv_xname);
4221 		splx(s);
4222 		return;
4223 	}
4224 
4225 	/* Specify MRU. */
4226 	if (BGE_IS_JUMBO_CAPABLE(sc))
4227 		CSR_WRITE_4(sc, BGE_RX_MTU,
4228 			BGE_JUMBO_FRAMELEN + ETHER_VLAN_ENCAP_LEN);
4229 	else
4230 		CSR_WRITE_4(sc, BGE_RX_MTU,
4231 			ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
4232 
4233 	/* Load our MAC address. */
4234 	m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
4235 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4236 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4237 
4238 	if (!(ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)) {
4239 		/* Disable hardware decapsulation of VLAN frames. */
4240 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
4241 	}
4242 
4243 	/* Program promiscuous mode and multicast filters. */
4244 	bge_iff(sc);
4245 
4246 	/* Init RX ring. */
4247 	bge_init_rx_ring_std(sc);
4248 
4249 	/*
4250 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4251 	 * memory to ensure that the chip has in fact read the first
4252 	 * entry of the ring.
4253 	 */
4254 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4255 		u_int32_t		v, i;
4256 		for (i = 0; i < 10; i++) {
4257 			DELAY(20);
4258 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4259 			if (v == (MCLBYTES - ETHER_ALIGN))
4260 				break;
4261 		}
4262 		if (i == 10)
4263 			printf("%s: 5705 A0 chip failed to load RX ring\n",
4264 			    sc->bge_dev.dv_xname);
4265 	}
4266 
4267 	/* Init Jumbo RX ring. */
4268 	if (sc->bge_flags & BGE_JUMBO_RING)
4269 		bge_init_rx_ring_jumbo(sc);
4270 
4271 	/* Init our RX return ring index */
4272 	sc->bge_rx_saved_considx = 0;
4273 
4274 	/* Init our RX/TX stat counters. */
4275 	sc->bge_tx_collisions = 0;
4276 	sc->bge_rx_discards = 0;
4277 	sc->bge_rx_inerrors = 0;
4278 	sc->bge_rx_overruns = 0;
4279 	sc->bge_tx_discards = 0;
4280 
4281 	/* Init TX ring. */
4282 	bge_init_tx_ring(sc);
4283 
4284 	/* Enable TX MAC state machine lockup fix. */
4285 	mode = CSR_READ_4(sc, BGE_TX_MODE);
4286 	if (BGE_IS_5755_PLUS(sc) ||
4287 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
4288 		mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
4289 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
4290 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
4291 		mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
4292 		mode |= CSR_READ_4(sc, BGE_TX_MODE) &
4293 		    (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
4294 	}
4295 
4296 	/* Turn on transmitter */
4297 	CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
4298 	DELAY(100);
4299 
4300 	mode = CSR_READ_4(sc, BGE_RX_MODE);
4301 	if (BGE_IS_5755_PLUS(sc))
4302 		mode |= BGE_RXMODE_IPV6_ENABLE;
4303 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
4304 		mode |= BGE_RXMODE_IPV4_FRAG_FIX;
4305 
4306 	/* Turn on receiver */
4307 	CSR_WRITE_4(sc, BGE_RX_MODE, mode | BGE_RXMODE_ENABLE);
4308 	DELAY(10);
4309 
4310 	/*
4311 	 * Set the number of good frames to receive after RX MBUF
4312 	 * Low Watermark has been reached. After the RX MAC receives
4313 	 * this number of frames, it will drop subsequent incoming
4314 	 * frames until the MBUF High Watermark is reached.
4315 	 */
4316 	if (BGE_IS_57765_PLUS(sc))
4317 		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
4318 	else
4319 		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4320 
4321 	/* Tell firmware we're alive. */
4322 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4323 
4324 	/* Enable host interrupts. */
4325 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4326 	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4327 	bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4328 
4329 	bge_ifmedia_upd(ifp);
4330 
4331 	ifp->if_flags |= IFF_RUNNING;
4332 	ifq_clr_oactive(&ifp->if_snd);
4333 
4334 	splx(s);
4335 
4336 	timeout_add_sec(&sc->bge_timeout, 1);
4337 }
4338 
4339 /*
4340  * Set media options.
4341  */
4342 int
4343 bge_ifmedia_upd(struct ifnet *ifp)
4344 {
4345 	struct bge_softc *sc = ifp->if_softc;
4346 	struct mii_data *mii = &sc->bge_mii;
4347 	struct ifmedia *ifm = &sc->bge_ifmedia;
4348 
4349 	/* If this is a 1000baseX NIC, enable the TBI port. */
4350 	if (sc->bge_flags & BGE_FIBER_TBI) {
4351 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4352 			return (EINVAL);
4353 		switch(IFM_SUBTYPE(ifm->ifm_media)) {
4354 		case IFM_AUTO:
4355 			/*
4356 			 * The BCM5704 ASIC appears to have a special
4357 			 * mechanism for programming the autoneg
4358 			 * advertisement registers in TBI mode.
4359 			 */
4360 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
4361 				u_int32_t sgdig;
4362 				sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4363 				if (sgdig & BGE_SGDIGSTS_DONE) {
4364 					CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4365 					sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4366 					sgdig |= BGE_SGDIGCFG_AUTO |
4367 					    BGE_SGDIGCFG_PAUSE_CAP |
4368 					    BGE_SGDIGCFG_ASYM_PAUSE;
4369 					CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4370 					    sgdig | BGE_SGDIGCFG_SEND);
4371 					DELAY(5);
4372 					CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4373 				}
4374 			}
4375 			break;
4376 		case IFM_1000_SX:
4377 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4378 				BGE_CLRBIT(sc, BGE_MAC_MODE,
4379 				    BGE_MACMODE_HALF_DUPLEX);
4380 			} else {
4381 				BGE_SETBIT(sc, BGE_MAC_MODE,
4382 				    BGE_MACMODE_HALF_DUPLEX);
4383 			}
4384 			DELAY(40);
4385 			break;
4386 		default:
4387 			return (EINVAL);
4388 		}
4389 		/* XXX 802.3x flow control for 1000BASE-SX */
4390 		return (0);
4391 	}
4392 
4393 	BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
4394 	if (mii->mii_instance) {
4395 		struct mii_softc *miisc;
4396 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4397 			mii_phy_reset(miisc);
4398 	}
4399 	mii_mediachg(mii);
4400 
4401 	/*
4402 	 * Force an interrupt so that we will call bge_link_upd
4403 	 * if needed and clear any pending link state attention.
	 * Without this we would not get any further interrupts for
	 * link state changes and thus would never bring the link UP
	 * or be able to send in bge_start. The only other way to get
	 * things working was to receive a packet and take an RX intr.
4408 	 */
4409 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
4410 	    sc->bge_flags & BGE_IS_5788)
4411 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4412 	else
4413 		BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4414 
4415 	return (0);
4416 }
4417 
4418 /*
4419  * Report current media status.
4420  */
4421 void
4422 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4423 {
4424 	struct bge_softc *sc = ifp->if_softc;
4425 	struct mii_data *mii = &sc->bge_mii;
4426 
4427 	if (sc->bge_flags & BGE_FIBER_TBI) {
4428 		ifmr->ifm_status = IFM_AVALID;
4429 		ifmr->ifm_active = IFM_ETHER;
4430 		if (CSR_READ_4(sc, BGE_MAC_STS) &
4431 		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
4432 			ifmr->ifm_status |= IFM_ACTIVE;
4433 		} else {
4434 			ifmr->ifm_active |= IFM_NONE;
4435 			return;
4436 		}
4437 		ifmr->ifm_active |= IFM_1000_SX;
4438 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4439 			ifmr->ifm_active |= IFM_HDX;
4440 		else
4441 			ifmr->ifm_active |= IFM_FDX;
4442 		return;
4443 	}
4444 
4445 	mii_pollstat(mii);
4446 	ifmr->ifm_status = mii->mii_media_status;
4447 	ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
4448 	    sc->bge_flowflags;
4449 }
4450 
4451 int
4452 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4453 {
4454 	struct bge_softc *sc = ifp->if_softc;
4455 	struct ifreq *ifr = (struct ifreq *) data;
4456 	int s, error = 0;
4457 	struct mii_data *mii;
4458 
4459 	s = splnet();
4460 
4461 	switch(command) {
4462 	case SIOCSIFADDR:
4463 		ifp->if_flags |= IFF_UP;
4464 		if (!(ifp->if_flags & IFF_RUNNING))
4465 			bge_init(sc);
4466 		break;
4467 
4468 	case SIOCSIFFLAGS:
4469 		if (ifp->if_flags & IFF_UP) {
4470 			if (ifp->if_flags & IFF_RUNNING)
4471 				error = ENETRESET;
4472 			else
4473 				bge_init(sc);
4474 		} else {
4475 			if (ifp->if_flags & IFF_RUNNING)
4476 				bge_stop(sc, 0);
4477 		}
4478 		break;
4479 
4480 	case SIOCSIFMEDIA:
4481 		/* XXX Flow control is not supported for 1000BASE-SX */
4482 		if (sc->bge_flags & BGE_FIBER_TBI) {
4483 			ifr->ifr_media &= ~IFM_ETH_FMASK;
4484 			sc->bge_flowflags = 0;
4485 		}
4486 
4487 		/* Flow control requires full-duplex mode. */
4488 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
4489 		    (ifr->ifr_media & IFM_FDX) == 0) {
4490 		    	ifr->ifr_media &= ~IFM_ETH_FMASK;
4491 		}
4492 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
4493 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
4494 				/* We can do both TXPAUSE and RXPAUSE. */
4495 				ifr->ifr_media |=
4496 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
4497 			}
4498 			sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
4499 		}
4500 		/* FALLTHROUGH */
4501 	case SIOCGIFMEDIA:
4502 		if (sc->bge_flags & BGE_FIBER_TBI) {
4503 			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
4504 			    command);
4505 		} else {
4506 			mii = &sc->bge_mii;
4507 			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
4508 			    command);
4509 		}
4510 		break;
4511 
4512 	case SIOCGIFRXR:
4513 		error = bge_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
4514 		break;
4515 
4516 	default:
4517 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
4518 	}
4519 
4520 	if (error == ENETRESET) {
4521 		if (ifp->if_flags & IFF_RUNNING)
4522 			bge_iff(sc);
4523 		error = 0;
4524 	}
4525 
4526 	splx(s);
4527 	return (error);
4528 }
4529 
4530 int
4531 bge_rxrinfo(struct bge_softc *sc, struct if_rxrinfo *ifri)
4532 {
4533 	struct if_rxring_info ifr[2];
4534 	u_int n = 0;
4535 
4536 	memset(ifr, 0, sizeof(ifr));
4537 
4538 	if (ISSET(sc->bge_flags, BGE_RXRING_VALID)) {
4539 		ifr[n].ifr_size = sc->bge_rx_std_len;
4540 		strlcpy(ifr[n].ifr_name, "std", sizeof(ifr[n].ifr_name));
4541 		ifr[n].ifr_info = sc->bge_std_ring;
4542 
4543 		n++;
4544 	}
4545 
4546 	if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID)) {
4547 		ifr[n].ifr_size = BGE_JLEN;
4548 		strlcpy(ifr[n].ifr_name, "jumbo", sizeof(ifr[n].ifr_name));
4549 		ifr[n].ifr_info = sc->bge_jumbo_ring;
4550 
4551 		n++;
4552 	}
4553 
4554 	return (if_rxr_info_ioctl(ifri, n, ifr));
4555 }
4556 
4557 void
4558 bge_watchdog(struct ifnet *ifp)
4559 {
4560 	struct bge_softc *sc;
4561 
4562 	sc = ifp->if_softc;
4563 
4564 	printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);
4565 
4566 	bge_init(sc);
4567 
4568 	ifp->if_oerrors++;
4569 }
4570 
4571 void
4572 bge_stop_block(struct bge_softc *sc, bus_size_t reg, u_int32_t bit)
4573 {
4574 	int i;
4575 
4576 	BGE_CLRBIT(sc, reg, bit);
4577 
4578 	for (i = 0; i < BGE_TIMEOUT; i++) {
4579 		if ((CSR_READ_4(sc, reg) & bit) == 0)
4580 			return;
4581 		delay(100);
4582 	}
4583 
4584 	DPRINTFN(5, ("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
4585 	    sc->bge_dev.dv_xname, (u_long) reg, bit));
4586 }
4587 
4588 /*
4589  * Stop the adapter and free any mbufs allocated to the
4590  * RX and TX lists.
4591  */
4592 void
4593 bge_stop(struct bge_softc *sc, int softonly)
4594 {
4595 	struct ifnet *ifp = &sc->arpcom.ac_if;
4596 	struct ifmedia_entry *ifm;
4597 	struct mii_data *mii;
4598 	int mtmp, itmp;
4599 
4600 	timeout_del(&sc->bge_timeout);
4601 	timeout_del(&sc->bge_rxtimeout);
4602 	timeout_del(&sc->bge_rxtimeout_jumbo);
4603 
4604 	ifp->if_flags &= ~IFF_RUNNING;
4605 	ifp->if_timer = 0;
4606 
4607 	if (!softonly) {
4608 		/*
4609 		 * Tell firmware we're shutting down.
4610 		 */
4611 		/* bge_stop_fw(sc); */
4612 		bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
4613 
4614 		/*
4615 		 * Disable all of the receiver blocks
4616 		 */
4617 		bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4618 		bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
4619 		bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
4620 		if (BGE_IS_5700_FAMILY(sc))
4621 			bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
4622 		bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
4623 		bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
4624 		bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
4625 
4626 		/*
4627 		 * Disable all of the transmit blocks
4628 		 */
4629 		bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
4630 		bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
4631 		bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
4632 		bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
4633 		bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
4634 		if (BGE_IS_5700_FAMILY(sc))
4635 			bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
4636 		bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
4637 
4638 		/*
4639 		 * Shut down all of the memory managers and related
4640 		 * state machines.
4641 		 */
4642 		bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
4643 		bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
4644 		if (BGE_IS_5700_FAMILY(sc))
4645 			bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
4646 
4647 		CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
4648 		CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
4649 
4650 		if (!BGE_IS_5705_PLUS(sc)) {
4651 			bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
4652 			bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4653 		}
4654 
4655 		bge_reset(sc);
4656 		bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
4657 		bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
4658 
4659 		/*
4660 		 * Tell firmware we're shutting down.
4661 		 */
4662 		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4663 	}
4664 
4665 	intr_barrier(sc->bge_intrhand);
4666 	ifq_barrier(&ifp->if_snd);
4667 
4668 	ifq_clr_oactive(&ifp->if_snd);
4669 
4670 	/* Free the RX lists. */
4671 	bge_free_rx_ring_std(sc);
4672 
4673 	/* Free jumbo RX list. */
4674 	if (sc->bge_flags & BGE_JUMBO_RING)
4675 		bge_free_rx_ring_jumbo(sc);
4676 
4677 	/* Free TX buffers. */
4678 	bge_free_tx_ring(sc);
4679 
4680 	/*
4681 	 * Isolate/power down the PHY, but leave the media selection
4682 	 * unchanged so that things will be put back to normal when
4683 	 * we bring the interface back up.
4684 	 */
4685 	if (!(sc->bge_flags & BGE_FIBER_TBI)) {
4686 		mii = &sc->bge_mii;
4687 		itmp = ifp->if_flags;
4688 		ifp->if_flags |= IFF_UP;
4689 		ifm = mii->mii_media.ifm_cur;
4690 		mtmp = ifm->ifm_media;
4691 		ifm->ifm_media = IFM_ETHER|IFM_NONE;
4692 		mii_mediachg(mii);
4693 		ifm->ifm_media = mtmp;
4694 		ifp->if_flags = itmp;
4695 	}
4696 
4697 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
4698 
4699 	if (!softonly) {
4700 		/* Clear MAC's link state (PHY may still have link UP). */
4701 		BGE_STS_CLRBIT(sc, BGE_STS_LINK);
4702 	}
4703 }
4704 
4705 void
4706 bge_link_upd(struct bge_softc *sc)
4707 {
4708 	struct ifnet *ifp = &sc->arpcom.ac_if;
4709 	struct mii_data *mii = &sc->bge_mii;
4710 	u_int32_t status;
4711 	int link;
4712 
4713 	/* Clear 'pending link event' flag */
4714 	BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT);
4715 
4716 	/*
4717 	 * Process link state changes.
4718 	 * Grrr. The link status word in the status block does
4719 	 * not work correctly on the BCM5700 rev AX and BX chips,
4720 	 * according to all available information. Hence, we have
4721 	 * to enable MII interrupts in order to properly obtain
4722 	 * async link changes. Unfortunately, this also means that
4723 	 * we have to read the MAC status register to detect link
4724 	 * changes, thereby adding an additional register access to
4725 	 * the interrupt handler.
4726 	 *
4727 	 */
4728 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) {
4729 		status = CSR_READ_4(sc, BGE_MAC_STS);
4730 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
4731 			mii_pollstat(mii);
4732 
4733 			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
4734 			    mii->mii_media_status & IFM_ACTIVE &&
4735 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
4736 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
4737 			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
4738 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
4739 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
4740 				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
4741 
4742 			/* Clear the interrupt */
4743 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
4744 			    BGE_EVTENB_MI_INTERRUPT);
4745 			bge_miibus_readreg(&sc->bge_dev, sc->bge_phy_addr,
4746 			    BRGPHY_MII_ISR);
4747 			bge_miibus_writereg(&sc->bge_dev, sc->bge_phy_addr,
4748 			    BRGPHY_MII_IMR, BRGPHY_INTRS);
4749 		}
4750 		return;
4751 	}
4752 
4753 	if (sc->bge_flags & BGE_FIBER_TBI) {
4754 		status = CSR_READ_4(sc, BGE_MAC_STS);
4755 		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
4756 			if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
4757 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
4758 				if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
4759 					BGE_CLRBIT(sc, BGE_MAC_MODE,
4760 					    BGE_MACMODE_TBI_SEND_CFGS);
4761 				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
4762 				status = CSR_READ_4(sc, BGE_MAC_MODE);
4763 				link = (status & BGE_MACMODE_HALF_DUPLEX) ?
4764 				    LINK_STATE_HALF_DUPLEX :
4765 				    LINK_STATE_FULL_DUPLEX;
4766 				ifp->if_baudrate = IF_Gbps(1);
4767 				if (ifp->if_link_state != link) {
4768 					ifp->if_link_state = link;
4769 					if_link_state_change(ifp);
4770 				}
4771 			}
4772 		} else if (BGE_STS_BIT(sc, BGE_STS_LINK)) {
4773 			BGE_STS_CLRBIT(sc, BGE_STS_LINK);
4774 			link = LINK_STATE_DOWN;
4775 			ifp->if_baudrate = 0;
4776 			if (ifp->if_link_state != link) {
4777 				ifp->if_link_state = link;
4778 				if_link_state_change(ifp);
4779 			}
4780 		}
4781 	} else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) {
4782 		/*
		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
		 * bit in the status word always set. Work around this bug by
		 * reading the PHY link status directly.
4786 		 */
4787 		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK)?
4788 		    BGE_STS_LINK : 0;
4789 
4790 		if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) {
4791 			mii_pollstat(mii);
4792 
4793 			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
4794 			    mii->mii_media_status & IFM_ACTIVE &&
4795 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
4796 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
4797 			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
4798 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
4799 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
4800 				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
4801 		}
4802 	} else {
4803 		/*
4804 		 * For controllers that call mii_tick, we have to poll
4805 		 * link status.
4806 		 */
4807 		mii_pollstat(mii);
4808 	}
4809 
4810 	/* Clear the attention */
4811 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
4812 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
4813 	    BGE_MACSTAT_LINK_CHANGED);
4814 }
4815