/*	$OpenBSD: if_bge.c,v 1.400 2023/01/18 23:31:37 kettenis Exp $	*/

/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_bge.c,v 1.25 2002/11/14 23:54:49 sam Exp $
 */

/*
 * Broadcom BCM57xx/BCM590x family ethernet driver for OpenBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>
#include <sys/atomic.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#if defined(__sparc64__) || defined(__HAVE_FDT)
#include <dev/ofw/openfirm.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bgereg.h>

#define ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
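/*
 * Frames shorter than ETHER_MIN_NOPAD must be padded out to the minimum
 * Ethernet payload size before the MAC appends the 4-byte CRC; see
 * bge_cksum_pad() for the transmit-side padding.
 */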

const struct bge_revision * bge_lookup_rev(u_int32_t);
int bge_can_use_msi(struct bge_softc *);
int bge_probe(struct device *, void *, void *);
void bge_attach(struct device *, struct device *, void *);
int bge_detach(struct device *, int);
int bge_activate(struct device *, int);

const struct cfattach bge_ca = {
	sizeof(struct bge_softc), bge_probe, bge_attach, bge_detach,
	bge_activate
};

struct cfdriver bge_cd = {
	NULL, "bge", DV_IFNET
};

void bge_txeof(struct bge_softc *);
void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
void bge_rxeof(struct bge_softc *);

void bge_tick(void *);
void bge_stats_update(struct bge_softc *);
void bge_stats_update_regs(struct bge_softc *);
int bge_cksum_pad(struct mbuf *);
int bge_encap(struct bge_softc *, struct mbuf *, int *);
int bge_compact_dma_runt(struct mbuf *);

int bge_intr(void *);
void bge_start(struct ifqueue *);
int bge_ioctl(struct ifnet *, u_long, caddr_t);
int bge_rxrinfo(struct bge_softc *, struct if_rxrinfo *);
void bge_init(void *);
void bge_stop_block(struct bge_softc *, bus_size_t, u_int32_t);
void bge_stop(struct bge_softc *, int);
void bge_watchdog(struct ifnet *);
int bge_ifmedia_upd(struct ifnet *);
void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

u_int8_t bge_nvram_getbyte(struct bge_softc *, int, u_int8_t *);
int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *);
int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);

void bge_iff(struct bge_softc *);

int bge_newbuf_jumbo(struct bge_softc *, int);
int bge_init_rx_ring_jumbo(struct bge_softc *);
void bge_fill_rx_ring_jumbo(struct bge_softc *);
void bge_free_rx_ring_jumbo(struct bge_softc *);

int bge_newbuf(struct bge_softc *, int);
int bge_init_rx_ring_std(struct bge_softc *);
void bge_rxtick(void *);
void bge_fill_rx_ring_std(struct bge_softc *);
void bge_free_rx_ring_std(struct bge_softc *);

void bge_free_tx_ring(struct bge_softc *);
int bge_init_tx_ring(struct bge_softc *);

void bge_chipinit(struct bge_softc *);
int bge_blockinit(struct bge_softc *);
u_int32_t bge_dma_swap_options(struct bge_softc *);
int bge_phy_addr(struct bge_softc *);

u_int32_t bge_readmem_ind(struct bge_softc *, int);
void bge_writemem_ind(struct bge_softc *, int, int);
void bge_writereg_ind(struct bge_softc *, int, int);
void bge_writembx(struct bge_softc *, int, int);

int bge_miibus_readreg(struct device *, int, int);
void bge_miibus_writereg(struct device *, int, int, int);
void bge_miibus_statchg(struct device *);

#define BGE_RESET_SHUTDOWN	0
#define BGE_RESET_START		1
#define BGE_RESET_SUSPEND	2
void bge_sig_post_reset(struct bge_softc *, int);
void bge_sig_legacy(struct bge_softc *, int);
void bge_sig_pre_reset(struct bge_softc *, int);
void bge_stop_fw(struct bge_softc *, int);
void bge_reset(struct bge_softc *);
void bge_link_upd(struct bge_softc *);

void bge_ape_lock_init(struct bge_softc *);
void bge_ape_read_fw_ver(struct bge_softc *);
int bge_ape_lock(struct bge_softc *, int);
void bge_ape_unlock(struct bge_softc *, int);
void bge_ape_send_event(struct bge_softc *, uint32_t);
void bge_ape_driver_state_change(struct bge_softc *, int);

#ifdef BGE_DEBUG
#define DPRINTF(x)	do { if (bgedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (bgedebug >= (n)) printf x; } while (0)
int	bgedebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif
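
/*
 * Example use (sketch): DPRINTFN(2, ("%s: intr\n", sc->bge_dev.dv_xname));
 * The doubled parentheses pass a complete printf() argument list through
 * the macro; both macros compile away unless BGE_DEBUG is defined.
 */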

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
const struct pci_matchid bge_devices[] = {
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700 },
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701 },

	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000 },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001 },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1003 },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100 },

	{ PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701 },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703_ALT },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717C },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5723 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5725 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5727 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761E },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761SE },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5762 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5764 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5784 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785F },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785G },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57760 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57764 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57767 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57780 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57787 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57788 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57790 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795 },

	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE4 },
	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE5 },
	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PP250_450_LAN },

	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9D21 },

	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996 }
};

#define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_JUMBO_CAPABLE)
#define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_5700_FAMILY)
#define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_5705_PLUS)
#define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_5714_FAMILY)
#define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_575X_PLUS)
#define BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGE_5755_PLUS)
#define BGE_IS_5717_PLUS(sc)		((sc)->bge_flags & BGE_5717_PLUS)
#define BGE_IS_57765_PLUS(sc)		((sc)->bge_flags & BGE_57765_PLUS)

static const struct bge_revision {
	u_int32_t		br_chipid;
	const char		*br_name;
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
	{ BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
	{ BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
	{ BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
	{ BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
	{ BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
	{ BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
	{ BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
	{ BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
	{ BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
	/* the 5702 and 5703 share the same ASIC ID */
	{ BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" },
	{ BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" },
	{ BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" },
	{ BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" },
	{ BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" },
	{ BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
	{ BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
	{ BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
	{ BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
	{ BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
	{ BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
	{ BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
	{ BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
	{ BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
	{ BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
	{ BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
	{ BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
	{ BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
	{ BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
	{ BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
	{ BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
	{ BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
	{ BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
	{ BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
	{ BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
	{ BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
	{ BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
	{ BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
	{ BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
	{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
	{ BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
	{ BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
	{ BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
	{ BGE_CHIPID_BCM5719_A1, "BCM5719 A1" },
	{ BGE_CHIPID_BCM5720_A0, "BCM5720 A0" },
	{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
	{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
	{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
	{ BGE_CHIPID_BCM5755_C0, "BCM5755 C0" },
	{ BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
	{ BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
	{ BGE_CHIPID_BCM5762_A0, "BCM5762 A0" },
	{ BGE_CHIPID_BCM5762_B0, "BCM5762 B0" },
	{ BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
	{ BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
	/* the 5754 and 5787 share the same ASIC ID */
	{ BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
	{ BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
	{ BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
	{ BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
	{ BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
	{ BGE_CHIPID_BCM57765_A0, "BCM57765 A0" },
	{ BGE_CHIPID_BCM57765_B0, "BCM57765 B0" },
	{ BGE_CHIPID_BCM57766_A0, "BCM57766 A0" },
	{ BGE_CHIPID_BCM57766_A1, "BCM57766 A1" },
	{ BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
	{ BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },

	{ 0, NULL }
};

/*
 * Some defaults for major revisions, so that newer steppings
 * that we don't know about have a shot at working.
 */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700, "unknown BCM5700" },
	{ BGE_ASICREV_BCM5701, "unknown BCM5701" },
	/* 5702 and 5703 share the same ASIC ID */
	{ BGE_ASICREV_BCM5703, "unknown BCM5703" },
	{ BGE_ASICREV_BCM5704, "unknown BCM5704" },
	{ BGE_ASICREV_BCM5705, "unknown BCM5705" },
	{ BGE_ASICREV_BCM5750, "unknown BCM5750" },
	{ BGE_ASICREV_BCM5714, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5752, "unknown BCM5752" },
	{ BGE_ASICREV_BCM5780, "unknown BCM5780" },
	{ BGE_ASICREV_BCM5755, "unknown BCM5755" },
	{ BGE_ASICREV_BCM5761, "unknown BCM5761" },
	{ BGE_ASICREV_BCM5784, "unknown BCM5784" },
	{ BGE_ASICREV_BCM5785, "unknown BCM5785" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
	{ BGE_ASICREV_BCM5906, "unknown BCM5906" },
	{ BGE_ASICREV_BCM57765, "unknown BCM57765" },
	{ BGE_ASICREV_BCM57766, "unknown BCM57766" },
	{ BGE_ASICREV_BCM57780, "unknown BCM57780" },
	{ BGE_ASICREV_BCM5717, "unknown BCM5717" },
	{ BGE_ASICREV_BCM5719, "unknown BCM5719" },
	{ BGE_ASICREV_BCM5720, "unknown BCM5720" },
	{ BGE_ASICREV_BCM5762, "unknown BCM5762" },

	{ 0, NULL }
};

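/*
 * Indirect access helpers. The NIC's internal memory is reached through
 * a window in PCI configuration space: write the target offset to the
 * window base-address register, move the data through the companion
 * data register, then park the window back at offset 0 so other code
 * sees a known state.
 */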
u_int32_t
bge_readmem_ind(struct bge_softc *sc, int off)
{
	struct pci_attach_args	*pa = &(sc->bge_pa);
	u_int32_t val;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
		return (0);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
	val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, 0);
	return (val);
}

void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{
	struct pci_attach_args	*pa = &(sc->bge_pa);

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
		return;

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, 0);
}

void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
	struct pci_attach_args	*pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val);
}

void
bge_writembx(struct bge_softc *sc, int off, int val)
{
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
}

/*
 * Clear all stale locks and select the lock for this driver instance.
 */
void
bge_ape_lock_init(struct bge_softc *sc)
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	uint32_t bit, regbase;
	int i;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
		regbase = BGE_APE_LOCK_GRANT;
	else
		regbase = BGE_APE_PER_LOCK_GRANT;

	/* Clear any stale locks. */
	for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
		switch (i) {
		case BGE_APE_LOCK_PHY0:
		case BGE_APE_LOCK_PHY1:
		case BGE_APE_LOCK_PHY2:
		case BGE_APE_LOCK_PHY3:
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
			break;
		default:
			if (pa->pa_function == 0)
				bit = BGE_APE_LOCK_GRANT_DRIVER0;
			else
				bit = (1 << pa->pa_function);
		}
		APE_WRITE_4(sc, regbase + 4 * i, bit);
	}

	/* Select the PHY lock based on the device's function number. */
	switch (pa->pa_function) {
	case 0:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0;
		break;
	case 1:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1;
		break;
	case 2:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2;
		break;
	case 3:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3;
		break;
	default:
		printf("%s: PHY lock not supported on function %d\n",
		    sc->bge_dev.dv_xname, pa->pa_function);
		break;
	}
}
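
/*
 * A sketch of how these APE locks are used elsewhere in this file,
 * e.g. around PHY accesses in bge_miibus_readreg():
 *
 *	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
 *		return (0);
 *	... access the PHY ...
 *	bge_ape_unlock(sc, sc->bge_phy_ape_lock);
 */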

/*
 * Check for APE firmware, set flags, and print version info.
 */
void
bge_ape_read_fw_ver(struct bge_softc *sc)
{
	const char *fwtype;
	uint32_t apedata, features;

	/* Check for a valid APE signature in shared memory. */
	apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
	if (apedata != BGE_APE_SEG_SIG_MAGIC) {
		sc->bge_mfw_flags &= ~BGE_MFW_ON_APE;
		return;
	}

	/* Check if APE firmware is running. */
	apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
	if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
		printf("%s: APE signature found but FW status not ready! "
		    "0x%08x\n", sc->bge_dev.dv_xname, apedata);
		return;
	}

	sc->bge_mfw_flags |= BGE_MFW_ON_APE;

	/* Fetch the APE firmware type and version. */
	apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
	features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
	if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) {
		sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI;
		fwtype = "NCSI";
	} else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) {
		sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH;
		fwtype = "DASH";
	} else
		fwtype = "UNKN";

	/* Print the APE firmware version. */
	printf(", APE firmware %s %d.%d.%d.%d", fwtype,
	    (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
	    (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
	    (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
	    (apedata & BGE_APE_FW_VERSION_BLDMSK));
}

int
bge_ape_lock(struct bge_softc *sc, int locknum)
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	uint32_t bit, gnt, req, status;
	int i, off;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return (0);

	/* Lock request/grant registers have different bases. */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) {
		req = BGE_APE_LOCK_REQ;
		gnt = BGE_APE_LOCK_GRANT;
	} else {
		req = BGE_APE_PER_LOCK_REQ;
		gnt = BGE_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		/* Lock required when using GPIO. */
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
			return (0);
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_GRC:
		/* Lock required to reset the device. */
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_MEM:
		/* Lock required when accessing certain APE memory. */
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		/* Lock required when accessing PHYs. */
		bit = BGE_APE_LOCK_REQ_DRIVER0;
		break;
	default:
		return (EINVAL);
	}

	/* Request a lock. */
	APE_WRITE_4(sc, req + off, bit);

	/* Wait up to 1 second to acquire lock. */
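	/* (20000 iterations x 50 usec of DELAY() below = 1 second.) */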
	for (i = 0; i < 20000; i++) {
		status = APE_READ_4(sc, gnt + off);
		if (status == bit)
			break;
		DELAY(50);
	}

	/* Handle any errors. */
	if (status != bit) {
		printf("%s: APE lock %d request failed! "
		    "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
		    sc->bge_dev.dv_xname,
		    locknum, req + off, bit & 0xFFFF, gnt + off,
		    status & 0xFFFF);
		/* Revoke the lock request. */
		APE_WRITE_4(sc, gnt + off, bit);
		return (EBUSY);
	}

	return (0);
}

void
bge_ape_unlock(struct bge_softc *sc, int locknum)
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	uint32_t bit, gnt;
	int off;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
		gnt = BGE_APE_LOCK_GRANT;
	else
		gnt = BGE_APE_PER_LOCK_GRANT;

	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
			return;
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_GRC:
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_MEM:
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		bit = BGE_APE_LOCK_GRANT_DRIVER0;
		break;
	default:
		return;
	}

	APE_WRITE_4(sc, gnt + off, bit);
}

/*
 * Send an event to the APE firmware.
 */
void
bge_ape_send_event(struct bge_softc *sc, uint32_t event)
{
	uint32_t apedata;
	int i;

	/* NCSI does not support APE events. */
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	/* Wait up to 1ms for APE to service previous event. */
	for (i = 10; i > 0; i--) {
		if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
			break;
		apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
		if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
			APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
			    BGE_APE_EVENT_STATUS_EVENT_PENDING);
			bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
			APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
			break;
		}
		bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
		DELAY(100);
	}
	if (i == 0) {
		printf("%s: APE event 0x%08x send timed out\n",
		    sc->bge_dev.dv_xname, event);
	}
}

void
bge_ape_driver_state_change(struct bge_softc *sc, int kind)
{
	uint32_t apedata, event;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	switch (kind) {
	case BGE_RESET_START:
		/* If this is the first load, clear the load counter. */
		apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
		if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC)
			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
		else {
			apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
		}
		APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
		    BGE_APE_HOST_SEG_SIG_MAGIC);
		APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
		    BGE_APE_HOST_SEG_LEN_MAGIC);

		/* Add some version info if bge(4) supports it. */
		APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
		    BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
		APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
		    BGE_APE_HOST_BEHAV_NO_PHYLOCK);
		APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
		    BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_START);
		event = BGE_APE_EVENT_STATUS_STATE_START;
		break;
	case BGE_RESET_SHUTDOWN:
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_UNLOAD);
		event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case BGE_RESET_SUSPEND:
		event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
	    BGE_APE_EVENT_STATUS_STATE_CHNGE);
}


u_int8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
{
	u_int32_t access, byte = 0;
	int i;

	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return (1);

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("%s: nvram read timed out\n", sc->bge_dev.dv_xname);
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

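	/*
	 * The chip returns a whole 32-bit word; byte-swap it and shift
	 * the byte selected by (addr % 4) down into the low 8 bits.
	 */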
	*dest = (swap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return (0);
}

/*
 * Read a sequence of bytes from NVRAM.
 */

int
bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	u_int8_t byte = 0;

	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
		return (1);

	for (i = 0; i < cnt; i++) {
		err = bge_nvram_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
u_int8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
{
	int i;
	u_int32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return (0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int i, error = 0;
	u_int8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		error = bge_eeprom_getbyte(sc, off + i, &byte);
		if (error)
			break;
		*(dest + i) = byte;
	}

	return (error ? 1 : 0);
}
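
/*
 * Typical use (sketch only): the attach path pulls the station address
 * out of the EEPROM a few bytes at a time, along the lines of
 *
 *	bge_read_eeprom(sc, (caddr_t)&enaddr, offset, ETHER_ADDR_LEN);
 *
 * where "enaddr" and "offset" stand in for the real destination buffer
 * and EEPROM offset used later in this file.
 */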

int
bge_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	u_int32_t val, autopoll;
	int i;

	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
		return (0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));
	CSR_READ_4(sc, BGE_MI_COMM); /* force write */

	for (i = 0; i < 200; i++) {
		delay(1);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (i == 200) {
		printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	bge_ape_unlock(sc, sc->bge_phy_ape_lock);

	if (val & BGE_MICOMM_READFAIL)
		return (0);

	return (val & 0xFFFF);
}

void
bge_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	u_int32_t autopoll;
	int i;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
	    (reg == MII_100T2CR || reg == BRGPHY_MII_AUXCTL))
		return;

	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
		return;

	/* Writing with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		DELAY(40);
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40); /* 40 usec is supposed to be adequate */
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
	CSR_READ_4(sc, BGE_MI_COMM); /* force write */

	for (i = 0; i < 200; i++) {
		delay(1);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	bge_ape_unlock(sc, sc->bge_phy_ape_lock);

	if (i == 200) {
		printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
	}
}

void
bge_miibus_statchg(struct device *dev)
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	struct mii_data *mii = &sc->bge_mii;
	u_int32_t mac_mode, rx_mode, tx_mode;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags)
		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;

	if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
	    mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
		BGE_STS_SETBIT(sc, BGE_STS_LINK);
	else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
	    (!(mii->mii_media_status & IFM_ACTIVE) ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
		BGE_STS_CLRBIT(sc, BGE_STS_LINK);

	if (!BGE_STS_BIT(sc, BGE_STS_LINK))
		return;

	/* Set the port mode (MII/GMII) to match the link speed. */
	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
	    ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);
	tx_mode = CSR_READ_4(sc, BGE_TX_MODE);
	rx_mode = CSR_READ_4(sc, BGE_RX_MODE);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		mac_mode |= BGE_PORTMODE_GMII;
	else
		mac_mode |= BGE_PORTMODE_MII;

	/* Set MAC flow control behavior to match link flow control settings. */
	tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE;
	rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE;
	if (mii->mii_media_active & IFM_FDX) {
		if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
			tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE;
		if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
			rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE;
	} else
		mac_mode |= BGE_MACMODE_HALF_DUPLEX;

	CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode);
	DELAY(40);
	CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode);
	CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode);
}

/*
 * Initialize a standard receive ring descriptor.
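 *
 * The ring lives in DMA'able host memory shared with the chip, so each
 * descriptor slot is synced POSTWRITE before the CPU rewrites it and
 * PREWRITE afterwards to hand the update back to the device.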
 */
int
bge_newbuf(struct bge_softc *sc, int i)
{
	bus_dmamap_t		dmap = sc->bge_cdata.bge_rx_std_map[i];
	struct bge_rx_bd	*r = &sc->bge_rdata->bge_rx_std_ring[i];
	struct mbuf		*m;
	int			error;

	m = MCLGETL(NULL, M_DONTWAIT, sc->bge_rx_std_len);
	if (!m)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = sc->bge_rx_std_len;
	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
	    m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_dmatag, dmap, 0, dmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	sc->bge_cdata.bge_rx_std_chain[i] = m;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_POSTWRITE);

	BGE_HOSTADDR(r->bge_addr, dmap->dm_segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Initialize a Jumbo receive ring descriptor.
 */
int
bge_newbuf_jumbo(struct bge_softc *sc, int i)
{
	bus_dmamap_t		dmap = sc->bge_cdata.bge_rx_jumbo_map[i];
	struct bge_ext_rx_bd	*r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	struct mbuf		*m;
	int			error;

	m = MCLGETL(NULL, M_DONTWAIT, BGE_JLEN);
	if (!m)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
	    m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_dmatag, dmap, 0, dmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
		i * sizeof (struct bge_ext_rx_bd),
	    sizeof (struct bge_ext_rx_bd),
	    BUS_DMASYNC_POSTWRITE);

	/*
	 * Fill in the extended RX buffer descriptor.
	 */
	r->bge_bd.bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
	r->bge_bd.bge_idx = i;
	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
	switch (dmap->dm_nsegs) {
	case 4:
		BGE_HOSTADDR(r->bge_addr3, dmap->dm_segs[3].ds_addr);
		r->bge_len3 = dmap->dm_segs[3].ds_len;
		/* FALLTHROUGH */
	case 3:
		BGE_HOSTADDR(r->bge_addr2, dmap->dm_segs[2].ds_addr);
		r->bge_len2 = dmap->dm_segs[2].ds_len;
		/* FALLTHROUGH */
	case 2:
		BGE_HOSTADDR(r->bge_addr1, dmap->dm_segs[1].ds_addr);
		r->bge_len1 = dmap->dm_segs[1].ds_len;
		/* FALLTHROUGH */
	case 1:
		BGE_HOSTADDR(r->bge_bd.bge_addr, dmap->dm_segs[0].ds_addr);
		r->bge_bd.bge_len = dmap->dm_segs[0].ds_len;
		break;
	default:
		panic("%s: %d segments", __func__, dmap->dm_nsegs);
	}

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
		i * sizeof (struct bge_ext_rx_bd),
	    sizeof (struct bge_ext_rx_bd),
	    BUS_DMASYNC_PREWRITE);

	return (0);
}

int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i;

	if (ISSET(sc->bge_flags, BGE_RXRING_VALID))
		return (0);

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (bus_dmamap_create(sc->bge_dmatag, sc->bge_rx_std_len, 1,
		    sc->bge_rx_std_len, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->bge_cdata.bge_rx_std_map[i]) != 0) {
			printf("%s: unable to create dmamap for slot %d\n",
			    sc->bge_dev.dv_xname, i);
			goto uncreate;
		}
		bzero(&sc->bge_rdata->bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_std = BGE_STD_RX_RING_CNT - 1;

	/* lwm must be greater than the replenish threshold */
	if_rxr_init(&sc->bge_std_ring, 17, BGE_STD_RX_RING_CNT);
	bge_fill_rx_ring_std(sc);

	SET(sc->bge_flags, BGE_RXRING_VALID);

	return (0);

uncreate:
	while (--i >= 0) {
		bus_dmamap_destroy(sc->bge_dmatag,
		    sc->bge_cdata.bge_rx_std_map[i]);
	}
	return (1);
}

/*
 * When the refill timeout for a ring is active, that ring is so empty
 * that no more packets can be received on it, so the interrupt handler
 * will not attempt to refill it, meaning we don't need to protect against
 * interrupts here.
 */

void
bge_rxtick(void *arg)
{
	struct bge_softc *sc = arg;

	if (ISSET(sc->bge_flags, BGE_RXRING_VALID) &&
	    if_rxr_inuse(&sc->bge_std_ring) <= 8)
		bge_fill_rx_ring_std(sc);
}

void
bge_rxtick_jumbo(void *arg)
{
	struct bge_softc *sc = arg;

	if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID) &&
	    if_rxr_inuse(&sc->bge_jumbo_ring) <= 8)
		bge_fill_rx_ring_jumbo(sc);
}

void
bge_fill_rx_ring_std(struct bge_softc *sc)
{
	int i;
	int post = 0;
	u_int slots;

	i = sc->bge_std;
	for (slots = if_rxr_get(&sc->bge_std_ring, BGE_STD_RX_RING_CNT);
	    slots > 0; slots--) {
		BGE_INC(i, BGE_STD_RX_RING_CNT);

		if (bge_newbuf(sc, i) != 0)
			break;

		sc->bge_std = i;
		post = 1;
	}
	if_rxr_put(&sc->bge_std_ring, slots);

	if (post)
		bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	/*
	 * The bge chip always needs more than 8 packets on the ring;
	 * if we can't manage that now, try again later.
	 */
	if (if_rxr_inuse(&sc->bge_std_ring) <= 8)
		timeout_add(&sc->bge_rxtimeout, 1);
}

void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	bus_dmamap_t dmap;
	struct mbuf *m;
	int i;

	if (!ISSET(sc->bge_flags, BGE_RXRING_VALID))
		return;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		dmap = sc->bge_cdata.bge_rx_std_map[i];
		m = sc->bge_cdata.bge_rx_std_chain[i];
		if (m != NULL) {
			bus_dmamap_sync(sc->bge_dmatag, dmap, 0,
			    dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_dmatag, dmap);
			m_freem(m);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
		}
		bus_dmamap_destroy(sc->bge_dmatag, dmap);
		sc->bge_cdata.bge_rx_std_map[i] = NULL;
		bzero(&sc->bge_rdata->bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}

	CLR(sc->bge_flags, BGE_RXRING_VALID);
}

int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	volatile struct bge_rcb *rcb;
	int i;

	if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID))
		return (0);

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bus_dmamap_create(sc->bge_dmatag, BGE_JLEN, 4, BGE_JLEN, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->bge_cdata.bge_rx_jumbo_map[i]) != 0) {
			printf("%s: unable to create dmamap for slot %d\n",
			    sc->bge_dev.dv_xname, i);
			goto uncreate;
		}
		bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i],
		    sizeof(struct bge_ext_rx_bd));
	}

	sc->bge_jumbo = BGE_JUMBO_RX_RING_CNT - 1;

	/* lwm must be greater than the replenish threshold */
	if_rxr_init(&sc->bge_jumbo_ring, 17, BGE_JUMBO_RX_RING_CNT);
	bge_fill_rx_ring_jumbo(sc);

	SET(sc->bge_flags, BGE_JUMBO_RXRING_VALID);

	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags =
	    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	return (0);

uncreate:
	while (--i >= 0) {
		bus_dmamap_destroy(sc->bge_dmatag,
		    sc->bge_cdata.bge_rx_jumbo_map[i]);
	}
	return (1);
}

void
bge_fill_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;
	int post = 0;
	u_int slots;

	i = sc->bge_jumbo;
	for (slots = if_rxr_get(&sc->bge_jumbo_ring, BGE_JUMBO_RX_RING_CNT);
	    slots > 0; slots--) {
		BGE_INC(i, BGE_JUMBO_RX_RING_CNT);

		if (bge_newbuf_jumbo(sc, i) != 0)
			break;

		sc->bge_jumbo = i;
		post = 1;
	}
	if_rxr_put(&sc->bge_jumbo_ring, slots);

	if (post)
		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	/*
	 * The bge chip always needs more than 8 packets on the ring;
	 * if we can't manage that now, try again later.
	 */
	if (if_rxr_inuse(&sc->bge_jumbo_ring) <= 8)
		timeout_add(&sc->bge_rxtimeout_jumbo, 1);
}

void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	bus_dmamap_t dmap;
	struct mbuf *m;
	int i;

	if (!ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID))
		return;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		dmap = sc->bge_cdata.bge_rx_jumbo_map[i];
		m = sc->bge_cdata.bge_rx_jumbo_chain[i];
		if (m != NULL) {
			bus_dmamap_sync(sc->bge_dmatag, dmap, 0,
			    dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_dmatag, dmap);
			m_freem(m);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		bus_dmamap_destroy(sc->bge_dmatag, dmap);
		sc->bge_cdata.bge_rx_jumbo_map[i] = NULL;
		bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i],
		    sizeof(struct bge_ext_rx_bd));
	}

	CLR(sc->bge_flags, BGE_JUMBO_RXRING_VALID);
}

void
bge_free_tx_ring(struct bge_softc *sc)
{
	int i;

	if (!(sc->bge_flags & BGE_TXRING_VALID))
		return;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
			sc->bge_cdata.bge_tx_map[i] = NULL;
		}
		bzero(&sc->bge_rdata->bge_tx_ring[i],
		    sizeof(struct bge_tx_bd));

		bus_dmamap_destroy(sc->bge_dmatag, sc->bge_txdma[i]);
	}

	sc->bge_flags &= ~BGE_TXRING_VALID;
}

int
bge_init_tx_ring(struct bge_softc *sc)
{
	int i;
	bus_size_t txsegsz, txmaxsegsz;

	if (sc->bge_flags & BGE_TXRING_VALID)
		return (0);

	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	sc->bge_tx_prodidx = 0;
	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
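	/*
	 * On 5700 BX parts the mailbox write is repeated below, presumably
	 * to work around a write-posting quirk in those revisions.
	 */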
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* NIC-memory send ring not used; initialize to zero. */
	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		txsegsz = 4096;
		txmaxsegsz = BGE_JLEN;
	} else {
		txsegsz = MCLBYTES;
		txmaxsegsz = MCLBYTES;
	}

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (bus_dmamap_create(sc->bge_dmatag, txmaxsegsz,
		    BGE_NTXSEG, txsegsz, 0, BUS_DMA_NOWAIT, &sc->bge_txdma[i]))
			return (ENOBUFS);
	}

	sc->bge_flags |= BGE_TXRING_VALID;

	return (0);
}

void
bge_iff(struct bge_softc *sc)
{
	struct arpcom		*ac = &sc->arpcom;
	struct ifnet		*ifp = &ac->ac_if;
	struct ether_multi	*enm;
	struct ether_multistep  step;
	u_int8_t		hashes[16];
	u_int32_t		h, rxmode;

	/* First, zot all the existing filters. */
	rxmode = CSR_READ_4(sc, BGE_RX_MODE) & ~BGE_RXMODE_RX_PROMISC;
	ifp->if_flags &= ~IFF_ALLMULTI;
	memset(hashes, 0x00, sizeof(hashes));

	if (ifp->if_flags & IFF_PROMISC) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxmode |= BGE_RXMODE_RX_PROMISC;
	} else if (ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		memset(hashes, 0xff, sizeof(hashes));
	} else {
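		/*
		 * Hash each multicast address with little-endian CRC32
		 * and use the low 7 bits to index the 128-bit hash table
		 * that is copied into the MAR registers below.
		 */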
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

			setbit(hashes, h & 0x7F);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	bus_space_write_raw_region_4(sc->bge_btag, sc->bge_bhandle, BGE_MAR0,
	    hashes, sizeof(hashes));
	CSR_WRITE_4(sc, BGE_RX_MODE, rxmode);
}

void
bge_sig_pre_reset(struct bge_softc *sc, int type)
{
	/* no bge_asf_mode. */

	if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND)
		bge_ape_driver_state_change(sc, type);
}

void
bge_sig_post_reset(struct bge_softc *sc, int type)
{
	/* no bge_asf_mode. */

	if (type == BGE_RESET_SHUTDOWN)
		bge_ape_driver_state_change(sc, type);
}

void
bge_sig_legacy(struct bge_softc *sc, int type)
{
	/* no bge_asf_mode. */
}

void
bge_stop_fw(struct bge_softc *sc, int type)
{
	/* no bge_asf_mode. */
}

u_int32_t
bge_dma_swap_options(struct bge_softc *sc)
{
	u_int32_t dma_options = BGE_DMA_SWAP_OPTIONS;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
		dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA |
		    BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE |
		    BGE_MODECTL_HTX2B_ENABLE;
	}

	return (dma_options);
}

int
bge_phy_addr(struct bge_softc *sc)
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	int phy_addr = 1;

	switch (BGE_ASICREV(sc->bge_chipid)) {
	case BGE_ASICREV_BCM5717:
	case BGE_ASICREV_BCM5719:
	case BGE_ASICREV_BCM5720:
		phy_addr = pa->pa_function;
		if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) {
			phy_addr += (CSR_READ_4(sc, BGE_SGDIG_STS) &
			    BGE_SGDIGSTS_IS_SERDES) ? 8 : 1;
		} else {
			phy_addr += (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
			    BGE_CPMU_PHY_STRAP_IS_SERDES) ? 8 : 1;
		}
	}

	return (phy_addr);
}

/*
 * Do endian, PCI and DMA initialization.
 */
void
bge_chipinit(struct bge_softc *sc)
{
	struct pci_attach_args	*pa = &(sc->bge_pa);
	u_int32_t dma_rw_ctl, misc_ctl, mode_ctl;
	int i;

	/* Set endianness before we access any non-PCI registers. */
	misc_ctl = BGE_INIT;
	if (sc->bge_flags & BGE_TAGGED_STATUS)
		misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
	    misc_ctl);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) {
		/*
		 * For the 57766 and non-Ax versions of the 57765, the
		 * bootcode needs to set up the PCIe Fast Training Sequence
		 * (FTS) value to prevent transmit hangs.
		 */
		if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_57765_AX) {
		    CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL,
			CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL) |
			BGE_CPMU_PADRNG_CTL_RDIV2);
		}
	}

	/*
	 * Set up the PCI DMA control register.
	 */
	dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
	    BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);

	if (sc->bge_flags & BGE_PCIE) {
		if (sc->bge_mps >= 256)
			dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
		else
			dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
	} else if (sc->bge_flags & BGE_PCIX) {
		/* PCI-X bus */
		if (BGE_IS_5714_FAMILY(sc)) {
			/* 256 bytes for read and write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);

			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
			else
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
		} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
			/* 1536 bytes for read, 384 bytes for write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
		} else {
			/* 384 bytes for read and write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
			    (0x0F);
		}

		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
			u_int32_t tmp;

			/* Set ONEDMA_ATONCE for hardware workaround. */
			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 6 || tmp == 7)
				dma_rw_ctl |=
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;

			/* Set PCI-X DMA write workaround. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
		}
	} else {
		/* Conventional PCI bus: 256 bytes for read and write. */
		dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
		    BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);

		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
			dma_rw_ctl |= 0x0F;
	}

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
		dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
		    BGE_PCIDMARWCTL_ASRT_ALL_BE;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;

	if (BGE_IS_5717_PLUS(sc)) {
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
		if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
			dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;

		/*
		 * Enable HW workaround for controllers that misinterpret
		 * a status tag update and leave interrupts permanently
		 * disabled.
		 */
		if (!BGE_IS_57765_PLUS(sc) &&
		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5762)
			dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
	}

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);

	/*
	 * Set up general mode register.
	 */
	mode_ctl = bge_dma_swap_options(sc);
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
		/* Retain Host-2-BMC settings written by APE firmware. */
		mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
		    (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
		    BGE_MODECTL_WORDSWAP_B2HRX_DATA |
		    BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
	}
	mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
	    BGE_MODECTL_TX_NO_PHDR_CSUM;

1766 	/*
1767 	 * BCM5701 B5 have a bug causing data corruption when using
1768 	 * 64-bit DMA reads, which can be terminated early and then
1769 	 * completed later as 32-bit accesses, in combination with
1770 	 * certain bridges.
1771 	 */
1772 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
1773 	    sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1774 		mode_ctl |= BGE_MODECTL_FORCE_PCI32;
1775 
1776 	CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1777 
1778 	/*
1779 	 * Disable memory write invalidate.  Apparently it is not supported
1780 	 * properly by these devices.
1781 	 */
1782 	PCI_CLRBIT(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
1783 	    PCI_COMMAND_INVALIDATE_ENABLE);
1784 
1785 #ifdef __brokenalpha__
1786 	/*
1787 	 * Must ensure that we do not cross an 8K (bytes) boundary
1788 	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
1789 	 * restriction on some ALPHA platforms with early revision
1790 	 * 21174 PCI chipsets, such as the AlphaPC 164lx.
1791 	 */
1792 	PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1793 	    BGE_PCI_READ_BNDRY_1024);
1794 #endif
1795 
1796 	/* Set the timer prescaler (always 66MHz) */
1797 	CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1798 
1799 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1800 		DELAY(40);	/* XXX */
1801 
1802 		/* Put PHY into ready state */
1803 		BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1804 		CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1805 		DELAY(40);
1806 	}
1807 }
1808 
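/*
 * Bring up the NIC's internal blocks: the buffer manager, the producer,
 * return and send rings, the host coalescing engine and the various DMA
 * state machines.
 */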
1809 int
1810 bge_blockinit(struct bge_softc *sc)
1811 {
1812 	volatile struct bge_rcb		*rcb;
1813 	vaddr_t			rcb_addr;
1814 	bge_hostaddr		taddr;
1815 	u_int32_t		dmactl, rdmareg, mimode, val;
1816 	int			i, limit;
1817 
1818 	/*
1819 	 * Initialize the memory window pointer register so that
1820 	 * we can access the first 32K of internal NIC RAM. This will
1821 	 * allow us to set up the TX send ring RCBs and the RX return
1822 	 * ring RCBs, plus other things which live in NIC memory.
1823 	 */
1824 	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1825 
1826 	/* Configure mbuf memory pool */
1827 	if (!BGE_IS_5705_PLUS(sc)) {
1828 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1829 		    BGE_BUFFPOOL_1);
1830 
1831 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
1832 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1833 		else
1834 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1835 
1836 		/* Configure DMA resource pool */
1837 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1838 		    BGE_DMA_DESCRIPTORS);
1839 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1840 	}
1841 
1842 	/* Configure mbuf pool watermarks */
1843 	/* new Broadcom docs strongly recommend these: */
1844 	if (BGE_IS_5717_PLUS(sc)) {
1845 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1846 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1847 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1848 	} else if (BGE_IS_5705_PLUS(sc)) {
1849 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1850 
1851 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1852 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1853 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1854 		} else {
1855 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1856 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1857 		}
1858 	} else {
1859 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1860 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1861 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1862 	}
1863 
1864 	/* Configure DMA resource watermarks */
1865 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1866 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1867 
1868 	/* Enable buffer manager */
1869 	val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
1870 	/*
1871 	 * Change the arbitration algorithm of TXMBUF read request to
1872 	 * round-robin instead of priority based for BCM5719.  When
1873 	 * TXFIFO is almost empty, RDMA will hold its request until
1874 	 * TXFIFO is not almost empty.
1875 	 */
1876 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
1877 		val |= BGE_BMANMODE_NO_TX_UNDERRUN;
1878 	CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1879 
1880 	/* Poll for buffer manager start indication */
1881 	for (i = 0; i < 2000; i++) {
1882 		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1883 			break;
1884 		DELAY(10);
1885 	}
1886 
1887 	if (i == 2000) {
1888 		printf("%s: buffer manager failed to start\n",
1889 		    sc->bge_dev.dv_xname);
1890 		return (ENXIO);
1891 	}
1892 
1893 	/* Enable flow-through queues */
1894 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1895 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1896 
1897 	/* Wait until queue initialization is complete */
1898 	for (i = 0; i < 2000; i++) {
1899 		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1900 			break;
1901 		DELAY(10);
1902 	}
1903 
1904 	if (i == 2000) {
1905 		printf("%s: flow-through queue init failed\n",
1906 		    sc->bge_dev.dv_xname);
1907 		return (ENXIO);
1908 	}
1909 
1910 	/*
1911 	 * Summary of rings supported by the controller:
1912 	 *
1913 	 * Standard Receive Producer Ring
1914 	 * - This ring is used to feed receive buffers for "standard"
1915 	 *   sized frames (typically 1536 bytes) to the controller.
1916 	 *
1917 	 * Jumbo Receive Producer Ring
1918 	 * - This ring is used to feed receive buffers for jumbo sized
1919 	 *   frames (i.e. anything bigger than the "standard" frames)
1920 	 *   to the controller.
1921 	 *
1922 	 * Mini Receive Producer Ring
1923 	 * - This ring is used to feed receive buffers for "mini"
1924 	 *   sized frames to the controller.
1925 	 * - This feature required external memory for the controller
1926 	 *   but was never used in a production system.  Should always
1927 	 *   be disabled.
1928 	 *
1929 	 * Receive Return Ring
1930 	 * - After the controller has placed an incoming frame into a
1931 	 *   receive buffer that buffer is moved into a receive return
1932 	 *   ring.  The driver is then responsible for passing the
1933 	 *   buffer up to the stack.  Many versions of the controller
1934 	 *   support multiple RR rings.
1935 	 *
1936 	 * Send Ring
1937 	 * - This ring is used for outgoing frames.  Many versions of
1938 	 *   the controller support multiple send rings.
1939 	 */
1940 
1941 	/* Initialize the standard RX ring control block */
1942 	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
1943 	BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
1944 	if (BGE_IS_5717_PLUS(sc)) {
1945 		/*
1946 		 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1947 		 * Bits 15-2 : Maximum RX frame size
1948 		 * Bit 1     : 1 = Ring Disabled, 0 = Ring ENabled
1949 		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
1950 		 */
1951 		rcb->bge_maxlen_flags =
1952 		    BGE_RCB_MAXLEN_FLAGS(512, ETHER_MAX_DIX_LEN << 2);
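		/*
		 * BGE_RCB_MAXLEN_FLAGS() packs its first argument into
		 * bits 31-16 and its second into the low word, so the
		 * << 2 above lands the frame size in bits 15-2.
		 */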
1953 	} else if (BGE_IS_5705_PLUS(sc)) {
1954 		/*
1955 		 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
1956 		 * Bits 15-2 : Reserved (should be 0)
1957 		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
1958 		 * Bit 0     : Reserved
1959 		 */
1960 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1961 	} else {
1962 		/*
1963 		 * Ring size is always XXX entries
1964 		 * Bits 31-16: Maximum RX frame size
1965 		 * Bits 15-2 : Reserved (should be 0)
1966 		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
1967 		 * Bit 0     : Reserved
1968 		 */
1969 		rcb->bge_maxlen_flags =
1970 		    BGE_RCB_MAXLEN_FLAGS(ETHER_MAX_DIX_LEN, 0);
1971 	}
1972 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
1973 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
1974 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
1975 		rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
1976 	else
1977 		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1978 	/* Write the standard receive producer ring control block. */
1979 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1980 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1981 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1982 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1983 
1984 	/* Reset the standard receive producer ring producer index. */
1985 	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1986 
1987 	/*
1988 	 * Initialize the Jumbo RX ring control block
1989 	 * We set the 'ring disabled' bit in the flags
1990 	 * field until we're actually ready to start
1991 	 * using this ring (i.e. once we set the MTU
1992 	 * high enough to require it).
1993 	 */
1994 	if (sc->bge_flags & BGE_JUMBO_RING) {
1995 		rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1996 		BGE_HOSTADDR(rcb->bge_hostaddr,
1997 		    BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
1998 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1999 		    BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
2000 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2001 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2002 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2003 			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
2004 		else
2005 			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
2006 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
2007 		    rcb->bge_hostaddr.bge_addr_hi);
2008 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
2009 		    rcb->bge_hostaddr.bge_addr_lo);
2010 		/* Program the jumbo receive producer ring RCB parameters. */
2011 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
2012 		    rcb->bge_maxlen_flags);
2013 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
2014 		/* Reset the jumbo receive producer ring producer index. */
2015 		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
2016 	}
2017 
2018 	/* Disable the mini receive producer ring RCB. */
2019 	if (BGE_IS_5700_FAMILY(sc)) {
2020 		/* Set up dummy disabled mini ring RCB */
2021 		rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
2022 		rcb->bge_maxlen_flags =
2023 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
2024 		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
2025 		    rcb->bge_maxlen_flags);
2026 		/* Reset the mini receive producer ring producer index. */
2027 		bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
2028 
2029 		/* XXX why? */
2030 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2031 		    offsetof(struct bge_ring_data, bge_info),
2032 		    sizeof (struct bge_gib),
2033 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2034 	}
2035 
2036 	/* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
2037 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2038 		if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
2039 		    sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
2040 		    sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
2041 			CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
2042 			    (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
2043 	}
2044 	/*
2045 	 * The BD ring replenish thresholds control how often the
2046 	 * hardware fetches new BD's from the producer rings in host
2047 	 * memory.  Setting the value too low on a busy system can
2048 	 * starve the hardware and reduce the throughput.
2049 	 *
2050 	 * Set the BD ring replenish thresholds. The recommended
2051 	 * values are 1/8th the number of descriptors allocated to
2052 	 * each ring, but since we try to avoid filling the entire
2053 	 * ring, we set these to the minimal value of 8.  This needs to
2054 	 * be done on several of the supported chip revisions anyway,
2055 	 * to work around HW bugs.
2056 	 */
2057 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, 8);
2058 	if (sc->bge_flags & BGE_JUMBO_RING)
2059 		CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 8);
2060 
2061 	if (BGE_IS_5717_PLUS(sc)) {
2062 		CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4);
2063 		CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4);
2064 	}
2065 
2066 	/*
2067 	 * Disable all send rings by setting the 'ring disabled' bit
2068 	 * in the flags field of all the TX send ring control blocks,
2069 	 * located in NIC memory.
2070 	 */
2071 	if (BGE_IS_5700_FAMILY(sc)) {
2072 		/* 5700 to 5704 had 16 send rings. */
2073 		limit = BGE_TX_RINGS_EXTSSRAM_MAX;
2074 	} else if (BGE_IS_57765_PLUS(sc) ||
2075 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
2076 		limit = 2;
2077 	else if (BGE_IS_5717_PLUS(sc))
2078 		limit = 4;
2079 	else
2080 		limit = 1;
2081 	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2082 	for (i = 0; i < limit; i++) {
2083 		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2084 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
2085 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
2086 		rcb_addr += sizeof(struct bge_rcb);
2087 	}
2088 
2089 	/* Configure send ring RCB 0 (we use only the first ring) */
2090 	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2091 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
2092 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2093 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2094 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2095 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2096 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2097 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, BGE_SEND_RING_5717);
2098 	else
2099 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
2100 		    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
2101 	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2102 	    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
2103 
2104 	/*
2105 	 * Disable all receive return rings by setting the
2106 	 * 'ring disabled' bit in the flags field of all the receive
2107 	 * return ring control blocks, located in NIC memory.
2108 	 */
2109 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2110 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2111 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
2112 		/* Should be 17, use 16 until we get an SRAM map. */
2113 		limit = 16;
2114 	} else if (BGE_IS_5700_FAMILY(sc))
2115 		limit = BGE_RX_RINGS_MAX;
2116 	else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2117 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762 ||
2118 	    BGE_IS_57765_PLUS(sc))
2119 		limit = 4;
2120 	else
2121 		limit = 1;
2122 	/* Disable all receive return rings */
2123 	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2124 	for (i = 0; i < limit; i++) {
2125 		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
2126 		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
2127 		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2128 		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
2129 			BGE_RCB_FLAG_RING_DISABLED));
2130 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
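		/*
		 * The consumer mailboxes are 64 bits wide, hence the
		 * 8-byte stride between the per-ring indexes.
		 */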
2131 		bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
2132 		    (i * (sizeof(u_int64_t))), 0);
2133 		rcb_addr += sizeof(struct bge_rcb);
2134 	}
2135 
2136 	/*
2137 	 * Set up receive return ring 0.  Note that the NIC address
2138 	 * for RX return rings is 0x0.  The return rings live entirely
2139 	 * within the host, so the nicaddr field in the RCB isn't used.
2140 	 */
2141 	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2142 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
2143 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2144 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2145 	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
2146 	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2147 	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
2148 
2149 	/* Set random backoff seed for TX */
2150 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
2151 	    (sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
2152 	     sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
2153 	     sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5]) &
2154 	    BGE_TX_BACKOFF_SEED_MASK);
2155 
2156 	/* Set inter-packet gap */
2157 	val = 0x2620;
2158 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
2159 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
2160 		val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
2161 		    (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
2162 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
2163 
2164 	/*
2165 	 * Specify which ring to use for packets that don't match
2166 	 * any RX rules.
2167 	 */
2168 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
2169 
2170 	/*
2171 	 * Configure number of RX lists. One interrupt distribution
2172 	 * list, sixteen active lists, one bad frames class.
2173 	 */
2174 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
2175 
2176 	/* Initialize RX list placement stats mask. */
2177 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007BFFFF);
2178 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
2179 
2180 	/* Disable host coalescing until we get it set up */
2181 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
2182 
2183 	/* Poll to make sure it's shut down. */
2184 	for (i = 0; i < 2000; i++) {
2185 		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
2186 			break;
2187 		DELAY(10);
2188 	}
2189 
2190 	if (i == 2000) {
2191 		printf("%s: host coalescing engine failed to idle\n",
2192 		    sc->bge_dev.dv_xname);
2193 		return (ENXIO);
2194 	}
2195 
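	/*
	 * Host coalescing semantics, roughly: the controller DMAs a
	 * fresh status block and raises an interrupt when either the
	 * coalescing tick timer expires or the coalesced BD count is
	 * reached, whichever happens first.
	 */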
2196 	/* Set up host coalescing defaults */
2197 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
2198 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
2199 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
2200 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
2201 	if (!(BGE_IS_5705_PLUS(sc))) {
2202 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
2203 		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
2204 	}
2205 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
2206 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
2207 
2208 	/* Set up address of statistics block */
2209 	if (!(BGE_IS_5705_PLUS(sc))) {
2210 		BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
2211 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
2212 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);
2213 
2214 		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
2215 		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
2216 		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
2217 	}
2218 
2219 	/* Set up address of status block */
2220 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
2221 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
2222 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
2223 
2224 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
2225 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
2226 
2227 	/* Set up status block size. */
2228 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 &&
2229 	    sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
2230 		val = BGE_STATBLKSZ_FULL;
2231 		bzero(&sc->bge_rdata->bge_status_block, BGE_STATUS_BLK_SZ);
2232 	} else {
2233 		val = BGE_STATBLKSZ_32BYTE;
2234 		bzero(&sc->bge_rdata->bge_status_block, 32);
2235 	}
2236 
2237 	/* Turn on host coalescing state machine */
2238 	CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
2239 
2240 	/* Turn on RX BD completion state machine and enable attentions */
2241 	CSR_WRITE_4(sc, BGE_RBDC_MODE,
2242 	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
2243 
2244 	/* Turn on RX list placement state machine */
2245 	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2246 
2247 	/* Turn on RX list selector state machine. */
2248 	if (!(BGE_IS_5705_PLUS(sc)))
2249 		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2250 
2251 	val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
2252 	    BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
2253 	    BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
2254 	    BGE_MACMODE_FRMHDR_DMA_ENB;
2255 
2256 	if (sc->bge_flags & BGE_FIBER_TBI)
2257 	    val |= BGE_PORTMODE_TBI;
2258 	else if (sc->bge_flags & BGE_FIBER_MII)
2259 	    val |= BGE_PORTMODE_GMII;
2260 	else
2261 	    val |= BGE_PORTMODE_MII;
2262 
2263 	/* Allow APE to send/receive frames. */
2264 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
2265 		val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
2266 
2267 	/* Turn on DMA, clear stats */
2268 	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2269 	DELAY(40);
2270 
2271 	/* Set misc. local control, enable interrupts on attentions */
2272 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
2273 
2274 #ifdef notdef
2275 	/* Assert GPIO pins for PHY reset */
2276 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
2277 	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
2278 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
2279 	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
2280 #endif
2281 
2282 	/* Turn on DMA completion state machine */
2283 	if (!(BGE_IS_5705_PLUS(sc)))
2284 		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2285 
2286 	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
2287 
2288 	/* Enable host coalescing bug fix. */
2289 	if (BGE_IS_5755_PLUS(sc))
2290 		val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2291 
2292 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)
2293 		val |= BGE_WDMAMODE_BURST_ALL_DATA;
2294 
2295 	/* Turn on write DMA state machine */
2296 	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
2297 	DELAY(40);
2298 
2299 	val = BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS;
2300 
2301 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717)
2302 		val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
2303 
2304 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2305 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2306 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
2307 		val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2308 		       BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2309 		       BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2310 
2311 	if (sc->bge_flags & BGE_PCIE)
2312 		val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2313 
2314 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
2315 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2316 		val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
2317 		    BGE_RDMAMODE_H2BNC_VLAN_DET;
2318 		/*
2319 		 * Allow multiple outstanding read requests from the
2320 		 * non-LSO read DMA engine.
2321 		 */
2322 		val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
2323 	}
2324 
2325 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2326 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2327 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2328 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780 ||
2329 	    BGE_IS_5717_PLUS(sc) || BGE_IS_57765_PLUS(sc)) {
2330 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
2331 			rdmareg = BGE_RDMA_RSRVCTRL_REG2;
2332 		else
2333 			rdmareg = BGE_RDMA_RSRVCTRL;
2334 		dmactl = CSR_READ_4(sc, rdmareg);
2335 		/*
2336 		 * Adjust the TX margin to prevent TX data corruption and
2337 		 * fix internal FIFO overflow.
2338 		 */
2339 		if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 ||
2340 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2341 			dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2342 			    BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
2343 			    BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
2344 			dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2345 			    BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
2346 			    BGE_RDMA_RSRVCTRL_TXMRGN_320B;
2347 		}
2348 		/*
2349 		 * Enable fix for read DMA FIFO overruns.
2350 		 * The fix is to limit the number of RX BDs
2351 		 * the hardware would fetch at a time.
2352 		 */
2353 		CSR_WRITE_4(sc, rdmareg, dmactl |
2354 		    BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2355 	}
2356 
2357 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) {
2358 		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2359 		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2360 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2361 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2362 	} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
2363 		/*
2364 		 * Allow 4KB burst length reads for non-LSO frames.
2365 		 * Enable 512B burst length reads for buffer descriptors.
2366 		 */
2367 		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2368 		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2369 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
2370 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2371 	} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
2372 		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2,
2373 		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) |
2374 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2375 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2376 	}
2377 
2378 	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2379 	DELAY(40);
2380 
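	/*
	 * 5719/5720 TXMBUF workaround: if any read DMA channel has
	 * handled a frame larger than ETHER_MAX_LEN, enable the TX
	 * length workaround bit so the engine does not miscount the
	 * available TXMBUF space.
	 */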
2381 	if (sc->bge_flags & BGE_RDMA_BUG) {
2382 		for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) {
2383 			val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4);
2384 			if ((val & 0xFFFF) > ETHER_MAX_LEN)
2385 				break;
2386 			if (((val >> 16) & 0xFFFF) > ETHER_MAX_LEN)
2387 				break;
2388 		}
2389 		if (i != BGE_NUM_RDMA_CHANNELS / 2) {
2390 			val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
2391 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
2392 				val |= BGE_RDMA_TX_LENGTH_WA_5719;
2393 			else
2394 				val |= BGE_RDMA_TX_LENGTH_WA_5720;
2395 			CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
2396 		}
2397 	}
2398 
2399 	/* Turn on RX data completion state machine */
2400 	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2401 
2402 	/* Turn on RX BD initiator state machine */
2403 	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2404 
2405 	/* Turn on RX data and RX BD initiator state machine */
2406 	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2407 
2408 	/* Turn on Mbuf cluster free state machine */
2409 	if (!BGE_IS_5705_PLUS(sc))
2410 		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2411 
2412 	/* Turn on send BD completion state machine */
2413 	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2414 
2415 	/* Turn on send data completion state machine */
2416 	val = BGE_SDCMODE_ENABLE;
2417 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
2418 		val |= BGE_SDCMODE_CDELAY;
2419 	CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2420 
2421 	/* Turn on send data initiator state machine */
2422 	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2423 
2424 	/* Turn on send BD initiator state machine */
2425 	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2426 
2427 	/* Turn on send BD selector state machine */
2428 	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2429 
2430 	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007BFFFF);
2431 	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2432 	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
2433 
2434 	/* ack/clear link change events */
2435 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2436 	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2437 	    BGE_MACSTAT_LINK_CHANGED);
2438 
2439 	/* Enable PHY auto polling (for MII/GMII only) */
2440 	if (sc->bge_flags & BGE_FIBER_TBI) {
2441 		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2442 	} else {
2443 		if ((sc->bge_flags & BGE_CPMU_PRESENT) != 0)
2444 			mimode = BGE_MIMODE_500KHZ_CONST;
2445 		else
2446 			mimode = BGE_MIMODE_BASE;
2447 		if (BGE_IS_5700_FAMILY(sc) ||
2448 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705) {
2449 			mimode |= BGE_MIMODE_AUTOPOLL;
2450 			BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
2451 		}
2452 		mimode |= BGE_MIMODE_PHYADDR(sc->bge_phy_addr);
2453 		CSR_WRITE_4(sc, BGE_MI_MODE, mimode);
2454 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700)
2455 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2456 			    BGE_EVTENB_MI_INTERRUPT);
2457 	}
2458 
2459 	/*
2460 	 * Clear any pending link state attention.
2461 	 * Otherwise some link state change events may be lost until attention
2462 	 * is cleared by bge_intr() -> bge_link_upd() sequence.
2463 	 * It's not necessary on newer BCM chips - perhaps enabling link
2464 	 * state change attentions implies clearing pending attention.
2465 	 */
2466 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2467 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2468 	    BGE_MACSTAT_LINK_CHANGED);
2469 
2470 	/* Enable link state change attentions. */
2471 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2472 
2473 	return (0);
2474 }
2475 
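/*
 * Look up a chip revision by exact chip id first, falling back to the
 * ASIC major revision when there is no exact match.
 */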
2476 const struct bge_revision *
2477 bge_lookup_rev(u_int32_t chipid)
2478 {
2479 	const struct bge_revision *br;
2480 
2481 	for (br = bge_revisions; br->br_name != NULL; br++) {
2482 		if (br->br_chipid == chipid)
2483 			return (br);
2484 	}
2485 
2486 	for (br = bge_majorrevs; br->br_name != NULL; br++) {
2487 		if (br->br_chipid == BGE_ASICREV(chipid))
2488 			return (br);
2489 	}
2490 
2491 	return (NULL);
2492 }
2493 
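/*
 * Decide whether MSI can be used.  MSI is left disabled on chips and
 * revisions where it is known to be broken.
 */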
2494 int
2495 bge_can_use_msi(struct bge_softc *sc)
2496 {
2497 	int can_use_msi = 0;
2498 
2499 	switch (BGE_ASICREV(sc->bge_chipid)) {
2500 	case BGE_ASICREV_BCM5714_A0:
2501 	case BGE_ASICREV_BCM5714:
2502 		/*
2503 		 * Apparently, MSI doesn't work when these chips are
2504 		 * configured in single-port mode.
2505 		 */
2506 		break;
2507 	case BGE_ASICREV_BCM5750:
2508 		if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_AX &&
2509 		    BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_BX)
2510 			can_use_msi = 1;
2511 		break;
2512 	default:
2513 		if (BGE_IS_575X_PLUS(sc))
2514 			can_use_msi = 1;
2515 	}
2516 
2517 	return (can_use_msi);
2518 }
2519 
2520 /*
2521  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2522  * against our list and return a match if we find one. Note
2523  * that since the Broadcom controller contains VPD support, we
2524  * can get the device name string from the controller itself instead
2525  * of the compiled-in string. This is a little slow, but it guarantees
2526  * we'll always announce the right product name.
2527  */
2528 int
2529 bge_probe(struct device *parent, void *match, void *aux)
2530 {
2531 	return (pci_matchbyid(aux, bge_devices, nitems(bge_devices)));
2532 }
2533 
2534 void
2535 bge_attach(struct device *parent, struct device *self, void *aux)
2536 {
2537 	struct bge_softc	*sc = (struct bge_softc *)self;
2538 	struct pci_attach_args	*pa = aux;
2539 	pci_chipset_tag_t	pc = pa->pa_pc;
2540 	const struct bge_revision *br;
2541 	pcireg_t		pm_ctl, memtype, subid, reg;
2542 	pci_intr_handle_t	ih;
2543 	const char		*intrstr = NULL;
2544 	int			gotenaddr = 0;
2545 	u_int32_t		hwcfg = 0;
2546 	u_int32_t		mac_addr = 0;
2547 	u_int32_t		misccfg;
2548 	struct ifnet		*ifp;
2549 	caddr_t			kva;
2550 #ifdef __sparc64__
2551 	char			name[32];
2552 #endif
2553 
2554 	sc->bge_pa = *pa;
2555 
2556 	subid = pci_conf_read(pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
2557 
2558 	/*
2559 	 * Map control/status registers.
2560 	 */
2561 	DPRINTFN(5, ("Map control/status regs\n"));
2562 
2563 	DPRINTFN(5, ("pci_mapreg_map\n"));
2564 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0);
2565 	if (pci_mapreg_map(pa, BGE_PCI_BAR0, memtype, 0, &sc->bge_btag,
2566 	    &sc->bge_bhandle, NULL, &sc->bge_bsize, 0)) {
2567 		printf(": can't find mem space\n");
2568 		return;
2569 	}
2570 
2571 	/*
2572 	 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
2573 	 * can clobber the chip's PCI config-space power control registers,
2574 	 * leaving the card in D3 powersave state.
2575 	 * We do not have memory-mapped registers in this state,
2576 	 * so force the device into D0 state before starting initialization.
2577 	 */
2578 	pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
2579 	pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
2580 	pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */
2581 	pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
2582 	DELAY(1000);	/* 27 usec is allegedly sufficient */
2583 
2584 	/*
2585 	 * Save ASIC rev.
2586 	 */
2587 	sc->bge_chipid =
2588 	     (pci_conf_read(pc, pa->pa_tag, BGE_PCI_MISC_CTL)
2589 	      >> BGE_PCIMISCCTL_ASICREV_SHIFT);
2590 
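	/*
	 * Chips that report BGE_ASICREV_USE_PRODID_REG encode the real
	 * chip id in a separate product id register whose location
	 * depends on the device.
	 */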
2591 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2592 		switch (PCI_PRODUCT(pa->pa_id)) {
2593 		case PCI_PRODUCT_BROADCOM_BCM5717:
2594 		case PCI_PRODUCT_BROADCOM_BCM5718:
2595 		case PCI_PRODUCT_BROADCOM_BCM5719:
2596 		case PCI_PRODUCT_BROADCOM_BCM5720:
2597 		case PCI_PRODUCT_BROADCOM_BCM5725:
2598 		case PCI_PRODUCT_BROADCOM_BCM5727:
2599 		case PCI_PRODUCT_BROADCOM_BCM5762:
2600 		case PCI_PRODUCT_BROADCOM_BCM57764:
2601 		case PCI_PRODUCT_BROADCOM_BCM57767:
2602 		case PCI_PRODUCT_BROADCOM_BCM57787:
2603 			sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2604 			    BGE_PCI_GEN2_PRODID_ASICREV);
2605 			break;
2606 		case PCI_PRODUCT_BROADCOM_BCM57761:
2607 		case PCI_PRODUCT_BROADCOM_BCM57762:
2608 		case PCI_PRODUCT_BROADCOM_BCM57765:
2609 		case PCI_PRODUCT_BROADCOM_BCM57766:
2610 		case PCI_PRODUCT_BROADCOM_BCM57781:
2611 		case PCI_PRODUCT_BROADCOM_BCM57782:
2612 		case PCI_PRODUCT_BROADCOM_BCM57785:
2613 		case PCI_PRODUCT_BROADCOM_BCM57786:
2614 		case PCI_PRODUCT_BROADCOM_BCM57791:
2615 		case PCI_PRODUCT_BROADCOM_BCM57795:
2616 			sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2617 			    BGE_PCI_GEN15_PRODID_ASICREV);
2618 			break;
2619 		default:
2620 			sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2621 			    BGE_PCI_PRODID_ASICREV);
2622 			break;
2623 		}
2624 	}
2625 
2626 	sc->bge_phy_addr = bge_phy_addr(sc);
2627 
2628 	printf(", ");
2629 	br = bge_lookup_rev(sc->bge_chipid);
2630 	if (br == NULL)
2631 		printf("unknown ASIC (0x%x)", sc->bge_chipid);
2632 	else
2633 		printf("%s (0x%x)", br->br_name, sc->bge_chipid);
2634 
2635 	/*
2636 	 * PCI Express or PCI-X controller check.
2637 	 */
2638 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
2639 	    &sc->bge_expcap, NULL) != 0) {
2640 		/* Extract supported maximum payload size. */
2641 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, sc->bge_expcap +
2642 		    PCI_PCIE_DCAP);
2643 		sc->bge_mps = 128 << (reg & 0x7);
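		/*
		 * Precompute the PCIe max read request size field (bits
		 * 14:12 encode 128 << n bytes): fls(2048) - 8 == 4
		 * selects 2048 bytes and fls(4096) - 8 == 5 selects
		 * 4096 bytes.
		 */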
2644 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2645 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2646 			sc->bge_expmrq = (fls(2048) - 8) << 12;
2647 		else
2648 			sc->bge_expmrq = (fls(4096) - 8) << 12;
2649 		/* Disable PCIe Active State Power Management (ASPM). */
2650 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
2651 		    sc->bge_expcap + PCI_PCIE_LCSR);
2652 		reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1);
2653 		pci_conf_write(pa->pa_pc, pa->pa_tag,
2654 		    sc->bge_expcap + PCI_PCIE_LCSR, reg);
2655 		sc->bge_flags |= BGE_PCIE;
2656 	} else {
2657 		if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
2658 		    BGE_PCISTATE_PCI_BUSMODE) == 0)
2659 			sc->bge_flags |= BGE_PCIX;
2660 	}
2661 
2662 	/*
2663 	 * SEEPROM check.
2664 	 */
2665 #ifdef __sparc64__
2666 	/*
2667 	 * Onboard interfaces on UltraSPARC systems generally don't
2668 	 * have a SEEPROM fitted.  These interfaces, and cards that
2669 	 * have FCode, are named "network" by the PROM, whereas cards
2670 	 * without FCode show up as "ethernet".  Since we don't really
2671 	 * need the information from the SEEPROM on cards that have
2672 	 * FCode, it's fine to pretend they don't have one.
2673 	 */
2674 	if (OF_getprop(PCITAG_NODE(pa->pa_tag), "name", name,
2675 	    sizeof(name)) > 0 && strcmp(name, "network") == 0)
2676 		sc->bge_flags |= BGE_NO_EEPROM;
2677 #endif
2678 
2679 	/* Save chipset family. */
2680 	switch (BGE_ASICREV(sc->bge_chipid)) {
2681 	case BGE_ASICREV_BCM5762:
2682 	case BGE_ASICREV_BCM57765:
2683 	case BGE_ASICREV_BCM57766:
2684 		sc->bge_flags |= BGE_57765_PLUS;
2685 		/* FALLTHROUGH */
2686 	case BGE_ASICREV_BCM5717:
2687 	case BGE_ASICREV_BCM5719:
2688 	case BGE_ASICREV_BCM5720:
2689 		sc->bge_flags |= BGE_5717_PLUS | BGE_5755_PLUS | BGE_575X_PLUS |
2690 		    BGE_5705_PLUS | BGE_JUMBO_CAPABLE | BGE_JUMBO_RING |
2691 		    BGE_JUMBO_FRAME;
2692 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2693 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
2694 			/*
2695 			 * Enable the workaround for the DMA engine's miscalculation
2696 			 * of TXMBUF available space.
2697 			 */
2698 			sc->bge_flags |= BGE_RDMA_BUG;
2699 
2700 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 &&
2701 			    sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
2702 				/* Jumbo frame on BCM5719 A0 does not work. */
2703 				sc->bge_flags &= ~(BGE_JUMBO_CAPABLE |
2704 				    BGE_JUMBO_RING | BGE_JUMBO_FRAME);
2705 			}
2706 		}
2707 		break;
2708 	case BGE_ASICREV_BCM5755:
2709 	case BGE_ASICREV_BCM5761:
2710 	case BGE_ASICREV_BCM5784:
2711 	case BGE_ASICREV_BCM5785:
2712 	case BGE_ASICREV_BCM5787:
2713 	case BGE_ASICREV_BCM57780:
2714 		sc->bge_flags |= BGE_5755_PLUS | BGE_575X_PLUS | BGE_5705_PLUS;
2715 		break;
2716 	case BGE_ASICREV_BCM5700:
2717 	case BGE_ASICREV_BCM5701:
2718 	case BGE_ASICREV_BCM5703:
2719 	case BGE_ASICREV_BCM5704:
2720 		sc->bge_flags |= BGE_5700_FAMILY | BGE_JUMBO_CAPABLE | BGE_JUMBO_RING;
2721 		break;
2722 	case BGE_ASICREV_BCM5714_A0:
2723 	case BGE_ASICREV_BCM5780:
2724 	case BGE_ASICREV_BCM5714:
2725 		sc->bge_flags |= BGE_5714_FAMILY | BGE_JUMBO_CAPABLE | BGE_JUMBO_STD;
2726 		/* FALLTHROUGH */
2727 	case BGE_ASICREV_BCM5750:
2728 	case BGE_ASICREV_BCM5752:
2729 	case BGE_ASICREV_BCM5906:
2730 		sc->bge_flags |= BGE_575X_PLUS;
2731 		/* FALLTHROUGH */
2732 	case BGE_ASICREV_BCM5705:
2733 		sc->bge_flags |= BGE_5705_PLUS;
2734 		break;
2735 	}
2736 
2737 	if (sc->bge_flags & BGE_JUMBO_STD)
2738 		sc->bge_rx_std_len = BGE_JLEN;
2739 	else
2740 		sc->bge_rx_std_len = MCLBYTES;
2741 
2742 	/*
2743 	 * When using the BCM5701 in PCI-X mode, data corruption has
2744 	 * been observed in the first few bytes of some received packets.
2745 	 * Aligning the packet buffer in memory eliminates the corruption.
2746 	 * Unfortunately, this misaligns the packet payloads.  On platforms
2747 	 * which do not support unaligned accesses, we will realign the
2748 	 * payloads by copying the received packets.
2749 	 */
2750 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
2751 	    sc->bge_flags & BGE_PCIX)
2752 		sc->bge_flags |= BGE_RX_ALIGNBUG;
2753 
2754 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2755 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) &&
2756 	    PCI_VENDOR(subid) == DELL_VENDORID)
2757 		sc->bge_phy_flags |= BGE_PHY_NO_3LED;
2758 
2759 	misccfg = CSR_READ_4(sc, BGE_MISC_CFG);
2760 	misccfg &= BGE_MISCCFG_BOARD_ID_MASK;
2761 
2762 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2763 	    (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2764 	     misccfg == BGE_MISCCFG_BOARD_ID_5788M))
2765 		sc->bge_flags |= BGE_IS_5788;
2766 
2767 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 &&
2768 	     (misccfg == 0x4000 || misccfg == 0x8000)) ||
2769 	    (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2770 	     PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
2771 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 ||
2772 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
2773 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
2774 	    (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
2775 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F ||
2776 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F ||
2777 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
2778 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 ||
2779 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 ||
2780 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795 ||
2781 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2782 		sc->bge_phy_flags |= BGE_PHY_10_100_ONLY;
2783 
2784 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2785 	    (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2786 	     (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2787 	      sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
2788 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2789 		sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
2790 
2791 	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2792 	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2793 		sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
2794 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX ||
2795 	    BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX)
2796 		sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
2797 	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2798 		sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
2799 
2800 	if ((BGE_IS_5705_PLUS(sc)) &&
2801 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 &&
2802 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
2803 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780 &&
2804 	    !BGE_IS_5717_PLUS(sc)) {
2805 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2806 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2807 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2808 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) {
2809 			if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 &&
2810 			    PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756)
2811 				sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
2812 			if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M)
2813 				sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
2814 		} else
2815 			sc->bge_phy_flags |= BGE_PHY_BER_BUG;
2816 	}
2817 
2818 	/* Identify chips with APE processor. */
2819 	switch (BGE_ASICREV(sc->bge_chipid)) {
2820 	case BGE_ASICREV_BCM5717:
2821 	case BGE_ASICREV_BCM5719:
2822 	case BGE_ASICREV_BCM5720:
2823 	case BGE_ASICREV_BCM5761:
2824 	case BGE_ASICREV_BCM5762:
2825 		sc->bge_flags |= BGE_APE;
2826 		break;
2827 	}
2828 
2829 	/* Chips with APE need BAR2 access for APE registers/memory. */
2830 	if ((sc->bge_flags & BGE_APE) != 0) {
2831 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2);
2832 		if (pci_mapreg_map(pa, BGE_PCI_BAR2, memtype, 0,
2833 		    &sc->bge_apetag, &sc->bge_apehandle, NULL,
2834 		    &sc->bge_apesize, 0)) {
2835 			printf(": couldn't map BAR2 memory\n");
2836 			goto fail_1;
2837 		}
2838 
2839 		/* Enable APE register/memory access by host driver. */
2840 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
2841 		reg |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
2842 		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
2843 		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
2844 		pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, reg);
2845 
2846 		bge_ape_lock_init(sc);
2847 		bge_ape_read_fw_ver(sc);
2848 	}
2849 
2850 	/* Identify the chips that use a CPMU. */
2851 	if (BGE_IS_5717_PLUS(sc) ||
2852 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2853 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2854 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2855 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
2856 		sc->bge_flags |= BGE_CPMU_PRESENT;
2857 
2858 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSI,
2859 	    &sc->bge_msicap, NULL)) {
2860 		if (bge_can_use_msi(sc) == 0)
2861 			pa->pa_flags &= ~PCI_FLAGS_MSI_ENABLED;
2862 	}
2863 
2864 	DPRINTFN(5, ("pci_intr_map\n"));
2865 	if (pci_intr_map_msi(pa, &ih) == 0)
2866 		sc->bge_flags |= BGE_MSI;
2867 	else if (pci_intr_map(pa, &ih)) {
2868 		printf(": couldn't map interrupt\n");
2869 		goto fail_1;
2870 	}
2871 
2872 	/*
2873 	 * All controllers except the BCM5700 support tagged status, but
2874 	 * we use tagged status only in the MSI case on the BCM5717.
2875 	 * Otherwise MSI on the BCM5717 does not work.
2876 	 */
2877 	if (BGE_IS_5717_PLUS(sc) && sc->bge_flags & BGE_MSI)
2878 		sc->bge_flags |= BGE_TAGGED_STATUS;
2879 
2880 	DPRINTFN(5, ("pci_intr_string\n"));
2881 	intrstr = pci_intr_string(pc, ih);
2882 
2883 	/* Try to reset the chip. */
2884 	DPRINTFN(5, ("bge_reset\n"));
2885 	bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
2886 	bge_reset(sc);
2887 
2888 	bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
2889 	bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
2890 
2891 	bge_chipinit(sc);
2892 
2893 #if defined(__sparc64__) || defined(__HAVE_FDT)
2894 	if (!gotenaddr && PCITAG_NODE(pa->pa_tag)) {
2895 		if (OF_getprop(PCITAG_NODE(pa->pa_tag), "local-mac-address",
2896 		    sc->arpcom.ac_enaddr, ETHER_ADDR_LEN) == ETHER_ADDR_LEN)
2897 			gotenaddr = 1;
2898 	}
2899 #endif
2900 
2901 	/*
2902 	 * Get station address from the EEPROM.
2903 	 */
2904 	if (!gotenaddr) {
2905 		mac_addr = bge_readmem_ind(sc, 0x0c14);
2906 		if ((mac_addr >> 16) == 0x484b) {
2907 			sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
2908 			sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
2909 			mac_addr = bge_readmem_ind(sc, 0x0c18);
2910 			sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
2911 			sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
2912 			sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
2913 			sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
2914 			gotenaddr = 1;
2915 		}
2916 	}
2917 	if (!gotenaddr) {
2918 		int mac_offset = BGE_EE_MAC_OFFSET;
2919 
2920 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2921 			mac_offset = BGE_EE_MAC_OFFSET_5906;
2922 
2923 		if (bge_read_nvram(sc, (caddr_t)&sc->arpcom.ac_enaddr,
2924 		    mac_offset + 2, ETHER_ADDR_LEN) == 0)
2925 			gotenaddr = 1;
2926 	}
2927 	if (!gotenaddr && (!(sc->bge_flags & BGE_NO_EEPROM))) {
2928 		if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
2929 		    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN) == 0)
2930 			gotenaddr = 1;
2931 	}
2932 
2933 #ifdef __sparc64__
2934 	if (!gotenaddr) {
2935 		extern void myetheraddr(u_char *);
2936 
2937 		myetheraddr(sc->arpcom.ac_enaddr);
2938 		gotenaddr = 1;
2939 	}
2940 #endif
2941 
2942 	if (!gotenaddr) {
2943 		printf(": failed to read station address\n");
2944 		goto fail_2;
2945 	}
2946 
2947 	/* Allocate the general information block and ring buffers. */
2948 	sc->bge_dmatag = pa->pa_dmat;
2949 	DPRINTFN(5, ("bus_dmamem_alloc\n"));
2950 	if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
2951 	    PAGE_SIZE, 0, &sc->bge_ring_seg, 1, &sc->bge_ring_nseg,
2952 	    BUS_DMA_NOWAIT)) {
2953 		printf(": can't alloc ring data\n");
2954 		goto fail_2;
2955 	}
2956 	DPRINTFN(5, ("bus_dmamem_map\n"));
2957 	if (bus_dmamem_map(sc->bge_dmatag, &sc->bge_ring_seg,
2958 	    sc->bge_ring_nseg, sizeof(struct bge_ring_data), &kva,
2959 	    BUS_DMA_NOWAIT)) {
2960 		printf(": can't map dma buffers (%lu bytes)\n",
2961 		    sizeof(struct bge_ring_data));
2962 		goto fail_3;
2963 	}
2964 	DPRINTFN(5, ("bus_dmamap_create\n"));
2965 	if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
2966 	    sizeof(struct bge_ring_data), 0,
2967 	    BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
2968 		printf(": can't create dma map\n");
2969 		goto fail_4;
2970 	}
2971 	DPRINTFN(5, ("bus_dmamap_load\n"));
2972 	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
2973 			    sizeof(struct bge_ring_data), NULL,
2974 			    BUS_DMA_NOWAIT)) {
2975 		goto fail_5;
2976 	}
2977 
2978 	DPRINTFN(5, ("bzero\n"));
2979 	sc->bge_rdata = (struct bge_ring_data *)kva;
2980 
2981 	bzero(sc->bge_rdata, sizeof(struct bge_ring_data));
2982 
2983 	/* Set default tuneable values. */
2984 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2985 	sc->bge_rx_coal_ticks = 150;
2986 	sc->bge_rx_max_coal_bds = 64;
2987 	sc->bge_tx_coal_ticks = 300;
2988 	sc->bge_tx_max_coal_bds = 400;
2989 
2990 	/* 5705 limits RX return ring to 512 entries. */
2991 	if (BGE_IS_5700_FAMILY(sc) || BGE_IS_5717_PLUS(sc))
2992 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2993 	else
2994 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2995 
2996 	/* Set up ifnet structure */
2997 	ifp = &sc->arpcom.ac_if;
2998 	ifp->if_softc = sc;
2999 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3000 	ifp->if_xflags = IFXF_MPSAFE;
3001 	ifp->if_ioctl = bge_ioctl;
3002 	ifp->if_qstart = bge_start;
3003 	ifp->if_watchdog = bge_watchdog;
3004 	ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
3005 
3006 	DPRINTFN(5, ("bcopy\n"));
3007 	bcopy(sc->bge_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
3008 
3009 	ifp->if_capabilities = IFCAP_VLAN_MTU;
3010 
3011 #if NVLAN > 0
3012 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
3013 #endif
3014 
3015 	/*
3016 	 * 5700 B0 chips do not support checksumming correctly due
3017 	 * to hardware bugs.
3018 	 *
3019 	 * It seems all controllers have a bug that can generate UDP
3020 	 * datagrams with a checksum value of 0 when TX UDP checksum
3021 	 * offloading is enabled.  Generating a UDP checksum value of 0
3022 	 * violates RFC 768.
3023 	 */
3024 	if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0)
3025 		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4;
3026 
3027 	if (BGE_IS_JUMBO_CAPABLE(sc))
3028 		ifp->if_hardmtu = BGE_JUMBO_MTU;
3029 
3030 	/*
3031 	 * Do MII setup.
3032 	 */
3033 	DPRINTFN(5, ("mii setup\n"));
3034 	sc->bge_mii.mii_ifp = ifp;
3035 	sc->bge_mii.mii_readreg = bge_miibus_readreg;
3036 	sc->bge_mii.mii_writereg = bge_miibus_writereg;
3037 	sc->bge_mii.mii_statchg = bge_miibus_statchg;
3038 
3039 	/*
3040 	 * Figure out what sort of media we have by checking the hardware
3041 	 * config word in the first 32K of internal NIC memory, or fall back to
3042 	 * examining the EEPROM if necessary.  Note: on some BCM5700 cards,
3043 	 * this value seems to be unset. If that's the case, we have to rely on
3044 	 * identifying the NIC by its PCI subsystem ID, as we do below for the
3045 	 * SysKonnect SK-9D41.
3046 	 */
3047 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
3048 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
3049 	else if (!(sc->bge_flags & BGE_NO_EEPROM)) {
3050 		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3051 		    sizeof(hwcfg))) {
3052 			printf(": failed to read media type\n");
3053 			goto fail_6;
3054 		}
3055 		hwcfg = ntohl(hwcfg);
3056 	}
3057 
3058 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
3059 	if (PCI_PRODUCT(subid) == SK_SUBSYSID_9D41 ||
3060 	    (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3061 		if (BGE_IS_5700_FAMILY(sc))
3062 		    sc->bge_flags |= BGE_FIBER_TBI;
3063 		else
3064 		    sc->bge_flags |= BGE_FIBER_MII;
3065 	}
3066 
3067 	/* Take advantage of single-shot MSI. */
3068 	if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_MSI)
3069 		CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3070 		    ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3071 
3072 	/* Hookup IRQ last. */
3073 	DPRINTFN(5, ("pci_intr_establish\n"));
3074 	sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE,
3075 	    bge_intr, sc, sc->bge_dev.dv_xname);
3076 	if (sc->bge_intrhand == NULL) {
3077 		printf(": couldn't establish interrupt");
3078 		if (intrstr != NULL)
3079 			printf(" at %s", intrstr);
3080 		printf("\n");
3081 		goto fail_6;
3082 	}
3083 
3084 	/*
3085 	 * A Broadcom chip was detected. Inform the world.
3086 	 */
3087 	printf(": %s, address %s\n", intrstr,
3088 	    ether_sprintf(sc->arpcom.ac_enaddr));
3089 
3090 	if (sc->bge_flags & BGE_FIBER_TBI) {
3091 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3092 		    bge_ifmedia_sts);
3093 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
3094 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
3095 			    0, NULL);
3096 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
3097 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
3098 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3099 	} else {
3100 		int mii_flags;
3101 
3102 		/*
3103 		 * Do transceiver setup.
3104 		 */
3105 		ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
3106 			     bge_ifmedia_sts);
3107 		mii_flags = MIIF_DOPAUSE;
3108 		if (sc->bge_flags & BGE_FIBER_MII)
3109 			mii_flags |= MIIF_HAVEFIBER;
3110 		mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff,
3111 		    sc->bge_phy_addr, MII_OFFSET_ANY, mii_flags);
3112 
3113 		if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) {
3114 			printf("%s: no PHY found!\n", sc->bge_dev.dv_xname);
3115 			ifmedia_add(&sc->bge_mii.mii_media,
3116 				    IFM_ETHER|IFM_MANUAL, 0, NULL);
3117 			ifmedia_set(&sc->bge_mii.mii_media,
3118 				    IFM_ETHER|IFM_MANUAL);
3119 		} else
3120 			ifmedia_set(&sc->bge_mii.mii_media,
3121 				    IFM_ETHER|IFM_AUTO);
3122 	}
3123 
3124 	/*
3125 	 * Call MI attach routine.
3126 	 */
3127 	if_attach(ifp);
3128 	ether_ifattach(ifp);
3129 
3130 	timeout_set(&sc->bge_timeout, bge_tick, sc);
3131 	timeout_set(&sc->bge_rxtimeout, bge_rxtick, sc);
3132 	timeout_set(&sc->bge_rxtimeout_jumbo, bge_rxtick_jumbo, sc);
3133 	return;
3134 
3135 fail_6:
3136 	bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map);
3137 
3138 fail_5:
3139 	bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
3140 
3141 fail_4:
3142 	bus_dmamem_unmap(sc->bge_dmatag, (caddr_t)sc->bge_rdata,
3143 	    sizeof(struct bge_ring_data));
3144 
3145 fail_3:
3146 	bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, sc->bge_ring_nseg);
3147 
3148 fail_2:
3149 	if ((sc->bge_flags & BGE_APE) != 0)
3150 		bus_space_unmap(sc->bge_apetag, sc->bge_apehandle,
3151 		    sc->bge_apesize);
3152 
3153 fail_1:
3154 	bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize);
3155 }
3156 
3157 int
3158 bge_detach(struct device *self, int flags)
3159 {
3160 	struct bge_softc *sc = (struct bge_softc *)self;
3161 	struct ifnet *ifp = &sc->arpcom.ac_if;
3162 
3163 	bge_stop(sc, 1);
3164 
3165 	if (sc->bge_intrhand)
3166 		pci_intr_disestablish(sc->bge_pa.pa_pc, sc->bge_intrhand);
3167 
3168 	/* Detach any PHYs we might have. */
3169 	if (LIST_FIRST(&sc->bge_mii.mii_phys) != NULL)
3170 		mii_detach(&sc->bge_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3171 
3172 	/* Delete any remaining media. */
3173 	ifmedia_delete_instance(&sc->bge_mii.mii_media, IFM_INST_ANY);
3174 
3175 	ether_ifdetach(ifp);
3176 	if_detach(ifp);
3177 
3178 	bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map);
3179 	bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
3180 	bus_dmamem_unmap(sc->bge_dmatag, (caddr_t)sc->bge_rdata,
3181 	    sizeof(struct bge_ring_data));
3182 	bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, sc->bge_ring_nseg);
3183 
3184 	if ((sc->bge_flags & BGE_APE) != 0)
3185 		bus_space_unmap(sc->bge_apetag, sc->bge_apehandle,
3186 		    sc->bge_apesize);
3187 
3188 	bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize);
3189 	return (0);
3190 }
3191 
3192 int
3193 bge_activate(struct device *self, int act)
3194 {
3195 	struct bge_softc *sc = (struct bge_softc *)self;
3196 	struct ifnet *ifp = &sc->arpcom.ac_if;
3197 	int rv = 0;
3198 
3199 	switch (act) {
3200 	case DVACT_SUSPEND:
3201 		rv = config_activate_children(self, act);
3202 		if (ifp->if_flags & IFF_RUNNING)
3203 			bge_stop(sc, 0);
3204 		break;
3205 	case DVACT_RESUME:
3206 		if (ifp->if_flags & IFF_UP)
3207 			bge_init(sc);
3208 		break;
3209 	default:
3210 		rv = config_activate_children(self, act);
3211 		break;
3212 	}
3213 	return (rv);
3214 }
3215 
3216 void
3217 bge_reset(struct bge_softc *sc)
3218 {
3219 	struct pci_attach_args *pa = &sc->bge_pa;
3220 	pcireg_t cachesize, command, devctl;
3221 	u_int32_t reset, mac_mode, mac_mode_mask, val;
3222 	void (*write_op)(struct bge_softc *, int, int);
3223 	int i;
3224 
3225 	mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
3226 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
3227 		mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
3228 	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;
3229 
3230 	if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
3231 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906) {
3232 		if (sc->bge_flags & BGE_PCIE)
3233 			write_op = bge_writembx;
3234 		else
3235 			write_op = bge_writemem_ind;
3236 	} else
3237 		write_op = bge_writereg_ind;
3238 
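	/*
	 * Acquire the NVRAM arbitration semaphore before resetting so
	 * we do not yank the chip out from under a concurrent firmware
	 * NVRAM access; not needed on the 5700/5701 or on chips
	 * without an EEPROM.
	 */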
3239 	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5700 &&
3240 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5701 &&
3241 	    !(sc->bge_flags & BGE_NO_EEPROM)) {
3242 		CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
3243 		for (i = 0; i < 8000; i++) {
3244 			if (CSR_READ_4(sc, BGE_NVRAM_SWARB) &
3245 			    BGE_NVRAMSWARB_GNT1)
3246 				break;
3247 			DELAY(20);
3248 		}
3249 		if (i == 8000)
3250 			printf("%s: nvram lock timed out\n",
3251 			    sc->bge_dev.dv_xname);
3252 	}
3253 	/* Take APE lock when performing reset. */
3254 	bge_ape_lock(sc, BGE_APE_LOCK_GRC);
3255 
3256 	/* Save some important PCI state. */
3257 	cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
3258 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD);
3259 
3260 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
3261 	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3262 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP | BGE_PCIMISCCTL_PCISTATE_RW);
3263 
3264 	/* Disable fastboot on controllers that support it. */
3265 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
3266 	    BGE_IS_5755_PLUS(sc))
3267 		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0);
3268 
3269 	/*
3270 	 * Write the magic number to SRAM at offset 0xB50.
3271 	 * When firmware finishes its initialization it will
3272 	 * write ~BGE_MAGIC_NUMBER to the same location.
3273 	 */
3274 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
3275 
3276 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
3277 
3278 	if (sc->bge_flags & BGE_PCIE) {
3279 		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
3280 		    !BGE_IS_5717_PLUS(sc)) {
3281 			if (CSR_READ_4(sc, 0x7e2c) == 0x60) {
3282 				/* PCI Express 1.0 system */
3283 				CSR_WRITE_4(sc, 0x7e2c, 0x20);
3284 			}
3285 		}
3286 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
3287 			/*
3288 			 * Prevent PCI Express link training
3289 			 * during global reset.
3290 			 */
3291 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
3292 			reset |= (1<<29);
3293 		}
3294 	}
3295 
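	/*
	 * The 5906 apparently uses its on-chip VCPU instead of the
	 * usual bootcode handshake: flag a driver reset and take the
	 * CPU out of halt so it can reinitialize itself.
	 */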
3296 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
3297 		val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3298 		CSR_WRITE_4(sc, BGE_VCPU_STATUS,
3299 		    val | BGE_VCPU_STATUS_DRV_RESET);
3300 		val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
3301 		CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
3302 		    val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
3303 
3304 		sc->bge_flags |= BGE_NO_EEPROM;
3305 	}
3306 
3307 	/*
3308 	 * Set GPHY Power Down Override to leave GPHY
3309 	 * powered up in D0 uninitialized.
3310 	 */
3311 	if (BGE_IS_5705_PLUS(sc) &&
3312 	    (sc->bge_flags & BGE_CPMU_PRESENT) == 0)
3313 		reset |= BGE_MISCCFG_KEEP_GPHY_POWER;
3314 
3315 	/* Issue global reset */
3316 	write_op(sc, BGE_MISC_CFG, reset);
3317 
3318 	if (sc->bge_flags & BGE_PCIE)
3319 		DELAY(100 * 1000);
3320 	else
3321 		DELAY(1000);
3322 
3323 	if (sc->bge_flags & BGE_PCIE) {
3324 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3325 			pcireg_t v;
3326 
3327 			DELAY(500000); /* wait for link training to complete */
3328 			v = pci_conf_read(pa->pa_pc, pa->pa_tag, 0xc4);
3329 			pci_conf_write(pa->pa_pc, pa->pa_tag, 0xc4, v | (1<<15));
3330 		}
3331 
3332 		devctl = pci_conf_read(pa->pa_pc, pa->pa_tag, sc->bge_expcap +
3333 		    PCI_PCIE_DCSR);
3334 		/* Clear enable no snoop and disable relaxed ordering. */
3335 		devctl &= ~(PCI_PCIE_DCSR_ERO | PCI_PCIE_DCSR_ENS);
3336 		/* Set PCI Express max payload size. */
3337 		devctl = (devctl & ~PCI_PCIE_DCSR_MPS) | sc->bge_expmrq;
3338 		/* Clear error status. */
3339 		devctl |= PCI_PCIE_DCSR_CEE | PCI_PCIE_DCSR_NFE |
3340 		    PCI_PCIE_DCSR_FEE | PCI_PCIE_DCSR_URE;
3341 		pci_conf_write(pa->pa_pc, pa->pa_tag, sc->bge_expcap +
3342 		    PCI_PCIE_DCSR, devctl);
3343 	}
3344 
3345 	/* Reset some of the PCI state that got zapped by reset */
3346 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
3347 	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3348 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP | BGE_PCIMISCCTL_PCISTATE_RW);
3349 	val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
3350 	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 &&
3351 	    (sc->bge_flags & BGE_PCIX) != 0)
3352 		val |= BGE_PCISTATE_RETRY_SAME_DMA;
3353 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
3354 		val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
3355 		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
3356 		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
3357 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, val);
3358 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
3359 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);
3360 
3361 	/* Re-enable MSI, if necessary, and enable memory arbiter. */
3362 	if (BGE_IS_5714_FAMILY(sc)) {
3363 		/* This chip disables MSI on reset. */
3364 		if (sc->bge_flags & BGE_MSI) {
3365 			val = pci_conf_read(pa->pa_pc, pa->pa_tag,
3366 			    sc->bge_msicap + PCI_MSI_MC);
3367 			pci_conf_write(pa->pa_pc, pa->pa_tag,
3368 			    sc->bge_msicap + PCI_MSI_MC,
3369 			    val | PCI_MSI_MC_MSIE);
3370 			val = CSR_READ_4(sc, BGE_MSI_MODE);
3371 			CSR_WRITE_4(sc, BGE_MSI_MODE,
3372 			    val | BGE_MSIMODE_ENABLE);
3373 		}
3374 		val = CSR_READ_4(sc, BGE_MARB_MODE);
3375 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
3376 	} else
3377 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3378 
3379 	/* Fix up byte swapping */
3380 	CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc));
3381 
3382 	val = CSR_READ_4(sc, BGE_MAC_MODE);
3383 	val = (val & ~mac_mode_mask) | mac_mode;
3384 	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
3385 	DELAY(40);
3386 
3387 	bge_ape_unlock(sc, BGE_APE_LOCK_GRC);
3388 
3389 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
3390 		for (i = 0; i < BGE_TIMEOUT; i++) {
3391 			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3392 			if (val & BGE_VCPU_STATUS_INIT_DONE)
3393 				break;
3394 			DELAY(100);
3395 		}
3396 
3397 		if (i >= BGE_TIMEOUT)
3398 			printf("%s: reset timed out\n", sc->bge_dev.dv_xname);
3399 	} else {
3400 		/*
3401 		 * Poll until we see 1's complement of the magic number.
3402 		 * This indicates that the firmware initialization
3403 		 * is complete.  We expect this to fail if no SEEPROM
3404 		 * is fitted.
3405 		 */
3406 		for (i = 0; i < BGE_TIMEOUT * 10; i++) {
3407 			val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
3408 			if (val == ~BGE_MAGIC_NUMBER)
3409 				break;
3410 			DELAY(10);
3411 		}
3412 
3413 		if ((i >= BGE_TIMEOUT * 10) &&
3414 		    (!(sc->bge_flags & BGE_NO_EEPROM)))
3415 			printf("%s: firmware handshake timed out\n",
3416 			    sc->bge_dev.dv_xname);
3417 		/* BCM57765 A0 needs additional time before accessing. */
3418 		if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
3419 			DELAY(10 * 1000);       /* XXX */
3420 	}
3421 
3422 	/*
3423 	 * The 5704 in TBI mode apparently needs some special
3424 	 * adjustment to ensure the SERDES drive level is set
3425 	 * to 1.2V.
3426 	 */
3427 	if (sc->bge_flags & BGE_FIBER_TBI &&
3428 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
3429 		val = CSR_READ_4(sc, BGE_SERDES_CFG);
3430 		val = (val & ~0xFFF) | 0x880;
3431 		CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
3432 	}
3433 
3434 	if (sc->bge_flags & BGE_PCIE &&
3435 	    !BGE_IS_5717_PLUS(sc) &&
3436 	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3437 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) {
3438 		/* Enable Data FIFO protection. */
3439 		val = CSR_READ_4(sc, 0x7c00);
3440 		CSR_WRITE_4(sc, 0x7c00, val | (1<<25));
3441 	}
3442 
3443 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
3444 		BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
3445 		    CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
3446 }
3447 
3448 /*
3449  * Frame reception handling. This is called if there's a frame
3450  * on the receive return list.
3451  *
3452  * Note: we have to be able to handle two possibilities here:
3453  * 1) the frame is from the jumbo receive ring
3454  * 2) the frame is from the standard receive ring
3455  */
3456 
3457 void
3458 bge_rxeof(struct bge_softc *sc)
3459 {
3460 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
3461 	struct ifnet *ifp;
3462 	uint16_t rx_prod, rx_cons;
3463 	int stdcnt = 0, jumbocnt = 0;
3464 	bus_dmamap_t dmamap;
3465 	bus_addr_t offset, toff;
3466 	bus_size_t tlen;
3467 	int tosync;
3468 	int livelocked;
3469 
3470 	rx_cons = sc->bge_rx_saved_considx;
3471 	rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx;
3472 
3473 	/* Nothing to do */
3474 	if (rx_cons == rx_prod)
3475 		return;
3476 
3477 	ifp = &sc->arpcom.ac_if;
3478 
3479 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3480 	    offsetof(struct bge_ring_data, bge_status_block),
3481 	    sizeof (struct bge_status_block),
3482 	    BUS_DMASYNC_POSTREAD);
3483 
3484 	offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
3485 	tosync = rx_prod - rx_cons;
3486 
3487 	toff = offset + (rx_cons * sizeof (struct bge_rx_bd));
3488 
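	/*
	 * The producer index may have wrapped past the end of the
	 * return ring; if so, sync the tail of the ring here and the
	 * leading part (from the ring base) below.
	 */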
3489 	if (tosync < 0) {
3490 		tlen = (sc->bge_return_ring_cnt - rx_cons) *
3491 		    sizeof (struct bge_rx_bd);
3492 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3493 		    toff, tlen, BUS_DMASYNC_POSTREAD);
3494 		tosync = -tosync;
3495 	}
3496 
3497 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3498 	    offset, tosync * sizeof (struct bge_rx_bd),
3499 	    BUS_DMASYNC_POSTREAD);
3500 
3501 	while (rx_cons != rx_prod) {
3502 		struct bge_rx_bd	*cur_rx;
3503 		u_int32_t		rxidx;
3504 		struct mbuf		*m = NULL;
3505 
3506 		cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons];
3507 
3508 		rxidx = cur_rx->bge_idx;
3509 		BGE_INC(rx_cons, sc->bge_return_ring_cnt);
3510 
3511 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3512 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3513 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
3514 
3515 			jumbocnt++;
3516 
3517 			dmamap = sc->bge_cdata.bge_rx_jumbo_map[rxidx];
3518 			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
3519 			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3520 			bus_dmamap_unload(sc->bge_dmatag, dmamap);
3521 
3522 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3523 				m_freem(m);
3524 				continue;
3525 			}
3526 		} else {
3527 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3528 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
3529 
3530 			stdcnt++;
3531 
3532 			dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
3533 			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
3534 			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3535 			bus_dmamap_unload(sc->bge_dmatag, dmamap);
3536 
3537 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3538 				m_freem(m);
3539 				continue;
3540 			}
3541 		}
3542 
3543 #ifdef __STRICT_ALIGNMENT
3544 		/*
3545 		 * The i386 allows unaligned accesses, but for other
3546 		 * platforms we must make sure the payload is aligned.
3547 		 */
3548 		if (sc->bge_flags & BGE_RX_ALIGNBUG) {
3549 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
3550 			    cur_rx->bge_len);
3551 			m->m_data += ETHER_ALIGN;
3552 		}
3553 #endif
3554 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3555 
3556 		bge_rxcsum(sc, cur_rx, m);
3557 
3558 #if NVLAN > 0
3559 		if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING &&
3560 		    cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3561 			m->m_pkthdr.ether_vtag = cur_rx->bge_vlan_tag;
3562 			m->m_flags |= M_VLANTAG;
3563 		}
3564 #endif
3565 
3566 		ml_enqueue(&ml, m);
3567 	}
3568 
3569 	sc->bge_rx_saved_considx = rx_cons;
3570 	bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3571 
3572 	livelocked = ifiq_input(&ifp->if_rcv, &ml);
3573 	if (stdcnt) {
3574 		if_rxr_put(&sc->bge_std_ring, stdcnt);
3575 		if (livelocked)
3576 			if_rxr_livelocked(&sc->bge_std_ring);
3577 		bge_fill_rx_ring_std(sc);
3578 	}
3579 	if (jumbocnt) {
3580 		if_rxr_put(&sc->bge_jumbo_ring, jumbocnt);
3581 		if (livelocked)
3582 			if_rxr_livelocked(&sc->bge_jumbo_ring);
3583 		bge_fill_rx_ring_jumbo(sc);
3584 	}
3585 }
3586 
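/*
 * Translate RX descriptor checksum results into mbuf csum_flags.
 * The 5700 B0 reports nothing useful, 5717 and newer parts carry
 * explicit error flags, and older parts expose the raw ones'
 * complement sums (0xFFFF means the checksum verified).
 */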
3587 void
3588 bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
3589 {
3590 	if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
3591 		/*
3592 		 * 5700 B0 chips do not support checksumming correctly due
3593 		 * to hardware bugs.
3594 		 */
3595 		return;
3596 	} else if (BGE_IS_5717_PLUS(sc)) {
3597 		if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
3598 			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM &&
3599 			    (cur_rx->bge_error_flag &
3600 			    BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
3601 				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
3602 
3603 			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
3604 				m->m_pkthdr.csum_flags |=
3605 				    M_TCP_CSUM_IN_OK|M_UDP_CSUM_IN_OK;
3606 			}
3607 		}
3608 	} else {
3609 		if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM &&
3610 		    cur_rx->bge_ip_csum == 0xFFFF)
3611 			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
3612 
3613 		if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3614 		    m->m_pkthdr.len >= ETHER_MIN_NOPAD &&
3615 		    cur_rx->bge_tcp_udp_csum == 0xFFFF) {
3616 			m->m_pkthdr.csum_flags |=
3617 			    M_TCP_CSUM_IN_OK|M_UDP_CSUM_IN_OK;
3618 		}
3619 	}
3620 }
3621 
3622 void
3623 bge_txeof(struct bge_softc *sc)
3624 {
3625 	struct bge_tx_bd *cur_tx = NULL;
3626 	struct ifnet *ifp;
3627 	bus_dmamap_t dmamap;
3628 	bus_addr_t offset, toff;
3629 	bus_size_t tlen;
3630 	int tosync, freed, txcnt;
3631 	u_int32_t cons, newcons;
3632 	struct mbuf *m;
3633 
3634 	/* Nothing to do */
3635 	cons = sc->bge_tx_saved_considx;
3636 	newcons = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx;
3637 	if (cons == newcons)
3638 		return;
3639 
3640 	ifp = &sc->arpcom.ac_if;
3641 
3642 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3643 	    offsetof(struct bge_ring_data, bge_status_block),
3644 	    sizeof (struct bge_status_block),
3645 	    BUS_DMASYNC_POSTREAD);
3646 
3647 	offset = offsetof(struct bge_ring_data, bge_tx_ring);
3648 	tosync = newcons - cons;
3649 
3650 	toff = offset + (cons * sizeof (struct bge_tx_bd));
3651 
3652 	if (tosync < 0) {
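	/*
	 * As in bge_rxeof(), the consumer range may wrap past the end
	 * of the TX ring; sync the tail piece first, then the rest
	 * from the ring base.
	 */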
3653 		tlen = (BGE_TX_RING_CNT - cons) * sizeof (struct bge_tx_bd);
3654 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3655 		    toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3656 		tosync = -tosync;
3657 	}
3658 
3659 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3660 	    offset, tosync * sizeof (struct bge_tx_bd),
3661 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3662 
3663 	/*
3664 	 * Go through our tx ring and free mbufs for those
3665 	 * frames that have been sent.
3666 	 */
3667 	freed = 0;
3668 	while (cons != newcons) {
3669 		cur_tx = &sc->bge_rdata->bge_tx_ring[cons];
3670 		m = sc->bge_cdata.bge_tx_chain[cons];
3671 		if (m != NULL) {
3672 			dmamap = sc->bge_cdata.bge_tx_map[cons];
3673 
3674 			sc->bge_cdata.bge_tx_chain[cons] = NULL;
3675 			sc->bge_cdata.bge_tx_map[cons] = NULL;
3676 			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
3677 			    dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3678 			bus_dmamap_unload(sc->bge_dmatag, dmamap);
3679 
3680 			m_freem(m);
3681 		}
3682 		freed++;
3683 		BGE_INC(cons, BGE_TX_RING_CNT);
3684 	}
3685 
3686 	txcnt = atomic_sub_int_nv(&sc->bge_txcnt, freed);
3687 
3688 	sc->bge_tx_saved_considx = cons;
3689 
3690 	if (ifq_is_oactive(&ifp->if_snd))
3691 		ifq_restart(&ifp->if_snd);
3692 	else if (txcnt == 0)
3693 		ifp->if_timer = 0;
3694 }
3695 
3696 int
3697 bge_intr(void *xsc)
3698 {
3699 	struct bge_softc *sc;
3700 	struct ifnet *ifp;
3701 	u_int32_t statusword, statustag;
3702 
3703 	sc = xsc;
3704 	ifp = &sc->arpcom.ac_if;
3705 
3706 	/* read status word from status block */
3707 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3708 	    offsetof(struct bge_ring_data, bge_status_block),
3709 	    sizeof (struct bge_status_block),
3710 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3711 
3712 	statusword = sc->bge_rdata->bge_status_block.bge_status;
3713 	statustag = sc->bge_rdata->bge_status_block.bge_status_tag << 24;
3714 
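	/*
	 * With tagged status the chip advances the tag on every status
	 * block update, so an unchanged tag plus a deasserted INTA line
	 * means this interrupt is not ours. Writing the tag back to the
	 * mailbox below acknowledges exactly the updates we have seen.
	 */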
3715 	if (sc->bge_flags & BGE_TAGGED_STATUS) {
3716 		if (sc->bge_lasttag == statustag &&
3717 		    (CSR_READ_4(sc, BGE_PCI_PCISTATE) &
3718 		     BGE_PCISTATE_INTR_NOT_ACTIVE))
3719 			return (0);
3720 		sc->bge_lasttag = statustag;
3721 	} else {
3722 		if (!(statusword & BGE_STATFLAG_UPDATED) &&
3723 		    (CSR_READ_4(sc, BGE_PCI_PCISTATE) &
3724 		     BGE_PCISTATE_INTR_NOT_ACTIVE))
3725 			return (0);
3726 		/* Ack interrupt and stop others from occurring. */
3727 		bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3728 		statustag = 0;
3729 	}
3730 
3731 	/* clear status word */
3732 	sc->bge_rdata->bge_status_block.bge_status = 0;
3733 
3734 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3735 	    offsetof(struct bge_ring_data, bge_status_block),
3736 	    sizeof (struct bge_status_block),
3737 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3738 
3739 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
3740 	    statusword & BGE_STATFLAG_LINKSTATE_CHANGED ||
3741 	    BGE_STS_BIT(sc, BGE_STS_LINK_EVT)) {
3742 		KERNEL_LOCK();
3743 		bge_link_upd(sc);
3744 		KERNEL_UNLOCK();
3745 	}
3746 
3747 	/* Re-enable interrupts. */
3748 	bge_writembx(sc, BGE_MBX_IRQ0_LO, statustag);
3749 
3750 	if (ifp->if_flags & IFF_RUNNING) {
3751 		/* Check RX return ring producer/consumer */
3752 		bge_rxeof(sc);
3753 
3754 		/* Check TX ring producer/consumer */
3755 		bge_txeof(sc);
3756 	}
3757 
3758 	return (1);
3759 }
3760 
3761 void
3762 bge_tick(void *xsc)
3763 {
3764 	struct bge_softc *sc = xsc;
3765 	struct mii_data *mii = &sc->bge_mii;
3766 	int s;
3767 
3768 	s = splnet();
3769 
3770 	if (BGE_IS_5705_PLUS(sc))
3771 		bge_stats_update_regs(sc);
3772 	else
3773 		bge_stats_update(sc);
3774 
3775 	if (sc->bge_flags & BGE_FIBER_TBI) {
3776 		/*
3777 		 * Since auto-polling can't be used in TBI mode, we have to
3778 		 * poll link status manually: register a pending link event
3779 		 * and trigger an interrupt.
3780 		 */
3781 		BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
3782 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3783 	} else {
3784 		/*
3785 		 * Do not touch the PHY if we have link up. This could break
3786 		 * IPMI/ASF mode or produce extra input errors
3787 		 * (extra input errors were reported for bcm5701 & bcm5704).
3788 		 */
3789 		if (!BGE_STS_BIT(sc, BGE_STS_LINK))
3790 			mii_tick(mii);
3791 	}
3792 
3793 	timeout_add_sec(&sc->bge_timeout, 1);
3794 
3795 	splx(s);
3796 }
3797 
3798 void
3799 bge_stats_update_regs(struct bge_softc *sc)
3800 {
3801 	struct ifnet *ifp = &sc->arpcom.ac_if;
3802 
3803 	sc->bge_tx_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
3804 	    offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
3805 
3806 	sc->bge_rx_overruns += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
3807 
3808 	/*
3809 	 * XXX
3810 	 * Unlike other controllers, the BGE_RXLP_LOCSTAT_IFIN_DROPS counter
3811 	 * of the BCM5717, BCM5718, BCM5762, BCM5719 A0 and BCM5720 A0
3812 	 * controllers includes the number of unwanted multicast frames.
3813 	 * This comes from a silicon bug, and the known workaround to get a
3814 	 * rough (not exact) counter is to enable interrupt on MBUF low
3815 	 * watermark attention. This can be accomplished by setting the
3816 	 * BGE_HCCMODE_ATTN bit of BGE_HDD_MODE, the BGE_BMANMODE_LOMBUF_ATTN
3817 	 * bit of BGE_BMAN_MODE and the BGE_MODECTL_FLOWCTL_ATTN_INTR bit of
3818 	 * BGE_MODE_CTL. However, that change would generate more interrupts
3819 	 * and there would still be a chance of losing multiple frames during
3820 	 * BGE_MODECTL_FLOWCTL_ATTN_INTR interrupt handling. Given that the
3821 	 * workaround still would not yield a correct counter, it does not
3822 	 * seem worth implementing. So skip reading the counter on
3823 	 * controllers that have the silicon bug.
3824 	 */
3825 	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
3826 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5762 &&
3827 	    sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
3828 	    sc->bge_chipid != BGE_CHIPID_BCM5720_A0)
3829 		sc->bge_rx_discards += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3830 
3831 	sc->bge_rx_inerrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
3832 
3833 	ifp->if_collisions = sc->bge_tx_collisions;
3834 	ifp->if_ierrors = sc->bge_rx_discards + sc->bge_rx_inerrors;
3835 
3836 	if (sc->bge_flags & BGE_RDMA_BUG) {
3837 		u_int32_t val, ucast, mcast, bcast;
3838 
3839 		ucast = CSR_READ_4(sc, BGE_MAC_STATS +
3840 		    offsetof(struct bge_mac_stats_regs, ifHCOutUcastPkts));
3841 		mcast = CSR_READ_4(sc, BGE_MAC_STATS +
3842 		    offsetof(struct bge_mac_stats_regs, ifHCOutMulticastPkts));
3843 		bcast = CSR_READ_4(sc, BGE_MAC_STATS +
3844 		    offsetof(struct bge_mac_stats_regs, ifHCOutBroadcastPkts));
3845 
3846 		/*
3847 		 * If controller transmitted more than BGE_NUM_RDMA_CHANNELS
3848 		 * frames, it's safe to disable workaround for DMA engine's
3849 		 * miscalculation of TXMBUF space.
3850 		 */
3851 		if (ucast + mcast + bcast > BGE_NUM_RDMA_CHANNELS) {
3852 			val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
3853 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
3854 				val &= ~BGE_RDMA_TX_LENGTH_WA_5719;
3855 			else
3856 				val &= ~BGE_RDMA_TX_LENGTH_WA_5720;
3857 			CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
3858 			sc->bge_flags &= ~BGE_RDMA_BUG;
3859 		}
3860 	}
3861 }
3862 
3863 void
3864 bge_stats_update(struct bge_softc *sc)
3865 {
3866 	struct ifnet *ifp = &sc->arpcom.ac_if;
3867 	bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3868 	u_int32_t cnt;
3869 
3870 #define READ_STAT(sc, stats, stat) \
3871 	  CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
3872 
3873 	cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
3874 	ifp->if_collisions += (u_int32_t)(cnt - sc->bge_tx_collisions);
3875 	sc->bge_tx_collisions = cnt;
3876 
3877 	cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo);
3878 	sc->bge_rx_overruns = cnt;
3879 	cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo);
3880 	ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_inerrors);
3881 	sc->bge_rx_inerrors = cnt;
3882 	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
3883 	ifp->if_ierrors += (u_int32_t)(cnt - sc->bge_rx_discards);
3884 	sc->bge_rx_discards = cnt;
3885 
3886 	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
3887 	ifp->if_oerrors += (u_int32_t)(cnt - sc->bge_tx_discards);
3888 	sc->bge_tx_discards = cnt;
3889 
3890 #undef READ_STAT
3891 }
3892 
3893 /*
3894  * Compact outbound packets to avoid bug with DMA segments less than 8 bytes.
3895  */
3896 int
3897 bge_compact_dma_runt(struct mbuf *pkt)
3898 {
3899 	struct mbuf	*m, *prev, *n = NULL;
3900 	int 		totlen, newprevlen;
3901 
3902 	prev = NULL;
3903 	totlen = 0;
3904 
3905 	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
3906 		int mlen = m->m_len;
3907 		int shortfall = 8 - mlen;
3908 
3909 		totlen += mlen;
3910 		if (mlen == 0)
3911 			continue;
3912 		if (mlen >= 8)
3913 			continue;
3914 
3915 		/* If we get here, mbuf data is too small for DMA engine.
3916 		 * Try to fix by shuffling data to prev or next in chain.
3917 		 * If that fails, do a compacting deep-copy of the whole chain.
3918 		 */
3919 
3920 		/* Internal frag. If fits in prev, copy it there. */
3921 		if (prev && m_trailingspace(prev) >= m->m_len) {
3922 			bcopy(m->m_data, prev->m_data+prev->m_len, mlen);
3923 			prev->m_len += mlen;
3924 			m->m_len = 0;
3925 			/* XXX stitch chain */
3926 			prev->m_next = m_free(m);
3927 			m = prev;
3928 			continue;
3929 		} else if (m->m_next != NULL &&
3930 			   m_trailingspace(m) >= shortfall &&
3931 			   m->m_next->m_len >= (8 + shortfall)) {
3932 			/* m is writable and next has enough data; pull up. */
3933 
3934 			bcopy(m->m_next->m_data, m->m_data+m->m_len, shortfall);
3935 			m->m_len += shortfall;
3936 			m->m_next->m_len -= shortfall;
3937 			m->m_next->m_data += shortfall;
3938 		} else {
3939 			/* A runt we could not fix up in place, usually at
3940 			 * the very end of the packet: borrow data from the
3941 			 * tail of the preceding mbuf and update its length
3942 			 * in-place. (The original data is still valid, so
3943 			 * we can do this even if prev is not writable.) */
3944 
3945 			/* if we'd make prev a runt, just move all of its data. */
3946 #ifdef DEBUG
3947 			KASSERT(prev != NULL /*, ("runt but null PREV")*/);
3948 			KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
3949 #endif
3950 			if ((prev->m_len - shortfall) < 8)
3951 				shortfall = prev->m_len;
3952 
3953 			newprevlen = prev->m_len - shortfall;
3954 
3955 			MGET(n, M_NOWAIT, MT_DATA);
3956 			if (n == NULL)
3957 				return (ENOBUFS);
3958 			KASSERT(m->m_len + shortfall < MLEN
3959 				/*,
3960 				  ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
3961 
3962 			/* first copy the data we're stealing from prev */
3963 			bcopy(prev->m_data + newprevlen, n->m_data, shortfall);
3964 
3965 			/* update prev->m_len accordingly */
3966 			prev->m_len -= shortfall;
3967 
3968 			/* copy data from runt m */
3969 			bcopy(m->m_data, n->m_data + shortfall, m->m_len);
3970 
3971 			/* n holds what we stole from prev, plus m */
3972 			n->m_len = shortfall + m->m_len;
3973 
3974 			/* stitch n into chain and free m */
3975 			n->m_next = m->m_next;
3976 			prev->m_next = n;
3977 			/* KASSERT(m->m_next == NULL); */
3978 			m->m_next = NULL;
3979 			m_free(m);
3980 			m = n;	/* for continuing loop */
3981 		}
3982 	}
3983 	return (0);
3984 }
3985 
3986 /*
3987  * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3988  * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3989  * but when such padded frames employ the bge IP/TCP checksum offload,
3990  * the hardware checksum assist gives incorrect results (possibly
3991  * from incorporating its own padding into the UDP/TCP checksum; who knows).
3992  * If we pad such runts with zeros, the onboard checksum comes out correct.
3993  */
3994 int
3995 bge_cksum_pad(struct mbuf *m)
3996 {
3997 	int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
3998 	struct mbuf *last;
3999 
4000 	/* If there's only the packet-header and we can pad there, use it. */
4001 	if (m->m_pkthdr.len == m->m_len && m_trailingspace(m) >= padlen) {
4002 		last = m;
4003 	} else {
4004 		/*
4005 		 * Walk packet chain to find last mbuf. We will either
4006 		 * pad there, or append a new mbuf and pad it.
4007 		 */
4008 		for (last = m; last->m_next != NULL; last = last->m_next);
4009 		if (m_trailingspace(last) < padlen) {
4010 			/* Allocate new empty mbuf, pad it. Compact later. */
4011 			struct mbuf *n;
4012 
4013 			MGET(n, M_DONTWAIT, MT_DATA);
4014 			if (n == NULL)
4015 				return (ENOBUFS);
4016 			n->m_len = 0;
4017 			last->m_next = n;
4018 			last = n;
4019 		}
4020 	}
4021 
4022 	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
4023 	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
4024 	last->m_len += padlen;
4025 	m->m_pkthdr.len += padlen;
4026 
4027 	return (0);
4028 }
4029 
4030 /*
4031  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4032  * pointers to descriptors.
4033  */
4034 int
4035 bge_encap(struct bge_softc *sc, struct mbuf *m, int *txinc)
4036 {
4037 	struct bge_tx_bd	*f = NULL;
4038 	u_int32_t		frag, cur;
4039 	u_int16_t		csum_flags = 0;
4040 	bus_dmamap_t		dmamap;
4041 	int			i = 0;
4042 
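	/*
	 * Descriptors for earlier packets in this batch have already
	 * been reserved via *txinc, so start at the first slot past
	 * them, modulo the ring size.
	 */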
4043 	cur = frag = (sc->bge_tx_prodidx + *txinc) % BGE_TX_RING_CNT;
4044 
4045 	if (m->m_pkthdr.csum_flags) {
4046 		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
4047 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4048 		if (m->m_pkthdr.csum_flags &
4049 		    (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) {
4050 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4051 			if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
4052 			    bge_cksum_pad(m) != 0)
4053 				return (ENOBUFS);
4054 		}
4055 	}
4056 
4057 	if (sc->bge_flags & BGE_JUMBO_FRAME &&
4058 	    m->m_pkthdr.len > ETHER_MAX_LEN)
4059 		csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
4060 
4061 	if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5700_BX)
4062 		goto doit;
4063 
4064 	/*
4065 	 * bcm5700 Revision B silicon cannot handle DMA descriptors with
4066 	 * less than eight bytes.  If we encounter a teeny mbuf
4067 	 * at the end of a chain, we can pad.  Otherwise, copy.
4068 	 */
4069 	if (bge_compact_dma_runt(m) != 0)
4070 		return (ENOBUFS);
4071 
4072 doit:
4073 	dmamap = sc->bge_txdma[cur];
4074 
4075 	/*
4076 	 * Start packing the mbufs in this chain into
4077 	 * the fragment pointers. Stop when we run out
4078 	 * of fragments or hit the end of the mbuf chain.
4079 	 */
4080 	switch (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m,
4081 	    BUS_DMA_NOWAIT)) {
4082 	case 0:
4083 		break;
4084 	case EFBIG:
4085 		if (m_defrag(m, M_DONTWAIT) == 0 &&
4086 		    bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m,
4087 		     BUS_DMA_NOWAIT) == 0)
4088 			break;
4089 
4090 		/* FALLTHROUGH */
4091 	default:
4092 		return (ENOBUFS);
4093 	}
4094 
4095 	for (i = 0; i < dmamap->dm_nsegs; i++) {
4096 		f = &sc->bge_rdata->bge_tx_ring[frag];
4097 		if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
4098 			break;
4099 		BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
4100 		f->bge_len = dmamap->dm_segs[i].ds_len;
4101 		f->bge_flags = csum_flags;
4102 		f->bge_vlan_tag = 0;
4103 #if NVLAN > 0
4104 		if (m->m_flags & M_VLANTAG) {
4105 			f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
4106 			f->bge_vlan_tag = m->m_pkthdr.ether_vtag;
4107 		}
4108 #endif
4109 		cur = frag;
4110 		BGE_INC(frag, BGE_TX_RING_CNT);
4111 	}
4112 
4113 	if (i < dmamap->dm_nsegs)
4114 		goto fail_unload;
4115 
4116 	if (frag == sc->bge_tx_saved_considx)
4117 		goto fail_unload;
4118 
4119 	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
4120 	    BUS_DMASYNC_PREWRITE);
4121 
4122 	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
4123 	sc->bge_cdata.bge_tx_chain[cur] = m;
4124 	sc->bge_cdata.bge_tx_map[cur] = dmamap;
4125 
4126 	*txinc += dmamap->dm_nsegs;
4127 
4128 	return (0);
4129 
4130 fail_unload:
4131 	bus_dmamap_unload(sc->bge_dmatag, dmamap);
4132 
4133 	return (ENOBUFS);
4134 }
4135 
4136 /*
4137  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4138  * to the mbuf data regions directly in the transmit descriptors.
4139  */
4140 void
4141 bge_start(struct ifqueue *ifq)
4142 {
4143 	struct ifnet *ifp = ifq->ifq_if;
4144 	struct bge_softc *sc = ifp->if_softc;
4145 	struct mbuf *m;
4146 	int txinc;
4147 
4148 	if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
4149 		ifq_purge(ifq);
4150 		return;
4151 	}
4152 
4153 	txinc = 0;
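	/*
	 * Keep enough descriptors free for a maximally fragmented
	 * packet (BGE_NTXSEG segments) plus a little slack; once we
	 * dip below that, mark the queue oactive and let bge_txeof()
	 * restart it.
	 */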
4154 	while (1) {
4155 		/* Check if we have enough free send BDs. */
4156 		if (sc->bge_txcnt + txinc + BGE_NTXSEG + 16 >=
4157 		    BGE_TX_RING_CNT) {
4158 			ifq_set_oactive(ifq);
4159 			break;
4160 		}
4161 
4162 		m = ifq_dequeue(ifq);
4163 		if (m == NULL)
4164 			break;
4165 
4166 		if (bge_encap(sc, m, &txinc) != 0) {
4167 			m_freem(m);
4168 			continue;
4169 		}
4170 
4171 #if NBPFILTER > 0
4172 		if (ifp->if_bpf)
4173 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
4174 #endif
4175 	}
4176 
4177 	if (txinc != 0) {
4178 		/* Transmit */
4179 		sc->bge_tx_prodidx = (sc->bge_tx_prodidx + txinc) %
4180 		    BGE_TX_RING_CNT;
4181 		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
4182 		if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
4183 			bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO,
4184 			    sc->bge_tx_prodidx);
4185 
4186 		atomic_add_int(&sc->bge_txcnt, txinc);
4187 
4188 		/*
4189 		 * Set a timeout in case the chip goes out to lunch.
4190 		 */
4191 		ifp->if_timer = 5;
4192 	}
4193 }
4194 
4195 void
4196 bge_init(void *xsc)
4197 {
4198 	struct bge_softc *sc = xsc;
4199 	struct ifnet *ifp;
4200 	u_int16_t *m;
4201 	u_int32_t mode;
4202 	int s;
4203 
4204 	s = splnet();
4205 
4206 	ifp = &sc->arpcom.ac_if;
4207 
4208 	/* Cancel pending I/O and flush buffers. */
4209 	bge_stop(sc, 0);
4210 	bge_sig_pre_reset(sc, BGE_RESET_START);
4211 	bge_reset(sc);
4212 	bge_sig_legacy(sc, BGE_RESET_START);
4213 	bge_sig_post_reset(sc, BGE_RESET_START);
4214 
4215 	bge_chipinit(sc);
4216 
4217 	/*
4218 	 * Init the various state machines, ring
4219 	 * control blocks and firmware.
4220 	 */
4221 	if (bge_blockinit(sc)) {
4222 		printf("%s: initialization failure\n", sc->bge_dev.dv_xname);
4223 		splx(s);
4224 		return;
4225 	}
4226 
4227 	/* Specify MRU. */
4228 	if (BGE_IS_JUMBO_CAPABLE(sc))
4229 		CSR_WRITE_4(sc, BGE_RX_MTU,
4230 			BGE_JUMBO_FRAMELEN + ETHER_VLAN_ENCAP_LEN);
4231 	else
4232 		CSR_WRITE_4(sc, BGE_RX_MTU,
4233 			ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
4234 
4235 	/* Load our MAC address. */
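	/*
	 * The address is written as three big-endian 16-bit words:
	 * the first word goes to BGE_MAC_ADDR1_LO, the other two are
	 * packed into BGE_MAC_ADDR1_HI.
	 */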
4236 	m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
4237 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4238 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4239 
4240 	if (!(ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)) {
4241 		/* Disable hardware decapsulation of VLAN frames. */
4242 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
4243 	}
4244 
4245 	/* Program promiscuous mode and multicast filters. */
4246 	bge_iff(sc);
4247 
4248 	/* Init RX ring. */
4249 	bge_init_rx_ring_std(sc);
4250 
4251 	/*
4252 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
4253 	 * memory to ensure that the chip has in fact read the first
4254 	 * entry of the ring.
4255 	 */
4256 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4257 		u_int32_t		v, i;
4258 		for (i = 0; i < 10; i++) {
4259 			DELAY(20);
4260 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4261 			if (v == (MCLBYTES - ETHER_ALIGN))
4262 				break;
4263 		}
4264 		if (i == 10)
4265 			printf("%s: 5705 A0 chip failed to load RX ring\n",
4266 			    sc->bge_dev.dv_xname);
4267 	}
4268 
4269 	/* Init Jumbo RX ring. */
4270 	if (sc->bge_flags & BGE_JUMBO_RING)
4271 		bge_init_rx_ring_jumbo(sc);
4272 
4273 	/* Init our RX return ring index */
4274 	sc->bge_rx_saved_considx = 0;
4275 
4276 	/* Init our RX/TX stat counters. */
4277 	sc->bge_tx_collisions = 0;
4278 	sc->bge_rx_discards = 0;
4279 	sc->bge_rx_inerrors = 0;
4280 	sc->bge_rx_overruns = 0;
4281 	sc->bge_tx_discards = 0;
4282 
4283 	/* Init TX ring. */
4284 	bge_init_tx_ring(sc);
4285 
4286 	/* Enable TX MAC state machine lockup fix. */
4287 	mode = CSR_READ_4(sc, BGE_TX_MODE);
4288 	if (BGE_IS_5755_PLUS(sc) ||
4289 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
4290 		mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
4291 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
4292 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
4293 		mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
4294 		mode |= CSR_READ_4(sc, BGE_TX_MODE) &
4295 		    (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
4296 	}
4297 
4298 	/* Turn on transmitter */
4299 	CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
4300 	DELAY(100);
4301 
4302 	mode = CSR_READ_4(sc, BGE_RX_MODE);
4303 	if (BGE_IS_5755_PLUS(sc))
4304 		mode |= BGE_RXMODE_IPV6_ENABLE;
4305 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
4306 		mode |= BGE_RXMODE_IPV4_FRAG_FIX;
4307 
4308 	/* Turn on receiver */
4309 	CSR_WRITE_4(sc, BGE_RX_MODE, mode | BGE_RXMODE_ENABLE);
4310 	DELAY(10);
4311 
4312 	/*
4313 	 * Set the number of good frames to receive after RX MBUF
4314 	 * Low Watermark has been reached. After the RX MAC receives
4315 	 * this number of frames, it will drop subsequent incoming
4316 	 * frames until the MBUF High Watermark is reached.
4317 	 */
4318 	if (BGE_IS_57765_PLUS(sc))
4319 		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
4320 	else
4321 		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4322 
4323 	/* Tell firmware we're alive. */
4324 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4325 
4326 	/* Enable host interrupts. */
4327 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4328 	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4329 	bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4330 
4331 	bge_ifmedia_upd(ifp);
4332 
4333 	ifp->if_flags |= IFF_RUNNING;
4334 	ifq_clr_oactive(&ifp->if_snd);
4335 
4336 	splx(s);
4337 
4338 	timeout_add_sec(&sc->bge_timeout, 1);
4339 }
4340 
4341 /*
4342  * Set media options.
4343  */
4344 int
4345 bge_ifmedia_upd(struct ifnet *ifp)
4346 {
4347 	struct bge_softc *sc = ifp->if_softc;
4348 	struct mii_data *mii = &sc->bge_mii;
4349 	struct ifmedia *ifm = &sc->bge_ifmedia;
4350 
4351 	/* If this is a 1000baseX NIC, enable the TBI port. */
4352 	if (sc->bge_flags & BGE_FIBER_TBI) {
4353 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4354 			return (EINVAL);
4355 		switch (IFM_SUBTYPE(ifm->ifm_media)) {
4356 		case IFM_AUTO:
4357 			/*
4358 			 * The BCM5704 ASIC appears to have a special
4359 			 * mechanism for programming the autoneg
4360 			 * advertisement registers in TBI mode.
4361 			 */
4362 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
4363 				u_int32_t sgdig;
4364 				sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4365 				if (sgdig & BGE_SGDIGSTS_DONE) {
4366 					CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4367 					sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4368 					sgdig |= BGE_SGDIGCFG_AUTO |
4369 					    BGE_SGDIGCFG_PAUSE_CAP |
4370 					    BGE_SGDIGCFG_ASYM_PAUSE;
4371 					CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4372 					    sgdig | BGE_SGDIGCFG_SEND);
4373 					DELAY(5);
4374 					CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4375 				}
4376 			}
4377 			break;
4378 		case IFM_1000_SX:
4379 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4380 				BGE_CLRBIT(sc, BGE_MAC_MODE,
4381 				    BGE_MACMODE_HALF_DUPLEX);
4382 			} else {
4383 				BGE_SETBIT(sc, BGE_MAC_MODE,
4384 				    BGE_MACMODE_HALF_DUPLEX);
4385 			}
4386 			DELAY(40);
4387 			break;
4388 		default:
4389 			return (EINVAL);
4390 		}
4391 		/* XXX 802.3x flow control for 1000BASE-SX */
4392 		return (0);
4393 	}
4394 
4395 	BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
4396 	if (mii->mii_instance) {
4397 		struct mii_softc *miisc;
4398 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4399 			mii_phy_reset(miisc);
4400 	}
4401 	mii_mediachg(mii);
4402 
4403 	/*
4404 	 * Force an interrupt so that we will call bge_link_upd
4405 	 * if needed and clear any pending link state attention.
4406 	 * Without this we would get no further interrupts for link
4407 	 * state changes, the link would never come UP and we could
4408 	 * not send in bge_start. The only other way to get things
4409 	 * working was to receive a packet and get an RX intr.
4410 	 */
4411 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
4412 	    sc->bge_flags & BGE_IS_5788)
4413 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4414 	else
4415 		BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4416 
4417 	return (0);
4418 }
4419 
4420 /*
4421  * Report current media status.
4422  */
4423 void
4424 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4425 {
4426 	struct bge_softc *sc = ifp->if_softc;
4427 	struct mii_data *mii = &sc->bge_mii;
4428 
4429 	if (sc->bge_flags & BGE_FIBER_TBI) {
4430 		ifmr->ifm_status = IFM_AVALID;
4431 		ifmr->ifm_active = IFM_ETHER;
4432 		if (CSR_READ_4(sc, BGE_MAC_STS) &
4433 		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
4434 			ifmr->ifm_status |= IFM_ACTIVE;
4435 		} else {
4436 			ifmr->ifm_active |= IFM_NONE;
4437 			return;
4438 		}
4439 		ifmr->ifm_active |= IFM_1000_SX;
4440 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4441 			ifmr->ifm_active |= IFM_HDX;
4442 		else
4443 			ifmr->ifm_active |= IFM_FDX;
4444 		return;
4445 	}
4446 
4447 	mii_pollstat(mii);
4448 	ifmr->ifm_status = mii->mii_media_status;
4449 	ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
4450 	    sc->bge_flowflags;
4451 }
4452 
4453 int
4454 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4455 {
4456 	struct bge_softc *sc = ifp->if_softc;
4457 	struct ifreq *ifr = (struct ifreq *) data;
4458 	int s, error = 0;
4459 	struct mii_data *mii;
4460 
4461 	s = splnet();
4462 
4463 	switch (command) {
4464 	case SIOCSIFADDR:
4465 		ifp->if_flags |= IFF_UP;
4466 		if (!(ifp->if_flags & IFF_RUNNING))
4467 			bge_init(sc);
4468 		break;
4469 
4470 	case SIOCSIFFLAGS:
4471 		if (ifp->if_flags & IFF_UP) {
4472 			if (ifp->if_flags & IFF_RUNNING)
4473 				error = ENETRESET;
4474 			else
4475 				bge_init(sc);
4476 		} else {
4477 			if (ifp->if_flags & IFF_RUNNING)
4478 				bge_stop(sc, 0);
4479 		}
4480 		break;
4481 
4482 	case SIOCSIFMEDIA:
4483 		/* XXX Flow control is not supported for 1000BASE-SX */
4484 		if (sc->bge_flags & BGE_FIBER_TBI) {
4485 			ifr->ifr_media &= ~IFM_ETH_FMASK;
4486 			sc->bge_flowflags = 0;
4487 		}
4488 
4489 		/* Flow control requires full-duplex mode. */
4490 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
4491 		    (ifr->ifr_media & IFM_FDX) == 0) {
4492 			ifr->ifr_media &= ~IFM_ETH_FMASK;
4493 		}
4494 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
4495 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
4496 				/* We can do both TXPAUSE and RXPAUSE. */
4497 				ifr->ifr_media |=
4498 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
4499 			}
4500 			sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
4501 		}
4502 		/* FALLTHROUGH */
4503 	case SIOCGIFMEDIA:
4504 		if (sc->bge_flags & BGE_FIBER_TBI) {
4505 			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
4506 			    command);
4507 		} else {
4508 			mii = &sc->bge_mii;
4509 			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
4510 			    command);
4511 		}
4512 		break;
4513 
4514 	case SIOCGIFRXR:
4515 		error = bge_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
4516 		break;
4517 
4518 	default:
4519 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
4520 	}
4521 
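	/*
	 * ENETRESET from the cases above just means the RX filter
	 * needs reprogramming; bge_iff() is enough, no full re-init.
	 */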
4522 	if (error == ENETRESET) {
4523 		if (ifp->if_flags & IFF_RUNNING)
4524 			bge_iff(sc);
4525 		error = 0;
4526 	}
4527 
4528 	splx(s);
4529 	return (error);
4530 }
4531 
4532 int
4533 bge_rxrinfo(struct bge_softc *sc, struct if_rxrinfo *ifri)
4534 {
4535 	struct if_rxring_info ifr[2];
4536 	u_int n = 0;
4537 
4538 	memset(ifr, 0, sizeof(ifr));
4539 
4540 	if (ISSET(sc->bge_flags, BGE_RXRING_VALID)) {
4541 		ifr[n].ifr_size = sc->bge_rx_std_len;
4542 		strlcpy(ifr[n].ifr_name, "std", sizeof(ifr[n].ifr_name));
4543 		ifr[n].ifr_info = sc->bge_std_ring;
4544 
4545 		n++;
4546 	}
4547 
4548 	if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID)) {
4549 		ifr[n].ifr_size = BGE_JLEN;
4550 		strlcpy(ifr[n].ifr_name, "jumbo", sizeof(ifr[n].ifr_name));
4551 		ifr[n].ifr_info = sc->bge_jumbo_ring;
4552 
4553 		n++;
4554 	}
4555 
4556 	return (if_rxr_info_ioctl(ifri, n, ifr));
4557 }
4558 
4559 void
4560 bge_watchdog(struct ifnet *ifp)
4561 {
4562 	struct bge_softc *sc;
4563 
4564 	sc = ifp->if_softc;
4565 
4566 	printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);
4567 
4568 	bge_init(sc);
4569 
4570 	ifp->if_oerrors++;
4571 }
4572 
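/*
 * Clear an enable bit in a state machine's mode register and busy-wait
 * for the hardware to acknowledge by clearing the bit.
 */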
4573 void
4574 bge_stop_block(struct bge_softc *sc, bus_size_t reg, u_int32_t bit)
4575 {
4576 	int i;
4577 
4578 	BGE_CLRBIT(sc, reg, bit);
4579 
4580 	for (i = 0; i < BGE_TIMEOUT; i++) {
4581 		if ((CSR_READ_4(sc, reg) & bit) == 0)
4582 			return;
4583 		delay(100);
4584 	}
4585 
4586 	DPRINTFN(5, ("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
4587 	    sc->bge_dev.dv_xname, (u_long) reg, bit));
4588 }
4589 
4590 /*
4591  * Stop the adapter and free any mbufs allocated to the
4592  * RX and TX lists.
4593  */
4594 void
4595 bge_stop(struct bge_softc *sc, int softonly)
4596 {
4597 	struct ifnet *ifp = &sc->arpcom.ac_if;
4598 	struct ifmedia_entry *ifm;
4599 	struct mii_data *mii;
4600 	int mtmp, itmp;
4601 
4602 	timeout_del(&sc->bge_timeout);
4603 	timeout_del(&sc->bge_rxtimeout);
4604 	timeout_del(&sc->bge_rxtimeout_jumbo);
4605 
4606 	ifp->if_flags &= ~IFF_RUNNING;
4607 	ifp->if_timer = 0;
4608 
4609 	if (!softonly) {
4610 		/*
4611 		 * Tell firmware we're shutting down.
4612 		 */
4613 		/* bge_stop_fw(sc); */
4614 		bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
4615 
4616 		/*
4617 		 * Disable all of the receiver blocks
4618 		 */
4619 		bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4620 		bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
4621 		bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
4622 		if (BGE_IS_5700_FAMILY(sc))
4623 			bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
4624 		bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
4625 		bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
4626 		bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
4627 
4628 		/*
4629 		 * Disable all of the transmit blocks
4630 		 */
4631 		bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
4632 		bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
4633 		bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
4634 		bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
4635 		bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
4636 		if (BGE_IS_5700_FAMILY(sc))
4637 			bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
4638 		bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
4639 
4640 		/*
4641 		 * Shut down all of the memory managers and related
4642 		 * state machines.
4643 		 */
4644 		bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
4645 		bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
4646 		if (BGE_IS_5700_FAMILY(sc))
4647 			bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
4648 
4649 		CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
4650 		CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
4651 
4652 		if (!BGE_IS_5705_PLUS(sc)) {
4653 			bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
4654 			bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4655 		}
4656 
4657 		bge_reset(sc);
4658 		bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
4659 		bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
4660 
4661 		/*
4662 		 * Tell firmware we're shutting down.
4663 		 */
4664 		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4665 	}
4666 
4667 	intr_barrier(sc->bge_intrhand);
4668 	ifq_barrier(&ifp->if_snd);
4669 
4670 	ifq_clr_oactive(&ifp->if_snd);
4671 
4672 	/* Free the RX lists. */
4673 	bge_free_rx_ring_std(sc);
4674 
4675 	/* Free jumbo RX list. */
4676 	if (sc->bge_flags & BGE_JUMBO_RING)
4677 		bge_free_rx_ring_jumbo(sc);
4678 
4679 	/* Free TX buffers. */
4680 	bge_free_tx_ring(sc);
4681 
4682 	/*
4683 	 * Isolate/power down the PHY, but leave the media selection
4684 	 * unchanged so that things will be put back to normal when
4685 	 * we bring the interface back up.
4686 	 */
4687 	if (!(sc->bge_flags & BGE_FIBER_TBI)) {
4688 		mii = &sc->bge_mii;
4689 		itmp = ifp->if_flags;
4690 		ifp->if_flags |= IFF_UP;
4691 		ifm = mii->mii_media.ifm_cur;
4692 		mtmp = ifm->ifm_media;
4693 		ifm->ifm_media = IFM_ETHER|IFM_NONE;
4694 		mii_mediachg(mii);
4695 		ifm->ifm_media = mtmp;
4696 		ifp->if_flags = itmp;
4697 	}
4698 
4699 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
4700 
4701 	if (!softonly) {
4702 		/* Clear MAC's link state (PHY may still have link UP). */
4703 		BGE_STS_CLRBIT(sc, BGE_STS_LINK);
4704 	}
4705 }
4706 
4707 void
4708 bge_link_upd(struct bge_softc *sc)
4709 {
4710 	struct ifnet *ifp = &sc->arpcom.ac_if;
4711 	struct mii_data *mii = &sc->bge_mii;
4712 	u_int32_t status;
4713 	int link;
4714 
4715 	/* Clear 'pending link event' flag */
4716 	BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT);
4717 
4718 	/*
4719 	 * Process link state changes.
4720 	 * Grrr. The link status word in the status block does
4721 	 * not work correctly on the BCM5700 rev AX and BX chips,
4722 	 * according to all available information. Hence, we have
4723 	 * to enable MII interrupts in order to properly obtain
4724 	 * async link changes. Unfortunately, this also means that
4725 	 * we have to read the MAC status register to detect link
4726 	 * changes, thereby adding an additional register access to
4727 	 * the interrupt handler.
4728 	 */
4730 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) {
4731 		status = CSR_READ_4(sc, BGE_MAC_STS);
4732 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
4733 			mii_pollstat(mii);
4734 
4735 			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
4736 			    mii->mii_media_status & IFM_ACTIVE &&
4737 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
4738 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
4739 			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
4740 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
4741 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
4742 				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
4743 
4744 			/* Clear the interrupt */
4745 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
4746 			    BGE_EVTENB_MI_INTERRUPT);
4747 			bge_miibus_readreg(&sc->bge_dev, sc->bge_phy_addr,
4748 			    BRGPHY_MII_ISR);
4749 			bge_miibus_writereg(&sc->bge_dev, sc->bge_phy_addr,
4750 			    BRGPHY_MII_IMR, BRGPHY_INTRS);
4751 		}
4752 		return;
4753 	}
4754 
4755 	if (sc->bge_flags & BGE_FIBER_TBI) {
4756 		status = CSR_READ_4(sc, BGE_MAC_STS);
4757 		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
4758 			if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
4759 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
4760 				if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
4761 					BGE_CLRBIT(sc, BGE_MAC_MODE,
4762 					    BGE_MACMODE_TBI_SEND_CFGS);
4763 				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
4764 				status = CSR_READ_4(sc, BGE_MAC_MODE);
4765 				link = (status & BGE_MACMODE_HALF_DUPLEX) ?
4766 				    LINK_STATE_HALF_DUPLEX :
4767 				    LINK_STATE_FULL_DUPLEX;
4768 				ifp->if_baudrate = IF_Gbps(1);
4769 				if (ifp->if_link_state != link) {
4770 					ifp->if_link_state = link;
4771 					if_link_state_change(ifp);
4772 				}
4773 			}
4774 		} else if (BGE_STS_BIT(sc, BGE_STS_LINK)) {
4775 			BGE_STS_CLRBIT(sc, BGE_STS_LINK);
4776 			link = LINK_STATE_DOWN;
4777 			ifp->if_baudrate = 0;
4778 			if (ifp->if_link_state != link) {
4779 				ifp->if_link_state = link;
4780 				if_link_state_change(ifp);
4781 			}
4782 		}
4783 	} else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) {
4784 		/*
4785 		 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit
4786 		 * in status word always set. Workaround this bug by reading
4787 		 * PHY link status directly.
4788 		 */
4789 		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK)?
4790 		    BGE_STS_LINK : 0;
4791 
4792 		if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) {
4793 			mii_pollstat(mii);
4794 
4795 			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
4796 			    mii->mii_media_status & IFM_ACTIVE &&
4797 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
4798 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
4799 			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
4800 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
4801 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
4802 				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
4803 		}
4804 	} else {
4805 		/*
4806 		 * For controllers that call mii_tick, we have to poll
4807 		 * link status.
4808 		 */
4809 		mii_pollstat(mii);
4810 	}
4811 
4812 	/* Clear the attention */
4813 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
4814 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
4815 	    BGE_MACSTAT_LINK_CHANGED);
4816 }
4817