/*	$OpenBSD: if_bge.c,v 1.311 2012/07/04 13:24:41 kettenis Exp $	*/

/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_bge.c,v 1.25 2002/11/14 23:54:49 sam Exp $
 */

/*
 * Broadcom BCM57xx/BCM590x family ethernet driver for OpenBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, Jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#ifdef __sparc64__
#include <sparc64/autoconf.h>
#include <dev/ofw/openfirm.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bgereg.h>

#define ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */

const struct bge_revision * bge_lookup_rev(u_int32_t);
int bge_probe(struct device *, void *, void *);
void bge_attach(struct device *, struct device *, void *);
int bge_activate(struct device *, int);

struct cfattach bge_ca = {
	sizeof(struct bge_softc), bge_probe, bge_attach, NULL, bge_activate
};

struct cfdriver bge_cd = {
	NULL, "bge", DV_IFNET
};

void bge_txeof(struct bge_softc *);
void bge_rxeof(struct bge_softc *);

void bge_tick(void *);
void bge_stats_update(struct bge_softc *);
void bge_stats_update_regs(struct bge_softc *);
int bge_cksum_pad(struct mbuf *);
int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *);
int bge_compact_dma_runt(struct mbuf *);

int bge_intr(void *);
void bge_start(struct ifnet *);
int bge_ioctl(struct ifnet *, u_long, caddr_t);
void bge_init(void *);
void bge_stop_block(struct bge_softc *, bus_size_t, u_int32_t);
void bge_stop(struct bge_softc *);
void bge_watchdog(struct ifnet *);
int bge_ifmedia_upd(struct ifnet *);
void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

u_int8_t bge_nvram_getbyte(struct bge_softc *, int, u_int8_t *);
int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *);
int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);

void bge_iff(struct bge_softc *);

int bge_newbuf_jumbo(struct bge_softc *, int);
int bge_init_rx_ring_jumbo(struct bge_softc *);
void bge_fill_rx_ring_jumbo(struct bge_softc *);
void bge_free_rx_ring_jumbo(struct bge_softc *);

int bge_newbuf(struct bge_softc *, int);
int bge_init_rx_ring_std(struct bge_softc *);
void bge_rxtick(void *);
void bge_fill_rx_ring_std(struct bge_softc *);
void bge_free_rx_ring_std(struct bge_softc *);

void bge_free_tx_ring(struct bge_softc *);
int bge_init_tx_ring(struct bge_softc *);

void bge_chipinit(struct bge_softc *);
int bge_blockinit(struct bge_softc *);

u_int32_t bge_readmem_ind(struct bge_softc *, int);
void bge_writemem_ind(struct bge_softc *, int, int);
void bge_writereg_ind(struct bge_softc *, int, int);
void bge_writembx(struct bge_softc *, int, int);

int bge_miibus_readreg(struct device *, int, int);
void bge_miibus_writereg(struct device *, int, int, int);
void bge_miibus_statchg(struct device *);

void bge_reset(struct bge_softc *);
void bge_link_upd(struct bge_softc *);

#ifdef BGE_DEBUG
#define DPRINTF(x)	do { if (bgedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (bgedebug >= (n)) printf x; } while (0)
int	bgedebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
const struct pci_matchid bge_devices[] = {
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700 },
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701 },

	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000 },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001 },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1003 },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100 },

	{ PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701 },

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703_ALT },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5723 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761E },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761SE },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5764 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5784 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785F },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785G },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57760 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57780 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57788 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57790 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795 },

	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE4 },
	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE5 },
	{ PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PP250_450_LAN },

	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9D21 },

	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996 }
};

#define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_5705_PLUS)
#define BGE_IS_5750_PLUS(sc)		((sc)->bge_flags & BGE_5750_PLUS)
#define BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGE_5755_PLUS)
#define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_5700_FAMILY)
#define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_5714_FAMILY)
#define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_JUMBO_CAPABLE)

static const struct bge_revision {
	u_int32_t		br_chipid;
	const char		*br_name;
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
	{ BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
	{ BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
	{ BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
	{ BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
	{ BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
	{ BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
	{ BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
	{ BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
	{ BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
	/* the 5702 and 5703 share the same ASIC ID */
	{ BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" },
	{ BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" },
	{ BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" },
	{ BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" },
	{ BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" },
	{ BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
	{ BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
	{ BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
	{ BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
	{ BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
	{ BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
	{ BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
	{ BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
	{ BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
	{ BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
	{ BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
	{ BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
	{ BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
	{ BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
	{ BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
	{ BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
	{ BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
	{ BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
	{ BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
	{ BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
	{ BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
	{ BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
	{ BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
	{ BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
	{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
	{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
	{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
	{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
	{ BGE_CHIPID_BCM5755_C0, "BCM5755 C0" },
	{ BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
	{ BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
	{ BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
	{ BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
	/* the 5754 and 5787 share the same ASIC ID */
	{ BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
	{ BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
	{ BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
	{ BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
	{ BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
	{ BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
	{ BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },

	{ 0, NULL }
};

/*
 * Some defaults for major revisions, so that newer steppings
 * that we don't know about have a shot at working.
 */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700, "unknown BCM5700" },
	{ BGE_ASICREV_BCM5701, "unknown BCM5701" },
	/* 5702 and 5703 share the same ASIC ID */
	{ BGE_ASICREV_BCM5703, "unknown BCM5703" },
	{ BGE_ASICREV_BCM5704, "unknown BCM5704" },
	{ BGE_ASICREV_BCM5705, "unknown BCM5705" },
	{ BGE_ASICREV_BCM5750, "unknown BCM5750" },
	{ BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5752, "unknown BCM5752" },
	{ BGE_ASICREV_BCM5780, "unknown BCM5780" },
	{ BGE_ASICREV_BCM5714, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5755, "unknown BCM5755" },
	{ BGE_ASICREV_BCM5761, "unknown BCM5761" },
	{ BGE_ASICREV_BCM5784, "unknown BCM5784" },
	{ BGE_ASICREV_BCM5785, "unknown BCM5785" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
	{ BGE_ASICREV_BCM5906, "unknown BCM5906" },
	{ BGE_ASICREV_BCM57780, "unknown BCM57780" },
	{ BGE_ASICREV_BCM5717, "unknown BCM5717" },
	{ BGE_ASICREV_BCM57765, "unknown BCM57765" },

	{ 0, NULL }
};

u_int32_t
bge_readmem_ind(struct bge_softc *sc, int off)
{
	struct pci_attach_args	*pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
	return (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA));
}

void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{
	struct pci_attach_args	*pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val);
}

void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
	struct pci_attach_args	*pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val);
}

void
bge_writembx(struct bge_softc *sc, int off, int val)
{
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
}
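
/*
 * Note on the indirect accessors above: BGE_PCI_MEMWIN_BASEADDR selects
 * a window into NIC-internal memory and BGE_PCI_MEMWIN_DATA moves the
 * data through PCI config space, so internal RAM can be reached without
 * touching the register BAR.  A minimal sketch of how these helpers
 * compose (illustrative only, never compiled; the helper name is
 * hypothetical):
 */
#ifdef notdef
static void
bge_clearmem_ind(struct bge_softc *sc, int off, int len)
{
	int i;

	/* One 32-bit word at a time through the PCI memory window. */
	for (i = 0; i < len; i += sizeof(u_int32_t))
		bge_writemem_ind(sc, off + i, 0);
}
#endif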

u_int8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
{
	u_int32_t access, byte = 0;
	int i;

	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return (1);

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("%s: nvram read timed out\n", sc->bge_dev.dv_xname);
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

	*dest = (swap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return (0);
}
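
/*
 * The NVRAM interface always returns the 32-bit word covering the
 * requested address (addr & ~3); swap32() fixes the word's byte order
 * and the shift selects the lane.  Worked example: addr = 0x7e reads
 * the word at 0x7c and extracts lane 2, i.e. (swap32(word) >> 16) & 0xFF.
 */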

/*
 * Read a sequence of bytes from NVRAM.
 */

int
bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	u_int8_t byte = 0;

	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
		return (1);

	for (i = 0; i < cnt; i++) {
		err = bge_nvram_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
u_int8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
{
	int i;
	u_int32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return (0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	u_int8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}
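
/*
 * Typical use of bge_read_eeprom(): pulling the station address out of
 * the EEPROM when NVRAM can't provide one.  A hedged sketch -- the
 * helper name is hypothetical and the BGE_EE_MAC_OFFSET placement
 * follows the convention used elsewhere in this driver; never compiled:
 */
#ifdef notdef
static int
bge_get_eaddr_eeprom(struct bge_softc *sc, u_int8_t eaddr[ETHER_ADDR_LEN])
{
	/* The station address lives two bytes past BGE_EE_MAC_OFFSET. */
	return (bge_read_eeprom(sc, (caddr_t)eaddr,
	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN));
}
#endif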

int
bge_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	u_int32_t val, autopoll;
	int i;

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1. On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time. Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return (0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < 200; i++) {
		delay(1);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (i == 200) {
		printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return (0);

	return (val & 0xFFFF);
}
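
/*
 * The MI handshake above: a read is kicked off by writing BGE_MICMD_READ
 * plus the PHY and register numbers to BGE_MI_COMM, then polling until
 * BGE_MICOMM_BUSY clears.  The final read of BGE_MI_COMM carries the
 * result; BGE_MICOMM_READFAIL in it means no PHY answered, in which
 * case 0 is returned so MII probing just sees an empty address.
 */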

void
bge_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	u_int32_t autopoll;
	int i;

	/* Touching the PHY with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		DELAY(40);
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(10); /* 40 usec is supposed to be adequate */
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < 200; i++) {
		delay(1);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (i == 200) {
		printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
	}
}

void
bge_miibus_statchg(struct device *dev)
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	struct mii_data *mii = &sc->bge_mii;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) {
		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);

	/*
	 * 802.3x flow control
	 */
	if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
	else
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);

	if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
		BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
	else
		BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
}

/*
 * Initialize a standard receive ring descriptor.
 */
int
bge_newbuf(struct bge_softc *sc, int i)
{
	bus_dmamap_t		dmap = sc->bge_cdata.bge_rx_std_map[i];
	struct bge_rx_bd	*r = &sc->bge_rdata->bge_rx_std_ring[i];
	struct mbuf		*m;
	int			error;

	m = MCLGETI(NULL, M_DONTWAIT, &sc->arpcom.ac_if, MCLBYTES);
	if (!m)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
	    m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_dmatag, dmap, 0, dmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	sc->bge_cdata.bge_rx_std_chain[i] = m;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_POSTWRITE);

	BGE_HOSTADDR(r->bge_addr, dmap->dm_segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE);

	sc->bge_std_cnt++;

	return (0);
}
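
/*
 * Note the bus_dmamap_sync() discipline in bge_newbuf(): PREREAD on the
 * data map before the chip may DMA into the fresh cluster, POSTWRITE on
 * the ring map before the CPU rewrites the bge_rx_bd, and PREWRITE once
 * the descriptor has been filled in.
 */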

/*
 * Initialize a Jumbo receive ring descriptor.
 */
int
bge_newbuf_jumbo(struct bge_softc *sc, int i)
{
	bus_dmamap_t		dmap = sc->bge_cdata.bge_rx_jumbo_map[i];
	struct bge_ext_rx_bd	*r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	struct mbuf		*m;
	int			error;

	m = MCLGETI(NULL, M_DONTWAIT, &sc->arpcom.ac_if, BGE_JLEN);
	if (!m)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
	    m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_dmatag, dmap, 0, dmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
		i * sizeof (struct bge_ext_rx_bd),
	    sizeof (struct bge_ext_rx_bd),
	    BUS_DMASYNC_POSTWRITE);

	/*
	 * Fill in the extended RX buffer descriptor.
	 */
	r->bge_bd.bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
	r->bge_bd.bge_idx = i;
	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
	switch (dmap->dm_nsegs) {
	case 4:
		BGE_HOSTADDR(r->bge_addr3, dmap->dm_segs[3].ds_addr);
		r->bge_len3 = dmap->dm_segs[3].ds_len;
		/* FALLTHROUGH */
	case 3:
		BGE_HOSTADDR(r->bge_addr2, dmap->dm_segs[2].ds_addr);
		r->bge_len2 = dmap->dm_segs[2].ds_len;
		/* FALLTHROUGH */
	case 2:
		BGE_HOSTADDR(r->bge_addr1, dmap->dm_segs[1].ds_addr);
		r->bge_len1 = dmap->dm_segs[1].ds_len;
		/* FALLTHROUGH */
	case 1:
		BGE_HOSTADDR(r->bge_bd.bge_addr, dmap->dm_segs[0].ds_addr);
		r->bge_bd.bge_len = dmap->dm_segs[0].ds_len;
		break;
	default:
		panic("%s: %d segments", __func__, dmap->dm_nsegs);
	}

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
		i * sizeof (struct bge_ext_rx_bd),
	    sizeof (struct bge_ext_rx_bd),
	    BUS_DMASYNC_PREWRITE);

	sc->bge_jumbo_cnt++;

	return (0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. The ring is kept topped up by
 * bge_fill_rx_ring_std(), which replenishes it as clusters become
 * available and retries from a timeout when allocation stalls.
 */
int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i;

	if (ISSET(sc->bge_flags, BGE_RXRING_VALID))
		return (0);

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1, MCLBYTES, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
		    &sc->bge_cdata.bge_rx_std_map[i]) != 0) {
			printf("%s: unable to create dmamap for slot %d\n",
			    sc->bge_dev.dv_xname, i);
			goto uncreate;
		}
		bzero(&sc->bge_rdata->bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_std = BGE_STD_RX_RING_CNT - 1;
	sc->bge_std_cnt = 0;
	bge_fill_rx_ring_std(sc);

	SET(sc->bge_flags, BGE_RXRING_VALID);

	return (0);

uncreate:
	while (--i >= 0) {
		bus_dmamap_destroy(sc->bge_dmatag,
		    sc->bge_cdata.bge_rx_std_map[i]);
	}
	return (1);
}

void
bge_rxtick(void *arg)
{
	struct bge_softc *sc = arg;
	int s;

	s = splnet();
	if (ISSET(sc->bge_flags, BGE_RXRING_VALID) &&
	    sc->bge_std_cnt <= 8)
		bge_fill_rx_ring_std(sc);
	if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID) &&
	    sc->bge_jumbo_cnt <= 8)
		bge_fill_rx_ring_jumbo(sc);
	splx(s);
}

void
bge_fill_rx_ring_std(struct bge_softc *sc)
{
	int i;
	int post = 0;

	i = sc->bge_std;
	while (sc->bge_std_cnt < BGE_STD_RX_RING_CNT) {
		BGE_INC(i, BGE_STD_RX_RING_CNT);

		if (bge_newbuf(sc, i) != 0)
			break;

		sc->bge_std = i;
		post = 1;
	}

	if (post)
		bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	/*
	 * bge always needs more than 8 packets on the ring. If we can't
	 * do that now, then try again later.
	 */
	if (sc->bge_std_cnt <= 8)
		timeout_add(&sc->bge_rxtimeout, 1);
}
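
/*
 * Refill pattern: buffers are added ahead of the last posted producer
 * slot and the new index is written to the standard ring producer
 * mailbox once, at the end, so the chip picks up the whole batch in a
 * single doorbell.  If the ring can't be brought above the 8-buffer
 * low-water mark, bge_rxtick() retries the fill a tick later.
 */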

void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	bus_dmamap_t dmap;
	struct mbuf *m;
	int i;

	if (!ISSET(sc->bge_flags, BGE_RXRING_VALID))
		return;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		dmap = sc->bge_cdata.bge_rx_std_map[i];
		m = sc->bge_cdata.bge_rx_std_chain[i];
		if (m != NULL) {
			bus_dmamap_sync(sc->bge_dmatag, dmap, 0,
			    dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_dmatag, dmap);
			m_freem(m);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
		}
		bus_dmamap_destroy(sc->bge_dmatag, dmap);
		sc->bge_cdata.bge_rx_std_map[i] = NULL;
		bzero(&sc->bge_rdata->bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}

	CLR(sc->bge_flags, BGE_RXRING_VALID);
}

int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	volatile struct bge_rcb *rcb;
	int i;

	if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID))
		return (0);

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bus_dmamap_create(sc->bge_dmatag, BGE_JLEN, 4, BGE_JLEN, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
		    &sc->bge_cdata.bge_rx_jumbo_map[i]) != 0) {
			printf("%s: unable to create dmamap for slot %d\n",
			    sc->bge_dev.dv_xname, i);
			goto uncreate;
		}
		bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i],
		    sizeof(struct bge_ext_rx_bd));
	}

	sc->bge_jumbo = BGE_JUMBO_RX_RING_CNT - 1;
	sc->bge_jumbo_cnt = 0;
	bge_fill_rx_ring_jumbo(sc);

	SET(sc->bge_flags, BGE_JUMBO_RXRING_VALID);

	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags =
	    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	return (0);

uncreate:
	while (--i >= 0) {
		bus_dmamap_destroy(sc->bge_dmatag,
		    sc->bge_cdata.bge_rx_jumbo_map[i]);
	}
	return (1);
}

void
bge_fill_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;
	int post = 0;

	i = sc->bge_jumbo;
	while (sc->bge_jumbo_cnt < BGE_JUMBO_RX_RING_CNT) {
		BGE_INC(i, BGE_JUMBO_RX_RING_CNT);

		if (bge_newbuf_jumbo(sc, i) != 0)
			break;

		sc->bge_jumbo = i;
		post = 1;
	}

	if (post)
		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	/*
	 * bge always needs more than 8 packets on the ring. If we can't
	 * do that now, then try again later.
	 */
	if (sc->bge_jumbo_cnt <= 8)
		timeout_add(&sc->bge_rxtimeout, 1);
}

void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	bus_dmamap_t dmap;
	struct mbuf *m;
	int i;

	if (!ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID))
		return;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		dmap = sc->bge_cdata.bge_rx_jumbo_map[i];
		m = sc->bge_cdata.bge_rx_jumbo_chain[i];
		if (m != NULL) {
			bus_dmamap_sync(sc->bge_dmatag, dmap, 0,
			    dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_dmatag, dmap);
			m_freem(m);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		bus_dmamap_destroy(sc->bge_dmatag, dmap);
		sc->bge_cdata.bge_rx_jumbo_map[i] = NULL;
		bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i],
		    sizeof(struct bge_ext_rx_bd));
	}

	CLR(sc->bge_flags, BGE_JUMBO_RXRING_VALID);
}

void
bge_free_tx_ring(struct bge_softc *sc)
{
	int i;
	struct txdmamap_pool_entry *dma;

	if (!(sc->bge_flags & BGE_TXRING_VALID))
		return;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
			SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
					    link);
			sc->txdma[i] = 0;
		}
		bzero(&sc->bge_rdata->bge_tx_ring[i],
		    sizeof(struct bge_tx_bd));
	}

	while ((dma = SLIST_FIRST(&sc->txdma_list))) {
		SLIST_REMOVE_HEAD(&sc->txdma_list, link);
		bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
		free(dma, M_DEVBUF);
	}

	sc->bge_flags &= ~BGE_TXRING_VALID;
}

int
bge_init_tx_ring(struct bge_softc *sc)
{
	int i;
	bus_dmamap_t dmamap;
	struct txdmamap_pool_entry *dma;

	if (sc->bge_flags & BGE_TXRING_VALID)
		return (0);

	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	sc->bge_tx_prodidx = 0;
	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* NIC-memory send ring not used; initialize to zero. */
	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	SLIST_INIT(&sc->txdma_list);
	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (bus_dmamap_create(sc->bge_dmatag, BGE_JLEN,
		    BGE_NTXSEG, BGE_JLEN, 0, BUS_DMA_NOWAIT,
		    &dmamap))
			return (ENOBUFS);
		if (dmamap == NULL)
			panic("dmamap NULL in bge_init_tx_ring");
		dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
		if (dma == NULL) {
			printf("%s: can't alloc txdmamap_pool_entry\n",
			    sc->bge_dev.dv_xname);
			bus_dmamap_destroy(sc->bge_dmatag, dmamap);
			return (ENOMEM);
		}
		dma->dmamap = dmamap;
		SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
	}

	sc->bge_flags |= BGE_TXRING_VALID;

	return (0);
}

void
bge_iff(struct bge_softc *sc)
{
	struct arpcom		*ac = &sc->arpcom;
	struct ifnet		*ifp = &ac->ac_if;
	struct ether_multi	*enm;
	struct ether_multistep  step;
	u_int8_t		hashes[16];
	u_int32_t		h, rxmode;

	/* First, zot all the existing filters. */
	rxmode = CSR_READ_4(sc, BGE_RX_MODE) & ~BGE_RXMODE_RX_PROMISC;
	ifp->if_flags &= ~IFF_ALLMULTI;
	memset(hashes, 0x00, sizeof(hashes));

	if (ifp->if_flags & IFF_PROMISC) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxmode |= BGE_RXMODE_RX_PROMISC;
	} else if (ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		memset(hashes, 0xff, sizeof(hashes));
	} else {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

			setbit(hashes, h & 0x7F);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	bus_space_write_raw_region_4(sc->bge_btag, sc->bge_bhandle, BGE_MAR0,
	    hashes, sizeof(hashes));
	CSR_WRITE_4(sc, BGE_RX_MODE, rxmode);
}
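
/*
 * The multicast filter programmed above is a 128-bit hash table spread
 * across the BGE_MAR0 registers (hashes[] is 16 bytes): the low 7 bits
 * of the little-endian CRC32 of each address pick the bit.  E.g. an
 * address hashing to 0x43 (= 67) sets bit 3 of hashes[8].
 */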

/*
 * Do endian, PCI and DMA initialization.
 */
void
bge_chipinit(struct bge_softc *sc)
{
	struct pci_attach_args	*pa = &(sc->bge_pa);
	u_int32_t dma_rw_ctl;
	int i;

	/* Set endianness before we access any non-PCI registers. */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
	    BGE_INIT);

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);

	/*
	 * Set up the PCI DMA control register.
	 */
	dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
	    BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);

	if (sc->bge_flags & BGE_PCIE) {
		/* Read watermark not used, 128 bytes for write. */
		dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
	} else if (sc->bge_flags & BGE_PCIX) {
		/* PCI-X bus */
		if (BGE_IS_5714_FAMILY(sc)) {
			/* 256 bytes for read and write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);

			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
			else
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
		} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
			/* 1536 bytes for read, 384 bytes for write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
		} else {
			/* 384 bytes for read and write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
			    (0x0F);
		}

		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
			u_int32_t tmp;

			/* Set ONEDMA_ATONCE for hardware workaround. */
			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 6 || tmp == 7)
				dma_rw_ctl |=
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;

			/* Set PCI-X DMA write workaround. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
		}
	} else {
		/* Conventional PCI bus: 256 bytes for read and write. */
		dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
		    BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);

		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
			dma_rw_ctl |= 0x0F;
	}

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
		dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
		    BGE_PCIDMARWCTL_ASRT_ALL_BE;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
		    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
		    BGE_MODECTL_TX_NO_PHDR_CSUM);

	/*
	 * The BCM5701 B5 has a bug causing data corruption when using
	 * 64-bit DMA reads, which can be terminated early and then
	 * completed later as 32-bit accesses, in combination with
	 * certain bridges.
	 */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
	    sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);

	/*
	 * Disable memory write invalidate.  Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    PCI_COMMAND_INVALIDATE_ENABLE);

#ifdef __brokenalpha__
	/*
	 * Must ensure that we do not cross an 8K (bytes) boundary
	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx
	 */
	PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
	    BGE_PCI_READ_BNDRY_1024);
#endif

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
		DELAY(40);	/* XXX */

		/* Put PHY into ready state */
		BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
		CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
		DELAY(40);
	}
}
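
/*
 * Recap of the DMA read/write watermarks chosen in bge_chipinit(),
 * restating the per-bus comments above: PCIe keeps the default read
 * watermark and uses 128-byte writes; PCI-X uses 256 bytes each way on
 * the 5714 family, 1536/384 bytes on the 5704 and 384/384 bytes
 * otherwise; conventional PCI gets 256 bytes for both directions.
 */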

int
bge_blockinit(struct bge_softc *sc)
{
	volatile struct bge_rcb		*rcb;
	vaddr_t			rcb_addr;
	int			i;
	bge_hostaddr		taddr;
	u_int32_t		val;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Configure mbuf memory pool */
	if (BGE_IS_5700_FAMILY(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
		    BGE_BUFFPOOL_1);

		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
		else
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* Configure mbuf pool watermarks */
	/* new Broadcom docs strongly recommend these: */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
	} else if (BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);

		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
		}
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	}

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	CSR_WRITE_4(sc, BGE_BMAN_MODE,
	    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

	/* Poll for buffer manager start indication */
	for (i = 0; i < 2000; i++) {
		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
			break;
		DELAY(10);
	}

	if (i == 2000) {
		printf("%s: buffer manager failed to start\n",
		    sc->bge_dev.dv_xname);
		return (ENXIO);
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < 2000; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == 2000) {
		printf("%s: flow-through queue init failed\n",
		    sc->bge_dev.dv_xname);
		return (ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
	BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765)
		rcb->bge_maxlen_flags = (BGE_RCB_MAXLEN_FLAGS(512, 0) |
					(ETHER_MAX_DIX_LEN << 2));
	else if (BGE_IS_5705_PLUS(sc))
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	else
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(ETHER_MAX_DIX_LEN, 0);
	rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/*
	 * Initialize the Jumbo RX ring control block
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
		BGE_HOSTADDR(rcb->bge_hostaddr,
		    BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
		    BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;

		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR,
		    rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);

		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
		    offsetof(struct bge_ring_data, bge_info),
		    sizeof (struct bge_gib),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
		if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
		    sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
		    sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
			CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
			    (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
	}
	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring, but since we try to avoid filling the entire
	 * ring we set these to the minimal value of 8.  This needs to
	 * be done on several of the supported chip revisions anyway,
	 * to work around HW bugs.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, 8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 8);

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765) {
		CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4);
		CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4);
	}

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
		rcb_addr += sizeof(struct bge_rcb);
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
		    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	if (BGE_IS_5700_FAMILY(sc))
		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));

	/* Disable all unused RX return rings */
	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
			BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
		bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(u_int64_t))), 0);
		rcb_addr += sizeof(struct bge_rcb);
	}

	/* Initialize RX ring indexes */
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < 2000; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == 2000) {
		printf("%s: host coalescing engine failed to idle\n",
		    sc->bge_dev.dv_xname);
		return (ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if (BGE_IS_5700_FAMILY(sc)) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);

	/* Set up address of statistics block */
	if (BGE_IS_5700_FAMILY(sc)) {
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0);
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
			    BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));

		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
	}

	/* Set up address of status block */
	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);

	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if (BGE_IS_5700_FAMILY(sc))
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
	    BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
	    BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
	    BGE_MACMODE_FRMHDR_DMA_ENB;

	if (sc->bge_flags & BGE_PHY_FIBER_TBI)
	    val |= BGE_PORTMODE_TBI;
	else if (sc->bge_flags & BGE_PHY_FIBER_MII)
	    val |= BGE_PORTMODE_GMII;
	else
	    val |= BGE_PORTMODE_MII;

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, val);

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	if (BGE_IS_5700_FAMILY(sc))
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;

	/* Enable host coalescing bug fix. */
	if (BGE_IS_5755_PLUS(sc))
		val |= BGE_WDMAMODE_STATUS_TAG_FIX;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)
		val |= BGE_WDMAMODE_BURST_ALL_DATA;

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);

	val = BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717)
		val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
		val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
		       BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
		       BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;

	if (sc->bge_flags & BGE_PCIE)
		val |= BGE_RDMAMODE_FIFO_LONG_BURST;

	/* Turn on read DMA state machine */
	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if (BGE_IS_5700_FAMILY(sc))
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1704 
1705 	/* Turn on send BD completion state machine */
1706 	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1707 
1708 	val = BGE_SDCMODE_ENABLE;
1709 
1710 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
1711 		val |= BGE_SDCMODE_CDELAY;
1712 
1713 	/* Turn on send data completion state machine */
1714 	CSR_WRITE_4(sc, BGE_SDC_MODE, val);
1715 
1716 	/* Turn on send data initiator state machine */
1717 	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1718 
1719 	/* Turn on send BD initiator state machine */
1720 	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1721 
1722 	/* Turn on send BD selector state machine */
1723 	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1724 
1725 	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1726 	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1727 	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1728 
1729 	/* ack/clear link change events */
1730 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1731 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1732 	    BGE_MACSTAT_LINK_CHANGED);
1733 
1734 	/* Enable PHY auto polling (for MII/GMII only) */
1735 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
1736 		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1737  	} else {
1738 		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
1739 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1740 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700)
1741 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1742 			    BGE_EVTENB_MI_INTERRUPT);
1743 	}
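	/*
	 * In the MI_MODE write above, 10<<16 lands in the register's
	 * timing field (the bits above the mode flags) and paces how
	 * often the auto-poll logic reads the PHY; the exact clocking
	 * semantics are not documented here, but the value matches what
	 * other BSD bge(4) drivers program.
	 */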
1744 
1745 	/*
1746 	 * Clear any pending link state attention.
1747 	 * Otherwise some link state change events may be lost until attention
1748 	 * is cleared by bge_intr() -> bge_link_upd() sequence.
1749 	 * It's not necessary on newer BCM chips - perhaps enabling link
1750 	 * state change attentions implies clearing pending attention.
1751 	 */
1752 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1753 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1754 	    BGE_MACSTAT_LINK_CHANGED);
1755 
1756 	/* Enable link state change attentions. */
1757 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1758 
1759 	return (0);
1760 }
1761 
1762 const struct bge_revision *
1763 bge_lookup_rev(u_int32_t chipid)
1764 {
1765 	const struct bge_revision *br;
1766 
1767 	for (br = bge_revisions; br->br_name != NULL; br++) {
1768 		if (br->br_chipid == chipid)
1769 			return (br);
1770 	}
1771 
1772 	for (br = bge_majorrevs; br->br_name != NULL; br++) {
1773 		if (br->br_chipid == BGE_ASICREV(chipid))
1774 			return (br);
1775 	}
1776 
1777 	return (NULL);
1778 }
1779 
1780 /*
1781  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1782  * against our list and return a match if we find one.  Note that
1783  * although the controller contains VPD support and could in
1784  * principle report its own product name, this driver matches
1785  * purely by ID; the name printed at attach time comes from
1786  * bge_lookup_rev() instead.
1787  */
1788 int
1789 bge_probe(struct device *parent, void *match, void *aux)
1790 {
1791 	return (pci_matchbyid(aux, bge_devices, nitems(bge_devices)));
1792 }
1793 
1794 void
1795 bge_attach(struct device *parent, struct device *self, void *aux)
1796 {
1797 	struct bge_softc	*sc = (struct bge_softc *)self;
1798 	struct pci_attach_args	*pa = aux;
1799 	pci_chipset_tag_t	pc = pa->pa_pc;
1800 	const struct bge_revision *br;
1801 	pcireg_t		pm_ctl, memtype, subid, reg;
1802 	pci_intr_handle_t	ih;
1803 	const char		*intrstr = NULL;
1804 	bus_size_t		size;
1805 	bus_dma_segment_t	seg;
1806 	int			rseg, gotenaddr = 0, aspm_off;
1807 	u_int32_t		hwcfg = 0;
1808 	u_int32_t		mac_addr = 0;
1809 	u_int32_t		misccfg;
1810 	struct ifnet		*ifp;
1811 	caddr_t			kva;
1812 #ifdef __sparc64__
1813 	char			name[32];
1814 #endif
1815 
1816 	sc->bge_pa = *pa;
1817 
1818 	subid = pci_conf_read(pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
1819 
1820 	/*
1821 	 * Map control/status registers.
1822 	 */
1823 	DPRINTFN(5, ("Map control/status regs\n"));
1824 
1825 	DPRINTFN(5, ("pci_mapreg_map\n"));
1826 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0);
1827 	if (pci_mapreg_map(pa, BGE_PCI_BAR0, memtype, 0, &sc->bge_btag,
1828 	    &sc->bge_bhandle, NULL, &size, 0)) {
1829 		printf(": can't find mem space\n");
1830 		return;
1831 	}
1832 
1833 	DPRINTFN(5, ("pci_intr_map\n"));
1834 	if (pci_intr_map(pa, &ih)) {
1835 		printf(": couldn't map interrupt\n");
1836 		goto fail_1;
1837 	}
1838 
1839 	DPRINTFN(5, ("pci_intr_string\n"));
1840 	intrstr = pci_intr_string(pc, ih);
1841 
1842 	/*
1843 	 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
1844 	 * can clobber the chip's PCI config-space power control registers,
1845 	 * leaving the card in D3 powersave state.
1846 	 * We do not have memory-mapped registers in this state,
1847 	 * so force device into D0 state before starting initialization.
1848 	 */
1849 	pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
1850 	pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
1851 	pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */
1852 	pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
1853 	DELAY(1000);	/* 27 usec is allegedly sufficient */
1854 
1855 	/*
1856 	 * Save ASIC rev.
1857 	 */
1858 	sc->bge_chipid =
1859 	     (pci_conf_read(pc, pa->pa_tag, BGE_PCI_MISC_CTL)
1860 	      >> BGE_PCIMISCCTL_ASICREV_SHIFT);
1861 
1862 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
1863 		if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5717 ||
1864 		    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5718)
1865 			sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
1866 			    BGE_PCI_GEN2_PRODID_ASICREV);
1867 		else if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57761 ||
1868 			 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57765 ||
1869 			 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57781 ||
1870 			 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57785 ||
1871 			 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 ||
1872 			 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795)
1873 			sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
1874 			    BGE_PCI_GEN15_PRODID_ASICREV);
1875 		else
1876 			sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
1877 			    BGE_PCI_PRODID_ASICREV);
1878 	}
1879 
1880 	printf(", ");
1881 	br = bge_lookup_rev(sc->bge_chipid);
1882 	if (br == NULL)
1883 		printf("unknown ASIC (0x%x)", sc->bge_chipid);
1884 	else
1885 		printf("%s (0x%x)", br->br_name, sc->bge_chipid);
1886 
1887 	/*
1888 	 * PCI Express or PCI-X controller check.
1889 	 */
1890 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
1891 	    &aspm_off, NULL) != 0) {
1892 		/* Disable PCIe Active State Power Management (ASPM). */
1893 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
1894 		    aspm_off + PCI_PCIE_LCSR);
1895 		reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1);
1896 		pci_conf_write(pa->pa_pc, pa->pa_tag,
1897 		    aspm_off + PCI_PCIE_LCSR, reg);
1898 		sc->bge_flags |= BGE_PCIE;
1899 	} else {
1900 		if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
1901 		    BGE_PCISTATE_PCI_BUSMODE) == 0)
1902 			sc->bge_flags |= BGE_PCIX;
1903 	}
1904 
1905 	/*
1906 	 * SEEPROM check.
1907 	 */
1908 #ifdef __sparc64__
1909 	/*
1910 	 * Onboard interfaces on UltraSPARC systems generally don't
1911 	 * have a SEEPROM fitted.  These interfaces, and cards that
1912 	 * have FCode, are named "network" by the PROM, whereas cards
1913 	 * without FCode show up as "ethernet".  Since we don't really
1914 	 * need the information from the SEEPROM on cards that have
1915 	 * FCode it's fine to pretend they don't have one.
1916 	 */
1917 	if (OF_getprop(PCITAG_NODE(pa->pa_tag), "name", name,
1918 	    sizeof(name)) > 0 && strcmp(name, "network") == 0)
1919 		sc->bge_flags |= BGE_NO_EEPROM;
1920 #endif
1921 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
1922 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 ||
1923 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
1924 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
1925 		sc->bge_flags |= BGE_5700_FAMILY;
1926 
1927 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 ||
1928 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780 ||
1929 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714)
1930 		sc->bge_flags |= BGE_5714_FAMILY;
1931 
1932 	/* Intentionally exclude BGE_ASICREV_BCM5906 */
1933 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
1934 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
1935 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
1936 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
1937 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
1938 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787 ||
1939 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 ||
1940 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
1941 		sc->bge_flags |= BGE_5755_PLUS;
1942 
1943 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 ||
1944 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
1945 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 ||
1946 	    BGE_IS_5755_PLUS(sc) ||
1947 	    BGE_IS_5714_FAMILY(sc))
1948 		sc->bge_flags |= BGE_5750_PLUS;
1949 
1950 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 ||
1951 	    BGE_IS_5750_PLUS(sc))
1952 		sc->bge_flags |= BGE_5705_PLUS;
1953 
1954 	/*
1955 	 * When using the BCM5701 in PCI-X mode, data corruption has
1956 	 * been observed in the first few bytes of some received packets.
1957 	 * Aligning the packet buffer in memory eliminates the corruption.
1958 	 * Unfortunately, this misaligns the packet payloads.  On platforms
1959 	 * which do not support unaligned accesses, we will realign the
1960 	 * payloads by copying the received packets.
1961 	 */
1962 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
1963 	    sc->bge_flags & BGE_PCIX)
1964 		sc->bge_flags |= BGE_RX_ALIGNBUG;
1965 
1966 	if (BGE_IS_5700_FAMILY(sc))
1967 		sc->bge_flags |= BGE_JUMBO_CAPABLE;
1968 
1969 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
1970 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) &&
1971 	    PCI_VENDOR(subid) == DELL_VENDORID)
1972 		sc->bge_flags |= BGE_NO_3LED;
1973 
1974 	misccfg = CSR_READ_4(sc, BGE_MISC_CFG);
1975 	misccfg &= BGE_MISCCFG_BOARD_ID_MASK;
1976 
1977 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
1978 	    (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
1979 	     misccfg == BGE_MISCCFG_BOARD_ID_5788M))
1980 		sc->bge_flags |= BGE_IS_5788;
1981 
1982 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 &&
1983 	     (misccfg == 0x4000 || misccfg == 0x8000)) ||
1984 	    (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
1985 	     PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
1986 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 ||
1987 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
1988 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
1989 	    (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
1990 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F ||
1991 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F ||
1992 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
1993 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 ||
1994 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 ||
1995 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795 ||
1996 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
1997 		sc->bge_flags |= BGE_10_100_ONLY;
1998 
1999 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2000 	    (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2001 	     (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2002 	      sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
2003 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2004 		sc->bge_flags |= BGE_NO_ETH_WIRE_SPEED;
2005 
2006 	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2007 	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2008 		sc->bge_flags |= BGE_PHY_CRC_BUG;
2009 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX ||
2010 	    BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX)
2011 		sc->bge_flags |= BGE_PHY_ADC_BUG;
2012 	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2013 		sc->bge_flags |= BGE_PHY_5704_A0_BUG;
2014 
2015 	if ((BGE_IS_5705_PLUS(sc)) &&
2016 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 &&
2017 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
2018 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
2019 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57765 &&
2020 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780) {
2021 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2022 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2023 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2024 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) {
2025 			if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 &&
2026 			    PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756)
2027 				sc->bge_flags |= BGE_PHY_JITTER_BUG;
2028 			if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M)
2029 				sc->bge_flags |= BGE_PHY_ADJUST_TRIM;
2030 		} else
2031 			sc->bge_flags |= BGE_PHY_BER_BUG;
2032 	}
2033 
2034 	/* Try to reset the chip. */
2035 	DPRINTFN(5, ("bge_reset\n"));
2036 	bge_reset(sc);
2037 
2038 	bge_chipinit(sc);
2039 
2040 #ifdef __sparc64__
2041 	if (!gotenaddr) {
2042 		if (OF_getprop(PCITAG_NODE(pa->pa_tag), "local-mac-address",
2043 		    sc->arpcom.ac_enaddr, ETHER_ADDR_LEN) == ETHER_ADDR_LEN)
2044 			gotenaddr = 1;
2045 	}
2046 #endif
2047 
2048 	/*
2049 	 * Get station address from the EEPROM.
2050 	 */
2051 	if (!gotenaddr) {
2052 		mac_addr = bge_readmem_ind(sc, 0x0c14);
2053 		if ((mac_addr >> 16) == 0x484b) {
2054 			sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
2055 			sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
2056 			mac_addr = bge_readmem_ind(sc, 0x0c18);
2057 			sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
2058 			sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
2059 			sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
2060 			sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
2061 			gotenaddr = 1;
2062 		}
2063 	}
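	/*
	 * 0x484b is ASCII "HK", the signature the bootcode leaves at
	 * 0x0c14 in NIC shared memory when it has stashed a valid
	 * station address there: the low two bytes of that word plus
	 * all four bytes of the following word (0x0c18).
	 */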
2064 	if (!gotenaddr) {
2065 		int mac_offset = BGE_EE_MAC_OFFSET;
2066 
2067 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
2068 			mac_offset = BGE_EE_MAC_OFFSET_5906;
2069 
2070 		if (bge_read_nvram(sc, (caddr_t)&sc->arpcom.ac_enaddr,
2071 		    mac_offset + 2, ETHER_ADDR_LEN) == 0)
2072 			gotenaddr = 1;
2073 	}
2074 	if (!gotenaddr && (!(sc->bge_flags & BGE_NO_EEPROM))) {
2075 		if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
2076 		    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN) == 0)
2077 			gotenaddr = 1;
2078 	}
2079 
2080 #ifdef __sparc64__
2081 	if (!gotenaddr) {
2082 		extern void myetheraddr(u_char *);
2083 
2084 		myetheraddr(sc->arpcom.ac_enaddr);
2085 		gotenaddr = 1;
2086 	}
2087 #endif
2088 
2089 	if (!gotenaddr) {
2090 		printf(": failed to read station address\n");
2091 		goto fail_1;
2092 	}
2093 
2094 	/* Allocate the general information block and ring buffers. */
2095 	sc->bge_dmatag = pa->pa_dmat;
2096 	DPRINTFN(5, ("bus_dmamem_alloc\n"));
2097 	if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
2098 			     PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
2099 		printf(": can't alloc rx buffers\n");
2100 		goto fail_1;
2101 	}
2102 	DPRINTFN(5, ("bus_dmamem_map\n"));
2103 	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg,
2104 			   sizeof(struct bge_ring_data), &kva,
2105 			   BUS_DMA_NOWAIT)) {
2106 		printf(": can't map dma buffers (%lu bytes)\n",
2107 		    sizeof(struct bge_ring_data));
2108 		goto fail_2;
2109 	}
2110 	DPRINTFN(5, ("bus_dmamem_create\n"));
2111 	if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
2112 	    sizeof(struct bge_ring_data), 0,
2113 	    BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
2114 		printf(": can't create dma map\n");
2115 		goto fail_3;
2116 	}
2117 	DPRINTFN(5, ("bus_dmamem_load\n"));
2118 	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
2119 			    sizeof(struct bge_ring_data), NULL,
2120 			    BUS_DMA_NOWAIT)) {
2121 		goto fail_4;
2122 	}
2123 
2124 	DPRINTFN(5, ("bzero\n"));
2125 	sc->bge_rdata = (struct bge_ring_data *)kva;
2126 
2127 	bzero(sc->bge_rdata, sizeof(struct bge_ring_data));
2128 
2129 	/* Set default tuneable values. */
2130 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2131 	sc->bge_rx_coal_ticks = 150;
2132 	sc->bge_rx_max_coal_bds = 64;
2133 	sc->bge_tx_coal_ticks = 300;
2134 	sc->bge_tx_max_coal_bds = 400;
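	/*
	 * With these defaults the chip coalesces completions for up to
	 * 150 RX / 300 TX ticks, or until 64 RX / 400 TX descriptors
	 * have accumulated, whichever comes first (see the BGE_HCC_*
	 * writes in bge_blockinit()).  A latency-sensitive tuning might
	 * shrink the RX values, e.g. (sketch, intentionally disabled):
	 */
#ifdef notdef
	sc->bge_rx_coal_ticks = 25;	/* interrupt sooner on RX */
	sc->bge_rx_max_coal_bds = 16;
#endif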
2135 
2136 	/* 5705 limits RX return ring to 512 entries. */
2137 	if (BGE_IS_5700_FAMILY(sc) ||
2138 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2139 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765)
2140 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2141 	else
2142 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2143 
2144 	/* Set up ifnet structure */
2145 	ifp = &sc->arpcom.ac_if;
2146 	ifp->if_softc = sc;
2147 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2148 	ifp->if_ioctl = bge_ioctl;
2149 	ifp->if_start = bge_start;
2150 	ifp->if_watchdog = bge_watchdog;
2151 	IFQ_SET_MAXLEN(&ifp->if_snd, BGE_TX_RING_CNT - 1);
2152 	IFQ_SET_READY(&ifp->if_snd);
2153 
2154 	/* low water mark (lwm) must be greater than the replenish threshold */
2155 	m_clsetwms(ifp, MCLBYTES, 17, BGE_STD_RX_RING_CNT);
2156 	m_clsetwms(ifp, BGE_JLEN, 17, BGE_JUMBO_RX_RING_CNT);
2157 
2158 	DPRINTFN(5, ("bcopy\n"));
2159 	bcopy(sc->bge_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
2160 
2161 	ifp->if_capabilities = IFCAP_VLAN_MTU;
2162 
2163 #if NVLAN > 0
2164 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
2165 #endif
2166 
2167 	/*
2168 	 * 5700 B0 chips do not support checksumming correctly due
2169 	 * to hardware bugs.
2170 	 */
2171 	if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0)
2172 		ifp->if_capabilities |= IFCAP_CSUM_IPv4;
2173 #if 0	/* TCP/UDP checksum offload breaks with pf(4) */
2174 		ifp->if_capabilities |= IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4;
2175 #endif
2176 
2177 	if (BGE_IS_JUMBO_CAPABLE(sc))
2178 		ifp->if_hardmtu = BGE_JUMBO_MTU;
2179 
2180 	/*
2181 	 * Do MII setup.
2182 	 */
2183 	DPRINTFN(5, ("mii setup\n"));
2184 	sc->bge_mii.mii_ifp = ifp;
2185 	sc->bge_mii.mii_readreg = bge_miibus_readreg;
2186 	sc->bge_mii.mii_writereg = bge_miibus_writereg;
2187 	sc->bge_mii.mii_statchg = bge_miibus_statchg;
2188 
2189 	/*
2190 	 * Figure out what sort of media we have by checking the hardware
2191 	 * config word in the first 32K of internal NIC memory, or fall back to
2192 	 * examining the EEPROM if necessary.  Note: on some BCM5700 cards,
2193 	 * this value seems to be unset. If that's the case, we have to rely on
2194 	 * identifying the NIC by its PCI subsystem ID, as we do below for the
2195 	 * SysKonnect SK-9D41.
2196 	 */
2197 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2198 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2199 	else if (!(sc->bge_flags & BGE_NO_EEPROM)) {
2200 		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2201 		    sizeof(hwcfg))) {
2202 			printf(": failed to read media type\n");
2203 			goto fail_5;
2204 		}
2205 		hwcfg = ntohl(hwcfg);
2206 	}
2207 
2208 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
2209 	if (PCI_PRODUCT(subid) == SK_SUBSYSID_9D41 ||
2210 	    (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
2211 		if (BGE_IS_5700_FAMILY(sc))
2212 		    sc->bge_flags |= BGE_PHY_FIBER_TBI;
2213 		else
2214 		    sc->bge_flags |= BGE_PHY_FIBER_MII;
2215 	}
2216 
2217 	/* Hookup IRQ last. */
2218 	DPRINTFN(5, ("pci_intr_establish\n"));
2219 	sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc,
2220 	    sc->bge_dev.dv_xname);
2221 	if (sc->bge_intrhand == NULL) {
2222 		printf(": couldn't establish interrupt");
2223 		if (intrstr != NULL)
2224 			printf(" at %s", intrstr);
2225 		printf("\n");
2226 		goto fail_5;
2227 	}
2228 
2229 	/*
2230 	 * A Broadcom chip was detected. Inform the world.
2231 	 */
2232 	printf(": %s, address %s\n", intrstr,
2233 	    ether_sprintf(sc->arpcom.ac_enaddr));
2234 
2235 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
2236 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2237 		    bge_ifmedia_sts);
2238 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2239 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
2240 			    0, NULL);
2241 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2242 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2243 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2244 	} else {
2245 		int mii_flags;
2246 
2247 		/*
2248 		 * Do transceiver setup.
2249 		 */
2250 		ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
2251 			     bge_ifmedia_sts);
2252 		mii_flags = MIIF_DOPAUSE;
2253 		if (sc->bge_flags & BGE_PHY_FIBER_MII)
2254 			mii_flags |= MIIF_HAVEFIBER;
2255 		mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff,
2256 			   MII_PHY_ANY, MII_OFFSET_ANY, mii_flags);
2257 
2258 		if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) {
2259 			printf("%s: no PHY found!\n", sc->bge_dev.dv_xname);
2260 			ifmedia_add(&sc->bge_mii.mii_media,
2261 				    IFM_ETHER|IFM_MANUAL, 0, NULL);
2262 			ifmedia_set(&sc->bge_mii.mii_media,
2263 				    IFM_ETHER|IFM_MANUAL);
2264 		} else
2265 			ifmedia_set(&sc->bge_mii.mii_media,
2266 				    IFM_ETHER|IFM_AUTO);
2267 	}
2268 
2269 	/*
2270 	 * Call MI attach routine.
2271 	 */
2272 	if_attach(ifp);
2273 	ether_ifattach(ifp);
2274 
2275 	timeout_set(&sc->bge_timeout, bge_tick, sc);
2276 	timeout_set(&sc->bge_rxtimeout, bge_rxtick, sc);
2277 	return;
2278 
2279 fail_5:
2280 	bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map);
2281 
2282 fail_4:
2283 	bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
2284 
2285 fail_3:
2286 	bus_dmamem_unmap(sc->bge_dmatag, kva,
2287 	    sizeof(struct bge_ring_data));
2288 
2289 fail_2:
2290 	bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2291 
2292 fail_1:
2293 	bus_space_unmap(sc->bge_btag, sc->bge_bhandle, size);
2294 }
2295 
2296 int
2297 bge_activate(struct device *self, int act)
2298 {
2299 	struct bge_softc *sc = (struct bge_softc *)self;
2300 	struct ifnet *ifp = &sc->arpcom.ac_if;
2301 	int rv = 0;
2302 
2303 	switch (act) {
2304 	case DVACT_QUIESCE:
2305 		rv = config_activate_children(self, act);
2306 		break;
2307 	case DVACT_SUSPEND:
2308 		rv = config_activate_children(self, act);
2309 		if (ifp->if_flags & IFF_RUNNING)
2310 			bge_stop(sc);
2311 		break;
2312 	case DVACT_RESUME:
2313 		if (ifp->if_flags & IFF_UP)
2314 			bge_init(sc);
2315 		rv = config_activate_children(self, act);
2316 		break;
2317 	}
2318 	return (rv);
2319 }
2320 
2321 void
2322 bge_reset(struct bge_softc *sc)
2323 {
2324 	struct pci_attach_args *pa = &sc->bge_pa;
2325 	pcireg_t cachesize, command, pcistate, new_pcistate;
2326 	u_int32_t reset;
2327 	int i, val = 0;
2328 
2329 	/* Save some important PCI state. */
2330 	cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
2331 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD);
2332 	pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
2333 
2334 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
2335 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2336 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW);
2337 
2338 	/* Disable fastboot on controllers that support it. */
2339 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
2340 	    BGE_IS_5755_PLUS(sc))
2341 		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0);
2342 
2343 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
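	/*
	 * The (65<<1) component programs the Misc Config core-clock
	 * timing field; FreeBSD's if_bgereg.h names this value
	 * BGE_32BITTIME_66MHZ, i.e. 32-bit access timing for a 66MHz
	 * core clock.
	 */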
2344 
2345 	if (sc->bge_flags & BGE_PCIE) {
2346 		if (CSR_READ_4(sc, 0x7e2c) == 0x60) {
2347 			/* PCI Express 1.0 system */
2348 			CSR_WRITE_4(sc, 0x7e2c, 0x20);
2349 		}
2350 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2351 			/*
2352 			 * Prevent PCI Express link training
2353 			 * during global reset.
2354 			 */
2355 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2356 			reset |= (1<<29);
2357 		}
2358 	}
2359 
2360 	/*
2361 	 * Set GPHY Power Down Override to leave GPHY
2362 	 * powered up in D0 uninitialized.
2363 	 */
2364 	if (BGE_IS_5705_PLUS(sc))
2365 		reset |= BGE_MISCCFG_KEEP_GPHY_POWER;
2366 
2367 	/* Issue global reset */
2368 	bge_writereg_ind(sc, BGE_MISC_CFG, reset);
2369 
2370 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2371 		u_int32_t status, ctrl;
2372 
2373 		status = CSR_READ_4(sc, BGE_VCPU_STATUS);
2374 		CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2375 		    status | BGE_VCPU_STATUS_DRV_RESET);
2376 		ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2377 		CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2378 		    ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2379 
2380 		sc->bge_flags |= BGE_NO_EEPROM;
2381 	}
2382 
2383 	DELAY(1000);
2384 
2385 	if (sc->bge_flags & BGE_PCIE) {
2386 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2387 			pcireg_t v;
2388 
2389 			DELAY(500000); /* wait for link training to complete */
2390 			v = pci_conf_read(pa->pa_pc, pa->pa_tag, 0xc4);
2391 			pci_conf_write(pa->pa_pc, pa->pa_tag, 0xc4, v | (1<<15));
2392 		}
2393 
2394 		/*
2395 		 * Set PCI Express max payload size to 128 bytes
2396 		 * and clear error status.
2397 		 */
2398 		pci_conf_write(pa->pa_pc, pa->pa_tag,
2399 		    BGE_PCI_CONF_DEV_CTRL, 0xf5000);
2400 	}
2401 
2402 	/* Reset some of the PCI state that got zapped by reset */
2403 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
2404 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2405 	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW);
2406 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
2407 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);
2408 	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2409 
2410 	/* Enable memory arbiter. */
2411 	if (BGE_IS_5714_FAMILY(sc)) {
2412 		u_int32_t val;
2413 
2414 		val = CSR_READ_4(sc, BGE_MARB_MODE);
2415 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2416 	} else
2417 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2418 
2419  	/*
2420 	 * Prevent PXE restart: write a magic number to the
2421 	 * general communications memory at 0xB50.
2422 	 */
2423 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
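	/*
	 * BGE_MAGIC_NUMBER (0x4B657654, ASCII "KevT") doubles as the
	 * firmware handshake token: the bootcode acknowledges a
	 * completed reset by writing back its one's complement, which
	 * is what the poll below waits for.
	 */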
2424 
2425 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2426 		for (i = 0; i < BGE_TIMEOUT; i++) {
2427 			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
2428 			if (val & BGE_VCPU_STATUS_INIT_DONE)
2429 				break;
2430 			DELAY(100);
2431 		}
2432 
2433 		if (i >= BGE_TIMEOUT)
2434 			printf("%s: reset timed out\n", sc->bge_dev.dv_xname);
2435 	} else {
2436 		/*
2437 		 * Poll until we see 1's complement of the magic number.
2438 		 * This indicates that the firmware initialization
2439 		 * is complete.  We expect this to fail if no SEEPROM
2440 		 * is fitted.
2441 		 */
2442 		for (i = 0; i < BGE_TIMEOUT; i++) {
2443 			val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2444 			if (val == ~BGE_MAGIC_NUMBER)
2445 				break;
2446 			DELAY(10);
2447 		}
2448 
2449 		if (i >= BGE_TIMEOUT && (!(sc->bge_flags & BGE_NO_EEPROM)))
2450 			printf("%s: firmware handshake timed out\n",
2451 			   sc->bge_dev.dv_xname);
2452 	}
2453 
2454 	/*
2455 	 * XXX Wait for the value of the PCISTATE register to
2456 	 * return to its original pre-reset state. This is a
2457 	 * fairly good indicator of reset completion. If we don't
2458 	 * wait for the reset to fully complete, trying to read
2459 	 * from the device's non-PCI registers may yield garbage
2460 	 * results.
2461 	 */
2462 	for (i = 0; i < BGE_TIMEOUT; i++) {
2463 		new_pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag,
2464 		    BGE_PCI_PCISTATE);
2465 		if ((new_pcistate & ~BGE_PCISTATE_RESERVED) ==
2466 		    (pcistate & ~BGE_PCISTATE_RESERVED))
2467 			break;
2468 		DELAY(10);
2469 	}
2470 	if ((new_pcistate & ~BGE_PCISTATE_RESERVED) !=
2471 	    (pcistate & ~BGE_PCISTATE_RESERVED)) {
2472 		DPRINTFN(5, ("%s: pcistate failed to revert\n",
2473 		    sc->bge_dev.dv_xname));
2474 	}
2475 
2476 	/* Fix up byte swapping */
2477 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
2478 
2479 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2480 
2481 	/*
2482 	 * The 5704 in TBI mode apparently needs some special
2483 	 * adjustment to ensure the SERDES drive level is set
2484 	 * to 1.2V.
2485 	 */
2486 	if (sc->bge_flags & BGE_PHY_FIBER_TBI &&
2487 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
2488 		u_int32_t serdescfg;
2489 
2490 		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2491 		serdescfg = (serdescfg & ~0xFFF) | 0x880;
2492 		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2493 	}
2494 
2495 	if (sc->bge_flags & BGE_PCIE &&
2496 	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
2497 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
2498 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
2499 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57765) {
2500 		u_int32_t v;
2501 
2502 		/* Enable PCI Express bug fix */
2503 		v = CSR_READ_4(sc, 0x7c00);
2504 		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2505 	}
2506 	DELAY(10000);
2507 }
2508 
2509 /*
2510  * Frame reception handling. This is called if there's a frame
2511  * on the receive return list.
2512  *
2513  * Note: we have to be able to handle two possibilities here:
2514  * 1) the frame is from the jumbo receive ring
2515  * 2) the frame is from the standard receive ring
2516  */
2517 
2518 void
2519 bge_rxeof(struct bge_softc *sc)
2520 {
2521 	struct ifnet *ifp;
2522 	uint16_t rx_prod, rx_cons;
2523 	int stdcnt = 0, jumbocnt = 0;
2524 	bus_dmamap_t dmamap;
2525 	bus_addr_t offset, toff;
2526 	bus_size_t tlen;
2527 	int tosync;
2528 
2529 	rx_cons = sc->bge_rx_saved_considx;
2530 	rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx;
2531 
2532 	/* Nothing to do */
2533 	if (rx_cons == rx_prod)
2534 		return;
2535 
2536 	ifp = &sc->arpcom.ac_if;
2537 
2538 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2539 	    offsetof(struct bge_ring_data, bge_status_block),
2540 	    sizeof (struct bge_status_block),
2541 	    BUS_DMASYNC_POSTREAD);
2542 
2543 	offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
2544 	tosync = rx_prod - rx_cons;
2545 
2546 	toff = offset + (rx_cons * sizeof (struct bge_rx_bd));
2547 
2548 	if (tosync < 0) {
2549 		tlen = (sc->bge_return_ring_cnt - rx_cons) *
2550 		    sizeof (struct bge_rx_bd);
2551 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2552 		    toff, tlen, BUS_DMASYNC_POSTREAD);
2553 		tosync = -tosync;
2554 	}
2555 
2556 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2557 	    offset, tosync * sizeof (struct bge_rx_bd),
2558 	    BUS_DMASYNC_POSTREAD);
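	/*
	 * A negative count above means the newly returned descriptors
	 * wrapped past the end of the ring, so the region is synced in
	 * two pieces: from rx_cons to the end of the ring first, then
	 * from the start of the ring forward.
	 */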
2559 
2560 	while (rx_cons != rx_prod) {
2561 		struct bge_rx_bd	*cur_rx;
2562 		u_int32_t		rxidx;
2563 		struct mbuf		*m = NULL;
2564 
2565 		cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons];
2566 
2567 		rxidx = cur_rx->bge_idx;
2568 		BGE_INC(rx_cons, sc->bge_return_ring_cnt);
2569 
2570 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2571 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2572 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2573 
2574 			jumbocnt++;
2575 			sc->bge_jumbo_cnt--;
2576 
2577 			dmamap = sc->bge_cdata.bge_rx_jumbo_map[rxidx];
2578 			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
2579 			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2580 			bus_dmamap_unload(sc->bge_dmatag, dmamap);
2581 
2582 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2583 				m_freem(m);
2584 				continue;
2585 			}
2586 		} else {
2587 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2588 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2589 
2590 			stdcnt++;
2591 			sc->bge_std_cnt--;
2592 
2593 			dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
2594 			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
2595 			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2596 			bus_dmamap_unload(sc->bge_dmatag, dmamap);
2597 
2598 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2599 				m_freem(m);
2600 				continue;
2601 			}
2602 		}
2603 
2604 		ifp->if_ipackets++;
2605 #ifdef __STRICT_ALIGNMENT
2606 		/*
2607 		 * The i386 allows unaligned accesses, but for other
2608 		 * platforms we must make sure the payload is aligned.
2609 		 */
2610 		if (sc->bge_flags & BGE_RX_ALIGNBUG) {
2611 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2612 			    cur_rx->bge_len);
2613 			m->m_data += ETHER_ALIGN;
2614 		}
2615 #endif
2616 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2617 		m->m_pkthdr.rcvif = ifp;
2618 
2619 		/*
2620 		 * 5700 B0 chips do not support checksumming correctly due
2621 		 * to hardware bugs.
2622 		 */
2623 		if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) {
2624 			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2625 				if (cur_rx->bge_ip_csum == 0xFFFF)
2626 					m->m_pkthdr.csum_flags |=
2627 					    M_IPV4_CSUM_IN_OK;
2628 				else
2629 					m->m_pkthdr.csum_flags |=
2630 					    M_IPV4_CSUM_IN_BAD;
2631 			}
2632 			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
2633 			    m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
2634 				if (cur_rx->bge_tcp_udp_csum == 0xFFFF)
2635 					m->m_pkthdr.csum_flags |=
2636 					    M_TCP_CSUM_IN_OK|M_UDP_CSUM_IN_OK;
2637 			}
2638 		}
2639 
2640 #if NVLAN > 0
2641 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2642 			m->m_pkthdr.ether_vtag = cur_rx->bge_vlan_tag;
2643 			m->m_flags |= M_VLANTAG;
2644 		}
2645 #endif
2646 
2647 #if NBPFILTER > 0
2648 		/*
2649 		 * Handle BPF listeners. Let the BPF user see the packet.
2650 		 */
2651 		if (ifp->if_bpf)
2652 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
2653 #endif
2654 
2655 		ether_input_mbuf(ifp, m);
2656 	}
2657 
2658 	sc->bge_rx_saved_considx = rx_cons;
2659 	bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2660 	if (stdcnt)
2661 		bge_fill_rx_ring_std(sc);
2662 	if (jumbocnt)
2663 		bge_fill_rx_ring_jumbo(sc);
2664 }
2665 
2666 void
2667 bge_txeof(struct bge_softc *sc)
2668 {
2669 	struct bge_tx_bd *cur_tx = NULL;
2670 	struct ifnet *ifp;
2671 	struct txdmamap_pool_entry *dma;
2672 	bus_addr_t offset, toff;
2673 	bus_size_t tlen;
2674 	int tosync;
2675 	struct mbuf *m;
2676 
2677 	/* Nothing to do */
2678 	if (sc->bge_tx_saved_considx ==
2679 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx)
2680 		return;
2681 
2682 	ifp = &sc->arpcom.ac_if;
2683 
2684 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2685 	    offsetof(struct bge_ring_data, bge_status_block),
2686 	    sizeof (struct bge_status_block),
2687 	    BUS_DMASYNC_POSTREAD);
2688 
2689 	offset = offsetof(struct bge_ring_data, bge_tx_ring);
2690 	tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
2691 	    sc->bge_tx_saved_considx;
2692 
2693 	toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
2694 
2695 	if (tosync < 0) {
2696 		tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
2697 		    sizeof (struct bge_tx_bd);
2698 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2699 		    toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2700 		tosync = -tosync;
2701 	}
2702 
2703 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2704 	    offset, tosync * sizeof (struct bge_tx_bd),
2705 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2706 
2707 	/*
2708 	 * Go through our tx ring and free mbufs for those
2709 	 * frames that have been sent.
2710 	 */
2711 	while (sc->bge_tx_saved_considx !=
2712 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
2713 		u_int32_t		idx = 0;
2714 
2715 		idx = sc->bge_tx_saved_considx;
2716 		cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2717 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2718 			ifp->if_opackets++;
2719 		m = sc->bge_cdata.bge_tx_chain[idx];
2720 		if (m != NULL) {
2721 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2722 			dma = sc->txdma[idx];
2723 			bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
2724 			    dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2725 			bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
2726 			SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
2727 			sc->txdma[idx] = NULL;
2728 
2729 			m_freem(m);
2730 		}
2731 		sc->bge_txcnt--;
2732 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2733 	}
2734 
2735 	if (sc->bge_txcnt < BGE_TX_RING_CNT - 16)
2736 		ifp->if_flags &= ~IFF_OACTIVE;
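	/*
	 * The 16-descriptor slack mirrors the check in bge_start():
	 * OACTIVE is only cleared once enough ring entries are free
	 * that another packet's worth of DMA segments can be queued.
	 */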
2737 	if (sc->bge_txcnt == 0)
2738 		ifp->if_timer = 0;
2739 }
2740 
2741 int
2742 bge_intr(void *xsc)
2743 {
2744 	struct bge_softc *sc;
2745 	struct ifnet *ifp;
2746 	u_int32_t statusword;
2747 
2748 	sc = xsc;
2749 	ifp = &sc->arpcom.ac_if;
2750 
2751 	/* It is possible for the interrupt to arrive before
2752 	 * the status block has been updated.  Reading the PCI
2753 	 * state register will confirm whether the interrupt is
2754 	 * ours and will also flush the status block.
2755 	 */
2756 
2757 	/* read status word from status block */
2758 	statusword = sc->bge_rdata->bge_status_block.bge_status;
2759 
2760 	if ((statusword & BGE_STATFLAG_UPDATED) ||
2761 	    (!(CSR_READ_4(sc, BGE_PCI_PCISTATE) & BGE_PCISTATE_INTR_NOT_ACTIVE))) {
2762 
2763 		/* Ack interrupt and stop others from occurring. */
2764 		bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
2765 
2766 		/* clear status word */
2767 		sc->bge_rdata->bge_status_block.bge_status = 0;
2768 
2769 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2770 		    statusword & BGE_STATFLAG_LINKSTATE_CHANGED ||
2771 		    BGE_STS_BIT(sc, BGE_STS_LINK_EVT))
2772 			bge_link_upd(sc);
2773 
2774 		if (ifp->if_flags & IFF_RUNNING) {
2775 			/* Check RX return ring producer/consumer */
2776 			bge_rxeof(sc);
2777 
2778 			/* Check TX ring producer/consumer */
2779 			bge_txeof(sc);
2780 		}
2781 
2782 		/* Re-enable interrupts. */
2783 		bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
2784 
2785 		bge_start(ifp);
2786 
2787 		return (1);
2788 	} else
2789 		return (0);
2790 }
2791 
2792 void
2793 bge_tick(void *xsc)
2794 {
2795 	struct bge_softc *sc = xsc;
2796 	struct mii_data *mii = &sc->bge_mii;
2797 	int s;
2798 
2799 	s = splnet();
2800 
2801 	if (BGE_IS_5705_PLUS(sc))
2802 		bge_stats_update_regs(sc);
2803 	else
2804 		bge_stats_update(sc);
2805 
2806 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
2807 		/*
2808 		 * Since auto-polling can't be used in TBI mode, we poll
2809 		 * the link status manually.  Here we register a pending
2810 		 * link event and trigger an interrupt.
2811 		 */
2812 		BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
2813 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
2814 	} else {
2815 		/*
2816 		 * Do not touch PHY if we have link up. This could break
2817 		 * IPMI/ASF mode or produce extra input errors.
2818 		 * (extra input errors were reported for bcm5701 & bcm5704).
2819 		 */
2820 		if (!BGE_STS_BIT(sc, BGE_STS_LINK))
2821 			mii_tick(mii);
2822 	}
2823 
2824 	timeout_add_sec(&sc->bge_timeout, 1);
2825 
2826 	splx(s);
2827 }
2828 
2829 void
2830 bge_stats_update_regs(struct bge_softc *sc)
2831 {
2832 	struct ifnet *ifp = &sc->arpcom.ac_if;
2833 
2834 	ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
2835 	    offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
2836 
2837 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
2838 
2839 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
2840 
2841 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
2842 }
2843 
2844 void
2845 bge_stats_update(struct bge_softc *sc)
2846 {
2847 	struct ifnet *ifp = &sc->arpcom.ac_if;
2848 	bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2849 	u_int32_t cnt;
2850 
2851 #define READ_STAT(sc, stats, stat) \
2852 	  CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
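	/*
	 * Each statistic is a free-running 32-bit counter in the chip;
	 * subtracting the previously saved value in unsigned arithmetic
	 * yields the correct delta even across counter wrap, e.g.:
	 *
	 *	cnt = 0x00000005, saved = 0xfffffffe
	 *	(u_int32_t)(cnt - saved) == 7
	 */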
2853 
2854 	cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
2855 	ifp->if_collisions += (u_int32_t)(cnt - sc->bge_tx_collisions);
2856 	sc->bge_tx_collisions = cnt;
2857 
2858 	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
2859 	ifp->if_ierrors += (u_int32_t)(cnt - sc->bge_rx_discards);
2860 	sc->bge_rx_discards = cnt;
2861 
2862 	cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo);
2863 	ifp->if_ierrors += (u_int32_t)(cnt - sc->bge_rx_inerrors);
2864 	sc->bge_rx_inerrors = cnt;
2865 
2866 	cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo);
2867 	ifp->if_ierrors += (u_int32_t)(cnt - sc->bge_rx_overruns);
2868 	sc->bge_rx_overruns = cnt;
2869 
2870 	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
2871 	ifp->if_oerrors += (u_int32_t)(cnt - sc->bge_tx_discards);
2872 	sc->bge_tx_discards = cnt;
2873 
2874 #undef READ_STAT
2875 }
2876 
2877 /*
2878  * Compact outbound packets to work around a bug with DMA segments shorter than 8 bytes.
2879  */
2880 int
2881 bge_compact_dma_runt(struct mbuf *pkt)
2882 {
2883 	struct mbuf	*m, *prev, *n = NULL;
2884 	int 		totlen, newprevlen;
2885 
2886 	prev = NULL;
2887 	totlen = 0;
2888 
2889 	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
2890 		int mlen = m->m_len;
2891 		int shortfall = 8 - mlen;
2892 
2893 		totlen += mlen;
2894 		if (mlen == 0)
2895 			continue;
2896 		if (mlen >= 8)
2897 			continue;
2898 
2899 		/* If we get here, mbuf data is too small for the DMA engine.
2900 		 * Try to fix by shuffling data to prev or next in chain.
2901 		 * If that fails, do a compacting deep-copy of the whole chain.
2902 		 */
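		/* Three cases follow: (1) the runt fits in the trailing
		 * space of the previous mbuf; (2) the runt is writable
		 * and can be topped up from the next mbuf; (3) otherwise
		 * a fresh mbuf is allocated to merge the runt with data
		 * stolen from the tail of the previous mbuf.
		 */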
2903 
2904 		/* Internal frag. If fits in prev, copy it there. */
2905 		if (prev && M_TRAILINGSPACE(prev) >= m->m_len) {
2906 			bcopy(m->m_data, prev->m_data+prev->m_len, mlen);
2907 			prev->m_len += mlen;
2908 			m->m_len = 0;
2909 			/* XXX stitch chain */
2910 			prev->m_next = m_free(m);
2911 			m = prev;
2912 			continue;
2913 		} else if (m->m_next != NULL &&
2914 			   M_TRAILINGSPACE(m) >= shortfall &&
2915 			   m->m_next->m_len >= (8 + shortfall)) {
2916 			/* m is writable and the next mbuf has enough data; pull up. */
2917 
2918 			bcopy(m->m_next->m_data, m->m_data+m->m_len, shortfall);
2919 			m->m_len += shortfall;
2920 			m->m_next->m_len -= shortfall;
2921 			m->m_next->m_data += shortfall;
2922 		} else if (m->m_next == NULL || 1) {
2923 			/* Got a runt at the very end of the packet.
2924 			 * Borrow data from the tail of the preceding mbuf and
2925 			 * shorten that mbuf in place.  (The original data is
2926 			 * still valid, so we can do this even if prev is not writable.)
2927 			 */
2928 
2929 			/* if we'd make prev a runt, just move all of its data. */
2930 #ifdef DEBUG
2931 			KASSERT(prev != NULL /*, ("runt but null PREV")*/);
2932 			KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
2933 #endif
2934 			if ((prev->m_len - shortfall) < 8)
2935 				shortfall = prev->m_len;
2936 
2937 			newprevlen = prev->m_len - shortfall;
2938 
2939 			MGET(n, M_NOWAIT, MT_DATA);
2940 			if (n == NULL)
2941 				return (ENOBUFS);
2942 			KASSERT(m->m_len + shortfall < MLEN
2943 				/*,
2944 				  ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
2945 
2946 			/* first copy the data we're stealing from prev */
2947 			bcopy(prev->m_data + newprevlen, n->m_data, shortfall);
2948 
2949 			/* update prev->m_len accordingly */
2950 			prev->m_len -= shortfall;
2951 
2952 			/* copy data from runt m */
2953 			bcopy(m->m_data, n->m_data + shortfall, m->m_len);
2954 
2955 			/* n holds what we stole from prev, plus m */
2956 			n->m_len = shortfall + m->m_len;
2957 
2958 			/* stitch n into chain and free m */
2959 			n->m_next = m->m_next;
2960 			prev->m_next = n;
2961 			/* KASSERT(m->m_next == NULL); */
2962 			m->m_next = NULL;
2963 			m_free(m);
2964 			m = n;	/* for continuing loop */
2965 		}
2966 	}
2967 	return (0);
2968 }
2969 
2970 /*
2971  * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
2972  * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
2973  * but when such padded frames employ the bge IP/TCP checksum offload,
2974  * the hardware checksum assist gives incorrect results (possibly
2975  * from incorporating its own padding into the UDP/TCP checksum; who knows).
2976  * If we pad such runts with zeros, the onboard checksum comes out correct.
2977  */
2978 int
2979 bge_cksum_pad(struct mbuf *m)
2980 {
2981 	int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
2982 	struct mbuf *last;
2983 
2984 	/* If there's only the packet-header and we can pad there, use it. */
2985 	if (m->m_pkthdr.len == m->m_len && M_TRAILINGSPACE(m) >= padlen) {
2986 		last = m;
2987 	} else {
2988 		/*
2989 		 * Walk packet chain to find last mbuf. We will either
2990 		 * pad there, or append a new mbuf and pad it.
2991 		 */
2992 		for (last = m; last->m_next != NULL; last = last->m_next);
2993 		if (M_TRAILINGSPACE(last) < padlen) {
2994 			/* Allocate new empty mbuf, pad it. Compact later. */
2995 			struct mbuf *n;
2996 
2997 			MGET(n, M_DONTWAIT, MT_DATA);
2998 			if (n == NULL)
2999 				return (ENOBUFS);
3000 			n->m_len = 0;
3001 			last->m_next = n;
3002 			last = n;
3003 		}
3004 	}
3005 
3006 	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
3007 	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
3008 	last->m_len += padlen;
3009 	m->m_pkthdr.len += padlen;
3010 
3011 	return (0);
3012 }
3013 
3014 /*
3015  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3016  * pointers to descriptors.
3017  */
3018 int
3019 bge_encap(struct bge_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
3020 {
3021 	struct bge_tx_bd	*f = NULL;
3022 	u_int32_t		frag, cur;
3023 	u_int16_t		csum_flags = 0;
3024 	struct txdmamap_pool_entry *dma;
3025 	bus_dmamap_t dmamap;
3026 	int			i = 0;
3027 
3028 	cur = frag = *txidx;
3029 
3030 	if (m_head->m_pkthdr.csum_flags) {
3031 		if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
3032 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3033 		if (m_head->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT |
3034 		    M_UDP_CSUM_OUT)) {
3035 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3036 			if (m_head->m_pkthdr.len < ETHER_MIN_NOPAD &&
3037 			    bge_cksum_pad(m_head) != 0)
3038 				return (ENOBUFS);
3039 		}
3040 	}
3041 
3042 	if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX))
3043 		goto doit;
3044 
3045 	/*
3046 	 * bcm5700 Revision B silicon cannot handle DMA descriptors
3047 	 * that reference fewer than eight bytes.  If we encounter a teeny
3048 	 * mbuf at the end of a chain, we can pad.  Otherwise, copy.
3049 	 */
3050 	if (bge_compact_dma_runt(m_head) != 0)
3051 		return (ENOBUFS);
3052 
3053 doit:
3054 	dma = SLIST_FIRST(&sc->txdma_list);
3055 	if (dma == NULL)
3056 		return (ENOBUFS);
3057 	dmamap = dma->dmamap;
3058 
3059 	/*
3060 	 * Start packing the mbufs in this chain into
3061 	 * the fragment pointers. Stop when we run out
3062 	 * of fragments or hit the end of the mbuf chain.
3063 	 */
3064 	if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
3065 	    BUS_DMA_NOWAIT))
3066 		return (ENOBUFS);
3067 
3068 	/* Check if we have enough free send BDs. */
3069 	if (sc->bge_txcnt + dmamap->dm_nsegs >= BGE_TX_RING_CNT)
3070 		goto fail_unload;
3071 
3072 	for (i = 0; i < dmamap->dm_nsegs; i++) {
3073 		f = &sc->bge_rdata->bge_tx_ring[frag];
3074 		if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
3075 			break;
3076 		BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
3077 		f->bge_len = dmamap->dm_segs[i].ds_len;
3078 		f->bge_flags = csum_flags;
3079 		f->bge_vlan_tag = 0;
3080 #if NVLAN > 0
3081 		if (m_head->m_flags & M_VLANTAG) {
3082 			f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3083 			f->bge_vlan_tag = m_head->m_pkthdr.ether_vtag;
3084 		}
3085 #endif
3086 		cur = frag;
3087 		BGE_INC(frag, BGE_TX_RING_CNT);
3088 	}
3089 
3090 	if (i < dmamap->dm_nsegs)
3091 		goto fail_unload;
3092 
3093 	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
3094 	    BUS_DMASYNC_PREWRITE);
3095 
3096 	if (frag == sc->bge_tx_saved_considx)
3097 		goto fail_unload;
3098 
3099 	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
3100 	sc->bge_cdata.bge_tx_chain[cur] = m_head;
3101 	SLIST_REMOVE_HEAD(&sc->txdma_list, link);
3102 	sc->txdma[cur] = dma;
3103 	sc->bge_txcnt += dmamap->dm_nsegs;
3104 
3105 	*txidx = frag;
3106 
3107 	return (0);
3108 
3109 fail_unload:
3110 	bus_dmamap_unload(sc->bge_dmatag, dmamap);
3111 
3112 	return (ENOBUFS);
3113 }
3114 
3115 /*
3116  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3117  * to the mbuf data regions directly in the transmit descriptors.
3118  */
3119 void
3120 bge_start(struct ifnet *ifp)
3121 {
3122 	struct bge_softc *sc;
3123 	struct mbuf *m_head;
3124 	u_int32_t prodidx;
3125 	int pkts;
3126 
3127 	sc = ifp->if_softc;
3128 
3129 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
3130 		return;
3131 	if (!BGE_STS_BIT(sc, BGE_STS_LINK))
3132 		return;
3133 
3134 	prodidx = sc->bge_tx_prodidx;
3135 
3136 	for (pkts = 0; !IFQ_IS_EMPTY(&ifp->if_snd);) {
3137 		if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
3138 			ifp->if_flags |= IFF_OACTIVE;
3139 			break;
3140 		}
3141 
3142 		IFQ_POLL(&ifp->if_snd, m_head);
3143 		if (m_head == NULL)
3144 			break;
3145 
3146 		/*
3147 		 * Pack the data into the transmit ring. If we
3148 		 * don't have room, set the OACTIVE flag and wait
3149 		 * for the NIC to drain the ring.
3150 		 */
3151 		if (bge_encap(sc, m_head, &prodidx)) {
3152 			ifp->if_flags |= IFF_OACTIVE;
3153 			break;
3154 		}
3155 
3156 		/* now we are committed to transmit the packet */
3157 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
3158 		pkts++;
3159 
3160 #if NBPFILTER > 0
3161 		/*
3162 		 * If there's a BPF listener, bounce a copy of this frame
3163 		 * to him.
3164 		 */
3165 		if (ifp->if_bpf)
3166 			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
3167 #endif
3168 	}
3169 	if (pkts == 0)
3170 		return;
3171 
3172 	/* Transmit */
3173 	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3174 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
3175 		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
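	/*
	 * The duplicated write above works around a 5700_BX mailbox
	 * erratum (FreeBSD tags the same workaround "5700 b2 errata");
	 * the first write may apparently be dropped by the chip.
	 */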
3176 
3177 	sc->bge_tx_prodidx = prodidx;
3178 
3179 	/*
3180 	 * Set a timeout in case the chip goes out to lunch.
3181 	 */
3182 	ifp->if_timer = 5;
3183 }
3184 
3185 void
3186 bge_init(void *xsc)
3187 {
3188 	struct bge_softc *sc = xsc;
3189 	struct ifnet *ifp;
3190 	u_int16_t *m;
3191 	u_int32_t rxmode;
3192 	int s;
3193 
3194 	s = splnet();
3195 
3196 	ifp = &sc->arpcom.ac_if;
3197 
3198 	/* Cancel pending I/O and flush buffers. */
3199 	bge_stop(sc);
3200 	bge_reset(sc);
3201 	bge_chipinit(sc);
3202 
3203 	/*
3204 	 * Init the various state machines, ring
3205 	 * control blocks and firmware.
3206 	 */
3207 	if (bge_blockinit(sc)) {
3208 		printf("%s: initialization failure\n", sc->bge_dev.dv_xname);
3209 		splx(s);
3210 		return;
3211 	}
3212 
3213 	/* Specify MRU. */
3214 	if (BGE_IS_JUMBO_CAPABLE(sc))
3215 		CSR_WRITE_4(sc, BGE_RX_MTU,
3216 			BGE_JUMBO_FRAMELEN + ETHER_VLAN_ENCAP_LEN);
3217 	else
3218 		CSR_WRITE_4(sc, BGE_RX_MTU,
3219 			ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
3220 
3221 	/* Load our MAC address. */
3222 	m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
3223 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3224 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3225 
3226 	if (!(ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)) {
3227 		/* Disable hardware decapsulation of VLAN frames. */
3228 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
3229 	}
3230 
3231 	/* Program promiscuous mode and multicast filters. */
3232 	bge_iff(sc);
3233 
3234 	/* Init RX ring. */
3235 	bge_init_rx_ring_std(sc);
3236 
3237 	/*
3238 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
3239 	 * memory to ensure that the chip has in fact read the first
3240 	 * entry of the ring.
3241 	 */
3242 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
3243 		u_int32_t		v, i;
3244 		for (i = 0; i < 10; i++) {
3245 			DELAY(20);
3246 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
3247 			if (v == (MCLBYTES - ETHER_ALIGN))
3248 				break;
3249 		}
3250 		if (i == 10)
3251 			printf("%s: 5705 A0 chip failed to load RX ring\n",
3252 			    sc->bge_dev.dv_xname);
3253 	}
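	/*
	 * The word polled above (offset 8 into the first standard ring
	 * descriptor in NIC memory) appears to hold that descriptor's
	 * index/length pair; once the chip has fetched the entry, it
	 * reads back as the buffer length we wrote,
	 * MCLBYTES - ETHER_ALIGN.
	 */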
3254 
3255 	/* Init Jumbo RX ring. */
3256 	if (BGE_IS_JUMBO_CAPABLE(sc))
3257 		bge_init_rx_ring_jumbo(sc);
3258 
3259 	/* Init our RX return ring index */
3260 	sc->bge_rx_saved_considx = 0;
3261 
3262 	/* Init our RX/TX stat counters. */
3263 	sc->bge_tx_collisions = 0;
3264 	sc->bge_rx_discards = 0;
3265 	sc->bge_rx_inerrors = 0;
3266 	sc->bge_rx_overruns = 0;
3267 	sc->bge_tx_discards = 0;
3268 
3269 	/* Init TX ring. */
3270 	bge_init_tx_ring(sc);
3271 
3272 	/* Turn on transmitter */
3273 	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3274 
3275 	rxmode = BGE_RXMODE_ENABLE;
3276 
3277 	if (BGE_IS_5755_PLUS(sc))
3278 		rxmode |= BGE_RXMODE_RX_IPV6_CSUM_ENABLE;
3279 
3280 	/* Turn on receiver */
3281 	BGE_SETBIT(sc, BGE_RX_MODE, rxmode);
3282 
3283 	CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3284 
3285 	/* Tell firmware we're alive. */
3286 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3287 
3288 	/* Enable host interrupts. */
3289 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3290 	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
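	/* Writing zero to the IRQ0 mailbox lets the chip assert interrupts. */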
3291 	bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3292 
3293 	bge_ifmedia_upd(ifp);
3294 
3295 	ifp->if_flags |= IFF_RUNNING;
3296 	ifp->if_flags &= ~IFF_OACTIVE;
3297 
3298 	splx(s);
3299 
3300 	timeout_add_sec(&sc->bge_timeout, 1);
3301 }
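
#if 0
/*
 * Editor's sketch (not part of the driver): the timeout_add_sec() call
 * above rearms a one-second periodic tick.  A minimal callback of that
 * shape, assuming a bge_tick()-style handler was registered against
 * sc->bge_timeout with timeout_set() at attach time; the name
 * bge_tick_sketch is hypothetical.
 */
void
bge_tick_sketch(void *xsc)
{
	struct bge_softc *sc = xsc;
	int s;

	s = splnet();
	/* Poll statistics and link state here. */
	splx(s);

	/* Rearm so the tick fires again in one second. */
	timeout_add_sec(&sc->bge_timeout, 1);
}
#endif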
3302 
3303 /*
3304  * Set media options.
3305  */
3306 int
3307 bge_ifmedia_upd(struct ifnet *ifp)
3308 {
3309 	struct bge_softc *sc = ifp->if_softc;
3310 	struct mii_data *mii = &sc->bge_mii;
3311 	struct ifmedia *ifm = &sc->bge_ifmedia;
3312 
3313 	/* If this is a 1000baseX NIC, enable the TBI port. */
3314 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3315 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3316 			return (EINVAL);
3317 		switch (IFM_SUBTYPE(ifm->ifm_media)) {
3318 		case IFM_AUTO:
3319 			/*
3320 			 * The BCM5704 ASIC appears to have a special
3321 			 * mechanism for programming the autoneg
3322 			 * advertisement registers in TBI mode.
3323 			 */
3324 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
3325 				u_int32_t sgdig;
3326 				sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
3327 				if (sgdig & BGE_SGDIGSTS_DONE) {
3328 					CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3329 					sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3330 					sgdig |= BGE_SGDIGCFG_AUTO |
3331 					    BGE_SGDIGCFG_PAUSE_CAP |
3332 					    BGE_SGDIGCFG_ASYM_PAUSE;
3333 					CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3334 					    sgdig | BGE_SGDIGCFG_SEND);
3335 					DELAY(5);
3336 					CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3337 				}
3338 			}
3339 			break;
3340 		case IFM_1000_SX:
3341 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3342 				BGE_CLRBIT(sc, BGE_MAC_MODE,
3343 				    BGE_MACMODE_HALF_DUPLEX);
3344 			} else {
3345 				BGE_SETBIT(sc, BGE_MAC_MODE,
3346 				    BGE_MACMODE_HALF_DUPLEX);
3347 			}
3348 			break;
3349 		default:
3350 			return (EINVAL);
3351 		}
3352 		/* XXX 802.3x flow control for 1000BASE-SX */
3353 		return (0);
3354 	}
3355 
3356 	BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
3357 	if (mii->mii_instance) {
3358 		struct mii_softc *miisc;
3359 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3360 			mii_phy_reset(miisc);
3361 	}
3362 	mii_mediachg(mii);
3363 
3364 	/*
3365 	 * Force an interrupt so that bge_link_upd() runs if needed
3366 	 * and any pending link state attention is cleared.  Without
3367 	 * this we would get no further link state interrupts: the link
3368 	 * would never come UP, and bge_start() could not transmit.
3369 	 * Previously the only way to recover was to receive a packet
3370 	 * and take an RX interrupt.
3371 	 */
3372 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
3373 	    sc->bge_flags & BGE_IS_5788)
3374 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3375 	else
3376 		BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
3377 
3378 	return (0);
3379 }
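
#if 0
/*
 * Editor's sketch (not part of the driver): bge_ifmedia_upd() runs at
 * the bottom of the SIOCSIFMEDIA path.  Userland, e.g. ifconfig(8),
 * reaches it roughly as follows; set_media_auto is a hypothetical
 * helper, sock is any datagram socket, error handling elided.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_media.h>
#include <string.h>

static int
set_media_auto(int sock, const char *ifname)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
	ifr.ifr_media = IFM_ETHER | IFM_AUTO;	/* autoselect */
	return (ioctl(sock, SIOCSIFMEDIA, &ifr));
}
#endif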
3380 
3381 /*
3382  * Report current media status.
3383  */
3384 void
3385 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3386 {
3387 	struct bge_softc *sc = ifp->if_softc;
3388 	struct mii_data *mii = &sc->bge_mii;
3389 
3390 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3391 		ifmr->ifm_status = IFM_AVALID;
3392 		ifmr->ifm_active = IFM_ETHER;
3393 		if (CSR_READ_4(sc, BGE_MAC_STS) &
3394 		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
3395 			ifmr->ifm_status |= IFM_ACTIVE;
3396 		} else {
3397 			ifmr->ifm_active |= IFM_NONE;
3398 			return;
3399 		}
3400 		ifmr->ifm_active |= IFM_1000_SX;
3401 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3402 			ifmr->ifm_active |= IFM_HDX;
3403 		else
3404 			ifmr->ifm_active |= IFM_FDX;
3405 		return;
3406 	}
3407 
3408 	mii_pollstat(mii);
3409 	ifmr->ifm_status = mii->mii_media_status;
3410 	ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
3411 	    sc->bge_flowflags;
3412 }
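
#if 0
/*
 * Editor's sketch (not part of the driver): the status filled in above
 * is what userland sees through SIOCGIFMEDIA.  A hypothetical query
 * helper, error handling elided.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_media.h>
#include <string.h>

static int
get_media_active(int sock, const char *ifname, int *active)
{
	struct ifmediareq ifmr;

	memset(&ifmr, 0, sizeof(ifmr));
	strlcpy(ifmr.ifm_name, ifname, sizeof(ifmr.ifm_name));
	if (ioctl(sock, SIOCGIFMEDIA, &ifmr) == -1)
		return (-1);
	/* e.g. IFM_ETHER|IFM_1000_SX|IFM_FDX on a fiber bge(4) */
	*active = ifmr.ifm_active;
	return (0);
}
#endif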
3413 
3414 int
3415 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3416 {
3417 	struct bge_softc *sc = ifp->if_softc;
3418 	struct ifaddr *ifa = (struct ifaddr *) data;
3419 	struct ifreq *ifr = (struct ifreq *) data;
3420 	int s, error = 0;
3421 	struct mii_data *mii;
3422 
3423 	s = splnet();
3424 
3425 	switch (command) {
3426 	case SIOCSIFADDR:
3427 		ifp->if_flags |= IFF_UP;
3428 		if (!(ifp->if_flags & IFF_RUNNING))
3429 			bge_init(sc);
3430 #ifdef INET
3431 		if (ifa->ifa_addr->sa_family == AF_INET)
3432 			arp_ifinit(&sc->arpcom, ifa);
3433 #endif /* INET */
3434 		break;
3435 
3436 	case SIOCSIFFLAGS:
3437 		if (ifp->if_flags & IFF_UP) {
3438 			if (ifp->if_flags & IFF_RUNNING)
3439 				error = ENETRESET;
3440 			else
3441 				bge_init(sc);
3442 		} else {
3443 			if (ifp->if_flags & IFF_RUNNING)
3444 				bge_stop(sc);
3445 		}
3446 		break;
3447 
3448 	case SIOCSIFMEDIA:
3449 		/* XXX Flow control is not supported for 1000BASE-SX */
3450 		if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3451 			ifr->ifr_media &= ~IFM_ETH_FMASK;
3452 			sc->bge_flowflags = 0;
3453 		}
3454 
3455 		/* Flow control requires full-duplex mode. */
3456 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3457 		    (ifr->ifr_media & IFM_FDX) == 0) {
3458 			ifr->ifr_media &= ~IFM_ETH_FMASK;
3459 		}
3460 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3461 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3462 				/* We can do both TXPAUSE and RXPAUSE. */
3463 				ifr->ifr_media |=
3464 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3465 			}
3466 			sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3467 		}
3468 		/* FALLTHROUGH */
3469 	case SIOCGIFMEDIA:
3470 		if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3471 			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
3472 			    command);
3473 		} else {
3474 			mii = &sc->bge_mii;
3475 			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
3476 			    command);
3477 		}
3478 		break;
3479 
3480 	default:
3481 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
3482 	}
3483 
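	/*
	 * ENETRESET from the cases above means only the RX filter state
	 * (promiscuous mode, multicast list) needs reprogramming, and
	 * only if the chip is running.
	 */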
3484 	if (error == ENETRESET) {
3485 		if (ifp->if_flags & IFF_RUNNING)
3486 			bge_iff(sc);
3487 		error = 0;
3488 	}
3489 
3490 	splx(s);
3491 	return (error);
3492 }
3493 
3494 void
3495 bge_watchdog(struct ifnet *ifp)
3496 {
3497 	struct bge_softc *sc;
3498 
3499 	sc = ifp->if_softc;
3500 
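	/*
	 * The stack decrements ifp->if_timer once per second and calls
	 * this routine when it reaches zero, i.e. when a transmission
	 * kicked off in bge_start() has not completed within five seconds.
	 */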
3501 	printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);
3502 
3503 	bge_init(sc);
3504 
3505 	ifp->if_oerrors++;
3506 }
3507 
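/*
 * Clear an enable bit in a block's mode register and spin until the
 * hardware acknowledges by reading the bit back as zero.
 */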
3508 void
3509 bge_stop_block(struct bge_softc *sc, bus_size_t reg, u_int32_t bit)
3510 {
3511 	int i;
3512 
3513 	BGE_CLRBIT(sc, reg, bit);
3514 
3515 	for (i = 0; i < BGE_TIMEOUT; i++) {
3516 		if ((CSR_READ_4(sc, reg) & bit) == 0)
3517 			return;
3518 		delay(100);
3519 	}
3520 
3521 	DPRINTFN(5, ("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
3522 	    sc->bge_dev.dv_xname, (u_long) reg, bit));
3523 }
3524 
3525 /*
3526  * Stop the adapter and free any mbufs allocated to the
3527  * RX and TX lists.
3528  */
3529 void
3530 bge_stop(struct bge_softc *sc)
3531 {
3532 	struct ifnet *ifp = &sc->arpcom.ac_if;
3533 	struct ifmedia_entry *ifm;
3534 	struct mii_data *mii;
3535 	int mtmp, itmp;
3536 
3537 	timeout_del(&sc->bge_timeout);
3538 	timeout_del(&sc->bge_rxtimeout);
3539 
3540 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3541 
3542 	/*
3543 	 * Disable all of the receiver blocks
3544 	 */
3545 	bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3546 	bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3547 	bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3548 	if (BGE_IS_5700_FAMILY(sc))
3549 		bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3550 	bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3551 	bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3552 	bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3553 
3554 	/*
3555 	 * Disable all of the transmit blocks
3556 	 */
3557 	bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3558 	bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3559 	bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3560 	bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3561 	bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3562 	if (BGE_IS_5700_FAMILY(sc))
3563 		bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3564 	bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3565 
3566 	/*
3567 	 * Shut down all of the memory managers and related
3568 	 * state machines.
3569 	 */
3570 	bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3571 	bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3572 	if (BGE_IS_5700_FAMILY(sc))
3573 		bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3574 
3575 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3576 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3577 
3578 	if (BGE_IS_5700_FAMILY(sc)) {
3579 		bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3580 		bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3581 	}
3582 
3583 	/* Disable host interrupts. */
3584 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
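	/* A non-zero IRQ0 mailbox value keeps chip interrupts masked. */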
3585 	bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3586 
3587 	/*
3588 	 * Tell firmware we're shutting down.
3589 	 */
3590 	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3591 
3592 	/* Free the RX lists. */
3593 	bge_free_rx_ring_std(sc);
3594 
3595 	/* Free jumbo RX list. */
3596 	if (BGE_IS_JUMBO_CAPABLE(sc))
3597 		bge_free_rx_ring_jumbo(sc);
3598 
3599 	/* Free TX buffers. */
3600 	bge_free_tx_ring(sc);
3601 
3602 	/*
3603 	 * Isolate/power down the PHY, but leave the media selection
3604 	 * unchanged so that things will be put back to normal when
3605 	 * we bring the interface back up.
3606 	 */
3607 	if (!(sc->bge_flags & BGE_PHY_FIBER_TBI)) {
3608 		mii = &sc->bge_mii;
3609 		itmp = ifp->if_flags;
3610 		ifp->if_flags |= IFF_UP;
3611 		ifm = mii->mii_media.ifm_cur;
3612 		mtmp = ifm->ifm_media;
3613 		ifm->ifm_media = IFM_ETHER|IFM_NONE;
3614 		mii_mediachg(mii);
3615 		ifm->ifm_media = mtmp;
3616 		ifp->if_flags = itmp;
3617 	}
3618 
3619 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3620 
3621 	/* Clear MAC's link state (PHY may still have link UP). */
3622 	BGE_STS_CLRBIT(sc, BGE_STS_LINK);
3623 }
3624 
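/*
 * Resolve a link state change signalled by the chip.  Three cases:
 * BCM5700 uses MII interrupts, fiber (TBI) cards use the PCS sync bit
 * in the MAC status register, and copper PHYs with MI auto-polling
 * enabled have their link bit read back directly.
 */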
3625 void
3626 bge_link_upd(struct bge_softc *sc)
3627 {
3628 	struct ifnet *ifp = &sc->arpcom.ac_if;
3629 	struct mii_data *mii = &sc->bge_mii;
3630 	u_int32_t status;
3631 	int link;
3632 
3633 	/* Clear 'pending link event' flag */
3634 	BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT);
3635 
3636 	/*
3637 	 * Process link state changes.
3638 	 * Grrr. The link status word in the status block does
3639 	 * not work correctly on the BCM5700 rev AX and BX chips,
3640 	 * according to all available information. Hence, we have
3641 	 * to enable MII interrupts in order to properly obtain
3642 	 * async link changes. Unfortunately, this also means that
3643 	 * we have to read the MAC status register to detect link
3644 	 * changes, thereby adding an additional register access to
3645 	 * the interrupt handler.
3647 	 */
3648 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) {
3649 		status = CSR_READ_4(sc, BGE_MAC_STS);
3650 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
3651 			mii_pollstat(mii);
3652 
3653 			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
3654 			    mii->mii_media_status & IFM_ACTIVE &&
3655 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
3656 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
3657 			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
3658 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
3659 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
3660 				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
3661 
3662 			/* Clear the interrupt */
3663 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
3664 			    BGE_EVTENB_MI_INTERRUPT);
3665 			bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR);
3666 			bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR,
3667 			    BRGPHY_INTRS);
3668 		}
3669 		return;
3670 	}
3671 
3672 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3673 		status = CSR_READ_4(sc, BGE_MAC_STS);
3674 		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3675 			if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
3676 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
3677 				if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
3678 					BGE_CLRBIT(sc, BGE_MAC_MODE,
3679 					    BGE_MACMODE_TBI_SEND_CFGS);
3680 				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3681 				status = CSR_READ_4(sc, BGE_MAC_MODE);
3682 				ifp->if_link_state =
3683 				    (status & BGE_MACMODE_HALF_DUPLEX) ?
3684 				    LINK_STATE_HALF_DUPLEX :
3685 				    LINK_STATE_FULL_DUPLEX;
3686 				if_link_state_change(ifp);
3687 				ifp->if_baudrate = IF_Gbps(1);
3688 			}
3689 		} else if (BGE_STS_BIT(sc, BGE_STS_LINK)) {
3690 			BGE_STS_CLRBIT(sc, BGE_STS_LINK);
3691 			ifp->if_link_state = LINK_STATE_DOWN;
3692 			if_link_state_change(ifp);
3693 			ifp->if_baudrate = 0;
3694 		}
3695 	/*
3696 	 * Discard link events for MII/GMII cards if MI auto-polling is
3697 	 * disabled.  This should not happen since the mii callouts are
3698 	 * locked now, but keep the check for debugging.
3699 	 */
3700 	} else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) {
3701 		/*
3702 		 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit
3703 		 * in status word always set. Workaround this bug by reading
3704 		 * PHY link status directly.
3705 		 */
3706 		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ?
3707 		    BGE_STS_LINK : 0;
3708 
3709 		if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) {
3710 			mii_pollstat(mii);
3711 
3712 			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
3713 			    mii->mii_media_status & IFM_ACTIVE &&
3714 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
3715 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
3716 			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
3717 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
3718 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
3719 				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
3720 		}
3721 	}
3722 
3723 	/* Clear the attention */
3724 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
3725 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
3726 	    BGE_MACSTAT_LINK_CHANGED);
3727 }
3728